FFmpegKit iOS / macOS / tvOS API 4.5
fftools_ffmpeg.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
26/*
27 * CHANGES 06.2020
28 * - ignoring signals implemented
29 * - cancel_operation() method signature updated with id
30 * - cancel by execution id implemented
31 *
32 * CHANGES 01.2020
33 * - ffprobe support changes
34 *
35 * CHANGES 12.2019
36 * - concurrent execution support
37 *
38 * CHANGES 08.2018
39 * --------------------------------------------------------
40 * - fftools_ prefix added to file name and parent headers
41 * - forward_report() method, report_callback function pointer and set_report_callback() setter
42 * method added to forward stats.
43 * - forward_report() call added from print_report()
44 * - cancel_operation() method added to trigger sigterm_handler
45 * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation
46 *
47 * CHANGES 07.2018
48 * --------------------------------------------------------
49 * - main() function renamed as ffmpeg_execute()
50 * - exit_program() implemented with setjmp
51 * - extern longjmp_value added to access exit code stored in exit_program()
52 * - ffmpeg_var_cleanup() method added
53 */
54
55#include "config.h"
56#include <ctype.h>
57#include <string.h>
58#include <math.h>
59#include <stdlib.h>
60#include <errno.h>
61#include <limits.h>
62#include <stdatomic.h>
63#include <stdint.h>
64
65#include "ffmpegkit_exception.h"
66
67#if HAVE_IO_H
68#include <io.h>
69#endif
70#if HAVE_UNISTD_H
71#include <unistd.h>
72#endif
73
74#include "libavformat/avformat.h"
75#include "libavdevice/avdevice.h"
76#include "libswresample/swresample.h"
77#include "libavutil/opt.h"
78#include "libavutil/channel_layout.h"
79#include "libavutil/parseutils.h"
80#include "libavutil/samplefmt.h"
81#include "libavutil/fifo.h"
82#include "libavutil/hwcontext.h"
83#include "libavutil/internal.h"
84#include "libavutil/intreadwrite.h"
85#include "libavutil/dict.h"
86#include "libavutil/display.h"
87#include "libavutil/mathematics.h"
88#include "libavutil/pixdesc.h"
89#include "libavutil/avstring.h"
90#include "libavutil/libm.h"
91#include "libavutil/imgutils.h"
92#include "libavutil/timestamp.h"
93#include "libavutil/bprint.h"
94#include "libavutil/time.h"
95#include "libavutil/thread.h"
96#include "libavutil/threadmessage.h"
97#include "libavcodec/mathops.h"
98#include "libavformat/os_support.h"
99
100# include "libavfilter/avfilter.h"
101# include "libavfilter/buffersrc.h"
102# include "libavfilter/buffersink.h"
103
104#if HAVE_SYS_RESOURCE_H
105#include <sys/time.h>
106#include <sys/types.h>
107#include <sys/resource.h>
108#elif HAVE_GETPROCESSTIMES
109#include <windows.h>
110#endif
111#if HAVE_GETPROCESSMEMORYINFO
112#include <windows.h>
113#include <psapi.h>
114#endif
115#if HAVE_SETCONSOLECTRLHANDLER
116#include <windows.h>
117#endif
118
119
120#if HAVE_SYS_SELECT_H
121#include <sys/select.h>
122#endif
123
124#if HAVE_TERMIOS_H
125#include <fcntl.h>
126#include <sys/ioctl.h>
127#include <sys/time.h>
128#include <termios.h>
129#elif HAVE_KBHIT
130#include <conio.h>
131#endif
132
133#include <time.h>
134
135#include "fftools_ffmpeg.h"
136#include "fftools_cmdutils.h"
137
138#include "libavutil/avassert.h"
139
140static FILE *vstats_file;
141
/* Names of the variables available inside -force_key_frames expressions
 * (frame number, forced counts/times, current time); NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
150
151typedef struct BenchmarkTimeStamps {
152 int64_t real_usec;
153 int64_t user_usec;
154 int64_t sys_usec;
156
157static void do_video_stats(OutputStream *ost, int frame_size);
159static int64_t getmaxrss(void);
161
162__thread int run_as_daemon = 0;
163__thread int nb_frames_dup = 0;
164__thread unsigned dup_warning = 1000;
165__thread int nb_frames_drop = 0;
166__thread int64_t decode_error_stat[2];
167__thread unsigned nb_output_dumped = 0;
168
169__thread int want_sdp = 1;
170
172__thread AVIOContext *progress_avio = NULL;
173
174__thread uint8_t *subtitle_out;
175
176__thread InputStream **input_streams = NULL;
177__thread int nb_input_streams = 0;
178__thread InputFile **input_files = NULL;
179__thread int nb_input_files = 0;
180
181__thread OutputStream **output_streams = NULL;
182__thread int nb_output_streams = 0;
183__thread OutputFile **output_files = NULL;
184__thread int nb_output_files = 0;
185
187__thread int nb_filtergraphs;
188
189__thread int64_t last_time = -1;
190__thread int64_t keyboard_last_time = 0;
191__thread int first_report = 1;
192__thread int qp_histogram[52];
193
194void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL;
195
196extern __thread int file_overwrite;
197extern __thread int no_file_overwrite;
198extern __thread int ignore_unknown_streams;
199extern __thread int copy_unknown_streams;
200extern int opt_map(void *optctx, const char *opt, const char *arg);
201extern int opt_map_channel(void *optctx, const char *opt, const char *arg);
202extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg);
203extern int opt_data_frames(void *optctx, const char *opt, const char *arg);
204extern int opt_progress(void *optctx, const char *opt, const char *arg);
205extern int opt_target(void *optctx, const char *opt, const char *arg);
206extern int opt_vsync(void *optctx, const char *opt, const char *arg);
207extern int opt_abort_on(void *optctx, const char *opt, const char *arg);
208extern int opt_stats_period(void *optctx, const char *opt, const char *arg);
209extern int opt_qscale(void *optctx, const char *opt, const char *arg);
210extern int opt_profile(void *optctx, const char *opt, const char *arg);
211extern int opt_filter_complex(void *optctx, const char *opt, const char *arg);
212extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg);
213extern int opt_attach(void *optctx, const char *opt, const char *arg);
214extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
215extern __thread int intra_only;
216extern int opt_video_codec(void *optctx, const char *opt, const char *arg);
217extern int opt_sameq(void *optctx, const char *opt, const char *arg);
218extern int opt_timecode(void *optctx, const char *opt, const char *arg);
219extern __thread int do_psnr;
220extern int opt_vstats_file(void *optctx, const char *opt, const char *arg);
221extern int opt_vstats(void *optctx, const char *opt, const char *arg);
222extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
223extern int opt_old2new(void *optctx, const char *opt, const char *arg);
224extern int opt_streamid(void *optctx, const char *opt, const char *arg);
225extern int opt_bitrate(void *optctx, const char *opt, const char *arg);
226extern int show_hwaccels(void *optctx, const char *opt, const char *arg);
227extern int opt_video_filters(void *optctx, const char *opt, const char *arg);
228extern int opt_audio_frames(void *optctx, const char *opt, const char *arg);
229extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg);
230extern int opt_audio_codec(void *optctx, const char *opt, const char *arg);
231extern int opt_channel_layout(void *optctx, const char *opt, const char *arg);
232extern int opt_preset(void *optctx, const char *opt, const char *arg);
233extern int opt_audio_filters(void *optctx, const char *opt, const char *arg);
234extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg);
235extern int opt_video_channel(void *optctx, const char *opt, const char *arg);
236extern int opt_video_standard(void *optctx, const char *opt, const char *arg);
237extern int opt_sdp_file(void *optctx, const char *opt, const char *arg);
238extern int opt_data_codec(void *optctx, const char *opt, const char *arg);
239extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg);
240extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg);
241extern __thread int input_sync;
242
243#if HAVE_TERMIOS_H
244
245/* init terminal so that we can grab keys */
246__thread struct termios oldtty;
247__thread int restore_tty;
248#endif
249
250#if HAVE_THREADS
251static void free_input_threads(void);
252#endif
253
254extern volatile int handleSIGQUIT;
255extern volatile int handleSIGINT;
256extern volatile int handleSIGTERM;
257extern volatile int handleSIGXCPU;
258extern volatile int handleSIGPIPE;
259
260extern __thread volatile long globalSessionId;
261extern void cancelSession(long sessionId);
262extern int cancelRequested(long sessionId);
263
264/* sub2video hack:
265 Convert subtitles to video with alpha to insert them in filter graphs.
266 This is a temporary solution until libavfilter gets real subtitles support.
267 */
268
270{
271 int ret;
272 AVFrame *frame = ist->sub2video.frame;
273
274 av_frame_unref(frame);
275 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
276 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
277 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
278 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
279 return ret;
280 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
281 return 0;
282}
283
284static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
285 AVSubtitleRect *r)
286{
287 uint32_t *pal, *dst2;
288 uint8_t *src, *src2;
289 int x, y;
290
291 if (r->type != SUBTITLE_BITMAP) {
292 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
293 return;
294 }
295 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
296 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
297 r->x, r->y, r->w, r->h, w, h
298 );
299 return;
300 }
301
302 dst += r->y * dst_linesize + r->x * 4;
303 src = r->data[0];
304 pal = (uint32_t *)r->data[1];
305 for (y = 0; y < r->h; y++) {
306 dst2 = (uint32_t *)dst;
307 src2 = src;
308 for (x = 0; x < r->w; x++)
309 *(dst2++) = pal[*(src2++)];
310 dst += dst_linesize;
311 src += r->linesize[0];
312 }
313}
314
315static void sub2video_push_ref(InputStream *ist, int64_t pts)
316{
317 AVFrame *frame = ist->sub2video.frame;
318 int i;
319 int ret;
320
321 av_assert1(frame->data[0]);
322 ist->sub2video.last_pts = frame->pts = pts;
323 for (i = 0; i < ist->nb_filters; i++) {
324 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
325 AV_BUFFERSRC_FLAG_KEEP_REF |
326 AV_BUFFERSRC_FLAG_PUSH);
327 if (ret != AVERROR_EOF && ret < 0)
328 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
329 av_err2str(ret));
330 }
331}
332
/* Render SUB (or an empty canvas when SUB is NULL) into the stream's
 * sub2video frame and record the interval during which it should stay
 * visible.  When SUB is NULL, HEARTBEAT_PTS (or the previous subpicture's
 * end time) is used as the start time. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst; /* NOTE(review): sub2video_copy_rect() takes uint8_t*; int8_t here looks like a signedness slip — confirm against upstream */
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    /* Stream has no sub2video canvas: nothing to do. */
    if (!frame)
        return;
    if (sub) {
        /* Convert the subtitle's display window from AV_TIME_BASE units to
           the stream time base. */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ist->sub2video.initialize ?
              heartbeat_pts : ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    /* NOTE(review): a line is missing from this extraction here — presumably
       the call that obtains a blank canvas and opens this error branch. */
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    /* Blit every subtitle rectangle onto the canvas. */
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
372
/* Called whenever a frame is read from the file containing IST: re-sends the
 * current sub2video frame of every other sub2video stream in the same file
 * so that filters waiting on a subtitle input do not stall the graph.
 * PTS is in IST's stream time base. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* Only push a frame when some filter actually failed a request. */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
404
406{
407 int i;
408 int ret;
409
410 if (ist->sub2video.end_pts < INT64_MAX)
411 sub2video_update(ist, INT64_MAX, NULL);
412 for (i = 0; i < ist->nb_filters; i++) {
413 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
414 if (ret != AVERROR_EOF && ret < 0)
415 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
416 }
417}
418
419/* end of sub2video hack */
420
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init().  May be called from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    /* Only restore if term_init() actually modified the terminal. */
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
428
/* Quiet the logger and restore the terminal on shutdown. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* NOTE(review): a line is missing from this extraction here —
       presumably the call to term_exit_sigsafe(); confirm upstream. */
}
434
435static volatile int received_sigterm = 0;
436static volatile int received_nb_signals = 0;
437__thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
438__thread volatile int ffmpeg_exited = 0;
439__thread volatile int main_ffmpeg_return_code = 0;
440__thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
441extern __thread volatile int longjmp_value;
442
/* Termination-signal handler shared by SIGINT/SIGTERM/SIGQUIT/SIGXCPU:
 * records the received signal so the main loop can shut down gracefully.
 * NOTE(review): the parameter-list line and part of the body are missing
 * from this extraction (the visible fragment references `sig`). */
static void
{
    received_sigterm = sig;
    /* NOTE(review): lines missing here in the extraction. */
}
450
451#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the same
 * sigterm_handler() path used by POSIX signals.  Returns TRUE when the
 * event was handled. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        /* Ctrl+C / Ctrl+Break behave like SIGINT. */
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
481#endif
482
483#ifdef __linux__
484#define SIGNAL(sig, func) \
485 do { \
486 action.sa_handler = func; \
487 sigaction(sig, &action, NULL); \
488 } while (0)
489#else
490#define SIGNAL(sig, func) \
491 signal(sig, func)
492#endif
493
/* Install signal handlers (honoring the handleSIG* switches set by the host
 * application) and, where termios is available, put the controlling terminal
 * into a raw-ish mode so read_key() can poll single keypresses. */
void term_init(void)
{
#if defined __linux__
    #if defined __aarch64__ || defined __amd64__ || defined __x86_64__
    struct sigaction action = {0};
    #else
    struct sigaction action = {{0}};
    #endif

    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    /* NOTE(review): a line is missing from this extraction here — presumably
       the condition guarding the interactive-terminal setup; the extra
       closing brace below matches it. */
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        /* Save current state so term_exit_sigsafe() can restore it. */
        oldtty = tty;
        restore_tty = 1;

        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                         |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    if (handleSIGQUIT == 1) {
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
    }
#endif

    if (handleSIGINT == 1) {
        signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    }
    if (handleSIGTERM == 1) {
        signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    }
#ifdef SIGXCPU
    if (handleSIGXCPU == 1) {
        signal(SIGXCPU, sigterm_handler);
    }
#endif
#ifdef SIGPIPE
    if (handleSIGPIPE == 1) {
        signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
    }
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
556
557/* read a key without blocking */
/* Poll stdin for a single keypress without blocking.
 * Returns the character read, the short read() result on a failed read, or
 * -1 when no input is available (or, on Windows, when the input pipe was
 * closed). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* select() with a zero timeout is a non-blocking readiness check. */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* Lazily detect whether stdin is a console or a pipe. */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
608
/* AVIOInterruptCB callback used by blocking libav* I/O: a non-zero return
 * tells the library to abort the current operation.  Aborts once more
 * termination signals were received than the transcode-init counter. */
int decode_interrupt_cb(void *ctx);

/* NOTE(review): the definition's signature line is missing from this
 * extraction; only the body survives below (see the prototype above). */
{
    return received_nb_signals > atomic_load(&transcode_init_done);
}
615
616__thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
617
/* Global teardown run once a transcode ends (success, error, or cancel):
 * frees all filtergraphs (including queued frames/subtitles), closes and
 * frees output files and streams, input files and streams, closes the
 * vstats file, and deinitializes the network layer.  RET is the exit status
 * being reported. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Free every filtergraph, draining frames/subtitles never consumed. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* Drain and free frames still queued for this input filter. */
            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(ifilter->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                /* Drain and free queued subtitles as well. */
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    av_fifo_generic_read(ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                av_fifo_freep(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s;
        if (!of)
            continue;
        s = of->ctx;
        /* Only close the AVIO context for formats that own a real file. */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): a line is missing from this extraction here —
           presumably the load of output_streams[i] into `ost`. */

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_packet_free(&ost->pkt);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        /* NOTE(review): a line is missing from this extraction here. */

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        /* Drain and free packets still waiting in the muxing queue. */
        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket *pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_free(&pkt);
            }
            av_fifo_freep(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_packet_free(&input_files[i]->pkt);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        /* NOTE(review): a line is missing from this extraction here —
           presumably the load of input_streams[i] into `ist`. */

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_packet_free(&ist->pkt);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    /* Report how the run ended: signal, cancel request, or failure. */
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (cancelRequested(globalSessionId)) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
780
781void remove_avoptions(AVDictionary **a, AVDictionary *b)
782{
783 AVDictionaryEntry *t = NULL;
784
785 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
786 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
787 }
788}
789
790void assert_avoptions(AVDictionary *m)
791{
792 AVDictionaryEntry *t;
793 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
794 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
795 exit_program(1);
796 }
797}
798
/* Terminate the program when an experimental codec was requested without
 * the required strictness level.  C and ENCODER are unused in this build. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
803
/* When -benchmark_all is active, log the user/sys/real time consumed since
 * the previous call, labelled with the printf-style FMT (FMT may be NULL to
 * only reset the reference timestamps in current_time). */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        /* NOTE(review): a line is missing from this extraction here —
           presumably fetching the current BenchmarkTimeStamps into `t`. */
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            av_log(NULL, AV_LOG_INFO,
                   "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
                   /* NOTE(review): the argument lines of this call are
                      missing from the extraction. */
        }
        current_time = t;
    }
}
824
/* NOTE(review): the signature line is missing from this extraction.  From
 * the body: marks OST's `finished` state with `this_stream` and every other
 * output stream's with `others`. */
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}
833
/* Hand a finished packet to the muxer of OF after rescaling its timestamps
 * to the stream time base and fixing dts monotonicity.  If the muxer header
 * has not been written yet, the packet is buffered in OST's muxing queue.
 * UNQUEUE is non-zero when the packet is being replayed from that queue, so
 * it is not counted against -frames a second time. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket *tmp_pkt;
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            unsigned int are_we_over_size =
            /* NOTE(review): a line is missing from this extraction here. */
            int new_size = are_we_over_size ?
                FFMIN(2 * av_fifo_size(ost->muxing_queue),
            /* NOTE(review): a line is missing from this extraction here. */
                2 * av_fifo_size(ost->muxing_queue);

            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        /* Queued packets must own their data. */
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        tmp_pkt = av_packet_alloc();
        if (!tmp_pkt)
            exit_program(1);
        av_packet_move_ref(tmp_pkt, pkt);
        ost->muxing_queue_data_size += tmp_pkt->size;
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* With -vsync drop (video) or negative async (audio), let the muxer
       generate the timestamps. */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* Pull encoder quality/error stats out of the packet side data. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
            /* NOTE(review): the closing argument line of this call is
               missing from the extraction. */
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts > pts is invalid; replace both with a guess that keeps the
           stream monotonic. */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    /* NOTE(review): a line is missing from this extraction here. */

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        /* NOTE(review): lines are missing from this extraction here —
           the error handling after a failed write. */
    }
    av_packet_unref(pkt);
}
979
/* NOTE(review): the signature and two body lines are missing from this
 * extraction.  From the body: finishes the output stream and, when the
 * containing file uses -shortest, clamps that file's recording_time to the
 * stream's current end time. */
{

    if (of->shortest) {
        /* Convert the stream's current position into AV_TIME_BASE units. */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
990
991/*
992 * Send a single packet to the output, applying any bitstream filters
993 * associated with the output stream. This may result in any number
994 * of packets actually being written, depending on what bitstream
995 * filters are applied. The supplied packet is consumed and will be
996 * blank (as if newly-allocated) when this function returns.
997 *
998 * If eof is set, instead indicate EOF to all bitstream filters and
999 * therefore flush any delayed packets to the output. A blank packet
1000 * must be supplied in this case.
1001 */
1002static void output_packet(OutputFile *of, AVPacket *pkt,
1003 OutputStream *ost, int eof)
1004{
1005 int ret = 0;
1006
1007 /* apply the output bitstream filters */
1008 if (ost->bsf_ctx) {
1009 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
1010 if (ret < 0)
1011 goto finish;
1012 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
1013 write_packet(of, pkt, ost, 0);
1014 if (ret == AVERROR(EAGAIN))
1015 ret = 0;
1016 } else if (!eof)
1017 write_packet(of, pkt, ost, 0);
1018
1019finish:
1020 if (ret < 0 && ret != AVERROR_EOF) {
1021 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
1022 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
1023 if(exit_on_error)
1024 exit_program(1);
1025 }
1026}
1027
/* NOTE(review): the signature and two body lines are missing from this
 * extraction.  From the body: returns 0 when the stream has reached its
 * output file's -t recording limit (the missing line presumably closes the
 * stream), 1 while encoding may continue. */
{

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        /* NOTE(review): a line is missing from this extraction here. */
        return 0;
    }
    return 1;
}
1040
/* Rescale FRAME's pts from the filter sink time base to OST's encoder time
 * base, subtracting the output file's start time.  Returns the same pts as
 * a double with extra fractional precision (consumed by the fps logic); the
 * frame's own pts is rewritten in encoder units as a side effect. */
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    /* Without a frame, a pts, an encoder or a configured graph there is
       nothing to rescale. */
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational filter_tb = av_buffersink_get_time_base(filter);
        AVRational tb = enc->time_base;
        /* Widen the denominator (up to 16 extra bits) so the double keeps
           sub-tick precision. */
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
        float_pts /= 1 << extra_bits;
        // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
1083
1084static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len);
1085
1086static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
1087{
1088 int ret = AVERROR_BUG;
1089 char error[1024] = {0};
1090
1091 if (ost->initialized)
1092 return 0;
1093
1094 ret = init_output_stream(ost, frame, error, sizeof(error));
1095 if (ret < 0) {
1096 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1097 ost->file_index, ost->index, error);
1098
1099 if (fatal)
1100 exit_program(1);
1101 }
1102
1103 return ret;
1104}
1105
/* Encode one audio frame and mux the resulting packets.
 * NOTE(review): the first signature line and two guard lines (presumably
 * an init call and a recording-time check) are not visible in this
 * rendered chunk; gaps are flagged below rather than guessed at. */
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    int ret;

    /* NOTE(review): the condition guarding this early return is on a line
     * not visible here. */
        return;

    /* Derive a pts for frames that carry none, then advance the audio
     * clock by the number of samples in this frame. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;

    update_benchmark(NULL);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* Drain every packet the encoder has ready; EAGAIN means it wants
     * another frame before producing more output. */
    while (1) {
        av_packet_unref(pkt);
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* Convert from encoder time base to the muxer's time base. */
        av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
        }

        output_packet(of, pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
1163
/* Encode one subtitle and mux it; DVB subtitles are emitted twice (draw
 * packet + clear packet).
 * NOTE(review): the signature's first lines and two body lines (the -ss
 * offset subtraction and a recording-time guard) are not visible in this
 * rendered chunk; gaps are flagged below. */
                           AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Lazily allocate the shared encode buffer on first use. */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    /* NOTE(review): the body of this if (the start_time subtraction) is on
     * a line not visible here. */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        /* NOTE(review): the guard condition for this early return is on a
         * line not visible here. */
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* Second (clear) packet for DVB carries no rectangles. */
        if (i == 1)
            sub->num_rects = 0;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_packet_unref(pkt);
        pkt->data = subtitle_out;
        pkt->size = subtitle_out_size;
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        output_packet(of, pkt, ost, 0);
    }
}
1246
/*
 * Apply the selected vsync policy (duplicate or drop frames as needed),
 * encode the resulting video frames and mux the packets.
 *
 * NOTE(review): several hyperlinked lines of this function are not
 * visible in this rendered chunk (e.g. the "OutputStream *ost" parameter
 * line, the 'ist' assignment, a memmove argument, drop-counter updates
 * and two guard conditions); the gaps are flagged below rather than
 * guessed at.
 */
static void do_video_out(OutputFile *of,
                         AVFrame *next_picture)
{
    int ret, format_video_sync;
    AVPacket *pkt = ost->pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    double sync_ipts = AV_NOPTS_VALUE;
    int frame_size = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    /* Ensure the encoder exists before timestamps are adjusted to it. */
    init_output_stream_wrapper(ost, next_picture, 1);
    sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);

    /* NOTE(review): the 'ist' assignment on this if's body line is not
     * visible here. */
    if (ost->source_index >= 0)

    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* With no filtering involved, prefer the input packet duration. */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
            } else
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            if (   ist
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            }
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
            }
        }
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* Absorb small negative drift into the current frame's duration. */
        if (delta0 < 0 &&
            delta > 0 &&
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (format_video_sync) {
        case VSYNC_VSCFR:
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = llrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = llrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /* Never emit more frames than the -frames:v limit allows. */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* NOTE(review): the middle (source) argument line of this memmove is
     * not visible here. */
    memmove(ost->last_nb0_frames + 1,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        /* NOTE(review): a drop-counter increment line here is not visible
         * in this rendering. */
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;
    ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        int forced_keyframe = 0;
        double pts_time;

        /* Duplicates re-encode the previously output picture. */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

        /* NOTE(review): the guard (likely a recording-time check) for this
         * early return is on a line not visible here. */
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
            in_picture->pts != AV_NOPTS_VALUE)
            ost->forced_kf_ref_pts = in_picture->pts;

        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
        /* NOTE(review): the opening condition of this forced-keyframe
         * branch and several expression-evaluation argument lines below
         * are not visible in this rendering. */
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            double res;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    res);
            if (res) {
                forced_keyframe = 1;
            }

        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1
                   && !i) {
            forced_keyframe = 1;
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
                   && !i) {
            forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
            ost->dropped_keyframe = 0;
        }

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

        update_benchmark(NULL);
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }

        ret = avcodec_send_frame(enc, in_picture);
        if (ret < 0)
            goto error;
        // Make sure Closed Captions will not be duplicated
        av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);

        /* Drain every packet the encoder has ready for this frame. */
        while (1) {
            av_packet_unref(pkt);
            ret = avcodec_receive_packet(enc, pkt);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret == AVERROR(EAGAIN))
                break;
            if (ret < 0)
                goto error;

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                       av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
            }

            if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                pkt->pts = ost->sync_opts;

            av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
                       av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
            }

            frame_size = pkt->size;
            output_packet(of, pkt, ost, 0);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }

    /* Remember the last emitted picture so it can be duplicated later. */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
    else
        av_frame_free(&ost->last_frame);

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    exit_program(1);
}
1542
1543static double psnr(double d)
1544{
1545 return -10.0 * log10(d);
1546}
1547
1548static void do_video_stats(OutputStream *ost, int frame_size)
1549{
1550 AVCodecContext *enc;
1551 int frame_number;
1552 double ti1, bitrate, avg_bitrate;
1553
1554 /* this is executed just the first time do_video_stats is called */
1555 if (!vstats_file) {
1556 vstats_file = fopen(vstats_filename, "w");
1557 if (!vstats_file) {
1558 perror("fopen");
1559 exit_program(1);
1560 }
1561 }
1562
1563 enc = ost->enc_ctx;
1564 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1565 frame_number = ost->st->nb_frames;
1566 if (vstats_version <= 1) {
1567 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1568 ost->quality / (float)FF_QP2LAMBDA);
1569 } else {
1570 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1571 ost->quality / (float)FF_QP2LAMBDA);
1572 }
1573
1574 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1575 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1576
1577 fprintf(vstats_file,"f_size= %6d ", frame_size);
1578 /* compute pts value */
1579 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1580 if (ti1 < 0.01)
1581 ti1 = 0.01;
1582
1583 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1584 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1585 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1586 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1587 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1588 }
1589}
1590
{
    /* NOTE(review): this function's signature line, a local OutputFile
     * lookup, a finished-flag update and the for-loop body are on lines
     * not visible in this rendered chunk. */
    int i;

    /* With -shortest, every stream of the file is affected. */
    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
    }
}
1603
/*
 * Pull every frame currently available from all filtergraph buffer sinks
 * and hand each to the matching audio/video encoder; with flush set,
 * signal end-of-stream to video encoders when a sink drains (AVERROR_EOF).
 * Returns 0 on success or AVERROR(ENOMEM) on allocation failure.
 *
 * NOTE(review): the 'ost'/'of' declarations at the top of the loop body
 * and the audio-path encoder-init call are on lines not visible in this
 * rendered chunk.
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        /* NOTE(review): the body of this if (the init call) is on a line
         * not visible here. */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)

        if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
            return AVERROR(ENOMEM);
        }
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* Sink drained: flush the video encoder. */
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                do_video_out(of, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1692
/*
 * Print the cumulative per-type sizes, the muxing overhead and verbose
 * per-stream demux/mux statistics after the final progress report.
 *
 * NOTE(review): a few hyperlinked lines are not visible in this rendered
 * chunk (stream-pointer declarations, a data_size accumulation, a
 * frames_encoded argument line and the muxed-packets argument line); the
 * gaps are flagged below.
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* Sum encoded payload per media type across all output streams. */
    for (i = 0; i < nb_output_streams; i++) {
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    /* Muxing overhead relative to the raw encoded payload. */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            /* NOTE(review): the 'ist' declaration line is not visible here. */
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            /* NOTE(review): the 'ost' declaration line is not visible here. */
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                /* NOTE(review): this av_log's argument line is not visible
                 * here. */
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            /* NOTE(review): this av_log's argument line is not visible here. */
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1809
1810static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1811{
1812 AVFormatContext *oc = NULL;
1813 AVCodecContext *enc = NULL;
1814 OutputStream *ost = NULL;
1815 int64_t pts = INT64_MIN + 1;
1816 int vid, i;
1817
1818 int frame_number = 0;
1819 float fps = 0;
1820 float quality = 0;
1821 int64_t total_size = 0;
1822 int seconds = 0;
1823 double bitrate = 0.0;
1824 double speed = 0.0;
1825
1826 float t = (cur_time-timer_start) / 1000000.0;
1827
1828 oc = output_files[0]->ctx;
1829
1830 // 1. calculate size
1831 total_size = avio_size(oc->pb);
1832 if (total_size <= 0) {
1833 total_size = avio_tell(oc->pb);
1834 }
1835
1836 vid = 0;
1837 for (i = 0; i < nb_output_streams; i++) {
1838 ost = output_streams[i];
1839 enc = ost->enc_ctx;
1840
1841 if (!ost->stream_copy) {
1842
1843 // 2. extract quality
1844 quality = ost->quality / (float) FF_QP2LAMBDA;
1845 }
1846
1847 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1848
1849 // 3. extract frame number
1850 frame_number = ost->frame_number;
1851
1852 // 4. calculate fps
1853 fps = t > 1 ? frame_number / t : 0;
1854 }
1855
1856 // 5. calculate time
1857 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1858 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1859 ost->st->time_base, AV_TIME_BASE_Q));
1860
1861 vid = 1;
1862 }
1863
1864 // 6. calculate time, with microseconds to milliseconds conversion
1865 seconds = FFABS(pts) / 1000;
1866
1867 // 7. calculating kbit/s value
1868 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1869
1870 // 9. calculate processing speed = processed stream duration/operation duration
1871 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1872
1873 // FORWARD DATA
1874 if (report_callback != NULL) {
1875 report_callback(frame_number, fps, quality, total_size, seconds, bitrate, speed);
1876 }
1877}
1878
/*
 * Build and emit the periodic "frame= ... fps= ... bitrate= ..." status
 * line (and the machine-readable -progress variant), forward the same
 * numbers via forward_report(), and on the last report print the final
 * statistics.
 *
 * NOTE(review): a handful of hyperlinked lines are not visible in this
 * rendered chunk (the 'ost' declaration, half of the throttling
 * condition, the copy_ts pts adjustments and a last-report accumulation);
 * the gaps are flagged below.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    /* Throttle intermediate reports to the configured stats period. */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        }
        /* NOTE(review): the second half of this condition is on a line not
         * visible here. */
        if (((cur_time - last_time) < stats_period && !first_report) ||
            return;
        last_time = cur_time;
    }

    forward_report(is_last_report, timer_start, cur_time);

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* Chroma planes are quarter-sized. */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            /* NOTE(review): the bodies of these two copy_ts adjustments are
             * on lines not visible here. */
            if (copy_ts) {
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
            }
        }

        /* NOTE(review): the body of this last-report branch is on a line
         * not visible here. */
        if (is_last_report)
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    first_report = 0;

    if (is_last_report)
        print_final_stats(total_size);
}
2087
2088static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
2089{
2090 // We never got any input. Set a fake format, which will
2091 // come from libavformat.
2092 ifilter->format = par->format;
2093 ifilter->sample_rate = par->sample_rate;
2094 ifilter->channels = par->channels;
2095 ifilter->channel_layout = par->channel_layout;
2096 ifilter->width = par->width;
2097 ifilter->height = par->height;
2098 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
2099}
2100
/*
 * flush_encoders(): drain every active encoder once input is exhausted.
 * For each encoded output stream, repeatedly calls avcodec_receive_packet(),
 * feeding a NULL frame on EAGAIN to enter draining mode, and muxes every
 * packet produced until the encoder returns EOF.
 * NOTE(review): this doxygen-extracted listing dropped several original
 * lines (see inline notes) — the body below is not compilable as shown;
 * compare with upstream fftools/ffmpeg.c before editing.
 */
2101 static void flush_encoders(void)
2102 {
2103     int i, ret;
2104 
2105     for (i = 0; i < nb_output_streams; i++) {
         /* NOTE(review): line 2106 missing here — presumably
          * "OutputStream *ost = output_streams[i];" — confirm upstream. */
2107         AVCodecContext *enc = ost->enc_ctx;
         /* NOTE(review): line 2108 missing — presumably
          * "OutputFile *of = output_files[ost->file_index];". */
2109 
2110         if (!ost->encoding_needed)
2111             continue;
2112 
2113         // Try to enable encoding with no input frames.
2114         // Maybe we should just let encoding fail instead.
2115         if (!ost->initialized) {
2116             FilterGraph *fg = ost->filter->graph;
2117 
2118             av_log(NULL, AV_LOG_WARNING,
2119                    "Finishing stream %d:%d without any data written to it.\n",
2120                    ost->file_index, ost->st->index);
2121 
2122             if (ost->filter && !fg->graph) {
2123                 int x;
                 /* Fall back to codecpar-derived parameters for any input
                  * whose format was never determined. */
2124                 for (x = 0; x < fg->nb_inputs; x++) {
2125                     InputFilter *ifilter = fg->inputs[x];
2126                     if (ifilter->format < 0)
2127                         ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2128                 }
2129 
                 /* NOTE(review): line 2130 missing — presumably the
                  * "if (!ifilter_has_all_formats(fg))" guard before this
                  * continue. */
2131                     continue;
2132 
                 /* NOTE(review): line 2133 missing — presumably
                  * "ret = configure_filtergraph(fg);". */
2134                 if (ret < 0) {
2135                     av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
2136                     exit_program(1);
2137                 }
2138 
             /* NOTE(review): line 2139 missing — presumably
              * "finish_output_stream(ost);". */
2140             }
2141 
         /* NOTE(review): line 2142 missing — presumably
          * "init_output_stream_wrapper(ost, NULL, 1);". */
2143         }
2144 
         /* Only audio/video encoders are drained here. */
2145         if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
2146             continue;
2147 
2148         for (;;) {
2149             const char *desc = NULL;
2150             AVPacket *pkt = ost->pkt;
2151             int pkt_size;
2152 
2153             switch (enc->codec_type) {
2154             case AVMEDIA_TYPE_AUDIO:
2155                 desc   = "audio";
2156                 break;
2157             case AVMEDIA_TYPE_VIDEO:
2158                 desc   = "video";
2159                 break;
2160             default:
2161                 av_assert0(0);
2162             }
2163 
2164             update_benchmark(NULL);
2165 
2166             av_packet_unref(pkt);
             /* EAGAIN means the encoder wants input: send the NULL (drain)
              * frame, then try receiving again. */
2167             while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
2168                 ret = avcodec_send_frame(enc, NULL);
2169                 if (ret < 0) {
2170                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2171                            desc,
2172                            av_err2str(ret));
2173                     exit_program(1);
2174                 }
2175             }
2176 
2177             update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2178             if (ret < 0 && ret != AVERROR_EOF) {
2179                 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2180                        desc,
2181                        av_err2str(ret));
2182                 exit_program(1);
2183             }
2184             if (ost->logfile && enc->stats_out) {
2185                 fprintf(ost->logfile, "%s", enc->stats_out);
2186             }
             /* EOF from the encoder: emit the final (EOF) packet and stop. */
2188             if (ret == AVERROR_EOF) {
2189                 output_packet(of, pkt, ost, 1);
2190                 break;
2191             }
2192             if (ost->finished & MUXER_FINISHED) {
2193                 av_packet_unref(pkt);
2194                 continue;
2195             }
2196             av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
2197             pkt_size = pkt->size;
2198             output_packet(of, pkt, ost, 0);
2199             if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2200                 do_video_stats(ost, pkt_size);
2201             }
2202         }
2203     }
2204 }
2204
2205/*
2206 * Check whether a packet from ist should be written into ost at this time
2207 */
2209{
2211 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2212
2213 if (ost->source_index != ist_index)
2214 return 0;
2215
2216 if (ost->finished)
2217 return 0;
2218
2219 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2220 return 0;
2221
2222 return 1;
2223}
2224
/*
 * do_streamcopy(): pass a packet through unchanged (codec copy), rescaling
 * its timestamps from the input stream timebase to the muxer timebase and
 * honoring -ss/-t style start/recording-time limits.
 * NOTE(review): several original lines are missing from this listing (see
 * inline notes) — not compilable as shown.
 */
2225 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2226 {
     /* NOTE(review): line 2227 missing — presumably
      * "OutputFile *of = output_files[ost->file_index];". */
2228     InputFile   *f = input_files [ist->file_index];
2229     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2230     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2231     AVPacket *opkt = ost->pkt;
2232 
2233     av_packet_unref(opkt);
2234     // EOF: flush output bitstream filters.
2235     if (!pkt) {
2236         output_packet(of, opkt, ost, 1);
2237         return;
2238     }
2239 
     /* Drop leading non-keyframes unless the user asked to keep them. */
2240     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
     /* NOTE(review): line 2241 missing — presumably
      * "!ost->copy_initial_nonkeyframes)". */
2242         return;
2243 
2244     if (!ost->frame_number && !ost->copy_prior_start) {
2245         int64_t comp_start = start_time;
2246         if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2247             comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2248         if (pkt->pts == AV_NOPTS_VALUE ?
2249             ist->pts < comp_start :
2250             pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2251             return;
2252     }
2253 
     /* Stop copying once the output recording time is exceeded. */
2254     if (of->recording_time != INT64_MAX &&
2255         ist->pts >= of->recording_time + start_time) {
         /* NOTE(review): line 2256 missing — presumably
          * "close_output_stream(ost);". */
2257         return;
2258     }
2259 
2260     if (f->recording_time != INT64_MAX) {
2261         start_time = 0;
2262         if (copy_ts) {
2263             start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
2264             start_time += start_at_zero ? 0 : f->ctx->start_time;
2265         }
2266         if (ist->pts >= f->recording_time + start_time) {
             /* NOTE(review): line 2267 missing — presumably
              * "close_output_stream(ost);". */
2268             return;
2269         }
2270     }
2271 
2272     /* force the input stream PTS */
2273     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2274         ost->sync_opts++;
2275 
2276     if (av_packet_ref(opkt, pkt) < 0)
2277         exit_program(1);
2278 
2279     if (pkt->pts != AV_NOPTS_VALUE)
2280         opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2281 
2282     if (pkt->dts == AV_NOPTS_VALUE) {
2283         opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2284     } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
         /* Audio DTS is rescaled sample-accurately via the frame duration. */
2285         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2286         if(!duration)
2287             duration = ist->dec_ctx->frame_size;
2288         opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2289                                      (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2290                                      &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2291         /* dts will be set immediately afterwards to what pts is now */
2292         opkt->pts = opkt->dts - ost_tb_start_time;
2293     } else
2294         opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2295     opkt->dts -= ost_tb_start_time;
2296 
2297     opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2298 
2299     output_packet(of, opkt, ost, 0);
2300 }
2301
2303{
2304 AVCodecContext *dec = ist->dec_ctx;
2305
2306 if (!dec->channel_layout) {
2307 char layout_name[256];
2308
2309 if (dec->channels > ist->guess_layout_max)
2310 return 0;
2311 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2312 if (!dec->channel_layout)
2313 return 0;
2314 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2315 dec->channels, dec->channel_layout);
2316 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2317 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2318 }
2319 return 1;
2320}
2321
2323{
2324 if (*got_output || ret<0)
2325 decode_error_stat[ret<0] ++;
2326
2327 if (ret < 0 && exit_on_error)
2328 exit_program(1);
2329
2330 if (*got_output && ist) {
2331 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2332 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2333 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2334 if (exit_on_error)
2335 exit_program(1);
2336 }
2337 }
2338}
2339
2340 // Filters can be configured only if the formats of all inputs are known.
/* NOTE(review): the signature (line 2341) is missing from this listing —
 * presumably "static int ifilter_has_all_formats(FilterGraph *fg)".
 * Returns 1 when every audio/video input of the graph has a known format. */
2342 {
2343     int i;
2344     for (i = 0; i < fg->nb_inputs; i++) {
2345         if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2346                                           fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2347             return 0;
2348     }
2349     return 1;
2350 }
2351
/*
 * ifilter_send_frame(): push a decoded frame into one filtergraph input.
 * Detects parameter changes (format, audio layout, video size, hw frames
 * context) that require reconfiguring the graph; buffers the frame in the
 * input's FIFO while the graph cannot be (re)configured yet.
 * NOTE(review): lines 2388 and 2412 are missing from this listing (see
 * inline notes) — not compilable as shown.
 */
2352 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2353 {
2354     FilterGraph *fg = ifilter->graph;
2355     int need_reinit, ret, i;
2356 
2357     /* determine if the parameters for this input changed */
2358     need_reinit = ifilter->format != frame->format;
2359 
2360     switch (ifilter->ist->st->codecpar->codec_type) {
2361     case AVMEDIA_TYPE_AUDIO:
2362         need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
2363                        ifilter->channels       != frame->channels ||
2364                        ifilter->channel_layout != frame->channel_layout;
2365         break;
2366     case AVMEDIA_TYPE_VIDEO:
2367         need_reinit |= ifilter->width  != frame->width ||
2368                        ifilter->height != frame->height;
2369         break;
2370     }
2371 
     /* Once configured, only reinit if the user allowed it. */
2372     if (!ifilter->ist->reinit_filters && fg->graph)
2373         need_reinit = 0;
2374 
     /* A change in hardware frames context always forces a reinit. */
2375     if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2376         (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2377         need_reinit = 1;
2378 
2379     if (need_reinit) {
2380         ret = ifilter_parameters_from_frame(ifilter, frame);
2381         if (ret < 0)
2382             return ret;
2383     }
2384 
2385     /* (re)init the graph if possible, otherwise buffer the frame and return */
2386     if (need_reinit || !fg->graph) {
2387         for (i = 0; i < fg->nb_inputs; i++) {
             /* NOTE(review): line 2388 missing — presumably
              * "if (!ifilter_has_all_formats(fg)) {" guarding the
              * clone-and-queue path below. */
2389             AVFrame *tmp = av_frame_clone(frame);
2390             if (!tmp)
2391                 return AVERROR(ENOMEM);
2392             av_frame_unref(frame);
2393 
             /* Grow the FIFO when full before queueing the clone. */
2394             if (!av_fifo_space(ifilter->frame_queue)) {
2395                 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2396                 if (ret < 0) {
2397                     av_frame_free(&tmp);
2398                     return ret;
2399                 }
2400             }
2401             av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2402             return 0;
2403         }
2404     }
2405 
     /* Drain the old graph before reconfiguring. */
2406     ret = reap_filters(1);
2407     if (ret < 0 && ret != AVERROR_EOF) {
2408         av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2409         return ret;
2410     }
2411 
     /* NOTE(review): line 2412 missing — presumably
      * "ret = configure_filtergraph(fg);" (with its enclosing condition). */
2413     if (ret < 0) {
2414         av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2415         return ret;
2416     }
2417     }
2418 
2419     ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2420     if (ret < 0) {
2421         if (ret != AVERROR_EOF)
2422             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2423         return ret;
2424     }
2425 
2426     return 0;
2427 }
2428
/*
 * ifilter_send_eof(): mark a filtergraph input as finished. If the graph
 * was configured, closes the buffersrc at the given pts; otherwise falls
 * back to codecpar-derived parameters and errors out if the format is
 * still unknown.
 * NOTE(review): line 2438 is missing from this listing — per the file
 * header's changelog it is the ffmpeg-kit cancellation guard, presumably
 * "if (!received_sigterm) {" (which the brace on line 2440 closes).
 */
2429 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2430 {
2431     int ret = 0;
2432 
2433     ifilter->eof = 1;
2434 
2435     if (ifilter->filter) {
2436 
2437         /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
         /* NOTE(review): missing line 2438 here — see block comment above. */
2439         ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2440         }
2441         if (ret < 0)
2442             return ret;
2443     } else {
2444         // the filtergraph was never configured
2445         if (ifilter->format < 0)
2446             ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2447         if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2448             av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2449             return AVERROR_INVALIDDATA;
2450         }
2451     }
2452 
2453     return 0;
2454 }
2455
2456// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2457// There is the following difference: if you got a frame, you must call
2458// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2459// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2460static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2461{
2462 int ret;
2463
2464 *got_frame = 0;
2465
2466 if (pkt) {
2467 ret = avcodec_send_packet(avctx, pkt);
2468 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2469 // decoded frames with avcodec_receive_frame() until done.
2470 if (ret < 0 && ret != AVERROR_EOF)
2471 return ret;
2472 }
2473
2474 ret = avcodec_receive_frame(avctx, frame);
2475 if (ret < 0 && ret != AVERROR(EAGAIN))
2476 return ret;
2477 if (ret >= 0)
2478 *got_frame = 1;
2479
2480 return 0;
2481}
2482
2484{
2485 int i, ret;
2486 AVFrame *f;
2487
2488 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2489 for (i = 0; i < ist->nb_filters; i++) {
2490 if (i < ist->nb_filters - 1) {
2491 f = ist->filter_frame;
2492 ret = av_frame_ref(f, decoded_frame);
2493 if (ret < 0)
2494 break;
2495 } else
2496 f = decoded_frame;
2497 ret = ifilter_send_frame(ist->filters[i], f);
2498 if (ret == AVERROR_EOF)
2499 ret = 0; /* ignore */
2500 if (ret < 0) {
2501 av_log(NULL, AV_LOG_ERROR,
2502 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2503 break;
2504 }
2505 }
2506 return ret;
2507}
2508
/*
 * decode_audio(): decode one audio packet (or drain with pkt==NULL),
 * fix up timestamps, advance next_pts/next_dts by the decoded sample
 * count, and forward the frame to the filtergraphs.
 * NOTE(review): lines 2524, 2535 and 2564 are missing from this listing
 * (see inline notes) — not compilable as shown.
 */
2509 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2510                         int *decode_failed)
2511 {
2512     AVFrame *decoded_frame;
2513     AVCodecContext *avctx = ist->dec_ctx;
2514     int ret, err = 0;
2515     AVRational decoded_frame_tb;
2516 
2517     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2518         return AVERROR(ENOMEM);
2519     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2520         return AVERROR(ENOMEM);
2521     decoded_frame = ist->decoded_frame;
2522 
2523     update_benchmark(NULL);
     /* NOTE(review): line 2524 missing — presumably
      * "ret = decode(avctx, decoded_frame, got_output, pkt);". */
2525     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2526     if (ret < 0)
2527         *decode_failed = 1;
2528 
2529     if (ret >= 0 && avctx->sample_rate <= 0) {
2530         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2531         ret = AVERROR_INVALIDDATA;
2532     }
2533 
2534     if (ret != AVERROR_EOF)
     /* NOTE(review): line 2535 missing — presumably
      * "check_decode_result(ist, got_output, ret);". */
2536 
2537     if (!*got_output || ret < 0)
2538         return ret;
2539 
2540     ist->samples_decoded += decoded_frame->nb_samples;
2541     ist->frames_decoded++;
2542 
2543     /* increment next_dts to use for the case where the input stream does not
2544        have timestamps or there are multiple frames in the packet */
2545     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2546                      avctx->sample_rate;
2547     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2548                      avctx->sample_rate;
2549 
     /* Pick a pts and its timebase: frame pts, else packet pts, else the
      * stream dts (already in AV_TIME_BASE). */
2550     if (decoded_frame->pts != AV_NOPTS_VALUE) {
2551         decoded_frame_tb   = ist->st->time_base;
2552     } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2553         decoded_frame->pts = pkt->pts;
2554         decoded_frame_tb   = ist->st->time_base;
2555     }else {
2556         decoded_frame->pts = ist->dts;
2557         decoded_frame_tb   = AV_TIME_BASE_Q;
2558     }
2559     if (decoded_frame->pts != AV_NOPTS_VALUE)
2560         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2561                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2562                                               (AVRational){1, avctx->sample_rate});
2563     ist->nb_samples = decoded_frame->nb_samples;
     /* NOTE(review): line 2564 missing — presumably
      * "err = send_frame_to_filters(ist, decoded_frame);". */
2565 
2566     av_frame_unref(ist->filter_frame);
2567     av_frame_unref(decoded_frame);
2568     return err < 0 ? err : ret;
2569 }
2570
/*
 * decode_video(): decode one video packet (or drain with eof=1), fix up
 * timestamps (best-effort timestamp, CFR counter, buffered drain dts),
 * retrieve hwaccel data when needed, and forward the frame to the
 * filtergraphs.
 * NOTE(review): lines 2628 and 2694 are missing from this listing (see
 * inline notes) — not compilable as shown.
 */
2571 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2572                         int *decode_failed)
2573 {
2574     AVFrame *decoded_frame;
2575     int i, ret = 0, err = 0;
2576     int64_t best_effort_timestamp;
2577     int64_t dts = AV_NOPTS_VALUE;
2578 
2579     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2580     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2581     // skip the packet.
2582     if (!eof && pkt && pkt->size == 0)
2583         return 0;
2584 
2585     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2586         return AVERROR(ENOMEM);
2587     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2588         return AVERROR(ENOMEM);
2589     decoded_frame = ist->decoded_frame;
2590     if (ist->dts != AV_NOPTS_VALUE)
2591         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2592     if (pkt) {
2593         pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2594     }
2595 
2596     // The old code used to set dts on the drain packet, which does not work
2597     // with the new API anymore.
2598     if (eof) {
2599         void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2600         if (!new)
2601             return AVERROR(ENOMEM);
2602         ist->dts_buffer = new;
2603         ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2604     }
2605 
2606     update_benchmark(NULL);
2607     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2608     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2609     if (ret < 0)
2610         *decode_failed = 1;
2611 
2612     // The following line may be required in some cases where there is no parser
2613     // or the parser does not has_b_frames correctly
2614     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2615         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2616             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2617         } else
2618             av_log(ist->dec_ctx, AV_LOG_WARNING,
2619                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2620                    "If you want to help, upload a sample "
2621                    "of this file to https://streams.videolan.org/upload/ "
2622                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2623                    ist->dec_ctx->has_b_frames,
2624                    ist->st->codecpar->video_delay);
2625     }
2626 
2627     if (ret != AVERROR_EOF)
     /* NOTE(review): line 2628 missing — presumably
      * "check_decode_result(ist, got_output, ret);". */
2629 
2630     if (*got_output && ret >= 0) {
2631         if (ist->dec_ctx->width  != decoded_frame->width ||
2632             ist->dec_ctx->height != decoded_frame->height ||
2633             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2634             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2635                    decoded_frame->width,
2636                    decoded_frame->height,
2637                    decoded_frame->format,
2638                    ist->dec_ctx->width,
2639                    ist->dec_ctx->height,
2640                    ist->dec_ctx->pix_fmt);
2641         }
2642     }
2643 
2644     if (!*got_output || ret < 0)
2645         return ret;
2646 
2647     if(ist->top_field_first>=0)
2648         decoded_frame->top_field_first = ist->top_field_first;
2649 
2650     ist->frames_decoded++;
2651 
     /* Download/transfer hw frames when a retrieval callback is set. */
2652     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2653         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2654         if (err < 0)
2655             goto fail;
2656     }
2657     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2658 
2659     best_effort_timestamp= decoded_frame->best_effort_timestamp;
2660     *duration_pts = decoded_frame->pkt_duration;
2661 
     /* In CFR mode the timestamp is simply a frame counter. */
2662     if (ist->framerate.num)
2663         best_effort_timestamp = ist->cfr_next_pts++;
2664 
     /* While draining, fall back to the dts values buffered at EOF. */
2665     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2666         best_effort_timestamp = ist->dts_buffer[0];
2667 
2668         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2669             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2670         ist->nb_dts_buffer--;
2671     }
2672 
2673     if(best_effort_timestamp != AV_NOPTS_VALUE) {
2674         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2675 
2676         if (ts != AV_NOPTS_VALUE)
2677             ist->next_pts = ist->pts = ts;
2678     }
2679 
2680     if (debug_ts) {
2681         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2682                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2683                ist->st->index, av_ts2str(decoded_frame->pts),
2684                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2685                best_effort_timestamp,
2686                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2687                decoded_frame->key_frame, decoded_frame->pict_type,
2688                ist->st->time_base.num, ist->st->time_base.den);
2689     }
2690 
2691     if (ist->st->sample_aspect_ratio.num)
2692         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2693 
     /* NOTE(review): line 2694 missing — presumably
      * "err = send_frame_to_filters(ist, decoded_frame);". */
2695 
2696 fail:
2697     av_frame_unref(ist->filter_frame);
2698     av_frame_unref(decoded_frame);
2699     return err < 0 ? err : ret;
2700 }
2701
/*
 * transcode_subtitles(): decode one subtitle packet, optionally fix the
 * previous subtitle's duration (-fix_sub_duration), feed sub2video when
 * active (or queue for later), and re-encode to subtitle outputs.
 * NOTE(review): several original lines are missing from this listing
 * (see inline notes) — not compilable as shown.
 */
2702 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2703                                int *decode_failed)
2704 {
2705     AVSubtitle subtitle;
2706     int free_sub = 1;
2707     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
     /* NOTE(review): line 2708 missing — presumably
      * "&subtitle, got_output, pkt);". */
2709 
     /* NOTE(review): line 2710 missing — presumably
      * "check_decode_result(NULL, got_output, ret);". */
2711 
2712     if (ret < 0 || !*got_output) {
2713         *decode_failed = 1;
2714         if (!pkt->size)
         /* NOTE(review): line 2715 missing — presumably
          * "sub2video_flush(ist);". */
2716         return ret;
2717     }
2718 
2719     if (ist->fix_sub_duration) {
2720         int end = 1;
2721         if (ist->prev_sub.got_output) {
             /* Clamp the previous subtitle's display time to the gap
              * between the two subtitles (in ms). */
2722             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2723                              1000, AV_TIME_BASE);
2724             if (end < ist->prev_sub.subtitle.end_display_time) {
2725                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2726                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
2727                        ist->prev_sub.subtitle.end_display_time, end,
2728                        end <= 0 ? ", dropping it" : "");
2729                 ist->prev_sub.subtitle.end_display_time = end;
2730             }
2731         }
         /* Emit the previous subtitle this round; hold the current one. */
2732         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2733         FFSWAP(int,        ret,         ist->prev_sub.ret);
2734         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2735         if (end <= 0)
2736             goto out;
2737     }
2738 
2739     if (!*got_output)
2740         return ret;
2741 
2742     if (ist->sub2video.frame) {
2743         sub2video_update(ist, INT64_MIN, &subtitle);
2744     } else if (ist->nb_filters) {
2745         if (!ist->sub2video.sub_queue)
2746             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2747         if (!ist->sub2video.sub_queue)
2748             exit_program(1);
2749         if (!av_fifo_space(ist->sub2video.sub_queue)) {
2750             ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2751             if (ret < 0)
2752                 exit_program(1);
2753         }
2754         av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
         /* Ownership moved into the queue; don't free below. */
2755         free_sub = 0;
2756     }
2757 
2758     if (!subtitle.num_rects)
2759         goto out;
2760 
2761     ist->frames_decoded++;
2762 
2763     for (i = 0; i < nb_output_streams; i++) {
         /* NOTE(review): line 2764 missing — presumably
          * "OutputStream *ost = output_streams[i];". */
2765 
2766         if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2767             exit_program(1);
         /* NOTE(review): line 2768 missing — presumably
          * "if (!check_output_constraints(ist, ost) || !ost->encoding_needed". */
2769             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2770             continue;
2771 
         /* NOTE(review): line 2772 missing — presumably
          * "do_subtitle_out(output_files[ost->file_index], ost, &subtitle);". */
2773     }
2774 
2775 out:
2776     if (free_sub)
2777         avsubtitle_free(&subtitle);
2778     return ret;
2779 }
2780
2782{
2783 int i, ret;
2784 /* TODO keep pts also in stream time base to avoid converting back */
2785 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2786 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2787
2788 for (i = 0; i < ist->nb_filters; i++) {
2789 ret = ifilter_send_eof(ist->filters[i], pts);
2790 if (ret < 0)
2791 return ret;
2792 }
2793 return 0;
2794}
2795
2796 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * process_input_packet(): central per-packet dispatcher. Tracks the input
 * stream's dts/pts bookkeeping, decodes (audio/video/subtitle) when
 * decoding is needed — looping to drain multiple frames at EOF — sends
 * filter EOFs after flushing, and otherwise advances timestamps and
 * stream-copies. Returns 0 when EOF was reached, non-zero otherwise.
 * NOTE(review): lines 2976, 2980 and 2983 are missing from this listing
 * (see inline notes) — not compilable as shown.
 */
2797 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2798 {
2799     int ret = 0, i;
2800     int repeating = 0;
2801     int eof_reached = 0;
2802 
2803     AVPacket *avpkt;
2804 
2805     if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
2806         return AVERROR(ENOMEM);
2807     avpkt = ist->pkt;
2808 
     /* Initialize first_dts/dts/pts from the first packet seen. */
2809     if (!ist->saw_first_ts) {
2810         ist->first_dts =
2811         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2812         ist->pts = 0;
2813         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2814             ist->first_dts =
2815             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2816             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2817         }
2818         ist->saw_first_ts = 1;
2819     }
2820 
2821     if (ist->next_dts == AV_NOPTS_VALUE)
2822         ist->next_dts = ist->dts;
2823     if (ist->next_pts == AV_NOPTS_VALUE)
2824         ist->next_pts = ist->pts;
2825 
2826     if (pkt) {
2827         av_packet_unref(avpkt);
2828         ret = av_packet_ref(avpkt, pkt);
2829         if (ret < 0)
2830             return ret;
2831     }
2832 
2833     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2834         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2835         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2836             ist->next_pts = ist->pts = ist->dts;
2837     }
2838 
2839     // while we have more to decode or while the decoder did output something on EOF
2840     while (ist->decoding_needed) {
2841         int64_t duration_dts = 0;
2842         int64_t duration_pts = 0;
2843         int got_output = 0;
2844         int decode_failed = 0;
2845 
2846         ist->pts = ist->next_pts;
2847         ist->dts = ist->next_dts;
2848 
2849         switch (ist->dec_ctx->codec_type) {
2850         case AVMEDIA_TYPE_AUDIO:
2851             ret = decode_audio    (ist, repeating ? NULL : avpkt, &got_output,
2852                                    &decode_failed);
2853             av_packet_unref(avpkt);
2854             break;
2855         case AVMEDIA_TYPE_VIDEO:
2856             ret = decode_video    (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2857                                    &decode_failed);
2858             if (!repeating || !pkt || got_output) {
2859                 if (pkt && pkt->duration) {
2860                     duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2861                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2862                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2863                     duration_dts = ((int64_t)AV_TIME_BASE *
2864                                     ist->dec_ctx->framerate.den * ticks) /
2865                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2866                 }
2867 
2868                 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2869                     ist->next_dts += duration_dts;
2870                 }else
2871                     ist->next_dts = AV_NOPTS_VALUE;
2872             }
2873 
2874             if (got_output) {
2875                 if (duration_pts > 0) {
2876                     ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2877                 } else {
2878                     ist->next_pts += duration_dts;
2879                 }
2880             }
2881             av_packet_unref(avpkt);
2882             break;
2883         case AVMEDIA_TYPE_SUBTITLE:
2884             if (repeating)
2885                 break;
2886             ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2887             if (!pkt && ret >= 0)
2888                 ret = AVERROR_EOF;
2889             av_packet_unref(avpkt);
2890             break;
2891         default:
2892             return -1;
2893         }
2894 
2895         if (ret == AVERROR_EOF) {
2896             eof_reached = 1;
2897             break;
2898         }
2899 
2900         if (ret < 0) {
2901             if (decode_failed) {
2902                 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2903                        ist->file_index, ist->st->index, av_err2str(ret));
2904             } else {
2905                 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2906                        "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2907             }
2908             if (!decode_failed || exit_on_error)
2909                 exit_program(1);
2910             break;
2911         }
2912 
2913         if (got_output)
2914             ist->got_output = 1;
2915 
2916         if (!got_output)
2917             break;
2918 
2919         // During draining, we might get multiple output frames in this loop.
2920         // ffmpeg.c does not drain the filter chain on configuration changes,
2921         // which means if we send multiple frames at once to the filters, and
2922         // one of those frames changes configuration, the buffered frames will
2923         // be lost. This can upset certain FATE tests.
2924         // Decode only 1 frame per call on EOF to appease these FATE tests.
2925         // The ideal solution would be to rewrite decoding to use the new
2926         // decoding API in a better way.
2927         if (!pkt)
2928             break;
2929 
2930         repeating = 1;
2931     }
2932 
2933     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2934     /* except when looping we need to flush but not to send an EOF */
2935     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2936         int ret = send_filter_eof(ist);
2937         if (ret < 0) {
2938             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2939             exit_program(1);
2940         }
2941     }
2942 
2943     /* handle stream copy */
2944     if (!ist->decoding_needed && pkt) {
2945         ist->dts = ist->next_dts;
2946         switch (ist->dec_ctx->codec_type) {
2947         case AVMEDIA_TYPE_AUDIO:
2948             av_assert1(pkt->duration >= 0);
2949             if (ist->dec_ctx->sample_rate) {
2950                 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2951                                   ist->dec_ctx->sample_rate;
2952             } else {
2953                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2954             }
2955             break;
2956         case AVMEDIA_TYPE_VIDEO:
2957             if (ist->framerate.num) {
2958                 // TODO: Remove work-around for c99-to-c89 issue 7
2959                 AVRational time_base_q = AV_TIME_BASE_Q;
2960                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2961                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2962             } else if (pkt->duration) {
2963                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2964             } else if(ist->dec_ctx->framerate.num != 0) {
2965                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2966                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2967                                   ist->dec_ctx->framerate.den * ticks) /
2968                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2969             }
2970             break;
2971         }
2972         ist->pts = ist->dts;
2973         ist->next_pts = ist->next_dts;
2974     }
2975     for (i = 0; i < nb_output_streams; i++) {
         /* NOTE(review): line 2976 missing — presumably
          * "OutputStream *ost = output_streams[i];". */
2977 
2978         if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2979             exit_program(1);
         /* NOTE(review): line 2980 missing — presumably
          * "if (!check_output_constraints(ist, ost) || ost->encoding_needed)". */
2981             continue;
2982 
         /* NOTE(review): line 2983 missing — presumably
          * "do_streamcopy(ist, ost, pkt);". */
2984     }
2985 
2986     return !eof_reached;
2987 }
2988
2989static void print_sdp(void)
2990{
2991 char sdp[16384];
2992 int i;
2993 int j;
2994 AVIOContext *sdp_pb;
2995 AVFormatContext **avc;
2996
2997 for (i = 0; i < nb_output_files; i++) {
2998 if (!output_files[i]->header_written)
2999 return;
3000 }
3001
3002 avc = av_malloc_array(nb_output_files, sizeof(*avc));
3003 if (!avc)
3004 exit_program(1);
3005 for (i = 0, j = 0; i < nb_output_files; i++) {
3006 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
3007 avc[j] = output_files[i]->ctx;
3008 j++;
3009 }
3010 }
3011
3012 if (!j)
3013 goto fail;
3014
3015 av_sdp_create(avc, j, sdp, sizeof(sdp));
3016
3017 if (!sdp_filename) {
3018 av_log(NULL, AV_LOG_STDERR, "SDP:\n%s\n", sdp);
3019 fflush(stdout);
3020 } else {
3021 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
3022 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
3023 } else {
3024 avio_print(sdp_pb, sdp);
3025 avio_closep(&sdp_pb);
3026 av_freep(&sdp_filename);
3027 }
3028 }
3029
3030fail:
3031 av_freep(&avc);
3032}
3033
/*
 * get_format(): AVCodecContext.get_format callback. Walks the decoder's
 * offered pixel formats; the first non-hwaccel format is accepted as-is,
 * otherwise tries to set up the requested/auto hwaccel (via the codec's
 * hw configs or the legacy hwaccels[] table) for the offered format.
 * NOTE(review): line 3067 is missing from this listing (see inline note)
 * — not compilable as shown.
 */
3034 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
3035 {
3036     InputStream *ist = s->opaque;
3037     const enum AVPixelFormat *p;
3038     int ret;
3039 
3040     for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
3041         const AVPixFmtDescriptor  *desc = av_pix_fmt_desc_get(*p);
3042         const AVCodecHWConfig  *config = NULL;
3043         int i;
3044 
         /* First software format offered: take it. */
3045         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
3046             break;
3047 
3048         if (ist->hwaccel_id == HWACCEL_GENERIC ||
3049             ist->hwaccel_id == HWACCEL_AUTO) {
             /* Look for a hw config of this codec matching the format. */
3050             for (i = 0;; i++) {
3051                 config = avcodec_get_hw_config(s->codec, i);
3052                 if (!config)
3053                     break;
3054                 if (!(config->methods &
3055                       AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
3056                     continue;
3057                 if (config->pix_fmt == *p)
3058                     break;
3059             }
3060         }
3061         if (config) {
3062             if (config->device_type != ist->hwaccel_device_type) {
3063                 // Different hwaccel offered, ignore.
3064                 continue;
3065             }
3066 
             /* NOTE(review): line 3067 missing — presumably
              * "ret = hw_device_setup_for_decode(ist);". */
3068             if (ret < 0) {
3069                 if (ist->hwaccel_id == HWACCEL_GENERIC) {
3070                     av_log(NULL, AV_LOG_FATAL,
3071                            "%s hwaccel requested for input stream #%d:%d, "
3072                            "but cannot be initialized.\n",
3073                            av_hwdevice_get_type_name(config->device_type),
3074                            ist->file_index, ist->st->index);
3075                     return AV_PIX_FMT_NONE;
3076                 }
3077                 continue;
3078             }
3079         } else {
             /* Legacy path: search the static hwaccels[] table. */
3080             const HWAccel *hwaccel = NULL;
3081             int i;
3082             for (i = 0; hwaccels[i].name; i++) {
3083                 if (hwaccels[i].pix_fmt == *p) {
3084                     hwaccel = &hwaccels[i];
3085                     break;
3086                 }
3087             }
3088             if (!hwaccel) {
3089                 // No hwaccel supporting this pixfmt.
3090                 continue;
3091             }
3092             if (hwaccel->id != ist->hwaccel_id) {
3093                 // Does not match requested hwaccel.
3094                 continue;
3095             }
3096 
3097             ret = hwaccel->init(s);
3098             if (ret < 0) {
3099                 av_log(NULL, AV_LOG_FATAL,
3100                        "%s hwaccel requested for input stream #%d:%d, "
3101                        "but cannot be initialized.\n", hwaccel->name,
3102                        ist->file_index, ist->st->index);
3103                 return AV_PIX_FMT_NONE;
3104             }
3105         }
3106 
3107         if (ist->hw_frames_ctx) {
3108             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
3109             if (!s->hw_frames_ctx)
3110                 return AV_PIX_FMT_NONE;
3111         }
3112 
3113         ist->hwaccel_pix_fmt = *p;
3114         break;
3115     }
3116 
3117     return *p;
3118 }
3119
3120static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
3121{
3122 InputStream *ist = s->opaque;
3123
3124 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
3125 return ist->hwaccel_get_buffer(s, frame, flags);
3126
3127 return avcodec_default_get_buffer2(s, frame, flags);
3128}
3129
/* Open the decoder for input stream 'ist_index' when decoding is needed,
 * wiring up the custom get_format/get_buffer2 callbacks and per-stream
 * decoder options. On failure a human-readable message is written into
 * 'error' (at most error_len bytes) and a negative AVERROR is returned;
 * returns 0 on success. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        const AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* Let the decoder callbacks find this InputStream again. */
        ist->dec_ctx->opaque = ist;
        ist->dec_ctx->get_format = get_format;
        ist->dec_ctx->get_buffer2 = get_buffer;
#if LIBAVCODEC_VERSION_MAJOR < 60
FF_DISABLE_DEPRECATION_WARNINGS
        ist->dec_ctx->thread_safe_callbacks = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
            (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        /* NOTE(review): 'ret' is checked below but its producing call (an
         * hwaccel device-setup step) is elided in this extract — confirm
         * against the original file. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        assert_avoptions(ist->decoder_opts);
    }

    /* Timestamp tracking starts in an undefined state. */
    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
3197
/* NOTE(review): this fragment is damaged in the extract — the signature
 * (presumably static InputStream *get_input_stream(OutputStream *ost)) and
 * the branch returning the mapped input stream for source_index >= 0 are
 * elided; only the NULL fallback used for complex-filtergraph outputs is
 * visible. Confirm against the original file. */
{
    if (ost->source_index >= 0)
    return NULL;
}
3204
/* qsort()/bsearch() comparator ordering int64_t values ascending:
 * returns <0, 0 or >0 as *a is less than, equal to, or greater than *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
3209
/* open the muxer when all the streams are initialized */
/* NOTE(review): the signature (presumably taking OutputFile *of and
 * int file_index) and the per-iteration OutputStream lookups inside both
 * loops are elided in this extract — the body references 'of',
 * 'file_index' and 'ost' that those lines introduce. */
{
    int ret, i;

    /* Do nothing until every stream of this output file is initialized. */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        /* Drain packets that were queued while the muxer was not open. */
        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket *pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            ost->muxing_queue_data_size -= pkt->size;
            write_packet(of, pkt, ost, 1);
            av_packet_free(&pkt);
        }
    }

    return 0;
}
3259
/* Initialize the bitstream filter attached to an output stream, feeding it
 * the stream's codec parameters and time base and propagating the filter's
 * output parameters back to the stream. Returns 0 on success or a negative
 * AVERROR.
 * NOTE(review): the signature line is elided in this extract — presumably
 * static int init_output_bsfs(OutputStream *ost); confirm upstream. */
{
    AVBSFContext *ctx = ost->bsf_ctx;
    int ret;

    /* Nothing to do when no bitstream filter is attached. */
    if (!ctx)
        return 0;

    ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
    if (ret < 0)
        return ret;

    ctx->time_base_in = ost->st->time_base;

    ret = av_bsf_init(ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
               ctx->filter->name);
        return ret;
    }

    /* The filter may rewrite codec parameters and the time base. */
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;

    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3289
/* Initialize an output stream used with stream copy (-c copy): codec
 * parameters are transferred from the source input stream, no encoder is
 * opened. Returns 0 on success or a negative AVERROR.
 * NOTE(review): the signature and the 'of' (output file) / 'ist' (source
 * input stream) local initializations are elided in this extract — the
 * body references them. */
{
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    /* Stream copy requires a source stream and no filtergraph. */
    av_assert0(ist && !ost->filter);

    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        /* Keep the source tag only when the muxer has no tag table, maps the
         * source tag back to the same codec id, or has no tag of its own for
         * this codec id. */
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;

    if (ost->frame_rate.num)
        ost->st->avg_frame_rate = ost->frame_rate;
    else
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
        if (ost->frame_rate.num)
            ost->st->time_base = av_inv_q(ost->frame_rate);
        else
            ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
    }

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        /* Duplicate all stream-level side data onto the output stream. */
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    if (ost->rotate_overridden) {
        /* A 3x3 display matrix encodes the user-requested rotation. */
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* Clear bogus block_align values for MP3 and AC3 streams. */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3411
/* Set the "encoder" metadata tag on the output stream: the full libavcodec
 * ident plus encoder name normally, or plain "Lavc <name>" when bitexact
 * output was requested via -fflags or the encoder's "flags" option.
 * NOTE(review): the signature is elided in this extract — the body uses
 * 'of' (output file) and 'ost' (output stream); confirm upstream. */
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* A user-provided encoder tag wins. */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* Evaluate -fflags to detect a bitexact request at the format level. */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* Same for the per-encoder "flags" option. */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    /* The dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
3452
/* Parse a -force_key_frames specification (comma-separated timestamps,
 * optionally the "chapters[+offset]" keyword) into a sorted array of pts
 * values in avctx->time_base units.
 * NOTE(review): the first signature line (the function name and the
 * 'kf'/'ost' parameters) is elided in this extract, as is the final store
 * of 'pts' into the OutputStream — confirm against the original file. */
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* Count comma-separated entries to size the array. */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            /* Expand to one key frame per chapter, shifted by the optional
             * time offset following the "chapters" keyword. */
            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
}
3515
/* Pick the encoder time base: an explicit positive -enc_time_base wins, a
 * negative value means "copy from the input stream" (falling back with a
 * warning when there is none), otherwise 'default_time_base' is used. */
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    /* NOTE(review): the initialization of 'ist' (source input stream
     * lookup) and the assignment of 'oc' before the warning below are
     * elided in this extract. */
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc;

    if (ost->enc_timebase.num > 0) {
        enc_ctx->time_base = ost->enc_timebase;
        return;
    }

    if (ost->enc_timebase.num < 0) {
        if (ist) {
            enc_ctx->time_base = ist->st->time_base;
            return;
        }

        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
    }

    enc_ctx->time_base = default_time_base;
}
3539
/* Configure the encoder context of an output stream before opening it:
 * disposition, frame rate selection, and per-media-type parameters
 * (sample/pixel formats, dimensions, time base, field order, forced key
 * frames). Returns 0 on success or a negative AVERROR.
 * NOTE(review): several single lines are elided in this extract — the
 * 'ist' source-stream lookup, a set_encoder_id() call near the top, and
 * the statements flagged inline below; confirm against the original. */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;


    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* Inherit disposition and chroma sample location from the source. */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No source stream: mark the first audio/video stream of its media
         * type in this file as the default one. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate priority: CLI -r, filter sink, input metadata, then a
         * 25 fps fallback with a warning. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            /* NOTE(review): the clamping statement (presumably assigning
             * max_frame_rate to frame_rate) is elided in this extract. */

        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Audio properties come from the configured filter sink. */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): the init_encoder_time_base() call for video is
         * elided in this extract. */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                   "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        /* Video geometry and formats come from the configured filter sink. */
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        if (frame) {
            /* Carry color metadata from the first frame into the encoder. */
            enc_ctx->color_range = frame->color_range;
            enc_ctx->color_primaries = frame->color_primaries;
            enc_ctx->color_trc = frame->color_trc;
            enc_ctx->colorspace = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* NOTE(review): the initialization of the expression
                 * constant values is elided in this extract. */

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): the parse_forced_key_frames() call is
                 * elided in this extract. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            /* Fall back to the source stream's dimensions. */
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3722
/* Fully initialize one output stream: set up and open the encoder (or run
 * the stream-copy path), copy global side data, apply a user -disposition
 * string, initialize bitstream filters, and finally attempt to open the
 * owning output file's muxer. On failure writes a message into 'error'
 * (error_len bytes) and returns a negative AVERROR.
 * NOTE(review): several single lines are elided in this extract — the
 * 'ist' declaration and the calls that assign 'ret' before some of the
 * 'if (ret < 0)' checks; each gap is flagged inline below. */
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        const AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;

        /* NOTE(review): the encode-init call assigning 'ret' is elided in
         * this extract. */
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio encoders without built-in defaults to 128 kb/s. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* NOTE(review): the hw-device setup call assigning 'ret' is elided
         * in this extract. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            /* Reject text<->bitmap subtitle transcoding. */
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                   " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            /* Copy encoder-produced side data onto the stream. */
            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        /* NOTE(review): the streamcopy-init call assigning 'ret' is elided
         * in this extract. */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* A throwaway AVClass/option table so av_opt_eval_flags() can parse
         * the textual disposition specification into flag bits. */
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, (double)INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* NOTE(review): the call checking/opening the owning output file
     * (assigning 'ret') is elided in this extract. */
    if (ret < 0)
        return ret;

    return ret;
}
3904
3905static void report_new_stream(int input_index, AVPacket *pkt)
3906{
3907 InputFile *file = input_files[input_index];
3908 AVStream *st = file->ctx->streams[pkt->stream_index];
3909
3910 if (pkt->stream_index < file->nb_streams_warn)
3911 return;
3912 av_log(file->ctx, AV_LOG_WARNING,
3913 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3914 av_get_media_type_string(st->codecpar->codec_type),
3915 input_index, pkt->stream_index,
3916 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3917 file->nb_streams_warn = pkt->stream_index + 1;
3918}
3919
/* One-time setup before the transcode main loop: bind complex-filtergraph
 * outputs to a nominal source stream, arm rate emulation, open all
 * decoders, initialize copy/subtitle/data output streams, discard unused
 * programs, write headers for stream-less outputs and dump the resulting
 * stream mapping. Returns 0 on success or a negative AVERROR.
 * NOTE(review): several lines are elided in this extract (the 'ost'/'ist'
 * declarations and statements flagged inline below). */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    char error[1024] = {0};

    /* Give complex-filtergraph outputs a nominal source stream (the last
     * input stream feeding the graph) for mapping/reporting purposes. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->readrate || ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* Close any already-opened encoders before bailing out. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* Keep the program only when at least one of its streams is
             * not discarded. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            /* NOTE(review): the check_init_output_file() call assigning
             * 'ret' is elided in this extract. */
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
            /* NOTE(review): the argument line of this av_log() call is
             * elided in this extract. */
            continue;
        }

        /* NOTE(review): the complex-filtergraph condition line opening the
         * block below is elided in this extract. */
        /* output from a complex graph */
        av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
        if (nb_filtergraphs > 1)
            av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

        av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
               ost->index, ost->enc ? ost->enc->name : "?");
        continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
        /* NOTE(review): additional argument/condition lines of this mapping
         * dump are elided in this extract. */
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
               ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            /* Report codec and encoder/decoder implementation names,
             * collapsing identical pairs to "native". */
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
4100
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): the per-iteration 'ost' (output stream) and 'of'
         * (output file) lookups are elided in this extract. */
        AVFormatContext *os = output_files[ost->file_index]->ctx;

        /* Skip streams that are finished or whose file hit -fs. */
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        if (ost->frame_number >= ost->max_frames) {
            int j;
            /* -frames reached: close every stream of the same output file.
             * NOTE(review): the close_output_stream() call in this loop is
             * elided in this extract. */
            for (j = 0; j < of->ctx->nb_streams; j++)
            continue;
        }

        return 1;
    }

    return 0;
}
4126
/* NOTE(review): the doc extraction dropped the heading of this function —
 * upstream this is `static OutputStream *choose_output(void)`: pick the
 * output stream to process next (the one with the smallest muxed DTS). */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): dropped line — upstream declares
         * `OutputStream *ost = output_streams[i];` here. */
        /* Last muxed DTS in microseconds; never-muxed streams rank first. */
        int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->last_mux_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->last_mux_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG,
                "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* Streams that still need initialization take priority, unless
         * currently unavailable. */
        if (!ost->initialized && !ost->inputs_done)
            return ost->unavailable ? NULL : ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
4158
/* Enable (on != 0) or disable (on == 0) terminal echo on stdin.
 * Silently does nothing when termios is unavailable or stdin is not a tty. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term_state;

    if (tcgetattr(0, &term_state) != 0)
        return; /* stdin is not a terminal (or the query failed) */

    if (on)
        term_state.c_lflag |= ECHO;
    else
        term_state.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &term_state);
#endif
}
4170
/*
 * Handle interactive single-key commands from stdin: quit, verbosity,
 * packet-dump cycling, debug modes, and live filtergraph commands.
 * Returns AVERROR_EXIT when the run should stop, 0 otherwise.
 */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    /* NOTE(review): dropped line — upstream guards this early exit with
     * `if (received_nb_signals)`. */
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    /* Poll the keyboard at most every 100 ms, never when daemonized. */
    if(cur_time - keyboard_last_time >= 100000 && !run_as_daemon){
        key = read_key();
        keyboard_last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles: off -> packet dump -> packet+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            /* NOTE(review): dropped line — upstream resets
             * `do_hex_dump = do_pkt_dump = 0;` here. */
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    /* 'c'/'C': read a filter command line from the tty and dispatch it.
     * 'c' targets the first matching filter, 'C' all matching filters. */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    /* time < 0 means "apply now"; otherwise queue for later. */
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles codec debug flags; 'd' reads an explicit value from tty. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->dec_ctx->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            /* NOTE(review): dropped line — upstream declares
             * `OutputStream *ost = output_streams[i];` here. */
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                "? show this help\n"
                "+ increase verbosity\n"
                "- decrease verbosity\n"
                "c Send command to first matching filter supporting it\n"
                "C Send/Queue command to all matching filters\n"
                "D cycle through available debug modes\n"
                "h dump packets/hex press to cycle through the 3 states\n"
                "q quit\n"
                "s Show QP histogram\n"
        );
    }
    return 0;
}
4282
4283#if HAVE_THREADS
4284static void *input_thread(void *arg)
4285{
4286 InputFile *f = arg;
4287 AVPacket *pkt = f->pkt, *queue_pkt;
4288 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4289 int ret = 0;
4290
4291 while (1) {
4292 ret = av_read_frame(f->ctx, pkt);
4293
4294 if (ret == AVERROR(EAGAIN)) {
4295 av_usleep(10000);
4296 continue;
4297 }
4298 if (ret < 0) {
4299 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4300 break;
4301 }
4302 queue_pkt = av_packet_alloc();
4303 if (!queue_pkt) {
4304 av_packet_unref(pkt);
4305 av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
4306 break;
4307 }
4308 av_packet_move_ref(queue_pkt, pkt);
4309 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4310 if (flags && ret == AVERROR(EAGAIN)) {
4311 flags = 0;
4312 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4313 av_log(f->ctx, AV_LOG_WARNING,
4314 "Thread message queue blocking; consider raising the "
4315 "thread_queue_size option (current value: %d)\n",
4316 f->thread_queue_size);
4317 }
4318 if (ret < 0) {
4319 if (ret != AVERROR_EOF)
4320 av_log(f->ctx, AV_LOG_ERROR,
4321 "Unable to send packet to main thread: %s\n",
4322 av_err2str(ret));
4323 av_packet_free(&queue_pkt);
4324 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4325 break;
4326 }
4327 }
4328
4329 return NULL;
4330}
4331
4332static void free_input_thread(int i)
4333{
4334 InputFile *f = input_files[i];
4335 AVPacket *pkt;
4336
4337 if (!f || !f->in_thread_queue)
4338 return;
4339 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4340 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4341 av_packet_free(&pkt);
4342
4343 pthread_join(f->thread, NULL);
4344 f->joined = 1;
4345 av_thread_message_queue_free(&f->in_thread_queue);
4346}
4347
4348static void free_input_threads(void)
4349{
4350 int i;
4351
4352 for (i = 0; i < nb_input_files; i++)
4353 free_input_thread(i);
4354}
4355
4356static int init_input_thread(int i)
4357{
4358 int ret;
4359 InputFile *f = input_files[i];
4360
4361 if (f->thread_queue_size < 0)
4362 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4363 if (!f->thread_queue_size)
4364 return 0;
4365
4366 if (f->ctx->pb ? !f->ctx->pb->seekable :
4367 strcmp(f->ctx->iformat->name, "lavfi"))
4368 f->non_blocking = 1;
4369 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4370 f->thread_queue_size, sizeof(f->pkt));
4371 if (ret < 0)
4372 return ret;
4373
4374 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4375 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4376 av_thread_message_queue_free(&f->in_thread_queue);
4377 return AVERROR(ret);
4378 }
4379
4380 return 0;
4381}
4382
4383static int init_input_threads(void)
4384{
4385 int i, ret;
4386
4387 for (i = 0; i < nb_input_files; i++) {
4388 ret = init_input_thread(i);
4389 if (ret < 0)
4390 return ret;
4391 }
4392 return 0;
4393}
4394
4395static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
4396{
4397 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4398 f->non_blocking ?
4399 AV_THREAD_MESSAGE_NONBLOCK : 0);
4400}
4401#endif
4402
/*
 * Fetch the next demuxed packet of input file f, honoring -re / -readrate
 * pacing: when the file is being read ahead of the emulated clock this
 * returns AVERROR(EAGAIN) instead of a packet.
 */
static int get_input_packet(InputFile *f, AVPacket **pkt)
{
    if (f->readrate || f->rate_emu) {
        int i;
        /* With -copyts (and without -start_at_zero) the pacing clock starts
         * at the input's own start time rather than at zero. */
        int64_t file_start = copy_ts * (
            (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
            (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
        );
        float scale = f->rate_emu ? 1.0 : f->readrate;
        for (i = 0; i < f->nb_streams; i++) {
            /* NOTE(review): dropped line — upstream declares
             * `InputStream *ist = input_streams[f->ist_index + i];` here. */
            int64_t stream_ts_offset, pts, now;
            if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
            stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
            pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (f->thread_queue_size)
        return get_input_packet_mt(f, pkt);
#endif
    *pkt = f->pkt;
    return av_read_frame(f->ctx, *pkt);
}
4431
4432static int got_eagain(void)
4433{
4434 int i;
4435 for (i = 0; i < nb_output_streams; i++)
4436 if (output_streams[i]->unavailable)
4437 return 1;
4438 return 0;
4439}
4440
4441static void reset_eagain(void)
4442{
4443 int i;
4444 for (i = 0; i < nb_input_files; i++)
4445 input_files[i]->eagain = 0;
4446 for (i = 0; i < nb_output_streams; i++)
4447 output_streams[i]->unavailable = 0;
4448}
4449
4450// set duration to max(tmp, duration) in a proper time base and return duration's time_base
4451static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4452 AVRational time_base)
4453{
4454 int ret;
4455
4456 if (!*duration) {
4457 *duration = tmp;
4458 return tmp_time_base;
4459 }
4460
4461 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4462 if (ret < 0) {
4463 *duration = tmp;
4464 return tmp_time_base;
4465 }
4466
4467 return time_base;
4468}
4469
/*
 * Rewind input file `ifile` for -stream_loop: seek back to the start and
 * accumulate the file's duration so the next iteration's timestamps can be
 * offset past the previous one (see the duration handling in process_input).
 */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    /* NOTE(review): dropped line — upstream declares `InputStream *ist;` here. */
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* No audio: estimate the last frame's length from the frame rate,
             * falling back to one time-base unit. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* A positive loop count is a finite number of remaining iterations. */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4528
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 * this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    /* NOTE(review): dropped line — upstream declares `InputStream *ist;` here. */
    AVPacket *pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    int disable_discontinuity_correction = copy_ts;

    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* EOF with -stream_loop: flush the decoders, seek back to the start and
     * try to read again. */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    /* Definitive EOF (or error) on this input: flush decoders and mark
     * dependent outputs finished. */
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                /* NOTE(review): dropped line — upstream declares
                 * `OutputStream *ost = output_streams[j];` here. */

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    /* NOTE(review): dropped line — upstream calls
                     * `finish_output_stream(ost);` here. */
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
                         is->streams[pkt->stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt->stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt->stream_index];

    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* Timestamp wrap correction for streams with pts_wrap_bits < 64. */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        // so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* Apply the input file's timestamp offset and the -itsscale factor. */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ist->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ist->ts_scale;

    /* Inter-stream discontinuity handling (first packet of a stream, against
     * the last timestamp seen in the whole file). */
    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* Offset by the accumulated loop duration (see seek_to_start). */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts += duration;
        ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;

    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

    /* With -copyts, re-enable discontinuity correction only when the DTS
     * looks like an actual timestamp wrap. */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    /* Intra-stream discontinuity handling against the predicted next DTS. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* Non-discontinuous container: drop wildly invalid timestamps
             * instead of shifting the offset. */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
#if HAVE_THREADS
    /* Threaded reads allocate a fresh packet per message; free it. The
     * single-threaded path reuses f->pkt, so only unref. */
    if (ifile->thread_queue_size)
        av_packet_free(&pkt);
    else
#endif
    av_packet_unref(pkt);

    return 0;
}
4821
/* NOTE(review): extraction dropped the heading of this function — upstream it
 * is `static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)`:
 * try to pull processed frames out of a configured filtergraph and, when the
 * graph needs more input, report the input stream that needs it most. */
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    /* NOTE(review): dropped line — upstream declares `InputStream *ist;` here. */

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters(0);

    /* Graph fully drained: flush and close all of its outputs. */
    if (ret == AVERROR_EOF) {
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* EAGAIN: pick the input whose buffersrc failed the most requests. */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* No feedable input right now: mark the outputs unavailable so the
     * scheduler skips them this round. */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
4869
/*
 * Run one scheduling step: choose an output stream, feed it input (reading
 * one packet or pulling from its filtergraph) and reap resulting frames.
 * Returns 0 on progress, AVERROR_EOF when nothing is left, <0 on error.
 */
static int transcode_step(void)
{
    /* NOTE(review): dropped line — upstream declares `OutputStream *ost;` here. */
    InputStream *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* Filtergraph exists but is not configured yet: (re)configure it once
     * all input formats are known. */
    if (ost->filter && !ost->filter->graph->graph) {
        /* NOTE(review): dropped lines — upstream reads
         * `if (ifilter_has_all_input_formats(ost->filter->graph)) {` and
         * `ret = configure_filtergraph(ost->filter->graph);` here. */
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /*
         * Similar case to the early audio initialization in reap_filters.
         * Audio is special in ffmpeg.c currently as we depend on lavfi's
         * audio frame buffering/creation to get the output audio frame size
         * in samples correct. The audio frame size for the filter chain is
         * configured during the output stream initialization.
         *
         * Apparently avfilter_graph_request_oldest (called in
         * transcode_from_filter just down the line) peeks. Peeking already
         * puts one frame "ready to be given out", which means that any
         * update in filter buffer sink configuration afterwards will not
         * help us. And yes, even if it would be utilized,
         * av_buffersink_get_samples is affected, as it internally utilizes
         * the same early exit for peeked frames.
         *
         * In other words, if avfilter_graph_request_oldest would not make
         * further filter chain configuration or usage of
         * av_buffersink_get_samples useless (by just causing the return
         * of the peeked AVFrame as-is), we could get rid of this additional
         * early encoder initialization.
         */
        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
            /* NOTE(review): dropped line — upstream performs the early output
             * stream (encoder) initialization for audio here. */

        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* Unconfigured graph: feed any input that has not produced output
         * yet and whose file is not at EOF. */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        av_assert0(ost->source_index >= 0);
        /* NOTE(review): dropped line — upstream assigns
         * `ist = input_streams[ost->source_index];` here. */
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4961
/*
 * The following code is the main loop of the file converter
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    /* NOTE(review): dropped lines — upstream declares `OutputStream *ost;`
     * and `InputStream *ist;` here. */
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* NOTE(review): dropped line — upstream opens the main loop here with
     * `while (!received_sigterm) {`. */
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        /* NOTE(review): dropped line — upstream guards the keyboard check
         * with `if (stdin_interaction)`. */
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    /* NOTE(review): dropped line — upstream calls `flush_encoders();` here. */

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            av_log(NULL, AV_LOG_ERROR,
                   "Nothing was written into output file %d (%s), because "
                   "at least one of its streams received no packets.\n",
                   i, os->url);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
        /* NOTE(review): dropped line — upstream guards the fatal below with
         * `if (!ost->packets_written && (abort_on_flags &
         * ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {`. */
            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
            exit_program(1);
        }
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* NOTE(review): dropped line — upstream calls `hw_device_free_all();` here. */

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        av_log(NULL, AV_LOG_ERROR,
                               "Error closing logfile, loss of information possible: %s\n",
                               av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
5108
/* NOTE(review): extraction dropped the signature line — upstream this is
 * `static BenchmarkTimeStamps get_benchmark_time_stamps(void)`: snapshot
 * wall-clock plus user/system CPU time (microseconds) for -benchmark. */
{
    BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    time_stamps.user_usec =
        (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
    time_stamps.sys_usec =
        (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts 100 ns ticks; divide by 10 for microseconds. */
    time_stamps.user_usec =
        ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
    time_stamps.sys_usec =
        ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
#else
    /* No per-process CPU time API available on this platform. */
    time_stamps.user_usec = time_stamps.sys_usec = 0;
#endif
    return time_stamps;
}
5134
/* Peak resident set size of the current process in bytes, or 0 when no
 * platform API is available. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is kilobytes on Linux (bytes on macOS); the fixed 1024
     * scaling here follows upstream ffmpeg — confirm if exactness matters. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS memcounters;
    HANDLE proc = GetCurrentProcess();

    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
5152
/* No-op log callback: installed (see the run_as_daemon branch of
 * ffmpeg_execute()) to suppress all libav* log output. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
5156
    /* Reset the globals that persist across ffmpeg_execute() invocations so
     * a subsequent execution starts from a clean state. */
    longjmp_value = 0;
    received_sigterm = 0;
    ffmpeg_exited = 0;
    copy_ts_first_pts = AV_NOPTS_VALUE;

    run_as_daemon = 0;
    nb_frames_dup = 0;
    dup_warning = 1000;
    nb_frames_drop = 0;
    nb_output_dumped = 0;

    want_sdp = 1;

    progress_avio = NULL;

    /* Drop stale stream/file table pointers and reset their counters.
     * NOTE(review): assumes the tables themselves were already freed by the
     * regular cleanup path — confirm before relying on this. */
    input_streams = NULL;
    nb_input_streams = 0;
    input_files = NULL;
    nb_input_files = 0;

    output_streams = NULL;
    output_files = NULL;
    nb_output_files = 0;

    filtergraphs = NULL;
    nb_filtergraphs = 0;

    last_time = -1;
    first_report = 1;
}
5192
/* Register the function pointer used to forward encoding statistics to the
 * host application (see forward_report(), called from print_report()). */
void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double))
{
    report_callback = callback;
}
5197
/* Cancel a running ffmpeg operation.
 *
 * An id of 0 cancels the current execution by raising SIGINT through
 * sigterm_handler(); any other id cancels the session registered under
 * that identifier via cancelSession().
 */
void cancel_operation(long id)
{
    if (id != 0) {
        cancelSession(id);
        return;
    }
    sigterm_handler(SIGINT);
}
5206
/* Thread-local pointer to the active option table; assigned at the start of
 * ffmpeg_execute(), so concurrent executions each see their own table. */
__thread OptionDef *ffmpeg_options = NULL;
5208
5209int ffmpeg_execute(int argc, char **argv)
5210{
5211 char _program_name[] = "ffmpeg";
5212 program_name = (char*)&_program_name;
5213 program_birth_year = 2000;
5214
5215 #define OFFSET(x) offsetof(OptionsContext, x)
5216 OptionDef options[] = {
5217
5218 /* main options */
5219 { "L", OPT_EXIT, { .func_arg = show_license }, "show license" },
5220 { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5221 { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5222 { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5223 { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5224 { "version", OPT_EXIT, { .func_arg = show_version }, "show version" },
5225 { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" },
5226 { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" },
5227 { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" },
5228 { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" },
5229 { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" },
5230 { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" },
5231 { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" },
5232 { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" },
5233 { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" },
5234 { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" },
5235 { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" },
5236 { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" },
5237 { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" },
5238 { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" },
5239 { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" },
5240 { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5241 { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5242 { "report", 0, { .func_arg = opt_report }, "generate a report" },
5243 { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" },
5244 { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
5245 { "cpucount", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpucount }, "force specific cpu count", "count" },
5246 { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
5247
5248 #if CONFIG_AVDEVICE
5249 { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
5250 "list sources of the input device", "device" },
5251 { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
5252 "list sinks of the output device", "device" },
5253 #endif
5254
5255 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
5256 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
5257 "force format", "fmt" },
5258 { "y", OPT_BOOL, { &file_overwrite },
5259 "overwrite output files" },
5260 { "n", OPT_BOOL, { &no_file_overwrite },
5261 "never overwrite output files" },
5262 { "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
5263 "Ignore unknown stream types" },
5264 { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
5265 "Copy unknown stream types" },
5266 { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
5267 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5268 "codec name", "codec" },
5269 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC |
5270 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5271 "codec name", "codec" },
5272 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC |
5273 OPT_OUTPUT, { .off = OFFSET(presets) },
5274 "preset name", "preset" },
5275 { "map", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5276 OPT_OUTPUT, { .func_arg = opt_map },
5277 "set input stream mapping",
5278 "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
5279 { "map_channel", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_map_channel },
5280 "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
5281 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC |
5282 OPT_OUTPUT, { .off = OFFSET(metadata_map) },
5283 "set metadata information of outfile from infile",
5284 "outfile[,metadata]:infile[,metadata]" },
5285 { "map_chapters", HAS_ARG | OPT_INT | OPT_EXPERT | OPT_OFFSET |
5286 OPT_OUTPUT, { .off = OFFSET(chapters_input_file) },
5287 "set chapters mapping", "input_file_index" },
5288 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET |
5289 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(recording_time) },
5290 "record or transcode \"duration\" seconds of audio/video",
5291 "duration" },
5292 { "to", HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(stop_time) },
5293 "record or transcode stop time", "time_stop" },
5294 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(limit_filesize) },
5295 "set the limit file size in bytes", "limit_size" },
5296 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
5297 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
5298 "set the start time offset", "time_off" },
5299 { "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
5300 OPT_INPUT, { .off = OFFSET(start_time_eof) },
5301 "set the start time offset relative to EOF", "time_off" },
5302 { "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
5303 OPT_INPUT, { .off = OFFSET(seek_timestamp) },
5304 "enable/disable seeking by timestamp with -ss" },
5305 { "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
5306 OPT_INPUT, { .off = OFFSET(accurate_seek) },
5307 "enable/disable accurate seeking with -ss" },
5308 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET |
5309 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_ts_offset) },
5310 "set the input ts offset", "time_off" },
5311 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
5312 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
5313 "set the input ts scale", "scale" },
5314 { "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
5315 "set the recording timestamp ('now' to set the current time)", "time" },
5316 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
5317 "add metadata", "string=string" },
5318 { "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
5319 "add program with specified streams", "title=string:st=number..." },
5320 { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5321 OPT_OUTPUT, { .func_arg = opt_data_frames },
5322 "set the number of data frames to output", "number" },
5323 { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
5324 "add timings for benchmarking" },
5325 { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
5326 "add timings for each task" },
5327 { "progress", HAS_ARG | OPT_EXPERT, { .func_arg = opt_progress },
5328 "write program-readable progress information", "url" },
5329 { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
5330 "enable or disable interaction on standard input" },
5331 { "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
5332 "set max runtime in seconds in CPU user time", "limit" },
5333 { "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
5334 "dump each input packet" },
5335 { "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
5336 "when dumping packets, also dump the payload" },
5337 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5338 OPT_INPUT, { .off = OFFSET(rate_emu) },
5339 "read input at native frame rate", "" },
5340 { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
5341 "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
5342 "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
5343 { "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
5344 "video sync method", "" },
5345 { "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
5346 "frame drop threshold", "" },
5347 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
5348 "audio sync method", "" },
5349 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
5350 "audio drift threshold", "threshold" },
5351 { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
5352 "copy timestamps" },
5353 { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5354 "shift input timestamps to start at 0 when using copyts" },
5355 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5356 "copy input stream time base when stream copying", "mode" },
5357 { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5358 "shift input timestamps to start at 0 when using copyts" },
5359 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5360 "copy input stream time base when stream copying", "mode" },
5361 { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5362 OPT_OUTPUT, { .off = OFFSET(shortest) },
5363 "finish encoding within shortest input" },
5364 { "bitexact", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5365 OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(bitexact) },
5366 "bitexact mode" },
5367 { "apad", OPT_STRING | HAS_ARG | OPT_SPEC |
5368 OPT_OUTPUT, { .off = OFFSET(apad) },
5369 "audio pad", "" },
5370 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_delta_threshold },
5371 "timestamp discontinuity delta threshold", "threshold" },
5372 { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_error_threshold },
5373 "timestamp error delta threshold", "threshold" },
5374 { "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
5375 "exit on error", "error" },
5376 { "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
5377 "abort on the specified condition flags", "flags" },
5378 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5379 OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
5380 "copy initial non-keyframes" },
5381 { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
5382 "copy or discard frames before start time" },
5383 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
5384 "set the number of frames to output", "number" },
5385 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
5386 OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
5387 "force codec tag/fourcc", "fourcc/tag" },
5388 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
5389 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
5390 "use fixed quality scale (VBR)", "q" },
5391 { "qscale", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5392 OPT_OUTPUT, { .func_arg = opt_qscale },
5393 "use fixed quality scale (VBR)", "q" },
5394 { "profile", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_profile },
5395 "set profile", "profile" },
5396 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
5397 "set stream filtergraph", "filter_graph" },
5398 { "filter_threads", HAS_ARG | OPT_INT, { &filter_nbthreads },
5399 "number of non-complex filter threads" },
5400 { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
5401 "read stream filtergraph description from a file", "filename" },
5402 { "reinit_filter", HAS_ARG | OPT_INT | OPT_SPEC | OPT_INPUT, { .off = OFFSET(reinit_filters) },
5403 "reinit filtergraph on input parameter changes", "" },
5404 { "filter_complex", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5405 "create a complex filtergraph", "graph_description" },
5406 { "filter_complex_threads", HAS_ARG | OPT_INT, { &filter_complex_nbthreads },
5407 "number of threads for -filter_complex" },
5408 { "lavfi", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5409 "create a complex filtergraph", "graph_description" },
5410 { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
5411 "read complex filtergraph description from a file", "filename" },
5412 { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
5413 "enable automatic conversion filters globally" },
5414 { "stats", OPT_BOOL, { &print_stats },
5415 "print progress report during encoding", },
5416 { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
5417 "set the period at which ffmpeg updates stats and -progress output", "time" },
5418 { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5419 OPT_OUTPUT, { .func_arg = opt_attach },
5420 "add an attachment to the output file", "filename" },
5421 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
5423 "extract an attachment into a file", "filename" },
5424 { "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
5425 OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
5426 { "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
5427 "print timestamp debugging info" },
5428 { "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
5429 "ratio of errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success.", "maximum error rate" },
5430 { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
5431 OPT_INPUT, { .off = OFFSET(discard) },
5432 "discard", "" },
5433 { "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
5434 OPT_OUTPUT, { .off = OFFSET(disposition) },
5435 "disposition", "" },
5436 { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
5437 { .off = OFFSET(thread_queue_size) },
5438 "set the maximum number of queued packets from the demuxer" },
5439 { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
5440 "read and decode the streams to fill missing information with heuristics" },
5441
5442 /* video options */
5443 { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
5444 "set the number of video frames to output", "number" },
5445 { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5446 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
5447 "set frame rate (Hz value, fraction or abbreviation)", "rate" },
5449 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_sizes) },
5450 "set frame size (WxH or abbreviation)", "size" },
5451 { "aspect", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5452 OPT_OUTPUT, { .off = OFFSET(frame_aspect_ratios) },
5453 "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
5454 { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5455 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
5456 "set pixel format", "format" },
5457 { "bits_per_raw_sample", OPT_VIDEO | OPT_INT | HAS_ARG, { &frame_bits_per_raw_sample },
5458 "set the number of bits per raw sample", "number" },
5459 { "intra", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &intra_only },
5460 "deprecated use -g 1" },
5461 { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
5462 "disable video" },
5463 { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5464 OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
5465 "rate control override for specific intervals", "override" },
5466 { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
5467 OPT_OUTPUT, { .func_arg = opt_video_codec },
5468 "force video codec ('copy' to copy stream)", "codec" },
5469 { "sameq", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5470 "Removed" },
5471 { "same_quant", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5472 "Removed" },
5473 { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
5474 "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
5475 { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
5476 "select the pass number (1 to 3)", "n" },
5477 { "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
5478 OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
5479 "select two pass log file name prefix", "prefix" },
5480 { "deinterlace", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_deinterlace },
5481 "this option is deprecated, use the yadif filter instead" },
5482 { "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
5483 "calculate PSNR of compressed frames" },
5484 { "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
5485 "dump video coding statistics to file" },
5486 { "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
5487 "dump video coding statistics to file", "file" },
5488 { "vstats_version", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &vstats_version },
5489 "Version of the vstats format to use."},
5490 { "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
5491 "set video filters", "filter_graph" },
5492 { "intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5493 OPT_OUTPUT, { .off = OFFSET(intra_matrices) },
5494 "specify intra matrix coeffs", "matrix" },
5495 { "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5496 OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
5497 "specify inter matrix coeffs", "matrix" },
5498 { "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5499 OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
5500 "specify intra matrix coeffs", "matrix" },
5501 { "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
5502 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
5503 "top=1/bottom=0/auto=-1 field first", "" },
5504 { "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5505 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
5506 "force video tag/fourcc", "fourcc/tag" },
5507 { "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
5508 "show QP histogram" },
5509 { "force_fps", OPT_VIDEO | OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5510 OPT_OUTPUT, { .off = OFFSET(force_fps) },
5511 "force the selected framerate, disable the best supported framerate selection" },
5512 { "streamid", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5513 OPT_OUTPUT, { .func_arg = opt_streamid },
5514 "set the value of an outfile streamid", "streamIndex:value" },
5515 { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5516 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
5517 "force key frames at specified timestamps", "timestamps" },
5518 { "ab", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5519 "audio bitrate (please use -b:a)", "bitrate" },
5520 { "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5521 "video bitrate (please use -b:v)", "bitrate" },
5522 { "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5523 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
5524 "use HW accelerated decoding", "hwaccel name" },
5525 { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5526 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
5527 "select a device for HW acceleration", "devicename" },
5528 { "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5529 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
5530 "select output format used with HW accelerated decoding", "format" },
5531 #if CONFIG_VIDEOTOOLBOX
5532 { "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
5533 #endif
5534 { "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
5535 "show available HW acceleration methods" },
5536 { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
5537 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
5538 "automatically insert correct rotate filters" },
5539 { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
5540 OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
5541 "automatically insert a scale filter at the end of the filter graph" },
5542
5543 /* audio options */
5544 { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
5545 "set the number of audio frames to output", "number" },
5546 { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
5547 "set audio quality (codec-specific)", "quality", },
5548 { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5549 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_sample_rate) },
5550 "set audio sampling rate (in Hz)", "rate" },
5551 { "ac", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5552 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_channels) },
5553 "set number of audio channels", "channels" },
5554 { "an", OPT_AUDIO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(audio_disable) },
5555 "disable audio" },
5556 { "acodec", OPT_AUDIO | HAS_ARG | OPT_PERFILE |
5557 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_audio_codec },
5558 "force audio codec ('copy' to copy stream)", "codec" },
5559 { "atag", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5560 OPT_OUTPUT, { .func_arg = opt_old2new },
5561 "force audio tag/fourcc", "fourcc/tag" },
5562 { "vol", OPT_AUDIO | HAS_ARG | OPT_INT, { &audio_volume },
5563 "change audio volume (256=normal)" , "volume" },
5564 { "sample_fmt", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5565 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(sample_fmts) },
5566 "set sample format", "format" },
5567 { "channel_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5568 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_channel_layout },
5569 "set channel layout", "layout" },
5570 { "af", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_filters },
5571 "set audio filters", "filter_graph" },
5572 { "guess_layout_max", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(guess_layout_max) },
5573 "set the maximum number of channels to try to guess the channel layout" },
5574
5575 /* subtitle options */
5576 { "sn", OPT_SUBTITLE | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(subtitle_disable) },
5577 "disable subtitle" },
5578 { "scodec", OPT_SUBTITLE | HAS_ARG | OPT_PERFILE | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_subtitle_codec },
5579 "force subtitle codec ('copy' to copy stream)", "codec" },
5580 { "stag", OPT_SUBTITLE | HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new }
5581 , "force subtitle tag/fourcc", "fourcc/tag" },
5582 { "fix_sub_duration", OPT_BOOL | OPT_EXPERT | OPT_SUBTITLE | OPT_SPEC | OPT_INPUT, { .off = OFFSET(fix_sub_duration) },
5583 "fix subtitles duration" },
5584 { "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
5585 "set canvas size (WxH or abbreviation)", "size" },
5586
5587 /* grab options */
5588 { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_channel },
5589 "deprecated, use -channel", "channel" },
5590 { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_standard },
5591 "deprecated, use -standard", "standard" },
5592 { "isync", OPT_BOOL | OPT_EXPERT, { &input_sync }, "this option is deprecated and does nothing", "" },
5593
5594 /* muxer options */
5595 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
5596 "set the maximum demux-decode delay", "seconds" },
5597 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_preload) },
5598 "set the initial demux-decode delay", "seconds" },
5599 { "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
5600 "specify a file in which to print sdp information", "file" },
5601
5602 { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
5603 "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
5604 { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
5605 "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
5606 "two special values are defined - "
5607 "0 = use frame rate (video) or sample rate (audio),"
5608 "-1 = match source time base", "ratio" },
5609
5610 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
5611 "A comma-separated list of bitstream filters", "bitstream_filters" },
5612 { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5613 "deprecated", "audio bitstream_filters" },
5614 { "vbsf", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5615 "deprecated", "video bitstream_filters" },
5616
5617 { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5618 "set the audio options to the indicated preset", "preset" },
5619 { "vpre", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5620 "set the video options to the indicated preset", "preset" },
5621 { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5622 "set the subtitle options to the indicated preset", "preset" },
5623 { "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5624 "set options from indicated preset file", "filename" },
5625
5626 { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
5627 "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
5628 { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) },
5629 "set the threshold after which max_muxing_queue_size is taken into account", "bytes" },
5630
5631 /* data codec support */
5632 { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
5633 "force data codec ('copy' to copy stream)", "codec" },
5634 { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
5635 "disable data" },
5636
5637 #if CONFIG_VAAPI
5638 { "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
5639 "set VAAPI hardware device (DRM path or X11 display name)", "device" },
5640 #endif
5641
5642 #if CONFIG_QSV
5643 { "qsv_device", HAS_ARG | OPT_STRING | OPT_EXPERT, { &qsv_device },
5644 "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
5645 #endif
5646
5647 { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
5648 "initialise hardware device", "args" },
5649 { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
5650 "set hardware device used when filtering", "device" },
5651
5652 { NULL, },
5653 };
5654
5655 ffmpeg_options = options;
5656
5657 int i, ret;
5659
5660 int savedCode = setjmp(ex_buf__);
5661 if (savedCode == 0) {
5662
5664
5665 init_dynload();
5666
5668
5669 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
5670
5671 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5672 parse_loglevel(argc, argv, options);
5673
5674 if(argc>1 && !strcmp(argv[1], "-d")){
5675 run_as_daemon=1;
5676 av_log_set_callback(log_callback_null);
5677 argc--;
5678 argv++;
5679 }
5680
5681 #if CONFIG_AVDEVICE
5682 avdevice_register_all();
5683 #endif
5684 avformat_network_init();
5685
5686 show_banner(argc, argv, options);
5687
5688 /* parse options and open all input/output files */
5689 ret = ffmpeg_parse_options(argc, argv);
5690 if (ret < 0)
5691 exit_program(1);
5692
5693 if (nb_output_files <= 0 && nb_input_files == 0) {
5694 show_usage();
5695 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5696 exit_program(1);
5697 }
5698
5699 /* file converter / grab */
5700 if (nb_output_files <= 0) {
5701 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
5702 exit_program(1);
5703 }
5704
5705 for (i = 0; i < nb_output_files; i++) {
5706 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
5707 want_sdp = 0;
5708 }
5709
5711 if (transcode() < 0)
5712 exit_program(1);
5713 if (do_benchmark) {
5714 int64_t utime, stime, rtime;
5716 utime = current_time.user_usec - ti.user_usec;
5717 stime = current_time.sys_usec - ti.sys_usec;
5718 rtime = current_time.real_usec - ti.real_usec;
5719 av_log(NULL, AV_LOG_INFO,
5720 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5721 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5722 }
5723 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5726 exit_program(69);
5727
5729
5730 } else {
5732 }
5733
5735}
__thread jmp_buf ex_buf__
void exit_program(int ret)
int show_decoders(void *optctx, const char *opt, const char *arg)
int opt_loglevel(void *optctx, const char *opt, const char *arg)
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
void init_dynload(void)
int show_help(void *optctx, const char *opt, const char *arg)
void print_error(const char *filename, int err)
int show_filters(void *optctx, const char *opt, const char *arg)
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
int show_muxers(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
__thread char * program_name
int show_layouts(void *optctx, const char *opt, const char *arg)
int show_encoders(void *optctx, const char *opt, const char *arg)
int show_version(void *optctx, const char *opt, const char *arg)
void parse_loglevel(int argc, char **argv, const OptionDef *options)
__thread int program_birth_year
int opt_cpucount(void *optctx, const char *opt, const char *arg)
void show_banner(int argc, char **argv, const OptionDef *options)
int opt_timelimit(void *optctx, const char *opt, const char *arg)
int show_license(void *optctx, const char *opt, const char *arg)
int show_codecs(void *optctx, const char *opt, const char *arg)
int show_buildconf(void *optctx, const char *opt, const char *arg)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
void register_exit(void(*cb)(int ret))
int show_devices(void *optctx, const char *opt, const char *arg)
void uninit_opts(void)
int show_formats(void *optctx, const char *opt, const char *arg)
__thread int hide_banner
int show_protocols(void *optctx, const char *opt, const char *arg)
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
int opt_report(void *optctx, const char *opt, const char *arg)
int show_colors(void *optctx, const char *opt, const char *arg)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
int show_demuxers(void *optctx, const char *opt, const char *arg)
#define OPT_VIDEO
#define OPT_SPEC
#define OPT_BOOL
#define media_type_string
#define OPT_INT64
#define OPT_PERFILE
#define OPT_INT
#define OPT_FLOAT
#define AV_LOG_STDERR
#define OPT_INPUT
#define OPT_DOUBLE
#define OPT_STRING
__thread int find_stream_info
#define OPT_AUDIO
#define OPT_DATA
#define OPT_SUBTITLE
#define OPT_EXPERT
#define OPT_EXIT
#define OPT_OUTPUT
#define OPT_TIME
#define OPT_OFFSET
#define HAS_ARG
static InputStream * get_input_stream(OutputStream *ost)
int opt_channel_layout(void *optctx, const char *opt, const char *arg)
__thread unsigned dup_warning
int opt_sdp_file(void *optctx, const char *opt, const char *arg)
static int transcode(void)
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
int opt_timecode(void *optctx, const char *opt, const char *arg)
__thread OptionDef * ffmpeg_options
int opt_vstats_file(void *optctx, const char *opt, const char *arg)
__thread InputStream ** input_streams
static void set_tty_echo(int on)
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
__thread const AVIOInterruptCB int_cb
__thread int run_as_daemon
static int check_keyboard_interaction(int64_t cur_time)
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
int opt_data_codec(void *optctx, const char *opt, const char *arg)
int opt_streamid(void *optctx, const char *opt, const char *arg)
__thread int nb_input_streams
static int need_output(void)
void term_exit(void)
static volatile int received_sigterm
const char *const forced_keyframes_const_names[]
void cancelSession(long sessionId)
int opt_qscale(void *optctx, const char *opt, const char *arg)
int opt_sameq(void *optctx, const char *opt, const char *arg)
__thread OutputStream ** output_streams
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
__thread OutputFile ** output_files
int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
int opt_filter_complex(void *optctx, const char *opt, const char *arg)
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
int opt_vsync(void *optctx, const char *opt, const char *arg)
static int init_input_stream(int ist_index, char *error, int error_len)
__thread int nb_output_streams
static void sub2video_push_ref(InputStream *ist, int64_t pts)
int guess_input_channel_layout(InputStream *ist)
__thread volatile int longjmp_value
static void print_sdp(void)
__thread int nb_frames_dup
static int reap_filters(int flush)
static int check_recording_time(OutputStream *ost)
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
__thread BenchmarkTimeStamps current_time
__thread int nb_input_files
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
static void print_final_stats(int64_t total_size)
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
__thread int first_report
__thread int nb_output_files
static double psnr(double d)
static int init_output_bsfs(OutputStream *ost)
volatile int handleSIGINT
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
int opt_subtitle_codec(void *optctx, const char *opt, const char *arg)
volatile int handleSIGTERM
int opt_video_standard(void *optctx, const char *opt, const char *arg)
void set_report_callback(void(*callback)(int, float, float, int64_t, int, double, double))
int opt_profile(void *optctx, const char *opt, const char *arg)
struct BenchmarkTimeStamps BenchmarkTimeStamps
static int64_t getmaxrss(void)
int opt_abort_on(void *optctx, const char *opt, const char *arg)
static void reset_eagain(void)
__thread int copy_unknown_streams
static void finish_output_stream(OutputStream *ost)
int opt_video_codec(void *optctx, const char *opt, const char *arg)
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
int opt_data_frames(void *optctx, const char *opt, const char *arg)
int opt_video_filters(void *optctx, const char *opt, const char *arg)
static int compare_int64(const void *a, const void *b)
__thread int input_sync
__thread volatile int ffmpeg_exited
int opt_attach(void *optctx, const char *opt, const char *arg)
int opt_filter_hw_device(void *optctx, const char *opt, const char *arg)
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
__thread atomic_int transcode_init_done
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
static void check_decode_result(InputStream *ist, int *got_output, int ret)
__thread int64_t keyboard_last_time
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
int opt_audio_frames(void *optctx, const char *opt, const char *arg)
static int process_input(int file_index)
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
__thread int no_file_overwrite
int opt_target(void *optctx, const char *opt, const char *arg)
__thread int qp_histogram[52]
static int check_output_constraints(InputStream *ist, OutputStream *ost)
__thread int file_overwrite
int opt_map(void *optctx, const char *opt, const char *arg)
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
void cancel_operation(long id)
static void set_encoder_id(OutputFile *of, OutputStream *ost)
__thread AVIOContext * progress_avio
__thread int64_t copy_ts_first_pts
__thread InputFile ** input_files
static int read_key(void)
static int check_init_output_file(OutputFile *of, int file_index)
static void close_output_stream(OutputStream *ost)
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
int opt_vstats(void *optctx, const char *opt, const char *arg)
int opt_video_channel(void *optctx, const char *opt, const char *arg)
__thread FilterGraph ** filtergraphs
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
static int got_eagain(void)
int opt_video_frames(void *optctx, const char *opt, const char *arg)
__thread unsigned nb_output_dumped
static int send_filter_eof(InputStream *ist)
int decode_interrupt_cb(void *ctx)
int opt_audio_filters(void *optctx, const char *opt, const char *arg)
static void term_exit_sigsafe(void)
static void sub2video_flush(InputStream *ist)
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
void remove_avoptions(AVDictionary **a, AVDictionary *b)
static int transcode_init(void)
static volatile int received_nb_signals
int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
static FILE * vstats_file
void(* report_callback)(int, float, float, int64_t, int, double, double)
static void report_new_stream(int input_index, AVPacket *pkt)
__thread int64_t decode_error_stat[2]
__thread volatile int main_ffmpeg_return_code
static void abort_codec_experimental(const AVCodec *c, int encoder)
int show_hwaccels(void *optctx, const char *opt, const char *arg)
static void update_benchmark(const char *fmt,...)
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
static void ffmpeg_cleanup(int ret)
int opt_preset(void *optctx, const char *opt, const char *arg)
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
static int transcode_step(void)
static int ifilter_has_all_input_formats(FilterGraph *fg)
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
static OutputStream * choose_output(void)
int opt_old2new(void *optctx, const char *opt, const char *arg)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
__thread int nb_filtergraphs
static int init_output_stream_streamcopy(OutputStream *ost)
void term_init(void)
int opt_stats_period(void *optctx, const char *opt, const char *arg)
int cancelRequested(long sessionId)
__thread int64_t last_time
volatile int handleSIGPIPE
static int get_input_packet(InputFile *f, AVPacket **pkt)
#define OFFSET(x)
int opt_bitrate(void *optctx, const char *opt, const char *arg)
volatile int handleSIGXCPU
static void do_video_stats(OutputStream *ost, int frame_size)
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
__thread uint8_t * subtitle_out
static int sub2video_get_blank_frame(InputStream *ist)
static void flush_encoders(void)
__thread int do_psnr
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
int opt_map_channel(void *optctx, const char *opt, const char *arg)
int opt_progress(void *optctx, const char *opt, const char *arg)
int opt_audio_codec(void *optctx, const char *opt, const char *arg)
int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
void assert_avoptions(AVDictionary *m)
__thread int want_sdp
void ffmpeg_var_cleanup()
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
int ffmpeg_execute(int argc, char **argv)
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
__thread volatile long globalSessionId
__thread int ignore_unknown_streams
static void sigterm_handler(int sig)
__thread int intra_only
volatile int handleSIGQUIT
int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
__thread int nb_frames_drop
__thread float dts_delta_threshold
int hw_device_setup_for_encode(OutputStream *ost)
__thread int copy_tb
@ HWACCEL_GENERIC
@ HWACCEL_AUTO
__thread int frame_bits_per_raw_sample
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define VSYNC_DROP
__thread int64_t stats_period
__thread char * sdp_filename
__thread int print_stats
__thread int video_sync_method
__thread int filter_complex_nbthreads
__thread int abort_on_flags
__thread int audio_volume
__thread float max_error_rate
__thread int copy_ts
__thread int stdin_interaction
__thread float dts_error_threshold
int hwaccel_decode_init(AVCodecContext *avctx)
OSTFinished
@ ENCODER_FINISHED
@ MUXER_FINISHED
#define VSYNC_CFR
__thread int filter_nbthreads
void show_usage(void)
#define DECODING_FOR_FILTER
__thread int do_benchmark
__thread float frame_drop_threshold
__thread int vstats_version
__thread char * vstats_filename
__thread int do_deinterlace
int hw_device_setup_for_decode(InputStream *ist)
#define VSYNC_AUTO
void hw_device_free_all(void)
__thread int audio_sync_method
__thread float audio_drift_threshold
__thread int do_benchmark_all
__thread int start_at_zero
@ FKF_PREV_FORCED_N
@ FKF_T
@ FKF_PREV_FORCED_T
@ FKF_N_FORCED
@ FKF_N
__thread int exit_on_error
int ffmpeg_parse_options(int argc, char **argv)
#define ABORT_ON_FLAG_EMPTY_OUTPUT
#define DECODING_FOR_OST
__thread int qp_hist
#define VSYNC_VSCFR
int filtergraph_is_simple(FilterGraph *fg)
#define VSYNC_PASSTHROUGH
int configure_filtergraph(FilterGraph *fg)
__thread int do_hex_dump
__thread int do_pkt_dump
const HWAccel hwaccels[]
#define VSYNC_VFR
void dump_attachment(AVStream *st, const char *filename)
__thread int debug_ts
__thread char * videotoolbox_pixfmt
__thread int auto_conversion_filters
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
fg inputs[0] ist
OutputStream * ost
ist filters[ist->nb_filters - 1]
ost filter
fg outputs[0] format
fg outputs[0] graph
OutputFilter ** outputs
const char * graph_desc
AVFilterGraph * graph
InputFilter ** inputs
enum HWAccelID id
int(* init)(AVCodecContext *s)
const char * name
AVPacket * pkt
int64_t ts_offset
int64_t duration
AVFormatContext * ctx
int64_t input_ts_offset
int64_t recording_time
AVRational time_base
int nb_streams_warn
int64_t last_ts
float readrate
int64_t start_time
AVBufferRef * hw_frames_ctx
uint8_t * name
struct InputStream * ist
AVFilterContext * filter
enum AVMediaType type
AVFifoBuffer * frame_queue
uint64_t channel_layout
struct FilterGraph * graph
AVRational sample_aspect_ratio
unsigned int initialize
marks if sub2video_update should force an initialization
AVFrame * decoded_frame
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
AVCodecContext * dec_ctx
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
struct InputStream::@2 prev_sub
uint64_t data_size
int64_t next_dts
AVPacket * pkt
struct InputStream::sub2video sub2video
AVStream * st
InputFilter ** filters
AVSubtitle subtitle
const AVCodec * dec
uint64_t limit_filesize
AVFormatContext * ctx
int64_t start_time
start time in microseconds == AV_TIME_BASE units
AVDictionary * opts
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
AVFilterInOut * out_tmp
struct OutputStream * ost
uint64_t * channel_layouts
AVFilterContext * filter
uint8_t * name
struct FilterGraph * graph
int max_muxing_queue_size
AVDictionary * swr_opts
int copy_initial_nonkeyframes
int64_t last_mux_dts
AVRational mux_timebase
double forced_keyframes_expr_const_values[FKF_NB]
OSTFinished finished
int * audio_channels_map
AVPacket * pkt
AVRational frame_aspect_ratio
double rotate_override_value
AVFrame * last_frame
const AVCodec * enc
int audio_channels_mapped
int64_t sync_opts
int64_t * forced_kf_pts
int64_t error[4]
uint64_t packets_written
uint64_t frames_encoded
int64_t max_frames
size_t muxing_queue_data_threshold
AVDictionary * resample_opts
AVRational max_frame_rate
AVRational enc_timebase
AVFifoBuffer * muxing_queue
AVCodecParameters * ref_par
char * forced_keyframes
AVFrame * filtered_frame
const char * attachment_filename
AVRational frame_rate
AVCodecContext * enc_ctx
struct InputStream * sync_ist
AVDictionary * encoder_opts
uint64_t data_size
AVStream * st
char * filters
filtergraph associated to the -filter option
int64_t forced_kf_ref_pts
uint64_t samples_encoded
char * filters_script
filtergraph script associated to the -filter_script option
AVBSFContext * bsf_ctx
int64_t first_pts
AVDictionary * sws_dict
OutputFilter * filter
char * disposition
AVExpr * forced_keyframes_pexpr
size_t muxing_queue_data_size
int last_nb0_frames[3]
char * logfile_prefix