FFmpegKit iOS / macOS / tvOS API 5.1
fftools_ffmpeg.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 * Copyright (c) 2018 Taner Sener ( tanersener gmail com )
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
27/*
28 * This file is the modified version of ffmpeg.c file living in ffmpeg source code under the fftools folder. We
29 * manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
30 * by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
31 *
32 * mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
33 *
34 * 09.2022
35 * --------------------------------------------------------
36 * - added opt_common.h include
37 * - volatile dropped from thread local variables
38 * - setvbuf call dropped
39 * - flushing stderr dropped
40 *
41 * 08.2020
42 * --------------------------------------------------------
43 * - OptionDef defines combined
44 *
45 * 06.2020
46 * --------------------------------------------------------
47 * - ignoring signals implemented
48 * - cancel_operation() method signature updated with id
49 * - cancel by execution id implemented
50 * - volatile modifier added to critical variables
51 *
52 * 01.2020
53 * --------------------------------------------------------
54 * - ffprobe support (added ffmpeg_ prefix to methods and variables defined for both ffmpeg and ffprobe)
55 *
56 * 12.2019
57 * --------------------------------------------------------
58 * - concurrent execution support ("__thread" specifier added to variables used by multiple threads,
59 * extern signatures of ffmpeg_opt.c methods called by both ffmpeg and ffprobe added, copied options from
60 * ffmpeg_opt.c and defined them as inline in execute method)
61 *
62 * 08.2018
63 * --------------------------------------------------------
64 * - fftools_ prefix added to file name and parent headers
65 * - forward_report() method, report_callback function pointer and set_report_callback() setter
66 * method added to forward stats
67 * - forward_report() call added from print_report()
68 * - cancel_operation() method added to trigger sigterm_handler
69 * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation
70 *
71 * 07.2018
72 * --------------------------------------------------------
73 * - main() function renamed as execute()
74 * - exit_program() implemented with setjmp
75 * - extern longjmp_value added to access exit code stored in exit_program()
76 * - cleanup() method added
77 */
78
79#include "config.h"
80#include <ctype.h>
81#include <string.h>
82#include <math.h>
83#include <stdlib.h>
84#include <errno.h>
85#include <limits.h>
86#include <stdatomic.h>
87#include <stdint.h>
88
89#include "ffmpegkit_exception.h"
90#include "fftools_opt_common.h"
91
92#if HAVE_IO_H
93#include <io.h>
94#endif
95#if HAVE_UNISTD_H
96#include <unistd.h>
97#endif
98
99#include "libavformat/avformat.h"
100#include "libavdevice/avdevice.h"
101#include "libswresample/swresample.h"
102#include "libavutil/opt.h"
103#include "libavutil/channel_layout.h"
104#include "libavutil/parseutils.h"
105#include "libavutil/samplefmt.h"
106#include "libavutil/fifo.h"
107#include "libavutil/hwcontext.h"
108#include "libavutil/internal.h"
109#include "libavutil/intreadwrite.h"
110#include "libavutil/dict.h"
111#include "libavutil/display.h"
112#include "libavutil/mathematics.h"
113#include "libavutil/pixdesc.h"
114#include "libavutil/avstring.h"
115#include "libavutil/libm.h"
116#include "libavutil/imgutils.h"
117#include "libavutil/timestamp.h"
118#include "libavutil/bprint.h"
119#include "libavutil/time.h"
120#include "libavutil/thread.h"
121#include "libavutil/threadmessage.h"
122#include "libavcodec/mathops.h"
123#include "libavformat/os_support.h"
124
125# include "libavfilter/avfilter.h"
126# include "libavfilter/buffersrc.h"
127# include "libavfilter/buffersink.h"
128
129#if HAVE_SYS_RESOURCE_H
130#include <sys/time.h>
131#include <sys/types.h>
132#include <sys/resource.h>
133#elif HAVE_GETPROCESSTIMES
134#include <windows.h>
135#endif
136#if HAVE_GETPROCESSMEMORYINFO
137#include <windows.h>
138#include <psapi.h>
139#endif
140#if HAVE_SETCONSOLECTRLHANDLER
141#include <windows.h>
142#endif
143
144
145#if HAVE_SYS_SELECT_H
146#include <sys/select.h>
147#endif
148
149#if HAVE_TERMIOS_H
150#include <fcntl.h>
151#include <sys/ioctl.h>
152#include <sys/time.h>
153#include <termios.h>
154#elif HAVE_KBHIT
155#include <conio.h>
156#endif
157
158#include <time.h>
159
160#include "fftools_ffmpeg.h"
161#include "fftools_cmdutils.h"
162
163#include "libavutil/avassert.h"
164
165static FILE *vstats_file;
166
167const char *const forced_keyframes_const_names[] = {
168 "n",
169 "n_forced",
170 "prev_forced_n",
171 "prev_forced_t",
172 "t",
173 NULL
174};
175
176typedef struct BenchmarkTimeStamps {
177 int64_t real_usec;
178 int64_t user_usec;
179 int64_t sys_usec;
181
183static int64_t getmaxrss(void);
185
186__thread int64_t nb_frames_dup = 0;
187__thread uint64_t dup_warning = 1000;
188__thread int64_t nb_frames_drop = 0;
189__thread int64_t decode_error_stat[2];
190__thread unsigned nb_output_dumped = 0;
191
192__thread int want_sdp = 1;
193
195__thread AVIOContext *progress_avio = NULL;
196
197__thread uint8_t *subtitle_out;
198
199__thread InputStream **input_streams = NULL;
200__thread int nb_input_streams = 0;
201__thread InputFile **input_files = NULL;
202__thread int nb_input_files = 0;
203
204__thread OutputStream **output_streams = NULL;
205__thread int nb_output_streams = 0;
206__thread OutputFile **output_files = NULL;
207__thread int nb_output_files = 0;
208
210__thread int nb_filtergraphs;
211
212__thread int64_t last_time = -1;
213__thread int64_t keyboard_last_time = 0;
214__thread int first_report = 1;
215__thread int qp_histogram[52];
216
217void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL;
218
219extern int opt_map(void *optctx, const char *opt, const char *arg);
220extern int opt_map_channel(void *optctx, const char *opt, const char *arg);
221extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg);
222extern int opt_data_frames(void *optctx, const char *opt, const char *arg);
223extern int opt_progress(void *optctx, const char *opt, const char *arg);
224extern int opt_target(void *optctx, const char *opt, const char *arg);
225extern int opt_vsync(void *optctx, const char *opt, const char *arg);
226extern int opt_abort_on(void *optctx, const char *opt, const char *arg);
227extern int opt_stats_period(void *optctx, const char *opt, const char *arg);
228extern int opt_qscale(void *optctx, const char *opt, const char *arg);
229extern int opt_profile(void *optctx, const char *opt, const char *arg);
230extern int opt_filter_complex(void *optctx, const char *opt, const char *arg);
231extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg);
232extern int opt_attach(void *optctx, const char *opt, const char *arg);
233extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
234extern int opt_video_codec(void *optctx, const char *opt, const char *arg);
235extern int opt_sameq(void *optctx, const char *opt, const char *arg);
236extern int opt_timecode(void *optctx, const char *opt, const char *arg);
237extern int opt_vstats_file(void *optctx, const char *opt, const char *arg);
238extern int opt_vstats(void *optctx, const char *opt, const char *arg);
239extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
240extern int opt_old2new(void *optctx, const char *opt, const char *arg);
241extern int opt_streamid(void *optctx, const char *opt, const char *arg);
242extern int opt_bitrate(void *optctx, const char *opt, const char *arg);
243extern int show_hwaccels(void *optctx, const char *opt, const char *arg);
244extern int opt_video_filters(void *optctx, const char *opt, const char *arg);
245extern int opt_audio_frames(void *optctx, const char *opt, const char *arg);
246extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg);
247extern int opt_audio_codec(void *optctx, const char *opt, const char *arg);
248extern int opt_channel_layout(void *optctx, const char *opt, const char *arg);
249extern int opt_preset(void *optctx, const char *opt, const char *arg);
250extern int opt_audio_filters(void *optctx, const char *opt, const char *arg);
251extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg);
252extern int opt_video_channel(void *optctx, const char *opt, const char *arg);
253extern int opt_video_standard(void *optctx, const char *opt, const char *arg);
254extern int opt_sdp_file(void *optctx, const char *opt, const char *arg);
255#if CONFIG_VAAPI
256extern int opt_vaapi_device(void *optctx, const char *opt, const char *arg);
257#endif
258#if CONFIG_QSV
259extern int opt_qsv_device(void *optctx, const char *opt, const char *arg);
260#endif
261extern int opt_data_codec(void *optctx, const char *opt, const char *arg);
262extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg);
263extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg);
264extern int opt_filter_threads(void *optctx, const char *opt, const char *arg);
265extern __thread int input_sync;
266extern __thread int file_overwrite;
267extern __thread int no_file_overwrite;
268extern __thread int do_psnr;
269extern __thread int ignore_unknown_streams;
270extern __thread int copy_unknown_streams;
271extern __thread int recast_media;
272
273#if HAVE_TERMIOS_H
274
275/* init terminal so that we can grab keys */
276__thread struct termios oldtty;
277__thread int restore_tty;
278#endif
279
280#if HAVE_THREADS
281static void free_input_threads(void);
282#endif
283
284extern volatile int handleSIGQUIT;
285extern volatile int handleSIGINT;
286extern volatile int handleSIGTERM;
287extern volatile int handleSIGXCPU;
288extern volatile int handleSIGPIPE;
289
290extern __thread long globalSessionId;
291extern void cancelSession(long sessionId);
292extern int cancelRequested(long sessionId);
293
294/* sub2video hack:
295 Convert subtitles to video with alpha to insert them in filter graphs.
296 This is a temporary solution until libavfilter gets real subtitles support.
297 */
298
/* (Re)allocate the stream's sub2video frame as an all-zero (fully
 * transparent black) RGB32 canvas, sized from the decoder context when
 * available, otherwise from the preconfigured sub2video dimensions.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): the signature line (upstream:
 * static int sub2video_get_blank_frame(InputStream *ist)) was lost in
 * extraction — confirm against upstream fftools/ffmpeg.c. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* prefer the decoder's dimensions; fall back to the configured size */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* zero the pixel data: RGB32 all-zero is fully transparent */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
313
314static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
315 AVSubtitleRect *r)
316{
317 uint32_t *pal, *dst2;
318 uint8_t *src, *src2;
319 int x, y;
320
321 if (r->type != SUBTITLE_BITMAP) {
322 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
323 return;
324 }
325 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
326 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
327 r->x, r->y, r->w, r->h, w, h
328 );
329 return;
330 }
331
332 dst += r->y * dst_linesize + r->x * 4;
333 src = r->data[0];
334 pal = (uint32_t *)r->data[1];
335 for (y = 0; y < r->h; y++) {
336 dst2 = (uint32_t *)dst;
337 src2 = src;
338 for (x = 0; x < r->w; x++)
339 *(dst2++) = pal[*(src2++)];
340 dst += dst_linesize;
341 src += r->linesize[0];
342 }
343}
344
345static void sub2video_push_ref(InputStream *ist, int64_t pts)
346{
347 AVFrame *frame = ist->sub2video.frame;
348 int i;
349 int ret;
350
351 av_assert1(frame->data[0]);
352 ist->sub2video.last_pts = frame->pts = pts;
353 for (i = 0; i < ist->nb_filters; i++) {
354 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
355 AV_BUFFERSRC_FLAG_KEEP_REF |
356 AV_BUFFERSRC_FLAG_PUSH);
357 if (ret != AVERROR_EOF && ret < 0)
358 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
359 av_err2str(ret));
360 }
361}
362
363void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
364{
365 AVFrame *frame = ist->sub2video.frame;
366 int8_t *dst;
367 int dst_linesize;
368 int num_rects, i;
369 int64_t pts, end_pts;
370
371 if (!frame)
372 return;
373 if (sub) {
374 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
375 AV_TIME_BASE_Q, ist->st->time_base);
376 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
377 AV_TIME_BASE_Q, ist->st->time_base);
378 num_rects = sub->num_rects;
379 } else {
380 /* If we are initializing the system, utilize current heartbeat
381 PTS as the start time, and show until the following subpicture
382 is received. Otherwise, utilize the previous subpicture's end time
383 as the fall-back value. */
384 pts = ist->sub2video.initialize ?
385 heartbeat_pts : ist->sub2video.end_pts;
386 end_pts = INT64_MAX;
387 num_rects = 0;
388 }
389 if (sub2video_get_blank_frame(ist) < 0) {
390 av_log(ist->dec_ctx, AV_LOG_ERROR,
391 "Impossible to get a blank canvas.\n");
392 return;
393 }
394 dst = frame->data [0];
395 dst_linesize = frame->linesize[0];
396 for (i = 0; i < num_rects; i++)
397 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
398 sub2video_push_ref(ist, pts);
399 ist->sub2video.end_pts = end_pts;
400 ist->sub2video.initialize = 0;
401}
402
/* Re-push the currently displayed subpicture for every sub2video stream in
 * the same input file whenever a frame is read at the given pts, so
 * overlay-style filters never starve while waiting for a subtitle frame. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* only re-push if a downstream filter actually failed a request */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
434
/* End-of-stream flush for the sub2video machinery: emit a final blank frame
 * if a subpicture is still being displayed, then send EOF (a NULL frame) to
 * every attached buffer source.
 * NOTE(review): the signature line (upstream:
 * static void sub2video_flush(InputStream *ist)) was lost in extraction —
 * confirm against upstream fftools/ffmpeg.c. */
{
    int i;
    int ret;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
        /* EOF is expected here; only warn on real errors */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
448
449/* end of sub2video hack */
450
/* Restore the terminal settings saved by term_init(), if they were changed.
 * Kept minimal so it is usable from signal-handling paths (hence the name). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
458
/* Terminal teardown used on normal exit paths. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* NOTE(review): upstream ffmpeg calls term_exit_sigsafe() here; the call
       appears to have been lost in extraction — confirm against upstream
       fftools/ffmpeg.c, otherwise the tty state is never restored. */
}
464
465static volatile int received_sigterm = 0;
466static volatile int received_nb_signals = 0;
467__thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
468__thread int ffmpeg_exited = 0;
469__thread int main_ffmpeg_return_code = 0;
470__thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
471extern __thread int longjmp_value;
472
473static void
475{
476 received_sigterm = sig;
479}
480
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the same
 * sigterm_handler used for POSIX signals. Registered by term_init(). */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
512
513#ifdef __linux__
514#define SIGNAL(sig, func) \
515 do { \
516 action.sa_handler = func; \
517 sigaction(sig, &action, NULL); \
518 } while (0)
519#else
520#define SIGNAL(sig, func) \
521 signal(sig, func)
522#endif
523
/* Install signal handlers and, where termios is available, switch the
 * terminal to a raw-ish mode so single keypresses can be read without
 * blocking. Handler installation is gated on the ffmpeg-kit handleSIG*
 * flags so the host application can keep its own handlers. */
void term_init(void)
{
#if defined __linux__
    #if defined __aarch64__ || defined __amd64__ || defined __x86_64__
    struct sigaction action = {0};
    #else
    struct sigaction action = {{0}};
    #endif

    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* remember the original settings so term_exit() can restore them */
            oldtty = tty;
            restore_tty = 1;

            /* no echo, no canonical line buffering, 8-bit characters,
               byte-at-a-time reads (VMIN=1, VTIME=0) */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                             |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        if (handleSIGQUIT == 1) {
            SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
        }
    }
#endif

    if (handleSIGINT == 1) {
        SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    }
    if (handleSIGTERM == 1) {
        SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    }
#ifdef SIGXCPU
    if (handleSIGXCPU == 1) {
        SIGNAL(SIGXCPU, sigterm_handler);
    }
#endif
#ifdef SIGPIPE
    if (handleSIGPIPE == 1) {
        SIGNAL(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
    }
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
586
/* read a key without blocking */
/* Returns the next byte available on stdin, or -1 (or a read() error code
 * on the termios path) when nothing is pending. Three platform variants:
 * select()+read() with termios, PeekNamedPipe for Windows pipes, and
 * kbhit()/getch() for a Windows console. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* zero timeout: poll stdin, never block */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* lazily detect whether stdin is a console or a pipe */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
638
639int decode_interrupt_cb(void *ctx);
640
/* AVIO interrupt callback: requests that blocking I/O abort once more
 * termination signals have been received than init completions recorded.
 * NOTE(review): the signature line (upstream:
 * int decode_interrupt_cb(void *ctx)) was lost in extraction — confirm. */
{
    return received_nb_signals > atomic_load(&transcode_init_done);
}
645
646__thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
647
/* Release every resource allocated during a transcode run: filter graphs
 * and their queued frames/subtitles, output and input streams and files,
 * the vstats log, option state and the network layer. Called on both
 * success and failure paths; `ret` is the exit status being reported. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* drain frames still queued for this input filter */
            if (ifilter->frame_queue) {
                AVFrame *frame;
                while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
                    av_frame_free(&frame);
                av_fifo_freep2(&ifilter->frame_queue);
            }
            av_freep(&ifilter->displaymatrix);
            /* drain subtitles queued by the sub2video hack */
            if (ist->sub2video.sub_queue) {
                AVSubtitle sub;
                while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
                    avsubtitle_free(&sub);
                av_fifo_freep2(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_channel_layout_uninit(&ofilter->ch_layout);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++)
        /* NOTE(review): the loop body (upstream: of_close(&output_files[i]);)
           was lost in extraction — confirm against upstream fftools/ffmpeg.c;
           as written the next for-loop parses as this loop's body. */

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): the declaration of `ost` (upstream:
           OutputStream *ost = output_streams[i];) was lost in extraction —
           the references below depend on it. */

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_packet_free(&ost->pkt);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        /* drain packets still waiting to be muxed */
        if (ost->muxing_queue) {
            AVPacket *pkt;
            while (av_fifo_read(ost->muxing_queue, &pkt, 1) >= 0)
                av_packet_free(&pkt);
            av_fifo_freep2(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_packet_free(&input_files[i]->pkt);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_packet_free(&ist->pkt);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);
    av_freep(&filter_nbthreads);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    /* report why we are exiting: signal, ffmpeg-kit cancel, or failure */
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (cancelRequested(globalSessionId)) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
793
794void remove_avoptions(AVDictionary **a, AVDictionary *b)
795{
796 const AVDictionaryEntry *t = NULL;
797
798 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
799 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
800 }
801}
802
803void assert_avoptions(AVDictionary *m)
804{
805 const AVDictionaryEntry *t;
806 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
807 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
808 exit_program(1);
809 }
810}
811
/* Abort the run when an experimental codec was requested without opting in.
 * The codec pointer and encoder/decoder flag are unused in this build. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
816
/* With -benchmark_all, log the CPU/real-time deltas since the previous call
 * under the printf-style label fmt; with fmt == NULL, only refresh the
 * reference timestamp (current_time). */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        /* NOTE(review): extraction dropped the declaration of `t` (upstream:
           BenchmarkTimeStamps t = get_benchmark_time_stamps();) and the
           av_log argument lines carrying the user/sys/real deltas — confirm
           against upstream fftools/ffmpeg.c. */
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            av_log(NULL, AV_LOG_INFO,
                   "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
        }
        current_time = t;
    }
}
837
/* NOTE(review): extraction dropped lines here — upstream this is
 * static void close_output_stream(OutputStream *ost), declaring
 * OutputFile *of = output_files[ost->file_index]; and setting
 * ost->finished = ENCODER_FINISHED | MUXER_FINISHED; before the if —
 * confirm against upstream fftools/ffmpeg.c. */
{
    /* time base actually used for this stream's packets */
    AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;

    /* with -shortest, clamp the file's recording time to this stream's end */
    if (of->shortest) {
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
849
850/*
851 * Send a single packet to the output, applying any bitstream filters
852 * associated with the output stream. This may result in any number
853 * of packets actually being written, depending on what bitstream
854 * filters are applied. The supplied packet is consumed and will be
855 * blank (as if newly-allocated) when this function returns.
856 *
857 * If eof is set, instead indicate EOF to all bitstream filters and
858 * therefore flush any delayed packets to the output. A blank packet
859 * must be supplied in this case.
860 */
861static void output_packet(OutputFile *of, AVPacket *pkt,
862 OutputStream *ost, int eof)
863{
864 int ret = 0;
865
866 /* apply the output bitstream filters */
867 if (ost->bsf_ctx) {
868 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
869 if (ret < 0)
870 goto finish;
871 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
872 of_write_packet(of, pkt, ost, 0);
873 if (ret == AVERROR(EAGAIN))
874 ret = 0;
875 } else if (!eof)
876 of_write_packet(of, pkt, ost, 0);
877
878finish:
879 if (ret < 0 && ret != AVERROR_EOF) {
880 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
881 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
882 if(exit_on_error)
883 exit_program(1);
884 }
885}
886
/* Returns 0 when the configured recording time for this output has been
 * reached (the stream should stop producing), 1 otherwise.
 * NOTE(review): extraction dropped the signature (upstream:
 * static int check_recording_time(OutputStream *ost)), the declaration
 * OutputFile *of = output_files[ost->file_index]; and the
 * close_output_stream(ost); call inside the if — confirm against upstream
 * fftools/ffmpeg.c. */
{

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        return 0;
    }
    return 1;
}
899
/* Rescale frame->pts from the filter-sink time base to the encoder time
 * base (offset by the output file's start time) and return the same value
 * as a higher-precision double, nudged off exact midpoints to stabilize
 * later rounding.
 * NOTE(review): the first signature line (upstream: static double
 * adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,) was
 * lost in extraction — confirm against upstream fftools/ffmpeg.c. */
                                          AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational filter_tb = av_buffersink_get_time_base(filter);
        AVRational tb = enc->time_base;
        /* temporarily widen the denominator for extra fractional precision */
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
        float_pts /= 1 << extra_bits;
        // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
943
944static int init_output_stream(OutputStream *ost, AVFrame *frame,
945 char *error, int error_len);
946
947static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
948 unsigned int fatal)
949{
950 int ret = AVERROR_BUG;
951 char error[1024] = {0};
952
953 if (ost->initialized)
954 return 0;
955
956 ret = init_output_stream(ost, frame, error, sizeof(error));
957 if (ret < 0) {
958 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
959 ost->file_index, ost->index, error);
960
961 if (fatal)
962 exit_program(1);
963 }
964
965 return ret;
966}
967
968static double psnr(double d)
969{
970 return -10.0 * log10(d);
971}
972
/* Pull encoder quality/error side data out of an encoded packet into the
 * OutputStream and, when write_vstats is set, append one line per packet to
 * the -vstats log (format must stay byte-exact across versions). */
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
{
    const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                                NULL);
    AVCodecContext *enc = ost->enc_ctx;
    int64_t frame_number;
    double ti1, bitrate, avg_bitrate;

    /* side data as read here: bytes 0-3 quality (LE32), byte 4 picture
       type, byte 5 number of 64-bit error values starting at offset 8 */
    ost->quality   = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;
    }

    if (!write_vstats)
        return;

    /* this is executed just the first time update_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    frame_number = ost->packets_encoded;
    /* vstats_version >= 2 prefixes each line with output file/stream ids */
    if (vstats_version <= 1) {
        fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    } else {
        fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    }

    if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

    fprintf(vstats_file,"f_size= %6d ", pkt->size);
    /* compute pts value */
    ti1 = pkt->dts * av_q2d(ost->mux_timebase);
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate     = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
    avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
    fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
    fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
}
1027
1028static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
1029{
1030 AVCodecContext *enc = ost->enc_ctx;
1031 AVPacket *pkt = ost->pkt;
1032 const char *type_desc = av_get_media_type_string(enc->codec_type);
1033 const char *action = frame ? "encode" : "flush";
1034 int ret;
1035
1036 if (frame) {
1037 ost->frames_encoded++;
1038
1039 if (debug_ts) {
1040 av_log(NULL, AV_LOG_INFO, "encoder <- type:%s "
1041 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1042 type_desc,
1043 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1044 enc->time_base.num, enc->time_base.den);
1045 }
1046 }
1047
1048 update_benchmark(NULL);
1049
1050 ret = avcodec_send_frame(enc, frame);
1051 if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
1052 av_log(NULL, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
1053 type_desc);
1054 return ret;
1055 }
1056
1057 while (1) {
1058 ret = avcodec_receive_packet(enc, pkt);
1059 update_benchmark("%s_%s %d.%d", action, type_desc,
1060 ost->file_index, ost->index);
1061
1062 /* if two pass, output log on success and EOF */
1063 if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
1064 fprintf(ost->logfile, "%s", enc->stats_out);
1065
1066 if (ret == AVERROR(EAGAIN)) {
1067 av_assert0(frame); // should never happen during flushing
1068 return 0;
1069 } else if (ret == AVERROR_EOF) {
1070 output_packet(of, pkt, ost, 1);
1071 return ret;
1072 } else if (ret < 0) {
1073 av_log(NULL, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
1074 return ret;
1075 }
1076
1077 if (debug_ts) {
1078 av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
1079 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
1080 "duration:%s duration_time:%s\n",
1081 type_desc,
1082 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
1083 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
1084 av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
1085 }
1086
1087 av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1088
1089 if (debug_ts) {
1090 av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
1091 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
1092 "duration:%s duration_time:%s\n",
1093 type_desc,
1094 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
1095 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
1096 av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
1097 }
1098
1099 if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
1101
1102 ost->packets_encoded++;
1103
1104 output_packet(of, pkt, ost, 0);
1105 }
1106
1107 av_assert0(0);
1108}
1109
                         AVFrame *frame)
{
    /* Encode one filtered audio frame for this output stream.
     * NOTE(review): the opening line of this definition (the
     * "static void do_audio_out(OutputFile *of, OutputStream *ost," header)
     * is missing from this chunk — verify against upstream. */
    int ret;

    adjust_frame_pts_to_encoder_tb(of, ost, frame);

    /* stop once the frame falls past the requested recording time (-t) */
    if (!check_recording_time(ost))
        return;

    /* with no usable pts (or a negative audio_sync_method) fall back to the
     * stream's running sample counter, then advance it by this frame */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;

    ret = encode_frame(of, ost, frame);
    if (ret < 0)
        exit_program(1);
}
1129
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    /* Encode one AVSubtitle and mux the resulting packet(s).
     * NOTE(review): the opening line of this definition (the
     * "static void do_subtitle_out(OutputFile *of," header) is missing from
     * this chunk — verify against upstream. */
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared fixed-size encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
    /* NOTE(review): the statement belonging to the condition above appears
     * to be missing from this chunk (upstream subtracts the output file's
     * start_time from pts here) — as written, the if governs the for loop.
     * Verify against upstream. */
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second (DVB "clear") pass encodes an empty rectangle list */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        /* restore the rectangles hidden for the clear pass */
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        /* wrap the encoded payload in the reusable packet; data points into
         * subtitle_out, so the packet does not own its buffer */
        av_packet_unref(pkt);
        pkt->data = subtitle_out;
        pkt->size = subtitle_out_size;
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        output_packet(of, pkt, ost, 0);
    }
}
1212
1213/* May modify/reset next_picture */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    /* Emit one filtered video frame (or flush with next_picture == NULL),
     * applying the stream's vsync policy: possibly dropping the frame or
     * duplicating it nb_frames times to hit the target frame rate. */
    int ret;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int64_t nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    double sync_ipts = AV_NOPTS_VALUE;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    init_output_stream_wrapper(ost, next_picture, 1);
    sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* nominal frame duration in encoder time-base units, from the sink's
     * frame rate, possibly tightened by the forced output frame rate */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* with a pass-through filter chain, prefer the input packet duration */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        if (delta0 < 0 &&
            delta > 0 &&
            /* NOTE(review): a condition line appears to be missing here
             * (upstream also excludes VSYNC_PASSTHROUGH) — verify */
            ost->vsync_method != VSYNC_DROP) {
            /* frame is late but still within one duration: clip it forward */
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (ost->vsync_method) {
        case VSYNC_VSCFR:
            /* first frame of variable-start CFR: don't pad with duplicates */
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = llrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = llrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = llrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* keep a short history of nb0 values for the flush-time mid_pred above */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        /* NOTE(review): a statement appears to be missing here (upstream
         * increments nb_frames_drop) — verify against upstream */
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
            /* NOTE(review): a statement appears to be missing here (upstream
             * increments nb_frames_drop before returning) — verify */
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;
    ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        int forced_keyframe = 0;
        double pts_time;

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame->buf[0]) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
            in_picture->pts != AV_NOPTS_VALUE)
            ost->forced_kf_ref_pts = in_picture->pts;

        /* decide whether this frame must be forced to a keyframe, from
         * -force_key_frames times, expression, or "source" modes */
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            ost->forced_kf_index++;
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            /* NOTE(review): several lines of this expression-evaluation
             * branch (av_expr_eval arguments, the ff_dlog argument list and
             * the expr_const_values bookkeeping) are missing from this
             * chunk — verify against upstream before relying on it */
            double res;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    res);
            if (res) {
                forced_keyframe = 1;
            }

        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1
                   && !i) {
            forced_keyframe = 1;
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
                   && !i) {
            forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
            ost->dropped_keyframe = 0;
        }

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

        ret = encode_frame(of, ost, in_picture);
        if (ret < 0)
            exit_program(1);

        ost->sync_opts++;
        ost->frame_number++;
    }

    /* remember this frame so it can be duplicated on the next call */
    av_frame_unref(ost->last_frame);
    if (next_picture)
        av_frame_move_ref(ost->last_frame, next_picture);
}
1429
{
    /* Mark an output stream as finished.
     * NOTE(review): this definition's header line and at least two interior
     * lines (the declarations/assignments for 'of' and the finished flags)
     * are missing from this chunk — verify against upstream. */
    AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;

    /* with -shortest, clamp the whole file's recording time to the point
     * where this stream ended */
    if (of->shortest) {
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
1442
static int reap_filters(int flush)
{
    /* Drain every configured filtergraph sink and hand the frames to the
     * appropriate do_video_out()/do_audio_out(). With flush set, EOF from a
     * video sink triggers an encoder flush. Always returns 0. */
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* NOTE(review): a line appears to be missing here (upstream declares
         * OutputFile *of = output_files[ost->file_index]; which the calls
         * below rely on) — verify against upstream */
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        filtered_frame = ost->filtered_frame;

        while (1) {
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* a NULL frame tells do_video_out() to flush the encoder */
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                do_video_out(of, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1525
static void print_final_stats(int64_t total_size)
{
    /* Print the end-of-run summary: per-type byte totals with muxing
     * overhead at INFO level, then per-stream demux/decode and encode/mux
     * statistics at VERBOSE level. Pure reporting — no state is modified. */
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* accumulate payload bytes per media type across all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
        /* pass1_used stays set only if every stream ran as pure pass 1 */
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw stream payloads */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, av_get_media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, av_get_media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* warn (once) when nothing at all was written */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1642
1643static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1644{
1645 AVFormatContext *oc = NULL;
1646 AVCodecContext *enc = NULL;
1647 OutputStream *ost = NULL;
1648 int64_t pts = INT64_MIN + 1;
1649 int vid, i;
1650
1651 int64_t frame_number = 0;
1652 float fps = 0;
1653 float quality = 0;
1654 int64_t total_size = 0;
1655 int seconds = 0;
1656 double bitrate = 0.0;
1657 double speed = 0.0;
1658
1659 float t = (cur_time-timer_start) / 1000000.0;
1660
1661 oc = output_files[0]->ctx;
1662
1663 // 1. calculate size
1664 total_size = avio_size(oc->pb);
1665 if (total_size <= 0) {
1666 total_size = avio_tell(oc->pb);
1667 }
1668
1669 vid = 0;
1670 for (i = 0; i < nb_output_streams; i++) {
1671 ost = output_streams[i];
1672 enc = ost->enc_ctx;
1673
1674 if (!ost->stream_copy) {
1675
1676 // 2. extract quality
1677 quality = ost->quality / (float) FF_QP2LAMBDA;
1678 }
1679
1680 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1681
1682 // 3. extract frame number
1683 frame_number = ost->frame_number;
1684
1685 // 4. calculate fps
1686 fps = t > 1 ? frame_number / t : 0;
1687 }
1688
1689 // 5. calculate time
1690 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1691 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1692 ost->st->time_base, AV_TIME_BASE_Q));
1693
1694 vid = 1;
1695 }
1696
1697 // 6. calculate time, with microseconds to milliseconds conversion
1698 seconds = FFABS(pts) / 1000;
1699
1700 // 7. calculating kbit/s value
1701 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1702
1703 // 9. calculate processing speed = processed stream duration/operation duration
1704 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1705
1706 // FORWARD DATA
1707 if (report_callback != NULL) {
1708 report_callback(frame_number, fps, quality, total_size, seconds, bitrate, speed);
1709 }
1710}
1711
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    /* Build and emit the periodic "frame= ... fps= ... time= ..." status
     * line (buf) and the machine-readable -progress key=value output
     * (buf_script); on the last report also print the final stats. */
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    /* rate-limit intermediate reports to one per stats_period */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        }
        /* NOTE(review): the second half of this condition is missing from
         * this chunk — as written, 'return;' is the if body. Verify against
         * upstream. */
        if (((cur_time - last_time) < stats_period && !first_report) ||
            return;
        last_time = cur_time;
    }

    /* ffmpeg-kit: push the same figures to the statistics callback */
    forward_report(is_last_report, timer_start, cur_time);

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* secondary video streams only contribute their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* first video stream drives frame/fps/qp-hist/PSNR reporting */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;
            int64_t frame_number = ost->frame_number;

            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
                       frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are quarter-sized */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            if (copy_ts) {
                /* NOTE(review): the bodies of both copy_ts_first_pts
                 * conditionals are missing from this chunk — verify against
                 * upstream. */
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
            }
        }

        /* NOTE(review): the body of this conditional is missing from this
         * chunk (upstream folds ost->last_dropped into nb_frames_drop) —
         * verify against upstream. */
        if (is_last_report)
    }

    /* split |pts| (µs) into h:m:s plus the microsecond remainder */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    /* NOTE(review): the guard normally preceding this dup/drop output
     * appears to be missing from this chunk — verify against upstream. */
    av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            av_log(NULL, AV_LOG_STDERR, "%s    %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);
    }
    av_bprint_finalize(&buf, NULL);

    /* mirror the report to the -progress destination, if any */
    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    first_report = 0;

    if (is_last_report)
        print_final_stats(total_size);
}
1918
1919static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1920{
1921 int ret;
1922
1923 // We never got any input. Set a fake format, which will
1924 // come from libavformat.
1925 ifilter->format = par->format;
1926 ifilter->sample_rate = par->sample_rate;
1927 ifilter->width = par->width;
1928 ifilter->height = par->height;
1929 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1930 ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout);
1931 if (ret < 0)
1932 return ret;
1933
1934 return 0;
1935}
1936
static void flush_encoders(void)
{
    /* At end of input: send the flush signal (NULL frame) to every active
     * encoder, initializing streams that never received data so they can at
     * least be finalized. */
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        /* NOTE(review): a line appears to be missing here (upstream declares
         * OutputFile *of = output_files[ost->file_index]; used below) —
         * verify against upstream */

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* make every filtergraph input usable from codecpar data */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0 &&
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
                        exit_program(1);
                    }
                }

                /* NOTE(review): the condition guarding this continue and the
                 * configure_filtergraph() call that sets 'ret' are missing
                 * from this chunk — verify against upstream */
                    continue;

                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

            }

            init_output_stream_wrapper(ost, NULL, 1);
        }

        /* only audio/video encoders support draining */
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        /* a NULL frame drains the encoder; anything but EOF is a failure */
        ret = encode_frame(of, ost, NULL);
        if (ret != AVERROR_EOF)
            exit_program(1);
    }
}
1992
1993/*
1994 * Check whether a packet from ist should be written into ost at this time
1995 */
{
    /* Return 1 when a packet from 'ist' should be stream-copied into 'ost'
     * at this time, 0 otherwise.
     * NOTE(review): this definition's header line and the declaration of
     * 'of' are missing from this chunk — verify against upstream. */
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* this output stream must actually be fed by that input stream */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished & MUXER_FINISHED)
        return 0;

    /* honor the output file's -ss: drop packets before its start time */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
2012
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    /* Copy one input packet (or, when pkt is NULL, flush) to the output
     * stream without re-encoding: reference the data, rescale timestamps to
     * the muxer time base, and apply -ss/-t/-copyts trimming.
     * NOTE(review): a line appears to be missing here (upstream declares
     * OutputFile *of = output_files[ost->file_index]; used throughout) —
     * verify against upstream. */
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;

    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, opkt, ost, 1);
        return;
    }

    /* NOTE(review): the remainder of this condition is missing from this
     * chunk — as written, 'return;' is the if body. Verify against
     * upstream. */
    if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
        return;

    /* before the copy starts, drop packets earlier than the seek point */
    if (!ost->streamcopy_started && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output-file -t reached
     * NOTE(review): a statement appears missing inside this block (upstream
     * closes the output stream here) — verify against upstream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        return;
    }

    /* input-file -t reached (same NOTE as above for the inner block) */
    if (f->recording_time != INT64_MAX) {
        start_time = 0;
        if (copy_ts) {
            start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
            start_time += start_at_zero ? 0 : f->ctx->start_time;
        }
        if (ist->pts >= f->recording_time + start_time) {
            return;
        }
    }

    /* reference (not copy) the payload into the output packet */
    if (av_packet_ref(opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        /* NOTE(review): the final argument line of this av_rescale_delta()
         * call is missing from this chunk — verify against upstream */
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                     (AVRational){1, ist->dec_ctx->sample_rate}, duration,
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
    } else
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt->dts -= ost_tb_start_time;

    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    ost->sync_opts += opkt->duration;

    output_packet(of, opkt, ost, 0);

    ost->streamcopy_started = 1;
}
2089
2091{
2092 AVCodecContext *dec = ist->dec_ctx;
2093
2094 if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
2095 char layout_name[256];
2096
2097 if (dec->ch_layout.nb_channels > ist->guess_layout_max)
2098 return 0;
2099 av_channel_layout_default(&dec->ch_layout, dec->ch_layout.nb_channels);
2100 if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
2101 return 0;
2102 av_channel_layout_describe(&dec->ch_layout, layout_name, sizeof(layout_name));
2103 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2104 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2105 }
2106 return 1;
2107}
2108
2109static void check_decode_result(InputStream *ist, int *got_output, int ret)
2110{
2111 if (*got_output || ret<0)
2112 decode_error_stat[ret<0] ++;
2113
2114 if (ret < 0 && exit_on_error)
2115 exit_program(1);
2116
2117 if (*got_output && ist) {
2118 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2119 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2120 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2121 if (exit_on_error)
2122 exit_program(1);
2123 }
2124 }
2125}
2126
2127// Filters can be configured only if the formats of all inputs are known.
2129{
2130 int i;
2131 for (i = 0; i < fg->nb_inputs; i++) {
2132 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2133 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2134 return 0;
2135 }
2136 return 1;
2137}
2138
2139static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
2140{
2141 FilterGraph *fg = ifilter->graph;
2142 AVFrameSideData *sd;
2143 int need_reinit, ret;
2144 int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
2145
2146 if (keep_reference)
2147 buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
2148
2149 /* determine if the parameters for this input changed */
2150 need_reinit = ifilter->format != frame->format;
2151
2152 switch (ifilter->ist->st->codecpar->codec_type) {
2153 case AVMEDIA_TYPE_AUDIO:
2154 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2155 av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
2156 break;
2157 case AVMEDIA_TYPE_VIDEO:
2158 need_reinit |= ifilter->width != frame->width ||
2159 ifilter->height != frame->height;
2160 break;
2161 }
2162
2163 if (!ifilter->ist->reinit_filters && fg->graph)
2164 need_reinit = 0;
2165
2166 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2167 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2168 need_reinit = 1;
2169
2170 if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
2171 if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
2172 need_reinit = 1;
2173 } else if (ifilter->displaymatrix)
2174 need_reinit = 1;
2175
2176 if (need_reinit) {
2177 ret = ifilter_parameters_from_frame(ifilter, frame);
2178 if (ret < 0)
2179 return ret;
2180 }
2181
2182 /* (re)init the graph if possible, otherwise buffer the frame and return */
2183 if (need_reinit || !fg->graph) {
2185 AVFrame *tmp = av_frame_clone(frame);
2186 if (!tmp)
2187 return AVERROR(ENOMEM);
2188
2189 ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
2190 if (ret < 0)
2191 av_frame_free(&tmp);
2192
2193 return ret;
2194 }
2195
2196 ret = reap_filters(1);
2197 if (ret < 0 && ret != AVERROR_EOF) {
2198 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2199 return ret;
2200 }
2201
2203 if (ret < 0) {
2204 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2205 return ret;
2206 }
2207 }
2208
2209 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
2210 if (ret < 0) {
2211 if (ret != AVERROR_EOF)
2212 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2213 return ret;
2214 }
2215
2216 return 0;
2217}
2218
2219static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2220{
2221 int ret = 0;
2222
2223 ifilter->eof = 1;
2224
2225 if (ifilter->filter) {
2226
2227 /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
2229 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2230 }
2231 if (ret < 0)
2232 return ret;
2233 } else {
2234 // the filtergraph was never configured
2235 if (ifilter->format < 0) {
2236 ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2237 if (ret < 0)
2238 return ret;
2239 }
2240 if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2241 av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2242 return AVERROR_INVALIDDATA;
2243 }
2244 }
2245
2246 return 0;
2247}
2248
2249// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2250// There is the following difference: if you got a frame, you must call
2251// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2252// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2253static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2254{
2255 int ret;
2256
2257 *got_frame = 0;
2258
2259 if (pkt) {
2260 ret = avcodec_send_packet(avctx, pkt);
2261 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2262 // decoded frames with avcodec_receive_frame() until done.
2263 if (ret < 0 && ret != AVERROR_EOF)
2264 return ret;
2265 }
2266
2267 ret = avcodec_receive_frame(avctx, frame);
2268 if (ret < 0 && ret != AVERROR(EAGAIN))
2269 return ret;
2270 if (ret >= 0)
2271 *got_frame = 1;
2272
2273 return 0;
2274}
2275
2277{
2278 int i, ret;
2279
2280 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2281 for (i = 0; i < ist->nb_filters; i++) {
2282 ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2283 if (ret == AVERROR_EOF)
2284 ret = 0; /* ignore */
2285 if (ret < 0) {
2286 av_log(NULL, AV_LOG_ERROR,
2287 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2288 break;
2289 }
2290 }
2291 return ret;
2292}
2293
2294static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2295 int *decode_failed)
2296{
2297 AVFrame *decoded_frame = ist->decoded_frame;
2298 AVCodecContext *avctx = ist->dec_ctx;
2299 int ret, err = 0;
2300 AVRational decoded_frame_tb;
2301
2302 update_benchmark(NULL);
2304 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2305 if (ret < 0)
2306 *decode_failed = 1;
2307
2308 if (ret >= 0 && avctx->sample_rate <= 0) {
2309 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2310 ret = AVERROR_INVALIDDATA;
2311 }
2312
2313 if (ret != AVERROR_EOF)
2315
2316 if (!*got_output || ret < 0)
2317 return ret;
2318
2319 ist->samples_decoded += decoded_frame->nb_samples;
2320 ist->frames_decoded++;
2321
2322 /* increment next_dts to use for the case where the input stream does not
2323 have timestamps or there are multiple frames in the packet */
2324 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2325 avctx->sample_rate;
2326 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2327 avctx->sample_rate;
2328
2329 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2330 decoded_frame_tb = ist->st->time_base;
2331 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2332 decoded_frame->pts = pkt->pts;
2333 decoded_frame_tb = ist->st->time_base;
2334 }else {
2335 decoded_frame->pts = ist->dts;
2336 decoded_frame_tb = AV_TIME_BASE_Q;
2337 }
2338 if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2339 pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2340 ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
2341 if (pkt)
2342 ist->prev_pkt_pts = pkt->pts;
2343 if (decoded_frame->pts != AV_NOPTS_VALUE)
2344 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2345 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2346 (AVRational){1, avctx->sample_rate});
2347 ist->nb_samples = decoded_frame->nb_samples;
2349
2350 av_frame_unref(decoded_frame);
2351 return err < 0 ? err : ret;
2352}
2353
2354static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2355 int *decode_failed)
2356{
2357 AVFrame *decoded_frame = ist->decoded_frame;
2358 int i, ret = 0, err = 0;
2359 int64_t best_effort_timestamp;
2360 int64_t dts = AV_NOPTS_VALUE;
2361
2362 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2363 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2364 // skip the packet.
2365 if (!eof && pkt && pkt->size == 0)
2366 return 0;
2367
2368 if (ist->dts != AV_NOPTS_VALUE)
2369 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2370 if (pkt) {
2371 pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2372 }
2373
2374 // The old code used to set dts on the drain packet, which does not work
2375 // with the new API anymore.
2376 if (eof) {
2377 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2378 if (!new)
2379 return AVERROR(ENOMEM);
2380 ist->dts_buffer = new;
2381 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2382 }
2383
2384 update_benchmark(NULL);
2386 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2387 if (ret < 0)
2388 *decode_failed = 1;
2389
2390 // The following line may be required in some cases where there is no parser
2391 // or the parser does not has_b_frames correctly
2392 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2393 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2394 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2395 } else
2396 av_log(ist->dec_ctx, AV_LOG_WARNING,
2397 "video_delay is larger in decoder than demuxer %d > %d.\n"
2398 "If you want to help, upload a sample "
2399 "of this file to https://streams.videolan.org/upload/ "
2400 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2401 ist->dec_ctx->has_b_frames,
2402 ist->st->codecpar->video_delay);
2403 }
2404
2405 if (ret != AVERROR_EOF)
2407
2408 if (*got_output && ret >= 0) {
2409 if (ist->dec_ctx->width != decoded_frame->width ||
2410 ist->dec_ctx->height != decoded_frame->height ||
2411 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2412 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2413 decoded_frame->width,
2414 decoded_frame->height,
2415 decoded_frame->format,
2416 ist->dec_ctx->width,
2417 ist->dec_ctx->height,
2418 ist->dec_ctx->pix_fmt);
2419 }
2420 }
2421
2422 if (!*got_output || ret < 0)
2423 return ret;
2424
2425 if(ist->top_field_first>=0)
2426 decoded_frame->top_field_first = ist->top_field_first;
2427
2428 ist->frames_decoded++;
2429
2430 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2432 if (err < 0)
2433 goto fail;
2434 }
2436
2437 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2438 *duration_pts = decoded_frame->pkt_duration;
2439
2440 if (ist->framerate.num)
2441 best_effort_timestamp = ist->cfr_next_pts++;
2442
2443 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2444 best_effort_timestamp = ist->dts_buffer[0];
2445
2446 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2447 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2448 ist->nb_dts_buffer--;
2449 }
2450
2451 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2452 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2453
2454 if (ts != AV_NOPTS_VALUE)
2455 ist->next_pts = ist->pts = ts;
2456 }
2457
2458 if (debug_ts) {
2459 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2460 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2461 ist->st->index, av_ts2str(decoded_frame->pts),
2462 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2463 best_effort_timestamp,
2464 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2465 decoded_frame->key_frame, decoded_frame->pict_type,
2466 ist->st->time_base.num, ist->st->time_base.den);
2467 }
2468
2469 if (ist->st->sample_aspect_ratio.num)
2470 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2471
2473
2474fail:
2475 av_frame_unref(decoded_frame);
2476 return err < 0 ? err : ret;
2477}
2478
2479static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2480 int *decode_failed)
2481{
2482 AVSubtitle subtitle;
2483 int free_sub = 1;
2484 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2486
2488
2489 if (ret < 0 || !*got_output) {
2490 *decode_failed = 1;
2491 if (!pkt->size)
2492 sub2video_flush(ist);
2493 return ret;
2494 }
2495
2496 if (ist->fix_sub_duration) {
2497 int end = 1;
2498 if (ist->prev_sub.got_output) {
2499 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2500 1000, AV_TIME_BASE);
2501 if (end < ist->prev_sub.subtitle.end_display_time) {
2502 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2503 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2504 ist->prev_sub.subtitle.end_display_time, end,
2505 end <= 0 ? ", dropping it" : "");
2506 ist->prev_sub.subtitle.end_display_time = end;
2507 }
2508 }
2509 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2510 FFSWAP(int, ret, ist->prev_sub.ret);
2511 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2512 if (end <= 0)
2513 goto out;
2514 }
2515
2516 if (!*got_output)
2517 return ret;
2518
2519 if (ist->sub2video.frame) {
2520 sub2video_update(ist, INT64_MIN, &subtitle);
2521 } else if (ist->nb_filters) {
2522 if (!ist->sub2video.sub_queue)
2523 ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW);
2524 if (!ist->sub2video.sub_queue)
2525 exit_program(1);
2526
2527 ret = av_fifo_write(ist->sub2video.sub_queue, &subtitle, 1);
2528 if (ret < 0)
2529 exit_program(1);
2530 free_sub = 0;
2531 }
2532
2533 if (!subtitle.num_rects)
2534 goto out;
2535
2536 ist->frames_decoded++;
2537
2538 for (i = 0; i < nb_output_streams; i++) {
2539 OutputStream *ost = output_streams[i];
2540
2541 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2543 continue;
2544
2546 }
2547
2548out:
2549 if (free_sub)
2550 avsubtitle_free(&subtitle);
2551 return ret;
2552}
2553
2555{
2556 int i, ret;
2557 /* TODO keep pts also in stream time base to avoid converting back */
2558 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2559 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2560
2561 for (i = 0; i < ist->nb_filters; i++) {
2562 ret = ifilter_send_eof(ist->filters[i], pts);
2563 if (ret < 0)
2564 return ret;
2565 }
2566 return 0;
2567}
2568
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Core per-packet driver for one input stream.
 *
 * For decoded streams it repeatedly calls the per-media-type decoder
 * (draining on pkt == NULL) and maintains the dts/pts/next_dts/next_pts
 * clocks; for stream-copied streams it only advances the clocks and hands
 * the packet to do_streamcopy() for every matching output.
 *
 * @param no_eof when set, flush decoders but do NOT send EOF to the
 *               filters (used when looping the input)
 * @return 0 when EOF was reached on this stream, non-zero otherwise
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket *avpkt = ist->pkt;

    /* initialize the stream clocks from the very first packet */
    if (!ist->saw_first_ts) {
        ist->first_dts =
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->first_dts =
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    /* work on a private reference so the caller's packet stays untouched */
    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
                                &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                &decode_failed);
            /* estimate the dts increment for the next packet */
            if (!repeating || !pkt || got_output) {
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            /* prefer the decoded frame's own duration for the pts clock */
            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                 ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    } else if (!ist->decoding_needed)
        eof_reached = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2757
2758static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2759{
2760 InputStream *ist = s->opaque;
2761 const enum AVPixelFormat *p;
2762 int ret;
2763
2764 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2765 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2766 const AVCodecHWConfig *config = NULL;
2767 int i;
2768
2769 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2770 break;
2771
2772 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2773 ist->hwaccel_id == HWACCEL_AUTO) {
2774 for (i = 0;; i++) {
2775 config = avcodec_get_hw_config(s->codec, i);
2776 if (!config)
2777 break;
2778 if (!(config->methods &
2779 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2780 continue;
2781 if (config->pix_fmt == *p)
2782 break;
2783 }
2784 }
2785 if (config && config->device_type == ist->hwaccel_device_type) {
2787 if (ret < 0) {
2788 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2789 av_log(NULL, AV_LOG_FATAL,
2790 "%s hwaccel requested for input stream #%d:%d, "
2791 "but cannot be initialized.\n",
2792 av_hwdevice_get_type_name(config->device_type),
2793 ist->file_index, ist->st->index);
2794 return AV_PIX_FMT_NONE;
2795 }
2796 continue;
2797 }
2798
2799 ist->hwaccel_pix_fmt = *p;
2800 break;
2801 }
2802 }
2803
2804 return *p;
2805}
2806
2807static int init_input_stream(int ist_index, char *error, int error_len)
2808{
2809 int ret;
2810 InputStream *ist = input_streams[ist_index];
2811
2812 if (ist->decoding_needed) {
2813 const AVCodec *codec = ist->dec;
2814 if (!codec) {
2815 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2816 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2817 return AVERROR(EINVAL);
2818 }
2819
2820 ist->dec_ctx->opaque = ist;
2821 ist->dec_ctx->get_format = get_format;
2822#if LIBAVCODEC_VERSION_MAJOR < 60
2823 AV_NOWARN_DEPRECATED({
2824 ist->dec_ctx->thread_safe_callbacks = 1;
2825 })
2826#endif
2827
2828 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2830 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2832 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2833 }
2834
2835 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2836 * audio, and video decoders such as cuvid or mediacodec */
2837 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2838
2839 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2840 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2841 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2842 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2843 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2844
2846 if (ret < 0) {
2847 snprintf(error, error_len, "Device setup failed for "
2848 "decoder on input stream #%d:%d : %s",
2849 ist->file_index, ist->st->index, av_err2str(ret));
2850 return ret;
2851 }
2852
2853 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2854 if (ret == AVERROR_EXPERIMENTAL)
2855 abort_codec_experimental(codec, 0);
2856
2857 snprintf(error, error_len,
2858 "Error while opening decoder for input stream "
2859 "#%d:%d : %s",
2860 ist->file_index, ist->st->index, av_err2str(ret));
2861 return ret;
2862 }
2864 }
2865
2866 ist->next_pts = AV_NOPTS_VALUE;
2867 ist->next_dts = AV_NOPTS_VALUE;
2868
2869 return 0;
2870}
2871
2873{
2874 if (ost->source_index >= 0)
2875 return input_streams[ost->source_index];
2876 return NULL;
2877}
2878
/* qsort(3)-compatible three-way comparison of two int64_t values. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2883
2885{
2886 AVBSFContext *ctx = ost->bsf_ctx;
2887 int ret;
2888
2889 if (!ctx)
2890 return 0;
2891
2892 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
2893 if (ret < 0)
2894 return ret;
2895
2896 ctx->time_base_in = ost->st->time_base;
2897
2898 ret = av_bsf_init(ctx);
2899 if (ret < 0) {
2900 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2901 ctx->filter->name);
2902 return ret;
2903 }
2904
2905 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2906 if (ret < 0)
2907 return ret;
2908 ost->st->time_base = ctx->time_base_out;
2909
2910 return 0;
2911}
2912
2914{
2916 InputStream *ist = get_input_stream(ost);
2917 AVCodecParameters *par_dst = ost->st->codecpar;
2918 AVCodecParameters *par_src = ost->ref_par;
2919 AVRational sar;
2920 int i, ret;
2921 uint32_t codec_tag = par_dst->codec_tag;
2922
2923 av_assert0(ist && !ost->filter);
2924
2925 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2926 if (ret >= 0)
2927 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2928 if (ret < 0) {
2929 av_log(NULL, AV_LOG_FATAL,
2930 "Error setting up codec context options.\n");
2931 return ret;
2932 }
2933
2934 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
2935 if (ret < 0) {
2936 av_log(NULL, AV_LOG_FATAL,
2937 "Error getting reference codec parameters.\n");
2938 return ret;
2939 }
2940
2941 if (!codec_tag) {
2942 unsigned int codec_tag_tmp;
2943 if (!of->format->codec_tag ||
2944 av_codec_get_id (of->format->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2945 !av_codec_get_tag2(of->format->codec_tag, par_src->codec_id, &codec_tag_tmp))
2946 codec_tag = par_src->codec_tag;
2947 }
2948
2949 ret = avcodec_parameters_copy(par_dst, par_src);
2950 if (ret < 0)
2951 return ret;
2952
2953 par_dst->codec_tag = codec_tag;
2954
2955 if (!ost->frame_rate.num)
2956 ost->frame_rate = ist->framerate;
2957
2958 if (ost->frame_rate.num)
2959 ost->st->avg_frame_rate = ost->frame_rate;
2960 else
2961 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2962
2963 ret = avformat_transfer_internal_stream_timing_info(of->format, ost->st, ist->st, copy_tb);
2964 if (ret < 0)
2965 return ret;
2966
2967 // copy timebase while removing common factors
2968 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
2969 if (ost->frame_rate.num)
2970 ost->st->time_base = av_inv_q(ost->frame_rate);
2971 else
2972 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2973 }
2974
2975 // copy estimated duration as a hint to the muxer
2976 if (ost->st->duration <= 0 && ist->st->duration > 0)
2977 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2978
2979 if (ist->st->nb_side_data) {
2980 for (i = 0; i < ist->st->nb_side_data; i++) {
2981 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2982 uint8_t *dst_data;
2983
2984 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
2985 if (!dst_data)
2986 return AVERROR(ENOMEM);
2987 memcpy(dst_data, sd_src->data, sd_src->size);
2988 }
2989 }
2990
2991 if (ost->rotate_overridden) {
2992 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
2993 sizeof(int32_t) * 9);
2994 if (sd)
2995 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
2996 }
2997
2998 switch (par_dst->codec_type) {
2999 case AVMEDIA_TYPE_AUDIO:
3000 if (audio_volume != 256) {
3001 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3002 exit_program(1);
3003 }
3004 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3005 par_dst->block_align= 0;
3006 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3007 par_dst->block_align= 0;
3008 break;
3009 case AVMEDIA_TYPE_VIDEO:
3010 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3011 sar =
3012 av_mul_q(ost->frame_aspect_ratio,
3013 (AVRational){ par_dst->height, par_dst->width });
3014 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3015 "with stream copy may produce invalid files\n");
3016 }
3017 else if (ist->st->sample_aspect_ratio.num)
3018 sar = ist->st->sample_aspect_ratio;
3019 else
3020 sar = par_src->sample_aspect_ratio;
3021 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3022 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3023 ost->st->r_frame_rate = ist->st->r_frame_rate;
3024 break;
3025 }
3026
3027 ost->mux_timebase = ist->st->time_base;
3028
3029 return 0;
3030}
3031
3033{
3034 const AVDictionaryEntry *e;
3035
3036 uint8_t *encoder_string;
3037 int encoder_string_len;
3038 int format_flags = 0;
3039 int codec_flags = ost->enc_ctx->flags;
3040
3041 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3042 return;
3043
3044 e = av_dict_get(of->opts, "fflags", NULL, 0);
3045 if (e) {
3046 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3047 if (!o)
3048 return;
3049 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3050 }
3051 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3052 if (e) {
3053 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3054 if (!o)
3055 return;
3056 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3057 }
3058
3059 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3060 encoder_string = av_mallocz(encoder_string_len);
3061 if (!encoder_string)
3062 exit_program(1);
3063
3064 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3065 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3066 else
3067 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3068 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3069 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3070 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3071}
3072
3073static void parse_forced_key_frames(char *kf, OutputStream *ost,
3074 AVCodecContext *avctx)
3075{
3076 char *p;
3077 int n = 1, i, size, index = 0;
3078 int64_t t, *pts;
3079
3080 for (p = kf; *p; p++)
3081 if (*p == ',')
3082 n++;
3083 size = n;
3084 pts = av_malloc_array(size, sizeof(*pts));
3085 if (!pts) {
3086 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3087 exit_program(1);
3088 }
3089
3090 p = kf;
3091 for (i = 0; i < n; i++) {
3092 char *next = strchr(p, ',');
3093
3094 if (next)
3095 *next++ = 0;
3096
3097 if (!memcmp(p, "chapters", 8)) {
3098
3099 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3100 int j;
3101
3102 if (avf->nb_chapters > INT_MAX - size ||
3103 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3104 sizeof(*pts)))) {
3105 av_log(NULL, AV_LOG_FATAL,
3106 "Could not allocate forced key frames array.\n");
3107 exit_program(1);
3108 }
3109 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3110 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3111
3112 for (j = 0; j < avf->nb_chapters; j++) {
3113 AVChapter *c = avf->chapters[j];
3114 av_assert1(index < size);
3115 pts[index++] = av_rescale_q(c->start, c->time_base,
3116 avctx->time_base) + t;
3117 }
3118
3119 } else {
3120
3121 t = parse_time_or_die("force_key_frames", p, 1);
3122 av_assert1(index < size);
3123 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3124
3125 }
3126
3127 p = next;
3128 }
3129
3130 av_assert0(index == size);
3131 qsort(pts, size, sizeof(*pts), compare_int64);
3132 ost->forced_kf_count = size;
3133 ost->forced_kf_pts = pts;
3134}
3135
3136static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3137{
3138 InputStream *ist = get_input_stream(ost);
3139 AVCodecContext *enc_ctx = ost->enc_ctx;
3140 AVFormatContext *oc;
3141
3142 if (ost->enc_timebase.num > 0) {
3143 enc_ctx->time_base = ost->enc_timebase;
3144 return;
3145 }
3146
3147 if (ost->enc_timebase.num < 0) {
3148 if (ist) {
3149 enc_ctx->time_base = ist->st->time_base;
3150 return;
3151 }
3152
3153 oc = output_files[ost->file_index]->ctx;
3154 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3155 }
3156
3157 enc_ctx->time_base = default_time_base;
3158}
3159
/**
 * Configure the encoder context of an output stream that will be encoded
 * (as opposed to stream-copied): frame rate, time base, sample/pixel
 * format and geometry, color properties, field order and forced keyframes.
 *
 * @param ost   output stream to initialize
 * @param frame first filtered frame, may be NULL; used to pick up color
 *              properties and field order
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    /* NOTE(review): the declaration of `of` (upstream:
     * `OutputFile *of = output_files[ost->file_index];`) was lost in this
     * extraction — restore it from the full source. */
    AVFormatContext *oc = of->ctx;
    int ret;

    /* NOTE(review): upstream calls `set_encoder_id(of, ost);` here; the
     * line is missing from this extraction. */

    if (ist) {
        dec_ctx = ist->dec_ctx;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* frame rate: explicit -r option wins, else take the filtergraph's */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        /* clamp to -fpsmax */
        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        /* snap to the nearest rate the encoder supports, unless -force_fps */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* audio parameters come from the buffersink feeding the encoder */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
        if (ret < 0)
            return ret;

        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
           && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
               (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or "
                                       "setting vsync/fps_mode to vfr\n");
        }

        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);

        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* propagate color properties from the first frame */
        if (frame) {
            enc_ctx->color_range = frame->color_range;
            enc_ctx->color_primaries = frame->color_primaries;
            enc_ctx->color_trc = frame->color_trc;
            enc_ctx->colorspace = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* NOTE(review): upstream initializes the expression constant
                 * values (FKF_N, FKF_N_FORCED, ...) here; those lines are
                 * missing from this extraction. */

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): upstream calls parse_forced_key_frames()
                 * here; the line is missing from this extraction. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* subtitle rendering size defaults to the input video dimensions */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3322
/**
 * Fully initialize one output stream: open the encoder when encoding is
 * needed (or set up stream copy), propagate side data, and initialize the
 * bitstream filters.
 *
 * @param ost       stream to initialize
 * @param frame     first AVFrame (may be NULL), forwarded to
 *                  init_output_stream_encode()
 * @param error     buffer receiving a human-readable error message
 * @param error_len size of the error buffer
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_output_stream(OutputStream *ost, AVFrame *frame,
                              char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        const AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);

        /* NOTE(review): upstream assigns
         * `ret = hw_device_setup_for_encode(ost);` here; the line is
         * missing from this extraction. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* subtitle transcoding is only supported text->text or bitmap->bitmap */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size audio encoders dictate the sink's frame size */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* copy side data produced by the encoder into the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* autorotation was already applied by the filter chain */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        /* NOTE(review): upstream assigns
         * `ret = init_output_stream_streamcopy(ost);` here; the line is
         * missing from this extraction. */
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* NOTE(review): upstream checks here whether the enclosing output file
     * can now be initialized (of_check_init()); that assignment to `ret`
     * is missing from this extraction. */
    if (ret < 0)
        return ret;

    return ret;
}
3466
3467static void report_new_stream(int input_index, AVPacket *pkt)
3468{
3469 InputFile *file = input_files[input_index];
3470 AVStream *st = file->ctx->streams[pkt->stream_index];
3471
3472 if (pkt->stream_index < file->nb_streams_warn)
3473 return;
3474 av_log(file->ctx, AV_LOG_WARNING,
3475 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3476 av_get_media_type_string(st->codecpar->codec_type),
3477 input_index, pkt->stream_index,
3478 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3479 file->nb_streams_warn = pkt->stream_index + 1;
3480}
3481
/**
 * Global transcoding setup: bind complex filtergraph outputs to source
 * streams, start framerate emulation clocks, initialize input streams and
 * stream-copy/subtitle/data output streams, discard unused programs,
 * write headers of stream-less outputs and dump the stream mapping.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* attach a source stream index to outputs of single-input filtergraphs */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->readrate || ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* on failure close every encoder context before bailing out */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        /* NOTE(review): the call that initializes the stream (upstream:
         * `ret = init_output_stream_wrapper(output_streams[i], NULL, 0);`)
         * is missing from this extraction. */
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep a program if any of its streams is used */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (output_files[i]->format->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            /* NOTE(review): the header-writing call (upstream assigns the
             * result of the output-file init check to `ret`) is missing
             * from this extraction. */
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               /* NOTE(review): the first argument (upstream:
                * `input_streams[ost->source_index]->file_index,`) is
                * missing from this extraction. */
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
3662
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* NOTE(review): the declaration of `of` (upstream:
         * `OutputFile *of = output_files[ost->file_index];`) is missing
         * from this extraction. */
        AVFormatContext *os = output_files[ost->file_index]->ctx;

        /* skip finished streams and files that hit -fs */
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        if (ost->frame_number >= ost->max_frames) {
            int j;
            for (j = 0; j < of->ctx->nb_streams; j++)
                /* NOTE(review): the loop body (upstream:
                 * `close_output_stream(output_streams[of->ost_index + j]);`)
                 * is missing from this extraction. */
            continue;
        }

        return 1;
    }

    return 0;
}
3688
3695{
3696 int i;
3697 int64_t opts_min = INT64_MAX;
3698 OutputStream *ost_min = NULL;
3699
3700 for (i = 0; i < nb_output_streams; i++) {
3701 OutputStream *ost = output_streams[i];
3702 int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3703 av_rescale_q(ost->last_mux_dts, ost->st->time_base,
3704 AV_TIME_BASE_Q);
3705 if (ost->last_mux_dts == AV_NOPTS_VALUE)
3706 av_log(NULL, AV_LOG_DEBUG,
3707 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3708 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3709
3710 if (!ost->initialized && !ost->inputs_done)
3711 return ost->unavailable ? NULL : ost;
3712
3713 if (!ost->finished && opts < opts_min) {
3714 opts_min = opts;
3715 ost_min = ost->unavailable ? NULL : ost;
3716 }
3717 }
3718 return ost_min;
3719}
3720
/* Enable or disable terminal echo on stdin; compiles to a no-op when
 * termios is unavailable. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return;

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3732
/**
 * Poll for interactive keyboard commands (rate-limited to once per 100ms)
 * and apply them: quit, verbosity changes, QP histogram, packet dumping,
 * filtergraph commands and decoder/encoder debug flags.
 *
 * @param cur_time current time in microseconds
 * @return 0 to continue transcoding, AVERROR_EXIT to stop
 */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    /* NOTE(review): the condition guarding this early exit (upstream
     * checks whether a termination signal/cancel request was received) is
     * missing from this extraction. */
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - keyboard_last_time >= 100000){
        key = read_key();
        keyboard_last_time = cur_time;
    }else
        key = -1;
    if (key == 'q') {
        av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
        return AVERROR_EXIT;
    }
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles: off -> packet dump -> packet+payload hex dump -> off */
    if (key == 'h'){
        if (do_hex_dump){
            /* NOTE(review): the reset line (upstream:
             * `do_hex_dump = do_pkt_dump = 0;`) is missing from this
             * extraction. */
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    /* 'c'/'C' sends (or queues) a command to the filtergraphs */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        av_log(NULL, AV_LOG_STDERR, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        av_log(NULL, AV_LOG_STDERR, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        av_log(NULL, AV_LOG_STDERR, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        av_log(NULL, AV_LOG_STDERR, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            av_log(NULL, AV_LOG_STDERR, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles debug modes; 'd' prompts for an explicit debug value */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            av_log(NULL, AV_LOG_STDERR, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                av_log(NULL, AV_LOG_STDERR,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->dec_ctx->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        av_log(NULL, AV_LOG_STDERR,"debug=%d\n", debug);
    }
    if (key == '?'){
        av_log(NULL, AV_LOG_STDERR, "key function\n"
                                    "? show this help\n"
                                    "+ increase verbosity\n"
                                    "- decrease verbosity\n"
                                    "c Send command to first matching filter supporting it\n"
                                    "C Send/Queue command to all matching filters\n"
                                    "D cycle through available debug modes\n"
                                    "h dump packets/hex press to cycle through the 3 states\n"
                                    "q quit\n"
                                    "s Show QP histogram\n"
        );
    }
    return 0;
}
3846
3847#if HAVE_THREADS
3848static void *input_thread(void *arg)
3849{
3850 InputFile *f = arg;
3851 AVPacket *pkt = f->pkt, *queue_pkt;
3852 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3853 int ret = 0;
3854
3855 while (1) {
3856 ret = av_read_frame(f->ctx, pkt);
3857
3858 if (ret == AVERROR(EAGAIN)) {
3859 av_usleep(10000);
3860 continue;
3861 }
3862 if (ret < 0) {
3863 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3864 break;
3865 }
3866 queue_pkt = av_packet_alloc();
3867 if (!queue_pkt) {
3868 av_packet_unref(pkt);
3869 av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
3870 break;
3871 }
3872 av_packet_move_ref(queue_pkt, pkt);
3873 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3874 if (flags && ret == AVERROR(EAGAIN)) {
3875 flags = 0;
3876 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3877 av_log(f->ctx, AV_LOG_WARNING,
3878 "Thread message queue blocking; consider raising the "
3879 "thread_queue_size option (current value: %d)\n",
3880 f->thread_queue_size);
3881 }
3882 if (ret < 0) {
3883 if (ret != AVERROR_EOF)
3884 av_log(f->ctx, AV_LOG_ERROR,
3885 "Unable to send packet to main thread: %s\n",
3886 av_err2str(ret));
3887 av_packet_free(&queue_pkt);
3888 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3889 break;
3890 }
3891 }
3892
3893 return NULL;
3894}
3895
3896static void free_input_thread(int i)
3897{
3898 InputFile *f = input_files[i];
3899 AVPacket *pkt;
3900
3901 if (!f || !f->in_thread_queue)
3902 return;
3903 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3904 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3905 av_packet_free(&pkt);
3906
3907 pthread_join(f->thread, NULL);
3908 f->joined = 1;
3909 av_thread_message_queue_free(&f->in_thread_queue);
3910}
3911
3912static void free_input_threads(void)
3913{
3914 int i;
3915
3916 for (i = 0; i < nb_input_files; i++)
3917 free_input_thread(i);
3918}
3919
3920static int init_input_thread(int i)
3921{
3922 int ret;
3923 InputFile *f = input_files[i];
3924
3925 if (f->thread_queue_size < 0)
3926 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
3927 if (!f->thread_queue_size)
3928 return 0;
3929
3930 if (f->ctx->pb ? !f->ctx->pb->seekable :
3931 strcmp(f->ctx->iformat->name, "lavfi"))
3932 f->non_blocking = 1;
3933 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3934 f->thread_queue_size, sizeof(f->pkt));
3935 if (ret < 0)
3936 return ret;
3937
3938 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3939 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3940 av_thread_message_queue_free(&f->in_thread_queue);
3941 return AVERROR(ret);
3942 }
3943
3944 return 0;
3945}
3946
3947static int init_input_threads(void)
3948{
3949 int i, ret;
3950
3951 for (i = 0; i < nb_input_files; i++) {
3952 ret = init_input_thread(i);
3953 if (ret < 0)
3954 return ret;
3955 }
3956 return 0;
3957}
3958
3959static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
3960{
3961 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3962 f->non_blocking ?
3963 AV_THREAD_MESSAGE_NONBLOCK : 0);
3964}
3965#endif
3966
3967static int get_input_packet(InputFile *f, AVPacket **pkt)
3968{
3969 if (f->readrate || f->rate_emu) {
3970 int i;
3971 int64_t file_start = copy_ts * (
3972 (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
3973 (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
3974 );
3975 float scale = f->rate_emu ? 1.0 : f->readrate;
3976 for (i = 0; i < f->nb_streams; i++) {
3977 InputStream *ist = input_streams[f->ist_index + i];
3978 int64_t stream_ts_offset, pts, now;
3979 if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
3980 stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
3981 pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3982 now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
3983 if (pts > now)
3984 return AVERROR(EAGAIN);
3985 }
3986 }
3987
3988#if HAVE_THREADS
3989 if (f->thread_queue_size)
3990 return get_input_packet_mt(f, pkt);
3991#endif
3992 *pkt = f->pkt;
3993 return av_read_frame(f->ctx, *pkt);
3994}
3995
3996static int got_eagain(void)
3997{
3998 int i;
3999 for (i = 0; i < nb_output_streams; i++)
4000 if (output_streams[i]->unavailable)
4001 return 1;
4002 return 0;
4003}
4004
4005static void reset_eagain(void)
4006{
4007 int i;
4008 for (i = 0; i < nb_input_files; i++)
4009 input_files[i]->eagain = 0;
4010 for (i = 0; i < nb_output_streams; i++)
4011 output_streams[i]->unavailable = 0;
4012}
4013
4014// set duration to max(tmp, duration) in a proper time base and return duration's time_base
4015static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4016 AVRational time_base)
4017{
4018 int ret;
4019
4020 if (!*duration) {
4021 *duration = tmp;
4022 return tmp_time_base;
4023 }
4024
4025 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4026 if (ret < 0) {
4027 *duration = tmp;
4028 return tmp_time_base;
4029 }
4030
4031 return time_base;
4032}
4033
/**
 * Rewind an input file to its start for the -stream_loop option, and
 * accumulate the duration of the iteration that just finished so the next
 * iteration's timestamps can be offset past it.
 *
 * @param ifile input file being looped
 * @param is    its demuxer context
 * @return 0 on success, a negative AVERROR code on seek failure
 */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* first pass: did any audio stream actually produce samples? */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* second pass: compute each stream's duration and keep the longest */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* exact last-frame length from the decoded sample count */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* no audio: estimate the last frame length from the frame rate */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* count down finite loops; negative means loop forever */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4092
4093/*
4094 * Return
4095 * - 0 -- one packet was read and processed
4096 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4097 * this function should be called again
4098 * - AVERROR_EOF -- this function should not be called again
4099 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket *pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    /* With -copyts, timestamp discontinuity correction is disabled by
     * default; it may be re-enabled below for wrap-around cases. */
    int disable_discontinuity_correction = copy_ts;

    is  = ifile->ctx;
    /* Fetch the next demuxed packet (from the input thread queue if
     * threading is enabled). */
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* EOF/error while -stream_loop is active: flush the decoders, seek back
     * to the file start and retry the read. */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->processing_needed) {
                /* NULL packet = flush; a positive return means output was
                 * produced and we should come back later. */
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                if (ist->decoding_needed)
                    avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        /* The input thread must not touch the context while we seek it. */
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    /* Genuine end of input (or unrecoverable error): flush every stream and
     * report EOF for this file. */
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->processing_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                /* NOTE(review): the body of this if-statement appears to be
                 * missing from this extract (likely lost in extraction);
                 * upstream ffmpeg calls finish_output_stream(ost) here —
                 * confirm against the original source. */
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
                         is->streams[pkt->stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt->stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt->stream_index];

    /* Per-stream statistics. */
    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* Timestamp wrap-around correction for streams with < 64 pts bits. */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* If the timestamp lies in the upper half of the wrap range, unwrap
         * it; leave wrap_correction_done = 0 so later packets get checked. */
        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* Do not overwrite side data the packet already carries. */
            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* Apply the per-file timestamp offset, then the -itsscale factor. */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ist->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ist->ts_scale;

    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    /* Inter-stream discontinuity handling: before the first dts of this
     * stream is known, compare against the file-wide last_ts. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* Shift timestamps by the accumulated -stream_loop duration and track
     * the min/max pts seen (used by seek_to_start on the next loop). */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts += duration;
        ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;

    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

    /* With -copyts, re-enable discontinuity correction only when the dts
     * looks like a genuine wrap-around rather than a real jump. */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    /* Intra-stream discontinuity handling: compare against this stream's
     * predicted next dts. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            /* Discontinuous formats (e.g. MPEG-TS): absorb the jump into
             * the file's ts_offset. */
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* Continuous formats: drop wildly implausible timestamps. */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
#if HAVE_THREADS
    /* Threaded input hands over packet ownership; free it. Otherwise the
     * packet buffer is reused, so only unref. */
    if (ifile->thread_queue_size)
        av_packet_free(&pkt);
    else
#endif
    av_packet_unref(pkt);

    return 0;
}
4388
/*
 * Drive the given filtergraph one step and, when it is starved, pick the
 * input stream whose buffer source has failed the most frame requests.
 *
 * On return, *best_ist is NULL if the graph produced output (or hit EOF);
 * otherwise it points at the input stream that should be fed next.
 * Returns 0 (or the reap_filters() result) on success, a negative AVERROR
 * on failure.
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters(0);

    if (ret == AVERROR_EOF) {
        /* Graph fully drained: flush it and close all its outputs. */
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* EAGAIN: the graph wants more input — find the hungriest source. */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
        /* NOTE(review): the second operand of this || (and the condition's
         * closing parenthesis) appears to be missing from this extract;
         * upstream ffmpeg also skips streams whose file has reached
         * eof_reached here — confirm against the original source. */
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* No feedable input: mark every output of this graph unavailable so the
     * scheduler does not keep selecting it. */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
4436
/*
 * Run a single step of transcoding: choose the neediest output stream,
 * pick an input stream to feed it, read and process one input packet,
 * and reap any frames the filtergraphs produced.
 * Returns 0 on success, a negative AVERROR on fatal failure or EOF.
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        /* All outputs temporarily unavailable: clear the EAGAIN state and
         * back off briefly instead of busy-looping. */
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* The filtergraph is not configured yet — try to configure it now. */
    if (ost->filter && !ost->filter->graph->graph) {
        /* NOTE(review): a guard line appears to be missing here (likely lost
         * in extraction) — upstream ffmpeg wraps the configure call in
         * "if (ifilter_has_all_input_formats(ost->filter->graph)) {", which
         * also explains the apparent extra closing brace below; confirm
         * against the original source. */
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /*
         * Similar case to the early audio initialization in reap_filters.
         * Audio is special in ffmpeg.c currently as we depend on lavfi's
         * audio frame buffering/creation to get the output audio frame size
         * in samples correct. The audio frame size for the filter chain is
         * configured during the output stream initialization.
         *
         * Apparently avfilter_graph_request_oldest (called in
         * transcode_from_filter just down the line) peeks. Peeking already
         * puts one frame "ready to be given out", which means that any
         * update in filter buffer sink configuration afterwards will not
         * help us. And yes, even if it would be utilized,
         * av_buffersink_get_samples is affected, as it internally utilizes
         * the same early exit for peeked frames.
         *
         * In other words, if avfilter_graph_request_oldest would not make
         * further filter chain configuration or usage of
         * av_buffersink_get_samples useless (by just causing the return
         * of the peeked AVFrame as-is), we could get rid of this additional
         * early encoder initialization.
         */
        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* Graph exists but is not configured: feed any input that has not
         * produced output yet and whose file is not exhausted. */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* Stream copy: the source stream is fixed. */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4528
4529/*
4530 * The following code is the main loop of the file converter
4531 */
/*
 * Main conversion loop: repeatedly calls transcode_step() until all output
 * is written or the run is interrupted, then flushes, writes trailers,
 * closes files/encoders/decoders and releases per-stream resources.
 * Returns 0 on success, a negative AVERROR on failure.
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* NOTE(review): the main loop header appears to be missing from this
     * extract — upstream ffmpeg-kit opens a
     * "while (received_sigterm == 0 && cancelRequested(...) == 0) {" loop
     * here, which the closing brace below matches; confirm against the
     * original source. */
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        /* NOTE(review): the "if (stdin_interaction)" guard line upstream
         * wraps around this check appears to be missing — confirm. */
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    /* NOTE(review): upstream calls flush_encoders() here; the line appears
     * to be missing from this extract — confirm. */

    term_exit();

    /* write the trailer if needed */
    for (i = 0; i < nb_output_files; i++) {
        /* NOTE(review): the trailer-writing call itself (upstream:
         * "ret = of_write_trailer(output_files[i]);") appears to be missing
         * from this extract — confirm against the original source. */
        if (ret < 0 && exit_on_error)
            exit_program(1);
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close the output files */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (os && os->oformat && !(os->oformat->flags & AVFMT_NOFILE)) {
            if ((ret = avio_closep(&os->pb)) < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error closing file %s: %s\n", os->url, av_err2str(ret));
                if (exit_on_error)
                    exit_program(1);
            }
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
        /* NOTE(review): the condition opening this block (upstream:
         * "if (!ost->packets_written && (abort_on_flags &
         * ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {") appears to be missing
         * from this extract — confirm against the original source. */
            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
            exit_program(1);
        }
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* NOTE(review): upstream calls hw_device_free_all() here; the line
     * appears to be missing from this extract — confirm. */

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    free_input_threads();
#endif

    /* Release per-output-stream allocations regardless of success. */
    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        av_log(NULL, AV_LOG_ERROR,
                               "Error closing logfile, loss of information possible: %s\n",
                               av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
            }
        }
    }
    return ret;
}
4679
/* Collect wall-clock, user-CPU and system-CPU timestamps (microseconds) for
 * -benchmark reporting; CPU times are 0 on platforms without a known API.
 * NOTE(review): the function signature line appears to be missing from this
 * extract — upstream it is
 * "static BenchmarkTimeStamps get_benchmark_time_stamps(void)"; confirm
 * against the original source. */
{
    /* Wall-clock time first; CPU times are filled in per platform below. */
    BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    time_stamps.user_usec =
        (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
    time_stamps.sys_usec =
        (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts 100 ns units; divide by 10 for microseconds. */
    time_stamps.user_usec =
        ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
    time_stamps.sys_usec =
        ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
#else
    time_stamps.user_usec = time_stamps.sys_usec = 0;
#endif
    return time_stamps;
}
4705
4706static int64_t getmaxrss(void)
4707{
4708#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4709 struct rusage rusage;
4710 getrusage(RUSAGE_SELF, &rusage);
4711 return (int64_t)rusage.ru_maxrss * 1024;
4712#elif HAVE_GETPROCESSMEMORYINFO
4713 HANDLE proc;
4714 PROCESS_MEMORY_COUNTERS memcounters;
4715 proc = GetCurrentProcess();
4716 memcounters.cb = sizeof(memcounters);
4717 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4718 return memcounters.PeakPagefileUsage;
4719#else
4720 return 0;
4721#endif
4722}
4723
    /* Reset global/session state so ffmpeg_execute() can be called again in
     * the same process (library usage, unlike the one-shot CLI).
     * NOTE(review): the function header line ("void ffmpeg_var_cleanup() {")
     * appears to be missing from this extract — confirm against the
     * original source. */
    longjmp_value = 0;
    received_sigterm = 0;
    /* NOTE(review): upstream also resets received_nb_signals here; the line
     * appears to be missing from this extract — confirm. */
    ffmpeg_exited = 0;
    copy_ts_first_pts = AV_NOPTS_VALUE;

    /* Frame duplication/drop statistics. */
    nb_frames_dup = 0;
    dup_warning = 1000;
    nb_frames_drop = 0;
    nb_output_dumped = 0;

    want_sdp = 1;

    progress_avio = NULL;

    /* Input/output stream and file tables. */
    input_streams = NULL;
    nb_input_streams = 0;
    input_files = NULL;
    nb_input_files = 0;

    output_streams = NULL;
    /* NOTE(review): upstream also resets nb_output_streams here; the line
     * appears to be missing from this extract — confirm. */
    output_files = NULL;
    nb_output_files = 0;

    filtergraphs = NULL;
    nb_filtergraphs = 0;

    /* Progress-report state. */
    last_time = -1;
    first_report = 1;
}
4758
4759void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double))
4760{
4761 report_callback = callback;
4762}
4763
/**
 * Cancel a running FFmpeg operation.
 *
 * @param id session id to cancel; 0 raises the SIGINT handler path instead,
 *           which cancels the current execution.
 */
void cancel_operation(long id)
{
    if (id != 0) {
        cancelSession(id);
    } else {
        sigterm_handler(SIGINT);
    }
}
4772
4773__thread OptionDef *ffmpeg_options = NULL;
4774
4775int ffmpeg_execute(int argc, char **argv)
4776{
4777 char _program_name[] = "ffmpeg";
4778 program_name = (char*)&_program_name;
4779 program_birth_year = 2000;
4780
4781 #define OFFSET(x) offsetof(OptionsContext, x)
4782 OptionDef options[] = {
4783
4784 /* main options */
4785 { "L", OPT_EXIT, { .func_arg = show_license }, "show license" },
4786 { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4787 { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4788 { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4789 { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4790 { "version", OPT_EXIT, { .func_arg = show_version }, "show version" },
4791 { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" },
4792 { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" },
4793 { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" },
4794 { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" },
4795 { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" },
4796 { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" },
4797 { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" },
4798 { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" },
4799 { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" },
4800 { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" },
4801 { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" },
4802 { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" },
4803 { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" },
4804 { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" },
4805 { "dispositions", OPT_EXIT, { .func_arg = show_dispositions}, "show available stream dispositions" },
4806 { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" },
4807 { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
4808 { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
4809 { "report", 0, { .func_arg = opt_report }, "generate a report" },
4810 { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" },
4811 { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
4812 { "cpucount", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpucount }, "force specific cpu count", "count" },
4813 { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
4814
4815 #if CONFIG_AVDEVICE
4816 { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
4817 "list sources of the input device", "device" },
4818 { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
4819 "list sinks of the output device", "device" },
4820 #endif
4821
4822 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
4823 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
4824 "force format", "fmt" },
4825 { "y", OPT_BOOL, { &file_overwrite },
4826 "overwrite output files" },
4827 { "n", OPT_BOOL, { &no_file_overwrite },
4828 "never overwrite output files" },
4829 { "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
4830 "Ignore unknown stream types" },
4831 { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
4832 "Copy unknown stream types" },
4833 { "recast_media", OPT_BOOL | OPT_EXPERT, { &recast_media },
4834 "allow recasting stream type in order to force a decoder of different media type" },
4835 { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
4836 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
4837 "codec name", "codec" },
4838 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC |
4839 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
4840 "codec name", "codec" },
4841 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC |
4842 OPT_OUTPUT, { .off = OFFSET(presets) },
4843 "preset name", "preset" },
4844 { "map", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4845 OPT_OUTPUT, { .func_arg = opt_map },
4846 "set input stream mapping",
4847 "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4848 { "map_channel", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_map_channel },
4849 "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
4850 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC |
4851 OPT_OUTPUT, { .off = OFFSET(metadata_map) },
4852 "set metadata information of outfile from infile",
4853 "outfile[,metadata]:infile[,metadata]" },
4854 { "map_chapters", HAS_ARG | OPT_INT | OPT_EXPERT | OPT_OFFSET |
4855 OPT_OUTPUT, { .off = OFFSET(chapters_input_file) },
4856 "set chapters mapping", "input_file_index" },
4857 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET |
4858 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(recording_time) },
4859 "record or transcode \"duration\" seconds of audio/video",
4860 "duration" },
4861 { "to", HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(stop_time) },
4862 "record or transcode stop time", "time_stop" },
4863 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(limit_filesize) },
4864 "set the limit file size in bytes", "limit_size" },
4865 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
4866 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
4867 "set the start time offset", "time_off" },
4868 { "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
4869 OPT_INPUT, { .off = OFFSET(start_time_eof) },
4870 "set the start time offset relative to EOF", "time_off" },
4871 { "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
4872 OPT_INPUT, { .off = OFFSET(seek_timestamp) },
4873 "enable/disable seeking by timestamp with -ss" },
4874 { "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
4875 OPT_INPUT, { .off = OFFSET(accurate_seek) },
4876 "enable/disable accurate seeking with -ss" },
4877 { "isync", HAS_ARG | OPT_INT | OPT_OFFSET |
4878 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_sync_ref) },
4879 "Indicate the input index for sync reference", "sync ref" },
4880 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET |
4881 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_ts_offset) },
4882 "set the input ts offset", "time_off" },
4883 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
4884 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
4885 "set the input ts scale", "scale" },
4886 { "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
4887 "set the recording timestamp ('now' to set the current time)", "time" },
4888 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
4889 "add metadata", "string=string" },
4890 { "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
4891 "add program with specified streams", "title=string:st=number..." },
4892 { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
4893 OPT_OUTPUT, { .func_arg = opt_data_frames },
4894 "set the number of data frames to output", "number" },
4895 { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
4896 "add timings for benchmarking" },
4897 { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
4898 "add timings for each task" },
4899 { "progress", HAS_ARG | OPT_EXPERT, { .func_arg = opt_progress },
4900 "write program-readable progress information", "url" },
4901 { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
4902 "enable or disable interaction on standard input" },
4903 { "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
4904 "set max runtime in seconds in CPU user time", "limit" },
4905 { "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
4906 "dump each input packet" },
4907 { "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
4908 "when dumping packets, also dump the payload" },
4909 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
4910 OPT_INPUT, { .off = OFFSET(rate_emu) },
4911 "read input at native frame rate; equivalent to -readrate 1", "" },
4912 { "readrate", HAS_ARG | OPT_FLOAT | OPT_OFFSET |
4913 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(readrate) },
4914 "read input at specified rate", "speed" },
4915 { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
4916 "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
4917 "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
4918 { "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
4919 "set video sync method globally; deprecated, use -fps_mode", "" },
4920 { "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
4921 "frame drop threshold", "" },
4922 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
4923 "audio sync method", "" },
4924 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
4925 "audio drift threshold", "threshold" },
4926 { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
4927 "copy timestamps" },
4928 { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
4929 "shift input timestamps to start at 0 when using copyts" },
4930 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
4931 "copy input stream time base when stream copying", "mode" },
4932 { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
4933 OPT_OUTPUT, { .off = OFFSET(shortest) },
4934 "finish encoding within shortest input" },
4935 { "bitexact", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
4936 OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(bitexact) },
4937 "bitexact mode" },
4938 { "apad", OPT_STRING | HAS_ARG | OPT_SPEC |
4939 OPT_OUTPUT, { .off = OFFSET(apad) },
4940 "audio pad", "" },
4941 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_delta_threshold },
4942 "timestamp discontinuity delta threshold", "threshold" },
4943 { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_error_threshold },
4944 "timestamp error delta threshold", "threshold" },
4945 { "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
4946 "exit on error", "error" },
4947 { "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
4948 "abort on the specified condition flags", "flags" },
4949 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
4950 OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
4951 "copy initial non-keyframes" },
4952 { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
4953 "copy or discard frames before start time" },
4954 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
4955 "set the number of frames to output", "number" },
4956 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
4957 OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
4958 "force codec tag/fourcc", "fourcc/tag" },
4959 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
4960 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
4961 "use fixed quality scale (VBR)", "q" },
4962 { "qscale", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4963 OPT_OUTPUT, { .func_arg = opt_qscale },
4964 "use fixed quality scale (VBR)", "q" },
4965 { "profile", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_profile },
4966 "set profile", "profile" },
4967 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
4968 "set stream filtergraph", "filter_graph" },
4969 { "filter_threads", HAS_ARG, { .func_arg = opt_filter_threads },
4970 "number of non-complex filter threads" },
4971 { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
4972 "read stream filtergraph description from a file", "filename" },
4973 { "reinit_filter", HAS_ARG | OPT_INT | OPT_SPEC | OPT_INPUT, { .off = OFFSET(reinit_filters) },
4974 "reinit filtergraph on input parameter changes", "" },
4975 { "filter_complex", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
4976 "create a complex filtergraph", "graph_description" },
4977 { "filter_complex_threads", HAS_ARG | OPT_INT, { &filter_complex_nbthreads },
4978 "number of threads for -filter_complex" },
4979 { "lavfi", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
4980 "create a complex filtergraph", "graph_description" },
4981 { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
4982 "read complex filtergraph description from a file", "filename" },
4983 { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
4984 "enable automatic conversion filters globally" },
4985 { "stats", OPT_BOOL, { &print_stats },
4986 "print progress report during encoding", },
4987 { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
4988 "set the period at which ffmpeg updates stats and -progress output", "time" },
4989 { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
4990 OPT_OUTPUT, { .func_arg = opt_attach },
4991 "add an attachment to the output file", "filename" },
4992 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
4994 "extract an attachment into a file", "filename" },
4995 { "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
4996 OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
4997 { "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
4998 "print timestamp debugging info" },
4999 { "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
5000 "ratio of decoding errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success.", "maximum error rate" },
5001 { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
5002 OPT_INPUT, { .off = OFFSET(discard) },
5003 "discard", "" },
5004 { "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
5005 OPT_OUTPUT, { .off = OFFSET(disposition) },
5006 "disposition", "" },
5007 { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
5008 { .off = OFFSET(thread_queue_size) },
5009 "set the maximum number of queued packets from the demuxer" },
5010 { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
5011 "read and decode the streams to fill missing information with heuristics" },
5012 { "bits_per_raw_sample", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT,
5013 { .off = OFFSET(bits_per_raw_sample) },
5014 "set the number of bits per raw sample", "number" },
5015
5016 /* video options */
5017 { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
5018 "set the number of video frames to output", "number" },
5019 { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5020 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
5021 "set frame rate (Hz value, fraction or abbreviation)", "rate" },
5022 { "fpsmax", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5023 OPT_OUTPUT, { .off = OFFSET(max_frame_rates) },
5024 "set max frame rate (Hz value, fraction or abbreviation)", "rate" },
5026 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_sizes) },
5027 "set frame size (WxH or abbreviation)", "size" },
5028 { "aspect", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5029 OPT_OUTPUT, { .off = OFFSET(frame_aspect_ratios) },
5030 "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
5031 { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5032 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
5033 "set pixel format", "format" },
5034 { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
5035 "disable video" },
5036 { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5037 OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
5038 "rate control override for specific intervals", "override" },
5039 { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
5040 OPT_OUTPUT, { .func_arg = opt_video_codec },
5041 "force video codec ('copy' to copy stream)", "codec" },
5042 { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
5043 "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
5044 { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
5045 "select the pass number (1 to 3)", "n" },
5046 { "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
5047 OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
5048 "select two pass log file name prefix", "prefix" },
5049 { "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
5050 "calculate PSNR of compressed frames" },
5051 { "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
5052 "dump video coding statistics to file" },
5053 { "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
5054 "dump video coding statistics to file", "file" },
5055 { "vstats_version", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &vstats_version },
5056 "Version of the vstats format to use."},
5057 { "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
5058 "set video filters", "filter_graph" },
5059 { "intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5060 OPT_OUTPUT, { .off = OFFSET(intra_matrices) },
5061 "specify intra matrix coeffs", "matrix" },
5062 { "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5063 OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
5064 "specify inter matrix coeffs", "matrix" },
5065 { "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5066 OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
5067 "specify intra matrix coeffs", "matrix" },
5068 { "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
5069 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
5070 "top=1/bottom=0/auto=-1 field first", "" },
5071 { "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5072 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
5073 "force video tag/fourcc", "fourcc/tag" },
5074 { "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
5075 "show QP histogram" },
5076 { "fps_mode", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT |
5077 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(fps_mode) },
5078 "set framerate mode for matching video streams; overrides vsync" },
5079 { "force_fps", OPT_VIDEO | OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5080 OPT_OUTPUT, { .off = OFFSET(force_fps) },
5081 "force the selected framerate, disable the best supported framerate selection" },
5082 { "streamid", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5083 OPT_OUTPUT, { .func_arg = opt_streamid },
5084 "set the value of an outfile streamid", "streamIndex:value" },
5085 { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5086 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
5087 "force key frames at specified timestamps", "timestamps" },
5088 { "ab", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5089 "audio bitrate (please use -b:a)", "bitrate" },
5090 { "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5091 "video bitrate (please use -b:v)", "bitrate" },
5092 { "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5093 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
5094 "use HW accelerated decoding", "hwaccel name" },
5095 { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5096 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
5097 "select a device for HW acceleration", "devicename" },
5098 { "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5099 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
5100 "select output format used with HW accelerated decoding", "format" },
5101 { "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
5102 "show available HW acceleration methods" },
5103 { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
5104 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
5105 "automatically insert correct rotate filters" },
5106 { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
5107 OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
5108 "automatically insert a scale filter at the end of the filter graph" },
5109
5110 /* audio options */
5111 { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
5112 "set the number of audio frames to output", "number" },
5113 { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
5114 "set audio quality (codec-specific)", "quality", },
5115 { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5116 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_sample_rate) },
5117 "set audio sampling rate (in Hz)", "rate" },
5118 { "ac", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5119 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_channels) },
5120 "set number of audio channels", "channels" },
5121 { "an", OPT_AUDIO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(audio_disable) },
5122 "disable audio" },
5123 { "acodec", OPT_AUDIO | HAS_ARG | OPT_PERFILE |
5124 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_audio_codec },
5125 "force audio codec ('copy' to copy stream)", "codec" },
5126 { "atag", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5127 OPT_OUTPUT, { .func_arg = opt_old2new },
5128 "force audio tag/fourcc", "fourcc/tag" },
5129 { "vol", OPT_AUDIO | HAS_ARG | OPT_INT, { &audio_volume },
5130 "change audio volume (256=normal)" , "volume" },
5131 { "sample_fmt", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5132 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(sample_fmts) },
5133 "set sample format", "format" },
5134 { "channel_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5135 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_ch_layouts) },
5136 "set channel layout", "layout" },
5137 { "ch_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5138 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_ch_layouts) },
5139 "set channel layout", "layout" },
5140 { "af", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_filters },
5141 "set audio filters", "filter_graph" },
5142 { "guess_layout_max", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(guess_layout_max) },
5143 "set the maximum number of channels to try to guess the channel layout" },
5144
5145 /* subtitle options */
5146 { "sn", OPT_SUBTITLE | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(subtitle_disable) },
5147 "disable subtitle" },
5148 { "scodec", OPT_SUBTITLE | HAS_ARG | OPT_PERFILE | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_subtitle_codec },
5149 "force subtitle codec ('copy' to copy stream)", "codec" },
5150 { "stag", OPT_SUBTITLE | HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new }
5151 , "force subtitle tag/fourcc", "fourcc/tag" },
5152 { "fix_sub_duration", OPT_BOOL | OPT_EXPERT | OPT_SUBTITLE | OPT_SPEC | OPT_INPUT, { .off = OFFSET(fix_sub_duration) },
5153 "fix subtitles duration" },
5154 { "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
5155 "set canvas size (WxH or abbreviation)", "size" },
5156
5157 /* muxer options */
5158 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
5159 "set the maximum demux-decode delay", "seconds" },
5160 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_preload) },
5161 "set the initial demux-decode delay", "seconds" },
5162 { "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
5163 "specify a file in which to print sdp information", "file" },
5164
5165 { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
5166 "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
5167 { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
5168 "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
5169 "two special values are defined - "
5170 "0 = use frame rate (video) or sample rate (audio),"
5171 "-1 = match source time base", "ratio" },
5172
5173 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
5174 "A comma-separated list of bitstream filters", "bitstream_filters" },
5175 { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5176 "deprecated", "audio bitstream_filters" },
5177 { "vbsf", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5178 "deprecated", "video bitstream_filters" },
5179
5180 { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5181 "set the audio options to the indicated preset", "preset" },
5182 { "vpre", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5183 "set the video options to the indicated preset", "preset" },
5184 { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5185 "set the subtitle options to the indicated preset", "preset" },
5186 { "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5187 "set options from indicated preset file", "filename" },
5188
5189 { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
5190 "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
5191 { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) },
5192 "set the threshold after which max_muxing_queue_size is taken into account", "bytes" },
5193
5194 /* data codec support */
5195 { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
5196 "force data codec ('copy' to copy stream)", "codec" },
5197 { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
5198 "disable data" },
5199
5200 #if CONFIG_VAAPI
5201 { "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
5202 "set VAAPI hardware device (DRM path or X11 display name)", "device" },
5203 #endif
5204
5205 #if CONFIG_QSV
5206 { "qsv_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_qsv_device },
5207 "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
5208 #endif
5209
5210 { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
5211 "initialise hardware device", "args" },
5212 { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
5213 "set hardware device used when filtering", "device" },
5214
5215 { NULL, },
5216 };
5217
5218 ffmpeg_options = options;
5219
5220 int i, ret;
5222
5223 int savedCode = setjmp(ex_buf__);
5224 if (savedCode == 0) {
5225
5227
5228 init_dynload();
5229
5231
5232 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5233 parse_loglevel(argc, argv, options);
5234
5235 #if CONFIG_AVDEVICE
5236 avdevice_register_all();
5237 #endif
5238 avformat_network_init();
5239
5240 show_banner(argc, argv, options);
5241
5242 /* parse options and open all input/output files */
5243 ret = ffmpeg_parse_options(argc, argv);
5244 if (ret < 0)
5245 exit_program(1);
5246
5247 if (nb_output_files <= 0 && nb_input_files == 0) {
5248 show_usage();
5249 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5250 exit_program(1);
5251 }
5252
5253 /* file converter / grab */
5254 if (nb_output_files <= 0) {
5255 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
5256 exit_program(1);
5257 }
5258
5259 for (i = 0; i < nb_output_files; i++) {
5260 if (strcmp(output_files[i]->format->name, "rtp"))
5261 want_sdp = 0;
5262 }
5263
5265 if (transcode() < 0)
5266 exit_program(1);
5267 if (do_benchmark) {
5268 int64_t utime, stime, rtime;
5270 utime = current_time.user_usec - ti.user_usec;
5271 stime = current_time.sys_usec - ti.sys_usec;
5272 rtime = current_time.real_usec - ti.real_usec;
5273 av_log(NULL, AV_LOG_INFO,
5274 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5275 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5276 }
5277 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5280 exit_program(69);
5281
5283
5284 } else {
5286 }
5287
5289}
__thread jmp_buf ex_buf__
void exit_program(int ret)
void init_dynload(void)
void print_error(const char *filename, int err)
__thread char * program_name
void parse_loglevel(int argc, char **argv, const OptionDef *options)
__thread int program_birth_year
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
void register_exit(void(*cb)(int ret))
void uninit_opts(void)
__thread int hide_banner
#define OPT_VIDEO
int show_help(void *optctx, const char *opt, const char *arg)
#define OPT_SPEC
#define OPT_BOOL
#define OPT_INT64
#define OPT_PERFILE
#define OPT_INT
#define OPT_FLOAT
#define AV_LOG_STDERR
#define OPT_INPUT
#define OPT_DOUBLE
#define OPT_STRING
__thread int find_stream_info
void show_banner(int argc, char **argv, const OptionDef *options)
int opt_timelimit(void *optctx, const char *opt, const char *arg)
#define OPT_AUDIO
#define OPT_DATA
#define OPT_SUBTITLE
#define OPT_EXPERT
#define OPT_EXIT
#define OPT_OUTPUT
#define OPT_TIME
#define OPT_OFFSET
#define HAS_ARG
static InputStream * get_input_stream(OutputStream *ost)
int opt_channel_layout(void *optctx, const char *opt, const char *arg)
int opt_sdp_file(void *optctx, const char *opt, const char *arg)
static int transcode(void)
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
int opt_timecode(void *optctx, const char *opt, const char *arg)
__thread OptionDef * ffmpeg_options
int opt_vstats_file(void *optctx, const char *opt, const char *arg)
__thread InputStream ** input_streams
static void set_tty_echo(int on)
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
__thread const AVIOInterruptCB int_cb
static int check_keyboard_interaction(int64_t cur_time)
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
int opt_data_codec(void *optctx, const char *opt, const char *arg)
int opt_streamid(void *optctx, const char *opt, const char *arg)
__thread int nb_input_streams
static int need_output(void)
void term_exit(void)
static volatile int received_sigterm
const char *const forced_keyframes_const_names[]
void cancelSession(long sessionId)
int opt_qscale(void *optctx, const char *opt, const char *arg)
int opt_sameq(void *optctx, const char *opt, const char *arg)
__thread OutputStream ** output_streams
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
__thread OutputFile ** output_files
int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
int opt_filter_complex(void *optctx, const char *opt, const char *arg)
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
int opt_vsync(void *optctx, const char *opt, const char *arg)
static int init_input_stream(int ist_index, char *error, int error_len)
__thread int nb_output_streams
static void sub2video_push_ref(InputStream *ist, int64_t pts)
int guess_input_channel_layout(InputStream *ist)
static int reap_filters(int flush)
static int check_recording_time(OutputStream *ost)
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
__thread BenchmarkTimeStamps current_time
__thread int nb_input_files
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
static void print_final_stats(int64_t total_size)
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
__thread int first_report
__thread int recast_media
__thread int nb_output_files
static double psnr(double d)
static int init_output_bsfs(OutputStream *ost)
volatile int handleSIGINT
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
int opt_subtitle_codec(void *optctx, const char *opt, const char *arg)
__thread int ffmpeg_exited
volatile int handleSIGTERM
int opt_video_standard(void *optctx, const char *opt, const char *arg)
void set_report_callback(void(*callback)(int, float, float, int64_t, int, double, double))
__thread int64_t nb_frames_drop
int opt_profile(void *optctx, const char *opt, const char *arg)
struct BenchmarkTimeStamps BenchmarkTimeStamps
static int64_t getmaxrss(void)
int opt_abort_on(void *optctx, const char *opt, const char *arg)
static void reset_eagain(void)
__thread int copy_unknown_streams
static void finish_output_stream(OutputStream *ost)
int opt_video_codec(void *optctx, const char *opt, const char *arg)
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
int opt_data_frames(void *optctx, const char *opt, const char *arg)
int opt_video_filters(void *optctx, const char *opt, const char *arg)
static int compare_int64(const void *a, const void *b)
__thread int input_sync
int opt_attach(void *optctx, const char *opt, const char *arg)
int opt_filter_hw_device(void *optctx, const char *opt, const char *arg)
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
__thread atomic_int transcode_init_done
static void check_decode_result(InputStream *ist, int *got_output, int ret)
__thread int64_t keyboard_last_time
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
__thread int64_t nb_frames_dup
int opt_audio_frames(void *optctx, const char *opt, const char *arg)
static int process_input(int file_index)
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
__thread int no_file_overwrite
int opt_target(void *optctx, const char *opt, const char *arg)
__thread int qp_histogram[52]
__thread int longjmp_value
static int check_output_constraints(InputStream *ist, OutputStream *ost)
__thread int file_overwrite
int opt_map(void *optctx, const char *opt, const char *arg)
void cancel_operation(long id)
static void set_encoder_id(OutputFile *of, OutputStream *ost)
__thread AVIOContext * progress_avio
__thread int64_t copy_ts_first_pts
__thread InputFile ** input_files
static int read_key(void)
static void close_output_stream(OutputStream *ost)
#define SIGNAL(sig, func)
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
int opt_vstats(void *optctx, const char *opt, const char *arg)
int opt_video_channel(void *optctx, const char *opt, const char *arg)
__thread FilterGraph ** filtergraphs
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
static int got_eagain(void)
int opt_video_frames(void *optctx, const char *opt, const char *arg)
__thread unsigned nb_output_dumped
static int send_filter_eof(InputStream *ist)
__thread int main_ffmpeg_return_code
int decode_interrupt_cb(void *ctx)
int opt_audio_filters(void *optctx, const char *opt, const char *arg)
static void term_exit_sigsafe(void)
static void sub2video_flush(InputStream *ist)
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
void remove_avoptions(AVDictionary **a, AVDictionary *b)
static int transcode_init(void)
static volatile int received_nb_signals
int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
__thread long globalSessionId
int opt_filter_threads(void *optctx, const char *opt, const char *arg)
static FILE * vstats_file
__thread uint64_t dup_warning
void(* report_callback)(int, float, float, int64_t, int, double, double)
static void report_new_stream(int input_index, AVPacket *pkt)
__thread int64_t decode_error_stat[2]
static void abort_codec_experimental(const AVCodec *c, int encoder)
int show_hwaccels(void *optctx, const char *opt, const char *arg)
static void update_benchmark(const char *fmt,...)
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
static void ffmpeg_cleanup(int ret)
int opt_preset(void *optctx, const char *opt, const char *arg)
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
static int transcode_step(void)
static int ifilter_has_all_input_formats(FilterGraph *fg)
static OutputStream * choose_output(void)
int opt_old2new(void *optctx, const char *opt, const char *arg)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
__thread int nb_filtergraphs
static int init_output_stream_streamcopy(OutputStream *ost)
void term_init(void)
int opt_stats_period(void *optctx, const char *opt, const char *arg)
int cancelRequested(long sessionId)
__thread int64_t last_time
volatile int handleSIGPIPE
static int get_input_packet(InputFile *f, AVPacket **pkt)
#define OFFSET(x)
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
int opt_bitrate(void *optctx, const char *opt, const char *arg)
volatile int handleSIGXCPU
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
__thread uint8_t * subtitle_out
static int sub2video_get_blank_frame(InputStream *ist)
static void flush_encoders(void)
__thread int do_psnr
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
int opt_map_channel(void *optctx, const char *opt, const char *arg)
int opt_progress(void *optctx, const char *opt, const char *arg)
int opt_audio_codec(void *optctx, const char *opt, const char *arg)
int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
void assert_avoptions(AVDictionary *m)
__thread int want_sdp
void ffmpeg_var_cleanup()
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
int ffmpeg_execute(int argc, char **argv)
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
__thread int ignore_unknown_streams
static void sigterm_handler(int sig)
volatile int handleSIGQUIT
int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
__thread float dts_delta_threshold
int hw_device_setup_for_encode(OutputStream *ost)
__thread int copy_tb
@ HWACCEL_GENERIC
@ HWACCEL_AUTO
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
void of_close(OutputFile **pof)
__thread int64_t stats_period
__thread int print_stats
__thread int filter_complex_nbthreads
__thread int abort_on_flags
__thread int audio_volume
__thread float max_error_rate
__thread int copy_ts
__thread int stdin_interaction
__thread float dts_error_threshold
int hwaccel_decode_init(AVCodecContext *avctx)
@ ENCODER_FINISHED
@ MUXER_FINISHED
void show_usage(void)
int of_check_init(OutputFile *of)
#define DECODING_FOR_FILTER
__thread char * filter_nbthreads
__thread int do_benchmark
__thread float frame_drop_threshold
__thread int vstats_version
__thread char * vstats_filename
int hw_device_setup_for_decode(InputStream *ist)
void hw_device_free_all(void)
__thread int audio_sync_method
__thread float audio_drift_threshold
int of_write_trailer(OutputFile *of)
__thread int do_benchmark_all
__thread int start_at_zero
@ FKF_PREV_FORCED_N
@ FKF_T
@ FKF_PREV_FORCED_T
@ FKF_N_FORCED
@ FKF_N
__thread int exit_on_error
int ffmpeg_parse_options(int argc, char **argv)
@ VSYNC_VFR
@ VSYNC_AUTO
@ VSYNC_PASSTHROUGH
@ VSYNC_CFR
@ VSYNC_DROP
@ VSYNC_VSCFR
#define ABORT_ON_FLAG_EMPTY_OUTPUT
#define DECODING_FOR_OST
__thread int qp_hist
int filtergraph_is_simple(FilterGraph *fg)
int configure_filtergraph(FilterGraph *fg)
__thread int do_hex_dump
__thread int do_pkt_dump
__thread int debug_ts
void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
__thread int auto_conversion_filters
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
void dump_attachment(AVStream *st, const char *filename)
int show_decoders(void *optctx, const char *opt, const char *arg)
int opt_loglevel(void *optctx, const char *opt, const char *arg)
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
int show_filters(void *optctx, const char *opt, const char *arg)
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
int show_muxers(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
int show_dispositions(void *optctx, const char *opt, const char *arg)
int show_layouts(void *optctx, const char *opt, const char *arg)
int show_encoders(void *optctx, const char *opt, const char *arg)
int show_version(void *optctx, const char *opt, const char *arg)
int opt_cpucount(void *optctx, const char *opt, const char *arg)
int show_license(void *optctx, const char *opt, const char *arg)
int show_codecs(void *optctx, const char *opt, const char *arg)
int show_buildconf(void *optctx, const char *opt, const char *arg)
int show_devices(void *optctx, const char *opt, const char *arg)
int show_formats(void *optctx, const char *opt, const char *arg)
int show_protocols(void *optctx, const char *opt, const char *arg)
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
int opt_report(void *optctx, const char *opt, const char *arg)
int show_colors(void *optctx, const char *opt, const char *arg)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
int show_demuxers(void *optctx, const char *opt, const char *arg)
OutputFilter ** outputs
const char * graph_desc
AVFilterGraph * graph
InputFilter ** inputs
AVPacket * pkt
int64_t ts_offset
int64_t duration
AVFormatContext * ctx
int64_t recording_time
AVRational time_base
int nb_streams_warn
int64_t last_ts
float readrate
int64_t start_time
AVBufferRef * hw_frames_ctx
uint8_t * name
struct InputStream * ist
int32_t * displaymatrix
AVFifo * frame_queue
AVFilterContext * filter
AVChannelLayout ch_layout
enum AVMediaType type
struct FilterGraph * graph
AVRational sample_aspect_ratio
unsigned int initialize
marks if sub2video_update should force an initialization
AVFifo * sub_queue
queue of AVSubtitle* before filter init
enum AVPixelFormat hwaccel_pix_fmt
AVFrame * decoded_frame
int64_t * dts_buffer
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
int64_t cfr_next_pts
void(* hwaccel_uninit)(AVCodecContext *s)
AVCodecContext * dec_ctx
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
struct InputStream::@2 prev_sub
enum HWAccelID hwaccel_id
uint64_t data_size
int64_t filter_in_rescale_delta_last
int64_t next_dts
int wrap_correction_done
int64_t max_pts
enum AVPixelFormat hwaccel_retrieved_pix_fmt
AVPacket * pkt
int64_t first_dts
dts of the first packet read for this stream (in AV_TIME_BASE units)
uint64_t samples_decoded
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
uint64_t frames_decoded
struct InputStream::sub2video sub2video
AVStream * st
InputFilter ** filters
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
uint64_t nb_packets
AVSubtitle subtitle
char * hwaccel_device
int64_t prev_pkt_pts
AVDictionary * decoder_opts
int64_t min_pts
const AVCodec * dec
enum AVHWDeviceType hwaccel_device_type
int64_t nb_samples
AVRational framerate
const AVOutputFormat * format
uint64_t limit_filesize
AVFormatContext * ctx
int64_t start_time
start time in microseconds == AV_TIME_BASE units
AVDictionary * opts
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
AVFilterInOut * out_tmp
struct OutputStream * ost
AVFilterContext * filter
uint8_t * name
struct FilterGraph * graph
AVChannelLayout ch_layout
AVDictionary * swr_opts
int copy_initial_nonkeyframes
int64_t last_mux_dts
AVRational mux_timebase
double forced_keyframes_expr_const_values[FKF_NB]
OSTFinished finished
int * audio_channels_map
AVPacket * pkt
AVRational frame_aspect_ratio
double rotate_override_value
AVFrame * last_frame
const AVCodec * enc
int audio_channels_mapped
int64_t sync_opts
int64_t * forced_kf_pts
int64_t error[4]
uint64_t packets_written
uint64_t frames_encoded
int64_t max_frames
enum VideoSyncMethod vsync_method
AVRational max_frame_rate
AVRational enc_timebase
int64_t frame_number
AVCodecParameters * ref_par
char * forced_keyframes
AVFrame * filtered_frame
const char * attachment_filename
AVRational frame_rate
int64_t last_nb0_frames[3]
AVCodecContext * enc_ctx
struct InputStream * sync_ist
AVDictionary * encoder_opts
uint64_t data_size
AVStream * st
char * filters
filtergraph associated to the -filter option
int64_t forced_kf_ref_pts
uint64_t samples_encoded
char * filters_script
filtergraph script associated to the -filter_script option
AVBSFContext * bsf_ctx
int64_t first_pts
AVDictionary * sws_dict
OutputFilter * filter
char * disposition
AVFifo * muxing_queue
uint64_t packets_encoded
AVExpr * forced_keyframes_pexpr
int64_t last_dropped
char * logfile_prefix