FFmpegKit iOS / macOS / tvOS API  4.4
fftools_ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
26 /*
27  * CHANGES 06.2020
28  * - ignoring signals implemented
29  * - cancel_operation() method signature updated with id
30  * - cancel by execution id implemented
31  *
32  * CHANGES 01.2020
33  * - ffprobe support changes
34  *
35  * CHANGES 12.2019
36  * - concurrent execution support
37  *
38  * CHANGES 08.2018
39  * --------------------------------------------------------
40  * - fftools_ prefix added to file name and parent headers
41  * - forward_report() method, report_callback function pointer and set_report_callback() setter
42  * method added to forward stats.
43  * - forward_report() call added from print_report()
44  * - cancel_operation() method added to trigger sigterm_handler
45  * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation
46  *
47  * CHANGES 07.2018
48  * --------------------------------------------------------
49  * - main() function renamed as ffmpeg_execute()
50  * - exit_program() implemented with setjmp
51  * - extern longjmp_value added to access exit code stored in exit_program()
52  * - ffmpeg_var_cleanup() method added
53  */
54 
55 #include "config.h"
56 #include <ctype.h>
57 #include <string.h>
58 #include <math.h>
59 #include <stdlib.h>
60 #include <errno.h>
61 #include <limits.h>
62 #include <stdatomic.h>
63 #include <stdint.h>
64 
65 #include "ffmpegkit_exception.h"
66 
67 #if HAVE_IO_H
68 #include <io.h>
69 #endif
70 #if HAVE_UNISTD_H
71 #include <unistd.h>
72 #endif
73 
74 #include "libavformat/avformat.h"
75 #include "libavdevice/avdevice.h"
76 #include "libswresample/swresample.h"
77 #include "libavutil/opt.h"
78 #include "libavutil/channel_layout.h"
79 #include "libavutil/parseutils.h"
80 #include "libavutil/samplefmt.h"
81 #include "libavutil/fifo.h"
82 #include "libavutil/hwcontext.h"
83 #include "libavutil/internal.h"
84 #include "libavutil/intreadwrite.h"
85 #include "libavutil/dict.h"
86 #include "libavutil/display.h"
87 #include "libavutil/mathematics.h"
88 #include "libavutil/pixdesc.h"
89 #include "libavutil/avstring.h"
90 #include "libavutil/libm.h"
91 #include "libavutil/imgutils.h"
92 #include "libavutil/timestamp.h"
93 #include "libavutil/bprint.h"
94 #include "libavutil/time.h"
95 #include "libavutil/thread.h"
96 #include "libavutil/threadmessage.h"
97 #include "libavcodec/mathops.h"
98 #include "libavformat/os_support.h"
99 
100 # include "libavfilter/avfilter.h"
101 # include "libavfilter/buffersrc.h"
102 # include "libavfilter/buffersink.h"
103 
104 #if HAVE_SYS_RESOURCE_H
105 #include <sys/time.h>
106 #include <sys/types.h>
107 #include <sys/resource.h>
108 #elif HAVE_GETPROCESSTIMES
109 #include <windows.h>
110 #endif
111 #if HAVE_GETPROCESSMEMORYINFO
112 #include <windows.h>
113 #include <psapi.h>
114 #endif
115 #if HAVE_SETCONSOLECTRLHANDLER
116 #include <windows.h>
117 #endif
118 
119 
120 #if HAVE_SYS_SELECT_H
121 #include <sys/select.h>
122 #endif
123 
124 #if HAVE_TERMIOS_H
125 #include <fcntl.h>
126 #include <sys/ioctl.h>
127 #include <sys/time.h>
128 #include <termios.h>
129 #elif HAVE_KBHIT
130 #include <conio.h>
131 #endif
132 
133 #include <time.h>
134 
135 #include "fftools_ffmpeg.h"
136 #include "fftools_cmdutils.h"
137 
138 #include "libavutil/avassert.h"
139 
140 static FILE *vstats_file;
141 
142 const char *const forced_keyframes_const_names[] = {
143  "n",
144  "n_forced",
145  "prev_forced_n",
146  "prev_forced_t",
147  "t",
148  NULL
149 };
150 
151 typedef struct BenchmarkTimeStamps {
152  int64_t real_usec;
153  int64_t user_usec;
154  int64_t sys_usec;
156 
157 static void do_video_stats(OutputStream *ost, int frame_size);
159 static int64_t getmaxrss(void);
161 
162 __thread int run_as_daemon = 0;
163 __thread int nb_frames_dup = 0;
164 __thread unsigned dup_warning = 1000;
165 __thread int nb_frames_drop = 0;
166 __thread int64_t decode_error_stat[2];
167 __thread unsigned nb_output_dumped = 0;
168 
169 __thread int want_sdp = 1;
170 
171 __thread BenchmarkTimeStamps current_time;
172 __thread AVIOContext *progress_avio = NULL;
173 
174 __thread uint8_t *subtitle_out;
175 
176 __thread InputStream **input_streams = NULL;
177 __thread int nb_input_streams = 0;
178 __thread InputFile **input_files = NULL;
179 __thread int nb_input_files = 0;
180 
181 __thread OutputStream **output_streams = NULL;
182 __thread int nb_output_streams = 0;
183 __thread OutputFile **output_files = NULL;
184 __thread int nb_output_files = 0;
185 
186 __thread FilterGraph **filtergraphs;
187 __thread int nb_filtergraphs;
188 
189 __thread int64_t last_time = -1;
190 __thread int64_t keyboard_last_time = 0;
191 __thread int first_report = 1;
192 __thread int qp_histogram[52];
193 
194 void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL;
195 
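/*
 * Illustrative sketch (not part of the original source): per the CHANGES notes
 * above, a host application registers this pointer through the
 * set_report_callback() setter and forward_report() invokes it on every
 * statistics update. The parameter names below are assumptions inferred from
 * forward_report() further down in this file.
 *
 *     static void on_stats(int frame_number, float fps, float quality,
 *                          int64_t total_size, int time, double bitrate,
 *                          double speed)
 *     {
 *         av_log(NULL, AV_LOG_INFO, "frame=%d fps=%.1f size=%"PRId64"\n",
 *                frame_number, fps, total_size);
 *     }
 *
 *     set_report_callback(on_stats);
 */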
196 extern __thread int file_overwrite;
197 extern __thread int no_file_overwrite;
198 extern __thread int ignore_unknown_streams;
199 extern __thread int copy_unknown_streams;
200 extern int opt_map(void *optctx, const char *opt, const char *arg);
201 extern int opt_map_channel(void *optctx, const char *opt, const char *arg);
202 extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg);
203 extern int opt_data_frames(void *optctx, const char *opt, const char *arg);
204 extern int opt_progress(void *optctx, const char *opt, const char *arg);
205 extern int opt_target(void *optctx, const char *opt, const char *arg);
206 extern int opt_vsync(void *optctx, const char *opt, const char *arg);
207 extern int opt_abort_on(void *optctx, const char *opt, const char *arg);
208 extern int opt_stats_period(void *optctx, const char *opt, const char *arg);
209 extern int opt_qscale(void *optctx, const char *opt, const char *arg);
210 extern int opt_profile(void *optctx, const char *opt, const char *arg);
211 extern int opt_filter_complex(void *optctx, const char *opt, const char *arg);
212 extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg);
213 extern int opt_attach(void *optctx, const char *opt, const char *arg);
214 extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
215 extern __thread int intra_only;
216 extern int opt_video_codec(void *optctx, const char *opt, const char *arg);
217 extern int opt_sameq(void *optctx, const char *opt, const char *arg);
218 extern int opt_timecode(void *optctx, const char *opt, const char *arg);
219 extern __thread int do_psnr;
220 extern int opt_vstats_file(void *optctx, const char *opt, const char *arg);
221 extern int opt_vstats(void *optctx, const char *opt, const char *arg);
222 extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
223 extern int opt_old2new(void *optctx, const char *opt, const char *arg);
224 extern int opt_streamid(void *optctx, const char *opt, const char *arg);
225 extern int opt_bitrate(void *optctx, const char *opt, const char *arg);
226 extern int show_hwaccels(void *optctx, const char *opt, const char *arg);
227 extern int opt_video_filters(void *optctx, const char *opt, const char *arg);
228 extern int opt_audio_frames(void *optctx, const char *opt, const char *arg);
229 extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg);
230 extern int opt_audio_codec(void *optctx, const char *opt, const char *arg);
231 extern int opt_channel_layout(void *optctx, const char *opt, const char *arg);
232 extern int opt_preset(void *optctx, const char *opt, const char *arg);
233 extern int opt_audio_filters(void *optctx, const char *opt, const char *arg);
234 extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg);
235 extern int opt_video_channel(void *optctx, const char *opt, const char *arg);
236 extern int opt_video_standard(void *optctx, const char *opt, const char *arg);
237 extern int opt_sdp_file(void *optctx, const char *opt, const char *arg);
238 extern int opt_data_codec(void *optctx, const char *opt, const char *arg);
239 extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg);
240 extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg);
241 extern __thread int input_sync;
242 
243 #if HAVE_TERMIOS_H
244 
245 /* init terminal so that we can grab keys */
246 __thread struct termios oldtty;
247 __thread int restore_tty;
248 #endif
249 
250 #if HAVE_THREADS
251 static void free_input_threads(void);
252 #endif
253 
254 extern volatile int handleSIGQUIT;
255 extern volatile int handleSIGINT;
256 extern volatile int handleSIGTERM;
257 extern volatile int handleSIGXCPU;
258 extern volatile int handleSIGPIPE;
259 
260 extern __thread volatile long _sessionId;
261 extern void cancelSession(long sessionId);
262 extern int cancelRequested(long sessionId);
263 
264 /* sub2video hack:
265  Convert subtitles to video with alpha to insert them in filter graphs.
266  This is a temporary solution until libavfilter gets real subtitles support.
267  */
268 
269 static int sub2video_get_blank_frame(InputStream *ist)
270 {
271  int ret;
272  AVFrame *frame = ist->sub2video.frame;
273 
274  av_frame_unref(frame);
275  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
276  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
277  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
278  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
279  return ret;
280  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
281  return 0;
282 }
283 
284 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
285  AVSubtitleRect *r)
286 {
287  uint32_t *pal, *dst2;
288  uint8_t *src, *src2;
289  int x, y;
290 
291  if (r->type != SUBTITLE_BITMAP) {
292  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
293  return;
294  }
295  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
296  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
297  r->x, r->y, r->w, r->h, w, h
298  );
299  return;
300  }
301 
302  dst += r->y * dst_linesize + r->x * 4;
303  src = r->data[0];
304  pal = (uint32_t *)r->data[1];
305  for (y = 0; y < r->h; y++) {
306  dst2 = (uint32_t *)dst;
307  src2 = src;
308  for (x = 0; x < r->w; x++)
309  *(dst2++) = pal[*(src2++)];
310  dst += dst_linesize;
311  src += r->linesize[0];
312  }
313 }
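/*
 * Worked example (not part of the original source): sub2video_copy_rect()
 * expands a palette-indexed bitmap into the AV_PIX_FMT_RGB32 canvas. Each
 * source byte is a palette index and each palette entry is a packed 32-bit
 * ARGB word, so a pixel with index 3 and pal[3] == 0x80FF0000 becomes a
 * half-transparent red pixel in the destination. One source byte therefore
 * becomes four destination bytes, which is why the destination pointer is
 * offset by r->x * 4 above.
 */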
314 
315 static void sub2video_push_ref(InputStream *ist, int64_t pts)
316 {
317  AVFrame *frame = ist->sub2video.frame;
318  int i;
319  int ret;
320 
321  av_assert1(frame->data[0]);
322  ist->sub2video.last_pts = frame->pts = pts;
323  for (i = 0; i < ist->nb_filters; i++) {
324  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
325  AV_BUFFERSRC_FLAG_KEEP_REF |
326  AV_BUFFERSRC_FLAG_PUSH);
327  if (ret != AVERROR_EOF && ret < 0)
328  av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to the buffer source (%s).\n",
329  av_err2str(ret));
330  }
331 }
332 
333 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
334 {
335  AVFrame *frame = ist->sub2video.frame;
336  int8_t *dst;
337  int dst_linesize;
338  int num_rects, i;
339  int64_t pts, end_pts;
340 
341  if (!frame)
342  return;
343  if (sub) {
344  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
345  AV_TIME_BASE_Q, ist->st->time_base);
346  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
347  AV_TIME_BASE_Q, ist->st->time_base);
348  num_rects = sub->num_rects;
349  } else {
350  /* If we are initializing the system, utilize current heartbeat
351  PTS as the start time, and show until the following subpicture
352  is received. Otherwise, utilize the previous subpicture's end time
353  as the fall-back value. */
354  pts = ist->sub2video.initialize ?
355  heartbeat_pts : ist->sub2video.end_pts;
356  end_pts = INT64_MAX;
357  num_rects = 0;
358  }
359  if (sub2video_get_blank_frame(ist) < 0) {
360  av_log(ist->dec_ctx, AV_LOG_ERROR,
361  "Impossible to get a blank canvas.\n");
362  return;
363  }
364  dst = frame->data [0];
365  dst_linesize = frame->linesize[0];
366  for (i = 0; i < num_rects; i++)
367  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
368  sub2video_push_ref(ist, pts);
369  ist->sub2video.end_pts = end_pts;
370  ist->sub2video.initialize = 0;
371 }
372 
373 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
374 {
375  InputFile *infile = input_files[ist->file_index];
376  int i, j, nb_reqs;
377  int64_t pts2;
378 
379  /* When a frame is read from a file, examine all sub2video streams in
380  the same file and send the sub2video frame again. Otherwise, decoded
381  video frames could be accumulating in the filter graph while a filter
382  (possibly overlay) is desperately waiting for a subtitle frame. */
383  for (i = 0; i < infile->nb_streams; i++) {
384  InputStream *ist2 = input_streams[infile->ist_index + i];
385  if (!ist2->sub2video.frame)
386  continue;
387  /* subtitles seem to be usually muxed ahead of other streams;
388  if not, subtracting a larger time here is necessary */
389  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
390  /* do not send the heartbeat frame if the subtitle is already ahead */
391  if (pts2 <= ist2->sub2video.last_pts)
392  continue;
393  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
394  /* if we have hit the end of the current displayed subpicture,
395  or if we need to initialize the system, update the
396  overlayed subpicture and its start/end times */
397  sub2video_update(ist2, pts2 + 1, NULL);
398  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
399  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
400  if (nb_reqs)
401  sub2video_push_ref(ist2, pts2);
402  }
403 }
404 
405 static void sub2video_flush(InputStream *ist)
406 {
407  int i;
408  int ret;
409 
410  if (ist->sub2video.end_pts < INT64_MAX)
411  sub2video_update(ist, INT64_MAX, NULL);
412  for (i = 0; i < ist->nb_filters; i++) {
413  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
414  if (ret != AVERROR_EOF && ret < 0)
415  av_log(NULL, AV_LOG_WARNING, "Error flushing the frame.\n");
416  }
417 }
418 
419 /* end of sub2video hack */
420 
421 static void term_exit_sigsafe(void)
422 {
423 #if HAVE_TERMIOS_H
424  if(restore_tty)
425  tcsetattr (0, TCSANOW, &oldtty);
426 #endif
427 }
428 
429 void term_exit(void)
430 {
431  av_log(NULL, AV_LOG_QUIET, "%s", "");
432  term_exit_sigsafe();
433 }
434 
435 static volatile int received_sigterm = 0;
436 static volatile int received_nb_signals = 0;
437 __thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
438 __thread volatile int ffmpeg_exited = 0;
439 __thread volatile int main_ffmpeg_return_code = 0;
440 __thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
441 extern __thread volatile int longjmp_value;
442 
443 static void
444 sigterm_handler(int sig)
445 {
446  int ret;
447  received_sigterm = sig;
448  received_nb_signals++;
449  term_exit_sigsafe();
450  if(received_nb_signals > 3) {
451  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
452  strlen("Received > 3 system signals, hard exiting\n"));
453  if (ret < 0) { /* Do nothing */ };
454  exit(123);
455  }
456 }
457 
458 #if HAVE_SETCONSOLECTRLHANDLER
459 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
460 {
461  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
462 
463  switch (fdwCtrlType)
464  {
465  case CTRL_C_EVENT:
466  case CTRL_BREAK_EVENT:
467  sigterm_handler(SIGINT);
468  return TRUE;
469 
470  case CTRL_CLOSE_EVENT:
471  case CTRL_LOGOFF_EVENT:
472  case CTRL_SHUTDOWN_EVENT:
473  sigterm_handler(SIGTERM);
474  /* Basically, with these 3 events, when we return from this method the
475  process is hard terminated, so stall as long as we need to
476  to try and let the main thread(s) clean up and gracefully terminate
477  (we have at most 5 seconds, but should be done far before that). */
478  while (!ffmpeg_exited) {
479  Sleep(0);
480  }
481  return TRUE;
482 
483  default:
484  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
485  return FALSE;
486  }
487 }
488 #endif
489 
490 #ifdef __linux__
491 #define SIGNAL(sig, func) \
492  do { \
493  action.sa_handler = func; \
494  sigaction(sig, &action, NULL); \
495  } while (0)
496 #else
497 #define SIGNAL(sig, func) \
498  signal(sig, func)
499 #endif
500 
501 void term_init(void)
502 {
503 #if defined __linux__
504  struct sigaction action = {0};
505  action.sa_handler = sigterm_handler;
506 
507  /* block other interrupts while processing this one */
508  sigfillset(&action.sa_mask);
509 
510  /* restart interruptible functions (i.e. don't fail with EINTR) */
511  action.sa_flags = SA_RESTART;
512 #endif
513 
514 #if HAVE_TERMIOS_H
515  if (!run_as_daemon && stdin_interaction) {
516  struct termios tty;
517  if (tcgetattr (0, &tty) == 0) {
518  oldtty = tty;
519  restore_tty = 1;
520 
521  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
522  |INLCR|IGNCR|ICRNL|IXON);
523  tty.c_oflag |= OPOST;
524  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
525  tty.c_cflag &= ~(CSIZE|PARENB);
526  tty.c_cflag |= CS8;
527  tty.c_cc[VMIN] = 1;
528  tty.c_cc[VTIME] = 0;
529 
530  tcsetattr (0, TCSANOW, &tty);
531  }
532  if (handleSIGQUIT == 1) {
533  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
534  }
535  }
536 #endif
537 
538  if (handleSIGINT == 1) {
539  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
540  }
541  if (handleSIGTERM == 1) {
542  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
543  }
544 #ifdef SIGXCPU
545  if (handleSIGXCPU == 1) {
546  signal(SIGXCPU, sigterm_handler);
547  }
548 #endif
549 #ifdef SIGPIPE
550  if (handleSIGPIPE == 1) {
551  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
552  }
553 #endif
554 #if HAVE_SETCONSOLECTRLHANDLER
555  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
556 #endif
557 }
558 
559 /* read a key without blocking */
560 static int read_key(void)
561 {
562  unsigned char ch;
563 #if HAVE_TERMIOS_H
564  int n = 1;
565  struct timeval tv;
566  fd_set rfds;
567 
568  FD_ZERO(&rfds);
569  FD_SET(0, &rfds);
570  tv.tv_sec = 0;
571  tv.tv_usec = 0;
572  n = select(1, &rfds, NULL, NULL, &tv);
573  if (n > 0) {
574  n = read(0, &ch, 1);
575  if (n == 1)
576  return ch;
577 
578  return n;
579  }
580 #elif HAVE_KBHIT
581 # if HAVE_PEEKNAMEDPIPE
582  static int is_pipe;
583  static HANDLE input_handle;
584  DWORD dw, nchars;
585  if(!input_handle){
586  input_handle = GetStdHandle(STD_INPUT_HANDLE);
587  is_pipe = !GetConsoleMode(input_handle, &dw);
588  }
589 
590  if (is_pipe) {
591  /* When running under a GUI, you will end here. */
592  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
593  // input pipe may have been closed by the program that ran ffmpeg
594  return -1;
595  }
596  //Read it
597  if(nchars != 0) {
598  read(0, &ch, 1);
599  return ch;
600  }else{
601  return -1;
602  }
603  }
604 # endif
605  if(kbhit())
606  return(getch());
607 #endif
608  return -1;
609 }
610 
611 int decode_interrupt_cb(void *ctx);
612 
613 int decode_interrupt_cb(void *ctx)
614 {
615  return received_nb_signals > atomic_load(&transcode_init_done);
616 }
617 
618 __thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
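/*
 * Illustrative usage (assumption, not shown in this excerpt): int_cb is
 * attached to an AVFormatContext before any blocking I/O is performed, e.g.
 *
 *     AVFormatContext *ic = avformat_alloc_context();
 *     ic->interrupt_callback = int_cb;
 *     ret = avformat_open_input(&ic, filename, NULL, NULL);
 *
 * libavformat polls decode_interrupt_cb() during blocking reads and aborts
 * with AVERROR_EXIT once the callback returns non-zero, i.e. after more
 * signals have been received than transcode_init_done accounts for.
 */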
619 
620 static void ffmpeg_cleanup(int ret)
621 {
622  int i, j;
623 
624  if (do_benchmark) {
625  int maxrss = getmaxrss() / 1024;
626  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
627  }
628 
629  for (i = 0; i < nb_filtergraphs; i++) {
630  FilterGraph *fg = filtergraphs[i];
631  avfilter_graph_free(&fg->graph);
632  for (j = 0; j < fg->nb_inputs; j++) {
633  InputFilter *ifilter = fg->inputs[j];
634  struct InputStream *ist = ifilter->ist;
635 
636  while (av_fifo_size(ifilter->frame_queue)) {
637  AVFrame *frame;
638  av_fifo_generic_read(ifilter->frame_queue, &frame,
639  sizeof(frame), NULL);
640  av_frame_free(&frame);
641  }
642  av_fifo_freep(&ifilter->frame_queue);
643  if (ist->sub2video.sub_queue) {
644  while (av_fifo_size(ist->sub2video.sub_queue)) {
645  AVSubtitle sub;
646  av_fifo_generic_read(ist->sub2video.sub_queue,
647  &sub, sizeof(sub), NULL);
648  avsubtitle_free(&sub);
649  }
650  av_fifo_freep(&ist->sub2video.sub_queue);
651  }
652  av_buffer_unref(&ifilter->hw_frames_ctx);
653  av_freep(&ifilter->name);
654  av_freep(&fg->inputs[j]);
655  }
656  av_freep(&fg->inputs);
657  for (j = 0; j < fg->nb_outputs; j++) {
658  OutputFilter *ofilter = fg->outputs[j];
659 
660  avfilter_inout_free(&ofilter->out_tmp);
661  av_freep(&ofilter->name);
662  av_freep(&ofilter->formats);
663  av_freep(&ofilter->channel_layouts);
664  av_freep(&ofilter->sample_rates);
665  av_freep(&fg->outputs[j]);
666  }
667  av_freep(&fg->outputs);
668  av_freep(&fg->graph_desc);
669 
670  av_freep(&filtergraphs[i]);
671  }
672  av_freep(&filtergraphs);
673 
674  av_freep(&subtitle_out);
675 
676  /* close files */
677  for (i = 0; i < nb_output_files; i++) {
678  OutputFile *of = output_files[i];
679  AVFormatContext *s;
680  if (!of)
681  continue;
682  s = of->ctx;
683  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
684  avio_closep(&s->pb);
685  avformat_free_context(s);
686  av_dict_free(&of->opts);
687 
688  av_freep(&output_files[i]);
689  }
690  for (i = 0; i < nb_output_streams; i++) {
691  OutputStream *ost = output_streams[i];
692 
693  if (!ost)
694  continue;
695 
696  av_bsf_free(&ost->bsf_ctx);
697 
698  av_frame_free(&ost->filtered_frame);
699  av_frame_free(&ost->last_frame);
700  av_dict_free(&ost->encoder_opts);
701 
702  av_freep(&ost->forced_keyframes);
703  av_expr_free(ost->forced_keyframes_pexpr);
704  av_freep(&ost->avfilter);
705  av_freep(&ost->logfile_prefix);
706 
707  av_freep(&ost->audio_channels_map);
708  ost->audio_channels_mapped = 0;
709 
710  av_dict_free(&ost->sws_dict);
711  av_dict_free(&ost->swr_opts);
712 
713  avcodec_free_context(&ost->enc_ctx);
714  avcodec_parameters_free(&ost->ref_par);
715 
716  if (ost->muxing_queue) {
717  while (av_fifo_size(ost->muxing_queue)) {
718  AVPacket pkt;
719  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
720  av_packet_unref(&pkt);
721  }
722  av_fifo_freep(&ost->muxing_queue);
723  }
724 
725  av_freep(&output_streams[i]);
726  }
727 #if HAVE_THREADS
728  free_input_threads();
729 #endif
730  for (i = 0; i < nb_input_files; i++) {
731  avformat_close_input(&input_files[i]->ctx);
732  av_freep(&input_files[i]);
733  }
734  for (i = 0; i < nb_input_streams; i++) {
735  InputStream *ist = input_streams[i];
736 
737  av_frame_free(&ist->decoded_frame);
738  av_frame_free(&ist->filter_frame);
739  av_dict_free(&ist->decoder_opts);
740  avsubtitle_free(&ist->prev_sub.subtitle);
741  av_frame_free(&ist->sub2video.frame);
742  av_freep(&ist->filters);
743  av_freep(&ist->hwaccel_device);
744  av_freep(&ist->dts_buffer);
745 
746  avcodec_free_context(&ist->dec_ctx);
747 
748  av_freep(&input_streams[i]);
749  }
750 
751  if (vstats_file) {
752  if (fclose(vstats_file))
753  av_log(NULL, AV_LOG_ERROR,
754  "Error closing vstats file, loss of information possible: %s\n",
755  av_err2str(AVERROR(errno)));
756  }
757  av_freep(&vstats_filename);
758 
759  av_freep(&input_streams);
760  av_freep(&input_files);
761  av_freep(&output_streams);
762  av_freep(&output_files);
763 
764  uninit_opts();
765 
766  avformat_network_deinit();
767 
768  if (received_sigterm) {
769  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
770  (int) received_sigterm);
771  } else if (cancelRequested(_sessionId)) {
772  av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
773  } else if (ret && atomic_load(&transcode_init_done)) {
774  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
775  }
776  term_exit();
777  ffmpeg_exited = 1;
778 }
779 
780 void remove_avoptions(AVDictionary **a, AVDictionary *b)
781 {
782  AVDictionaryEntry *t = NULL;
783 
784  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
785  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
786  }
787 }
788 
789 void assert_avoptions(AVDictionary *m)
790 {
791  AVDictionaryEntry *t;
792  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
793  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
794  exit_program(1);
795  }
796 }
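/*
 * Usage note (illustrative, not from the original source): libavcodec and
 * libavformat open functions consume the dictionary entries they recognize
 * and leave the rest behind, so a typical pattern is
 *
 *     ret = avcodec_open2(enc_ctx, codec, &encoder_opts);
 *     assert_avoptions(encoder_opts);   // fatal "Option ... not found." if
 *                                       // anything was left unconsumed
 *
 * remove_avoptions() is the complementary helper: it strips from dictionary a
 * every key that also appears in b, which is useful for discarding options
 * that were already applied elsewhere before running the leftover check.
 */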
797 
798 static void abort_codec_experimental(AVCodec *c, int encoder)
799 {
800  exit_program(1);
801 }
802 
803 static void update_benchmark(const char *fmt, ...)
804 {
805  if (do_benchmark_all) {
806  BenchmarkTimeStamps t = get_benchmark_time_stamps();
807  va_list va;
808  char buf[1024];
809 
810  if (fmt) {
811  va_start(va, fmt);
812  vsnprintf(buf, sizeof(buf), fmt, va);
813  va_end(va);
814  av_log(NULL, AV_LOG_INFO,
815  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
816  t.user_usec - current_time.user_usec,
817  t.sys_usec - current_time.sys_usec,
818  t.real_usec - current_time.real_usec, buf);
819  }
820  current_time = t;
821  }
822 }
823 
824 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
825 {
826  int i;
827  for (i = 0; i < nb_output_streams; i++) {
828  OutputStream *ost2 = output_streams[i];
829  ost2->finished |= ost == ost2 ? this_stream : others;
830  }
831 }
832 
833 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
834 {
835  AVFormatContext *s = of->ctx;
836  AVStream *st = ost->st;
837  int ret;
838 
839  /*
840  * Audio encoders may split the packets -- #frames in != #packets out.
841  * But there is no reordering, so we can limit the number of output packets
842  * by simply dropping them here.
843  * Counting encoded video frames needs to be done separately because of
844  * reordering, see do_video_out().
845  * Do not count the packet when unqueued because it has been counted when queued.
846  */
847  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
848  if (ost->frame_number >= ost->max_frames) {
849  av_packet_unref(pkt);
850  return;
851  }
852  ost->frame_number++;
853  }
854 
855  if (!of->header_written) {
856  AVPacket tmp_pkt = {0};
857  /* the muxer is not initialized yet, buffer the packet */
858  if (!av_fifo_space(ost->muxing_queue)) {
859  unsigned int are_we_over_size =
860  (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
861  int new_size = are_we_over_size ?
862  FFMIN(2 * av_fifo_size(ost->muxing_queue),
863  ost->max_muxing_queue_size) :
864  2 * av_fifo_size(ost->muxing_queue);
865 
866  if (new_size <= av_fifo_size(ost->muxing_queue)) {
867  av_log(NULL, AV_LOG_ERROR,
868  "Too many packets buffered for output stream %d:%d.\n",
869  ost->file_index, ost->st->index);
870  exit_program(1);
871  }
872  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
873  if (ret < 0)
874  exit_program(1);
875  }
876  ret = av_packet_make_refcounted(pkt);
877  if (ret < 0)
878  exit_program(1);
879  av_packet_move_ref(&tmp_pkt, pkt);
880  ost->muxing_queue_data_size += tmp_pkt.size;
881  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
882  return;
883  }
884 
885  if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
886  (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
887  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
888 
889  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
890  int i;
891  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
892  NULL);
893  ost->quality = sd ? AV_RL32(sd) : -1;
894  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
895 
896  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
897  if (sd && i < sd[5])
898  ost->error[i] = AV_RL64(sd + 8 + 8*i);
899  else
900  ost->error[i] = -1;
901  }
902 
903  if (ost->frame_rate.num && ost->is_cfr) {
904  if (pkt->duration > 0)
905  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
906  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
907  ost->mux_timebase);
908  }
909  }
910 
911  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
912 
913  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
914  if (pkt->dts != AV_NOPTS_VALUE &&
915  pkt->pts != AV_NOPTS_VALUE &&
916  pkt->dts > pkt->pts) {
917  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
918  pkt->dts, pkt->pts,
919  ost->file_index, ost->st->index);
920  pkt->pts =
921  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
922  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
923  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
924  }
925  if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
926  pkt->dts != AV_NOPTS_VALUE &&
927  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
928  ost->last_mux_dts != AV_NOPTS_VALUE) {
929  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
930  if (pkt->dts < max) {
931  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
932  if (exit_on_error)
933  loglevel = AV_LOG_ERROR;
934  av_log(s, loglevel, "Non-monotonous DTS in output stream "
935  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
936  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
937  if (exit_on_error) {
938  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
939  exit_program(1);
940  }
941  av_log(s, loglevel, "changing to %"PRId64". This may result "
942  "in incorrect timestamps in the output file.\n",
943  max);
944  if (pkt->pts >= pkt->dts)
945  pkt->pts = FFMAX(pkt->pts, max);
946  pkt->dts = max;
947  }
948  }
949  }
950  ost->last_mux_dts = pkt->dts;
951 
952  ost->data_size += pkt->size;
953  ost->packets_written++;
954 
955  pkt->stream_index = ost->index;
956 
957  if (debug_ts) {
958  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
959  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
960  av_get_media_type_string(ost->enc_ctx->codec_type),
961  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
962  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
963  pkt->size
964  );
965  }
966 
967  ret = av_interleaved_write_frame(s, pkt);
968  if (ret < 0) {
969  print_error("av_interleaved_write_frame()", ret);
970  main_ffmpeg_return_code = 1;
971  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
972  }
973  av_packet_unref(pkt);
974 }
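/*
 * Worked example (not part of the original source) for the "replacing by
 * guess" branch above: pkt->pts + pkt->dts + (last_mux_dts + 1) minus the
 * minimum and the maximum of those three values is simply their median. With
 * pkt->pts = 100, pkt->dts = 130 (invalid, since dts > pts) and
 * last_mux_dts + 1 = 111, the sum is 341, the minimum 100 and the maximum 130,
 * so both pts and dts are replaced by 111, which keeps the muxed timestamps
 * monotonic without trusting either broken field.
 */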
975 
976 static void close_output_stream(OutputStream *ost)
977 {
978  OutputFile *of = output_files[ost->file_index];
979 
980  ost->finished |= ENCODER_FINISHED;
981  if (of->shortest) {
982  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
983  of->recording_time = FFMIN(of->recording_time, end);
984  }
985 }
986 
987 /*
988  * Send a single packet to the output, applying any bitstream filters
989  * associated with the output stream. This may result in any number
990  * of packets actually being written, depending on what bitstream
991  * filters are applied. The supplied packet is consumed and will be
992  * blank (as if newly-allocated) when this function returns.
993  *
994  * If eof is set, instead indicate EOF to all bitstream filters and
995  * therefore flush any delayed packets to the output. A blank packet
996  * must be supplied in this case.
997  */
998 static void output_packet(OutputFile *of, AVPacket *pkt,
999  OutputStream *ost, int eof)
1000 {
1001  int ret = 0;
1002 
1003  /* apply the output bitstream filters */
1004  if (ost->bsf_ctx) {
1005  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
1006  if (ret < 0)
1007  goto finish;
1008  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
1009  write_packet(of, pkt, ost, 0);
1010  if (ret == AVERROR(EAGAIN))
1011  ret = 0;
1012  } else if (!eof)
1013  write_packet(of, pkt, ost, 0);
1014 
1015 finish:
1016  if (ret < 0 && ret != AVERROR_EOF) {
1017  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
1018  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
1019  if(exit_on_error)
1020  exit_program(1);
1021  }
1022 }
1023 
1024 static int check_recording_time(OutputStream *ost)
1025 {
1026  OutputFile *of = output_files[ost->file_index];
1027 
1028  if (of->recording_time != INT64_MAX &&
1029  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
1030  AV_TIME_BASE_Q) >= 0) {
1031  close_output_stream(ost);
1032  return 0;
1033  }
1034  return 1;
1035 }
1036 
1037 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
1038 {
1039  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
1040  AVCodecContext *enc = ost->enc_ctx;
1041  if (!frame || frame->pts == AV_NOPTS_VALUE ||
1042  !enc || !ost->filter || !ost->filter->graph->graph)
1043  goto early_exit;
1044 
1045  {
1046  AVFilterContext *filter = ost->filter->filter;
1047 
1048  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1049  AVRational filter_tb = av_buffersink_get_time_base(filter);
1050  AVRational tb = enc->time_base;
1051  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1052 
1053  tb.den <<= extra_bits;
1054  float_pts =
1055  av_rescale_q(frame->pts, filter_tb, tb) -
1056  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1057  float_pts /= 1 << extra_bits;
1058  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1059  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1060 
1061  frame->pts =
1062  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
1063  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1064  }
1065 
1066 early_exit:
1067 
1068  if (debug_ts) {
1069  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1070  frame ? av_ts2str(frame->pts) : "NULL",
1071  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
1072  float_pts,
1073  enc ? enc->time_base.num : -1,
1074  enc ? enc->time_base.den : -1);
1075  }
1076 
1077  return float_pts;
1078 }
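/*
 * Worked example (not part of the original source): with an encoder time base
 * of 1/25, av_log2(25) is 4, so extra_bits = av_clip(29 - 4, 0, 16) = 16 and
 * the rescale above is done in a temporary time base of 1/(25 << 16). Dividing
 * by 1 << 16 afterwards leaves float_pts with roughly 16 fractional bits of
 * sub-tick precision for the vsync/fps logic, while frame->pts itself is
 * rescaled to plain integer encoder ticks.
 */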
1079 
1080 static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len);
1081 
1082 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
1083 {
1084  int ret = AVERROR_BUG;
1085  char error[1024] = {0};
1086 
1087  if (ost->initialized)
1088  return 0;
1089 
1090  ret = init_output_stream(ost, frame, error, sizeof(error));
1091  if (ret < 0) {
1092  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1093  ost->file_index, ost->index, error);
1094 
1095  if (fatal)
1096  exit_program(1);
1097  }
1098 
1099  return ret;
1100 }
1101 
1102 static void do_audio_out(OutputFile *of, OutputStream *ost,
1103  AVFrame *frame)
1104 {
1105  AVCodecContext *enc = ost->enc_ctx;
1106  AVPacket pkt;
1107  int ret;
1108 
1109  av_init_packet(&pkt);
1110  pkt.data = NULL;
1111  pkt.size = 0;
1112 
1113  adjust_frame_pts_to_encoder_tb(of, ost, frame);
1114 
1115  if (!check_recording_time(ost))
1116  return;
1117 
1118  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1119  frame->pts = ost->sync_opts;
1120  ost->sync_opts = frame->pts + frame->nb_samples;
1121  ost->samples_encoded += frame->nb_samples;
1122  ost->frames_encoded++;
1123 
1124  av_assert0(pkt.size || !pkt.data);
1125  update_benchmark(NULL);
1126  if (debug_ts) {
1127  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1128  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1129  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1130  enc->time_base.num, enc->time_base.den);
1131  }
1132 
1133  ret = avcodec_send_frame(enc, frame);
1134  if (ret < 0)
1135  goto error;
1136 
1137  while (1) {
1138  ret = avcodec_receive_packet(enc, &pkt);
1139  if (ret == AVERROR(EAGAIN))
1140  break;
1141  if (ret < 0)
1142  goto error;
1143 
1144  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1145 
1146  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1147 
1148  if (debug_ts) {
1149  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1150  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1151  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1152  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1153  }
1154 
1155  output_packet(of, &pkt, ost, 0);
1156  }
1157 
1158  return;
1159 error:
1160  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1161  exit_program(1);
1162 }
1163 
1164 static void do_subtitle_out(OutputFile *of,
1165  OutputStream *ost,
1166  AVSubtitle *sub)
1167 {
1168  int subtitle_out_max_size = 1024 * 1024;
1169  int subtitle_out_size, nb, i;
1170  AVCodecContext *enc;
1171  AVPacket pkt;
1172  int64_t pts;
1173 
1174  if (sub->pts == AV_NOPTS_VALUE) {
1175  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1176  if (exit_on_error)
1177  exit_program(1);
1178  return;
1179  }
1180 
1181  enc = ost->enc_ctx;
1182 
1183  if (!subtitle_out) {
1184  subtitle_out = av_malloc(subtitle_out_max_size);
1185  if (!subtitle_out) {
1186  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1187  exit_program(1);
1188  }
1189  }
1190 
1191  /* Note: DVB subtitle need one packet to draw them and one other
1192  packet to clear them */
1193  /* XXX: signal it in the codec context ? */
1194  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1195  nb = 2;
1196  else
1197  nb = 1;
1198 
1199  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1200  pts = sub->pts;
1201  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1202  pts -= output_files[ost->file_index]->start_time;
1203  for (i = 0; i < nb; i++) {
1204  unsigned save_num_rects = sub->num_rects;
1205 
1206  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1207  if (!check_recording_time(ost))
1208  return;
1209 
1210  sub->pts = pts;
1211  // start_display_time is required to be 0
1212  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1213  sub->end_display_time -= sub->start_display_time;
1214  sub->start_display_time = 0;
1215  if (i == 1)
1216  sub->num_rects = 0;
1217 
1218  ost->frames_encoded++;
1219 
1220  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1221  subtitle_out_max_size, sub);
1222  if (i == 1)
1223  sub->num_rects = save_num_rects;
1224  if (subtitle_out_size < 0) {
1225  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1226  exit_program(1);
1227  }
1228 
1229  av_init_packet(&pkt);
1230  pkt.data = subtitle_out;
1231  pkt.size = subtitle_out_size;
1232  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1233  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1234  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1235  /* XXX: the pts correction is handled here. Maybe handling
1236  it in the codec would be better */
1237  if (i == 0)
1238  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1239  else
1240  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1241  }
1242  pkt.dts = pkt.pts;
1243  output_packet(of, &pkt, ost, 0);
1244  }
1245 }
1246 
1247 static void do_video_out(OutputFile *of,
1248  OutputStream *ost,
1249  AVFrame *next_picture)
1250 {
1251  int ret, format_video_sync;
1252  AVPacket pkt;
1253  AVCodecContext *enc = ost->enc_ctx;
1254  AVRational frame_rate;
1255  int nb_frames, nb0_frames, i;
1256  double delta, delta0;
1257  double duration = 0;
1258  double sync_ipts = AV_NOPTS_VALUE;
1259  int frame_size = 0;
1260  InputStream *ist = NULL;
1261  AVFilterContext *filter = ost->filter->filter;
1262 
1263  init_output_stream_wrapper(ost, next_picture, 1);
1264  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1265 
1266  if (ost->source_index >= 0)
1267  ist = input_streams[ost->source_index];
1268 
1269  frame_rate = av_buffersink_get_frame_rate(filter);
1270  if (frame_rate.num > 0 && frame_rate.den > 0)
1271  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1272 
1273  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1274  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1275 
1276  if (!ost->filters_script &&
1277  !ost->filters &&
1278  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1279  next_picture &&
1280  ist &&
1281  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1282  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1283  }
1284 
1285  if (!next_picture) {
1286  //end, flushing
1287  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1288  ost->last_nb0_frames[1],
1289  ost->last_nb0_frames[2]);
1290  } else {
1291  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1292  delta = delta0 + duration;
1293 
1294  /* by default, we output a single frame */
1295  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1296  nb_frames = 1;
1297 
1298  format_video_sync = video_sync_method;
1299  if (format_video_sync == VSYNC_AUTO) {
1300  if(!strcmp(of->ctx->oformat->name, "avi")) {
1301  format_video_sync = VSYNC_VFR;
1302  } else
1303  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1304  if ( ist
1305  && format_video_sync == VSYNC_CFR
1306  && input_files[ist->file_index]->ctx->nb_streams == 1
1307  && input_files[ist->file_index]->input_ts_offset == 0) {
1308  format_video_sync = VSYNC_VSCFR;
1309  }
1310  if (format_video_sync == VSYNC_CFR && copy_ts) {
1311  format_video_sync = VSYNC_VSCFR;
1312  }
1313  }
1314  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1315 
1316  if (delta0 < 0 &&
1317  delta > 0 &&
1318  format_video_sync != VSYNC_PASSTHROUGH &&
1319  format_video_sync != VSYNC_DROP) {
1320  if (delta0 < -0.6) {
1321  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1322  } else
1323  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1324  sync_ipts = ost->sync_opts;
1325  duration += delta0;
1326  delta0 = 0;
1327  }
1328 
1329  switch (format_video_sync) {
1330  case VSYNC_VSCFR:
1331  if (ost->frame_number == 0 && delta0 >= 0.5) {
1332  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1333  delta = duration;
1334  delta0 = 0;
1335  ost->sync_opts = llrint(sync_ipts);
1336  }
1337  case VSYNC_CFR:
1338  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1339  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1340  nb_frames = 0;
1341  } else if (delta < -1.1)
1342  nb_frames = 0;
1343  else if (delta > 1.1) {
1344  nb_frames = lrintf(delta);
1345  if (delta0 > 1.1)
1346  nb0_frames = llrintf(delta0 - 0.6);
1347  }
1348  break;
1349  case VSYNC_VFR:
1350  if (delta <= -0.6)
1351  nb_frames = 0;
1352  else if (delta > 0.6)
1353  ost->sync_opts = llrint(sync_ipts);
1354  break;
1355  case VSYNC_DROP:
1356  case VSYNC_PASSTHROUGH:
1357  ost->sync_opts = llrint(sync_ipts);
1358  break;
1359  default:
1360  av_assert0(0);
1361  }
1362  }
1363 
1364  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1365  nb0_frames = FFMIN(nb0_frames, nb_frames);
1366 
1367  memmove(ost->last_nb0_frames + 1,
1368  ost->last_nb0_frames,
1369  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1370  ost->last_nb0_frames[0] = nb0_frames;
1371 
1372  if (nb0_frames == 0 && ost->last_dropped) {
1373  nb_frames_drop++;
1374  av_log(NULL, AV_LOG_VERBOSE,
1375  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1376  ost->frame_number, ost->st->index, ost->last_frame->pts);
1377  }
1378  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1379  if (nb_frames > dts_error_threshold * 30) {
1380  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1381  nb_frames_drop++;
1382  return;
1383  }
1384  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1385  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1386  if (nb_frames_dup > dup_warning) {
1387  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1388  dup_warning *= 10;
1389  }
1390  }
1391  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1392 
1393  /* duplicates frame if needed */
1394  for (i = 0; i < nb_frames; i++) {
1395  AVFrame *in_picture;
1396  int forced_keyframe = 0;
1397  double pts_time;
1398  av_init_packet(&pkt);
1399  pkt.data = NULL;
1400  pkt.size = 0;
1401 
1402  if (i < nb0_frames && ost->last_frame) {
1403  in_picture = ost->last_frame;
1404  } else
1405  in_picture = next_picture;
1406 
1407  if (!in_picture)
1408  return;
1409 
1410  in_picture->pts = ost->sync_opts;
1411 
1412  if (!check_recording_time(ost))
1413  return;
1414 
1415  in_picture->quality = enc->global_quality;
1416  in_picture->pict_type = 0;
1417 
1418  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1419  in_picture->pts != AV_NOPTS_VALUE)
1420  ost->forced_kf_ref_pts = in_picture->pts;
1421 
1422  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1423  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1424  if (ost->forced_kf_index < ost->forced_kf_count &&
1425  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1426  ost->forced_kf_index++;
1427  forced_keyframe = 1;
1428  } else if (ost->forced_keyframes_pexpr) {
1429  double res;
1430  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1431  res = av_expr_eval(ost->forced_keyframes_pexpr,
1432  ost->forced_keyframes_expr_const_values, NULL);
1433  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1434  ost->forced_keyframes_expr_const_values[FKF_N],
1435  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1436  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1437  ost->forced_keyframes_expr_const_values[FKF_T],
1438  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1439  res);
1440  if (res) {
1441  forced_keyframe = 1;
1442  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1443  ost->forced_keyframes_expr_const_values[FKF_N];
1444  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1445  ost->forced_keyframes_expr_const_values[FKF_T];
1446  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1447  }
1448 
1449  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1450  } else if ( ost->forced_keyframes
1451  && !strncmp(ost->forced_keyframes, "source", 6)
1452  && in_picture->key_frame==1
1453  && !i) {
1454  forced_keyframe = 1;
1455  }
1456 
1457  if (forced_keyframe) {
1458  in_picture->pict_type = AV_PICTURE_TYPE_I;
1459  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1460  }
1461 
1462  update_benchmark(NULL);
1463  if (debug_ts) {
1464  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1465  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1466  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1467  enc->time_base.num, enc->time_base.den);
1468  }
1469 
1470  ost->frames_encoded++;
1471 
1472  ret = avcodec_send_frame(enc, in_picture);
1473  if (ret < 0)
1474  goto error;
1475  // Make sure Closed Captions will not be duplicated
1476  av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1477 
1478  while (1) {
1479  ret = avcodec_receive_packet(enc, &pkt);
1480  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1481  if (ret == AVERROR(EAGAIN))
1482  break;
1483  if (ret < 0)
1484  goto error;
1485 
1486  if (debug_ts) {
1487  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1488  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1489  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1490  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1491  }
1492 
1493  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1494  pkt.pts = ost->sync_opts;
1495 
1496  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1497 
1498  if (debug_ts) {
1499  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1500  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1501  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1502  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1503  }
1504 
1505  frame_size = pkt.size;
1506  output_packet(of, &pkt, ost, 0);
1507 
1508  /* if two pass, output log */
1509  if (ost->logfile && enc->stats_out) {
1510  fprintf(ost->logfile, "%s", enc->stats_out);
1511  }
1512  }
1513  ost->sync_opts++;
1514  /*
1515  * For video, number of frames in == number of packets out.
1516  * But there may be reordering, so we can't throw away frames on encoder
1517  * flush, we need to limit them here, before they go into encoder.
1518  */
1519  ost->frame_number++;
1520 
1521  if (vstats_filename && frame_size)
1522  do_video_stats(ost, frame_size);
1523  }
1524 
1525  if (!ost->last_frame)
1526  ost->last_frame = av_frame_alloc();
1527  av_frame_unref(ost->last_frame);
1528  if (next_picture && ost->last_frame)
1529  av_frame_ref(ost->last_frame, next_picture);
1530  else
1531  av_frame_free(&ost->last_frame);
1532 
1533  return;
1534 error:
1535  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1536  exit_program(1);
1537 }
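/*
 * Worked example (not part of the original source) for the VSYNC_CFR branch
 * above: if a picture arrives about 2.6 output-frame durations after
 * ost->sync_opts (delta ~ 2.6), nb_frames = lrintf(2.6) = 3, so three frames
 * are written for this single input picture (the extra ones duplicate the
 * previous or current frame) to keep the output constant frame rate, and
 * nb_frames_dup is increased. Conversely, delta < -1.1 yields nb_frames = 0
 * and the picture is dropped, which is what nb_frames_drop counts.
 */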
1538 
1539 static double psnr(double d)
1540 {
1541  return -10.0 * log10(d);
1542 }
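/*
 * Illustrative note (not part of the original source): psnr() expects a
 * normalized mean squared error. The caller in do_video_stats() below passes
 * ost->error[0] / (width * height * 255.0 * 255.0), i.e. MSE divided by the
 * squared 8-bit peak value, so the result is the usual
 * PSNR = -10 * log10(MSE / MAX^2) in dB. An average squared error of 2.6 per
 * pixel, for instance, gives roughly -10 * log10(2.6 / 65025) ~ 44 dB.
 */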
1543 
1544 static void do_video_stats(OutputStream *ost, int frame_size)
1545 {
1546  AVCodecContext *enc;
1547  int frame_number;
1548  double ti1, bitrate, avg_bitrate;
1549 
1550  /* this is executed just the first time do_video_stats is called */
1551  if (!vstats_file) {
1552  vstats_file = fopen(vstats_filename, "w");
1553  if (!vstats_file) {
1554  perror("fopen");
1555  exit_program(1);
1556  }
1557  }
1558 
1559  enc = ost->enc_ctx;
1560  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1561  frame_number = ost->st->nb_frames;
1562  if (vstats_version <= 1) {
1563  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1564  ost->quality / (float)FF_QP2LAMBDA);
1565  } else {
1566  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1567  ost->quality / (float)FF_QP2LAMBDA);
1568  }
1569 
1570  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1571  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1572 
1573  fprintf(vstats_file,"f_size= %6d ", frame_size);
1574  /* compute pts value */
1575  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1576  if (ti1 < 0.01)
1577  ti1 = 0.01;
1578 
1579  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1580  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1581  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1582  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1583  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1584  }
1585 }
1586 
1587 static void finish_output_stream(OutputStream *ost)
1588 {
1589  OutputFile *of = output_files[ost->file_index];
1590  int i;
1591 
1592  ost->finished |= ENCODER_FINISHED | MUXER_FINISHED;
1593 
1594  if (of->shortest) {
1595  for (i = 0; i < of->ctx->nb_streams; i++)
1596  output_streams[of->ost_index + i]->finished |= ENCODER_FINISHED | MUXER_FINISHED;
1597  }
1598 }
1599 
1600 /**
1601  * Get and encode new output from any of the filtergraphs, without causing
1602  * activity.
1603  *
1604  * @return  0 for success, <0 for severe errors
1605  */
1606 static int reap_filters(int flush)
1607 {
1608  AVFrame *filtered_frame = NULL;
1609  int i;
1610 
1611  /* Reap all buffers present in the buffer sinks */
1612  for (i = 0; i < nb_output_streams; i++) {
1613  OutputStream *ost = output_streams[i];
1614  OutputFile *of = output_files[ost->file_index];
1615  AVFilterContext *filter;
1616  AVCodecContext *enc = ost->enc_ctx;
1617  int ret = 0;
1618 
1619  if (!ost->filter || !ost->filter->graph->graph)
1620  continue;
1621  filter = ost->filter->filter;
1622 
1623  /*
1624  * Unlike video, with audio the audio frame size matters.
1625  * Currently we are fully reliant on the lavfi filter chain to
1626  * do the buffering deed for us, and thus the frame size parameter
1627  * needs to be set accordingly. Where does one get the required
1628  * frame size? From the initialized AVCodecContext of an audio
1629  * encoder. Thus, if we have gotten to an audio stream, initialize
1630  * the encoder earlier than receiving the first AVFrame.
1631  */
1632  if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1633  init_output_stream_wrapper(ost, NULL, 1);
1634 
1635  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1636  return AVERROR(ENOMEM);
1637  }
1638  filtered_frame = ost->filtered_frame;
1639 
1640  while (1) {
1641  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1642  AV_BUFFERSINK_FLAG_NO_REQUEST);
1643  if (ret < 0) {
1644  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1645  av_log(NULL, AV_LOG_WARNING,
1646  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1647  } else if (flush && ret == AVERROR_EOF) {
1648  if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1649  do_video_out(of, ost, NULL);
1650  }
1651  break;
1652  }
1653  if (ost->finished) {
1654  av_frame_unref(filtered_frame);
1655  continue;
1656  }
1657 
1658  switch (av_buffersink_get_type(filter)) {
1659  case AVMEDIA_TYPE_VIDEO:
1660  if (!ost->frame_aspect_ratio.num)
1661  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1662 
1663  do_video_out(of, ost, filtered_frame);
1664  break;
1665  case AVMEDIA_TYPE_AUDIO:
1666  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1667  enc->channels != filtered_frame->channels) {
1668  av_log(NULL, AV_LOG_ERROR,
1669  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1670  break;
1671  }
1672  do_audio_out(of, ost, filtered_frame);
1673  break;
1674  default:
1675  // TODO support subtitle filters
1676  av_assert0(0);
1677  }
1678 
1679  av_frame_unref(filtered_frame);
1680  }
1681  }
1682 
1683  return 0;
1684 }
1685 
1686 static void print_final_stats(int64_t total_size)
1687 {
1688  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1689  uint64_t subtitle_size = 0;
1690  uint64_t data_size = 0;
1691  float percent = -1.0;
1692  int i, j;
1693  int pass1_used = 1;
1694 
1695  for (i = 0; i < nb_output_streams; i++) {
1696  OutputStream *ost = output_streams[i];
1697  switch (ost->enc_ctx->codec_type) {
1698  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1699  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1700  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1701  default: other_size += ost->data_size; break;
1702  }
1703  extra_size += ost->enc_ctx->extradata_size;
1704  data_size += ost->data_size;
1705  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1706  != AV_CODEC_FLAG_PASS1)
1707  pass1_used = 0;
1708  }
1709 
1710  if (data_size && total_size>0 && total_size >= data_size)
1711  percent = 100.0 * (total_size - data_size) / data_size;
1712 
1713  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1714  video_size / 1024.0,
1715  audio_size / 1024.0,
1716  subtitle_size / 1024.0,
1717  other_size / 1024.0,
1718  extra_size / 1024.0);
1719  if (percent >= 0.0)
1720  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1721  else
1722  av_log(NULL, AV_LOG_INFO, "unknown");
1723  av_log(NULL, AV_LOG_INFO, "\n");
1724 
1725  /* print verbose per-stream stats */
1726  for (i = 0; i < nb_input_files; i++) {
1727  InputFile *f = input_files[i];
1728  uint64_t total_packets = 0, total_size = 0;
1729 
1730  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1731  i, f->ctx->url);
1732 
1733  for (j = 0; j < f->nb_streams; j++) {
1734  InputStream *ist = input_streams[f->ist_index + j];
1735  enum AVMediaType type = ist->dec_ctx->codec_type;
1736 
1737  total_size += ist->data_size;
1738  total_packets += ist->nb_packets;
1739 
1740  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1741  i, j, media_type_string(type));
1742  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1743  ist->nb_packets, ist->data_size);
1744 
1745  if (ist->decoding_needed) {
1746  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1747  ist->frames_decoded);
1748  if (type == AVMEDIA_TYPE_AUDIO)
1749  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1750  av_log(NULL, AV_LOG_VERBOSE, "; ");
1751  }
1752 
1753  av_log(NULL, AV_LOG_VERBOSE, "\n");
1754  }
1755 
1756  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1757  total_packets, total_size);
1758  }
1759 
1760  for (i = 0; i < nb_output_files; i++) {
1761  OutputFile *of = output_files[i];
1762  uint64_t total_packets = 0, total_size = 0;
1763 
1764  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1765  i, of->ctx->url);
1766 
1767  for (j = 0; j < of->ctx->nb_streams; j++) {
1768  OutputStream *ost = output_streams[of->ost_index + j];
1769  enum AVMediaType type = ost->enc_ctx->codec_type;
1770 
1771  total_size += ost->data_size;
1772  total_packets += ost->packets_written;
1773 
1774  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1775  i, j, media_type_string(type));
1776  if (ost->encoding_needed) {
1777  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1778  ost->frames_encoded);
1779  if (type == AVMEDIA_TYPE_AUDIO)
1780  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1781  av_log(NULL, AV_LOG_VERBOSE, "; ");
1782  }
1783 
1784  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1785  ost->packets_written, ost->data_size);
1786 
1787  av_log(NULL, AV_LOG_VERBOSE, "\n");
1788  }
1789 
1790  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1791  total_packets, total_size);
1792  }
1793  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1794  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1795  if (pass1_used) {
1796  av_log(NULL, AV_LOG_WARNING, "\n");
1797  } else {
1798  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1799  }
1800  }
1801 }
1802 
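/* Gathers the same progress values that print_report() formats for the log
 * (frame number, fps, quality, total output size, output time in
 * milliseconds, bitrate and speed) and forwards them through
 * report_callback when one is registered, so callers can track progress
 * without parsing log output. */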
1803 static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1804 {
1805  AVFormatContext *oc = NULL;
1806  AVCodecContext *enc = NULL;
1807  OutputStream *ost = NULL;
1808  int64_t pts = INT64_MIN + 1;
1809  int vid, i;
1810 
1811  int frame_number = 0;
1812  float fps = 0;
1813  float quality = 0;
1814  int64_t total_size = 0;
1815  int seconds = 0;
1816  double bitrate = 0.0;
1817  double speed = 0.0;
1818 
1819  float t = (cur_time-timer_start) / 1000000.0;
1820 
1821  oc = output_files[0]->ctx;
1822 
1823  // 1. calculate size
1824  total_size = avio_size(oc->pb);
1825  if (total_size <= 0) {
1826  total_size = avio_tell(oc->pb);
1827  }
1828 
1829  vid = 0;
1830  for (i = 0; i < nb_output_streams; i++) {
1831  ost = output_streams[i];
1832  enc = ost->enc_ctx;
1833 
1834  if (!ost->stream_copy) {
1835 
1836  // 2. extract quality
1837  quality = ost->quality / (float) FF_QP2LAMBDA;
1838  }
1839 
1840  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1841 
1842  // 3. extract frame number
1843  frame_number = ost->frame_number;
1844 
1845  // 4. calculate fps
1846  fps = t > 1 ? frame_number / t : 0;
1847  }
1848 
1849  // 5. calculate time
1850  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1851  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1852  ost->st->time_base, AV_TIME_BASE_Q));
1853 
1854  vid = 1;
1855  }
1856 
1857  // 6. calculate time, with microseconds to milliseconds conversion
1858  seconds = FFABS(pts) / 1000;
1859 
1860  // 7. calculate bitrate in kbit/s
1861  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1862 
1863  // 8. calculate processing speed = processed stream duration / operation duration
1864  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1865 
1866  // FORWARD DATA
1867  if (report_callback != NULL) {
1868  report_callback(frame_number, fps, quality, total_size, seconds, bitrate, speed);
1869  }
1870 }
1871 
1872 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1873 {
1874  AVBPrint buf, buf_script;
1875  OutputStream *ost;
1876  AVFormatContext *oc;
1877  int64_t total_size;
1878  AVCodecContext *enc;
1879  int frame_number, vid, i;
1880  double bitrate;
1881  double speed;
1882  int64_t pts = INT64_MIN + 1;
1883  int hours, mins, secs, us;
1884  const char *hours_sign;
1885  int ret;
1886  float t;
1887 
1888  if (!is_last_report) {
1889  if (last_time == -1) {
1890  last_time = cur_time;
1891  }
1892  if (((cur_time - last_time) < stats_period && !first_report) ||
1893  (first_report && nb_output_dumped < nb_output_files))
1894  return;
1895  last_time = cur_time;
1896  }
1897 
1898  forward_report(is_last_report, timer_start, cur_time);
1899 
1900  if (!print_stats && !is_last_report && !progress_avio)
1901  return;
1902 
1903  t = (cur_time-timer_start) / 1000000.0;
1904 
1905 
1906  oc = output_files[0]->ctx;
1907 
1908  total_size = avio_size(oc->pb);
1909  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1910  total_size = avio_tell(oc->pb);
1911 
1912  vid = 0;
1913  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1914  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1915  for (i = 0; i < nb_output_streams; i++) {
1916  float q = -1;
1917  ost = output_streams[i];
1918  enc = ost->enc_ctx;
1919  if (!ost->stream_copy)
1920  q = ost->quality / (float) FF_QP2LAMBDA;
1921 
1922  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1923  av_bprintf(&buf, "q=%2.1f ", q);
1924  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1925  ost->file_index, ost->index, q);
1926  }
1927  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1928  float fps;
1929 
1930  frame_number = ost->frame_number;
1931  fps = t > 1 ? frame_number / t : 0;
1932  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1933  frame_number, fps < 9.95, fps, q);
1934  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1935  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1936  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1937  ost->file_index, ost->index, q);
1938  if (is_last_report)
1939  av_bprintf(&buf, "L");
1940  if (qp_hist) {
1941  int j;
1942  int qp = lrintf(q);
1943  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1944  qp_histogram[qp]++;
1945  for (j = 0; j < 32; j++)
1946  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1947  }
1948 
1949  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1950  int j;
1951  double error, error_sum = 0;
1952  double scale, scale_sum = 0;
1953  double p;
1954  char type[3] = { 'Y','U','V' };
1955  av_bprintf(&buf, "PSNR=");
1956  for (j = 0; j < 3; j++) {
1957  if (is_last_report) {
1958  error = enc->error[j];
1959  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1960  } else {
1961  error = ost->error[j];
1962  scale = enc->width * enc->height * 255.0 * 255.0;
1963  }
1964  if (j)
1965  scale /= 4;
1966  error_sum += error;
1967  scale_sum += scale;
1968  p = psnr(error / scale);
1969  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1970  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1971  ost->file_index, ost->index, type[j] | 32, p);
1972  }
1973  p = psnr(error_sum / scale_sum);
1974  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1975  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1976  ost->file_index, ost->index, p);
1977  }
1978  vid = 1;
1979  }
1980  /* compute min output value */
1981  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1982  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1983  ost->st->time_base, AV_TIME_BASE_Q));
1984  if (copy_ts) {
1985  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1986  copy_ts_first_pts = pts;
1987  if (copy_ts_first_pts != AV_NOPTS_VALUE)
1988  pts -= copy_ts_first_pts;
1989  }
1990  }
1991 
1992  if (is_last_report)
1993  nb_frames_drop += ost->last_dropped;
1994  }
1995 
1996  secs = FFABS(pts) / AV_TIME_BASE;
1997  us = FFABS(pts) % AV_TIME_BASE;
1998  mins = secs / 60;
1999  secs %= 60;
2000  hours = mins / 60;
2001  mins %= 60;
2002  hours_sign = (pts < 0) ? "-" : "";
2003 
2004  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
2005  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
2006 
2007  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
2008  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
2009  if (pts == AV_NOPTS_VALUE) {
2010  av_bprintf(&buf, "N/A ");
2011  } else {
2012  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
2013  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
2014  }
2015 
2016  if (bitrate < 0) {
2017  av_bprintf(&buf, "bitrate=N/A");
2018  av_bprintf(&buf_script, "bitrate=N/A\n");
2019  }else{
2020  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
2021  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
2022  }
2023 
2024  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
2025  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
2026  if (pts == AV_NOPTS_VALUE) {
2027  av_bprintf(&buf_script, "out_time_us=N/A\n");
2028  av_bprintf(&buf_script, "out_time_ms=N/A\n");
2029  av_bprintf(&buf_script, "out_time=N/A\n");
2030  } else {
2031  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
2032  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
2033  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
2034  hours_sign, hours, mins, secs, us);
2035  }
2036 
2037  if (nb_frames_dup || nb_frames_drop)
2038  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
2039  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
2040  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
2041 
2042  if (speed < 0) {
2043  av_bprintf(&buf, " speed=N/A");
2044  av_bprintf(&buf_script, "speed=N/A\n");
2045  } else {
2046  av_bprintf(&buf, " speed=%4.3gx", speed);
2047  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
2048  }
2049 
2050  if (print_stats || is_last_report) {
2051  const char end = is_last_report ? '\n' : '\r';
2052  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
2053  fprintf(stderr, "%s %c", buf.str, end);
2054  } else
2055  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
2056 
2057  fflush(stderr);
2058  }
2059  av_bprint_finalize(&buf, NULL);
2060 
2061  if (progress_avio) {
2062  av_bprintf(&buf_script, "progress=%s\n",
2063  is_last_report ? "end" : "continue");
2064  avio_write(progress_avio, buf_script.str,
2065  FFMIN(buf_script.len, buf_script.size - 1));
2066  avio_flush(progress_avio);
2067  av_bprint_finalize(&buf_script, NULL);
2068  if (is_last_report) {
2069  if ((ret = avio_closep(&progress_avio)) < 0)
2070  av_log(NULL, AV_LOG_ERROR,
2071  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
2072  }
2073  }
2074 
2075  first_report = 0;
2076 
2077  if (is_last_report)
2078  print_final_stats(total_size);
2079 }
2080 
2081 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
2082 {
2083  // We never got any input. Set a fake format, which will
2084  // come from libavformat.
2085  ifilter->format = par->format;
2086  ifilter->sample_rate = par->sample_rate;
2087  ifilter->channels = par->channels;
2088  ifilter->channel_layout = par->channel_layout;
2089  ifilter->width = par->width;
2090  ifilter->height = par->height;
2091  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
2092 }
2093 
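/* Drains every encoder at end of stream: output streams that never received
 * any data are initialized first, then a NULL frame puts the encoder into
 * flush mode and the remaining packets are read with avcodec_receive_packet()
 * and muxed until AVERROR_EOF. */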
2094 static void flush_encoders(void)
2095 {
2096  int i, ret;
2097 
2098  for (i = 0; i < nb_output_streams; i++) {
2099  OutputStream *ost = output_streams[i];
2100  AVCodecContext *enc = ost->enc_ctx;
2101  OutputFile *of = output_files[ost->file_index];
2102 
2103  if (!ost->encoding_needed)
2104  continue;
2105 
2106  // Try to enable encoding with no input frames.
2107  // Maybe we should just let encoding fail instead.
2108  if (!ost->initialized) {
2109  FilterGraph *fg = ost->filter->graph;
2110 
2111  av_log(NULL, AV_LOG_WARNING,
2112  "Finishing stream %d:%d without any data written to it.\n",
2113  ost->file_index, ost->st->index);
2114 
2115  if (ost->filter && !fg->graph) {
2116  int x;
2117  for (x = 0; x < fg->nb_inputs; x++) {
2118  InputFilter *ifilter = fg->inputs[x];
2119  if (ifilter->format < 0)
2120  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2121  }
2122 
2123  if (!ifilter_has_all_input_formats(fg))
2124  continue;
2125 
2126  ret = configure_filtergraph(fg);
2127  if (ret < 0) {
2128  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
2129  exit_program(1);
2130  }
2131 
2132  finish_output_stream(ost);
2133  }
2134 
2135  init_output_stream_wrapper(ost, NULL, 1);
2136  }
2137 
2138  if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
2139  continue;
2140 
2141  for (;;) {
2142  const char *desc = NULL;
2143  AVPacket pkt;
2144  int pkt_size;
2145 
2146  switch (enc->codec_type) {
2147  case AVMEDIA_TYPE_AUDIO:
2148  desc = "audio";
2149  break;
2150  case AVMEDIA_TYPE_VIDEO:
2151  desc = "video";
2152  break;
2153  default:
2154  av_assert0(0);
2155  }
2156 
2157  av_init_packet(&pkt);
2158  pkt.data = NULL;
2159  pkt.size = 0;
2160 
2161  update_benchmark(NULL);
2162 
2163  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
2164  ret = avcodec_send_frame(enc, NULL);
2165  if (ret < 0) {
2166  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2167  desc,
2168  av_err2str(ret));
2169  exit_program(1);
2170  }
2171  }
2172 
2173  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2174  if (ret < 0 && ret != AVERROR_EOF) {
2175  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2176  desc,
2177  av_err2str(ret));
2178  exit_program(1);
2179  }
2180  if (ost->logfile && enc->stats_out) {
2181  fprintf(ost->logfile, "%s", enc->stats_out);
2182  }
2183  if (ret == AVERROR_EOF) {
2184  output_packet(of, &pkt, ost, 1);
2185  break;
2186  }
2187  if (ost->finished & MUXER_FINISHED) {
2188  av_packet_unref(&pkt);
2189  continue;
2190  }
2191  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
2192  pkt_size = pkt.size;
2193  output_packet(of, &pkt, ost, 0);
2194  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2195  do_video_stats(ost, pkt_size);
2196  }
2197  }
2198  }
2199 }
2200 
2201 /*
2202  * Check whether a packet from ist should be written into ost at this time
2203  */
2204 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2205 {
2206  OutputFile *of = output_files[ost->file_index];
2207  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2208 
2209  if (ost->source_index != ist_index)
2210  return 0;
2211 
2212  if (ost->finished)
2213  return 0;
2214 
2215  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2216  return 0;
2217 
2218  return 1;
2219 }
2220 
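/* Stream-copy path: no decode/encode. The input packet is checked against the
 * output start/recording time window, its timestamps are rescaled from the
 * input stream time base into ost->mux_timebase (offset by the output start
 * time) and the packet is handed straight to output_packet(). A NULL pkt
 * flushes the output bitstream filters. */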
2221 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2222 {
2223  OutputFile *of = output_files[ost->file_index];
2224  InputFile *f = input_files [ist->file_index];
2225  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2226  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2227  AVPacket opkt;
2228 
2229  // EOF: flush output bitstream filters.
2230  if (!pkt) {
2231  av_init_packet(&opkt);
2232  opkt.data = NULL;
2233  opkt.size = 0;
2234  output_packet(of, &opkt, ost, 1);
2235  return;
2236  }
2237 
2238  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2239  !ost->copy_initial_nonkeyframes)
2240  return;
2241 
2242  if (!ost->frame_number && !ost->copy_prior_start) {
2243  int64_t comp_start = start_time;
2244  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2245  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2246  if (pkt->pts == AV_NOPTS_VALUE ?
2247  ist->pts < comp_start :
2248  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2249  return;
2250  }
2251 
2252  if (of->recording_time != INT64_MAX &&
2253  ist->pts >= of->recording_time + start_time) {
2254  close_output_stream(ost);
2255  return;
2256  }
2257 
2258  if (f->recording_time != INT64_MAX) {
2259  start_time = f->ctx->start_time;
2260  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2261  start_time += f->start_time;
2262  if (ist->pts >= f->recording_time + start_time) {
2263  close_output_stream(ost);
2264  return;
2265  }
2266  }
2267 
2268  /* force the input stream PTS */
2269  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2270  ost->sync_opts++;
2271 
2272  if (av_packet_ref(&opkt, pkt) < 0)
2273  exit_program(1);
2274 
2275  if (pkt->pts != AV_NOPTS_VALUE)
2276  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2277 
2278  if (pkt->dts == AV_NOPTS_VALUE) {
2279  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2280  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2281  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2282  if(!duration)
2283  duration = ist->dec_ctx->frame_size;
2284  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2285  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2286  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2287  /* dts will be set immediately afterwards to what pts is now */
2288  opkt.pts = opkt.dts - ost_tb_start_time;
2289  } else
2290  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2291  opkt.dts -= ost_tb_start_time;
2292 
2293  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2294 
2295  output_packet(of, &opkt, ost, 0);
2296 }
2297 
2298 int guess_input_channel_layout(InputStream *ist)
2299 {
2300  AVCodecContext *dec = ist->dec_ctx;
2301 
2302  if (!dec->channel_layout) {
2303  char layout_name[256];
2304 
2305  if (dec->channels > ist->guess_layout_max)
2306  return 0;
2307  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2308  if (!dec->channel_layout)
2309  return 0;
2310  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2311  dec->channels, dec->channel_layout);
2312  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2313  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2314  }
2315  return 1;
2316 }
2317 
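/* Book-keeping after each decode call: counts successes and failures in
 * decode_error_stat[], aborts when exit_on_error is set, and warns (or
 * aborts) about corrupt decoded frames. */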
2318 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2319 {
2320  if (*got_output || ret<0)
2321  decode_error_stat[ret<0] ++;
2322 
2323  if (ret < 0 && exit_on_error)
2324  exit_program(1);
2325 
2326  if (*got_output && ist) {
2327  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2328  av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2329  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2330  if (exit_on_error)
2331  exit_program(1);
2332  }
2333  }
2334 }
2335 
2336 // Filters can be configured only if the formats of all inputs are known.
2337 static int ifilter_has_all_input_formats(FilterGraph *fg)
2338 {
2339  int i;
2340  for (i = 0; i < fg->nb_inputs; i++) {
2341  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2342  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2343  return 0;
2344  }
2345  return 1;
2346 }
2347 
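/* Feeds one decoded frame to a filtergraph input. If the frame's format,
 * dimensions, sample rate or channel layout no longer match the graph, the
 * graph is reconfigured (after draining it with reap_filters(1)); frames
 * that arrive before all inputs have a known format are buffered in
 * ifilter->frame_queue instead of being pushed. */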
2348 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2349 {
2350  FilterGraph *fg = ifilter->graph;
2351  int need_reinit, ret, i;
2352 
2353  /* determine if the parameters for this input changed */
2354  need_reinit = ifilter->format != frame->format;
2355 
2356  switch (ifilter->ist->st->codecpar->codec_type) {
2357  case AVMEDIA_TYPE_AUDIO:
2358  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2359  ifilter->channels != frame->channels ||
2360  ifilter->channel_layout != frame->channel_layout;
2361  break;
2362  case AVMEDIA_TYPE_VIDEO:
2363  need_reinit |= ifilter->width != frame->width ||
2364  ifilter->height != frame->height;
2365  break;
2366  }
2367 
2368  if (!ifilter->ist->reinit_filters && fg->graph)
2369  need_reinit = 0;
2370 
2371  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2372  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2373  need_reinit = 1;
2374 
2375  if (need_reinit) {
2376  ret = ifilter_parameters_from_frame(ifilter, frame);
2377  if (ret < 0)
2378  return ret;
2379  }
2380 
2381  /* (re)init the graph if possible, otherwise buffer the frame and return */
2382  if (need_reinit || !fg->graph) {
2383  for (i = 0; i < fg->nb_inputs; i++) {
2384  if (!ifilter_has_all_input_formats(fg)) {
2385  AVFrame *tmp = av_frame_clone(frame);
2386  if (!tmp)
2387  return AVERROR(ENOMEM);
2388  av_frame_unref(frame);
2389 
2390  if (!av_fifo_space(ifilter->frame_queue)) {
2391  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2392  if (ret < 0) {
2393  av_frame_free(&tmp);
2394  return ret;
2395  }
2396  }
2397  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2398  return 0;
2399  }
2400  }
2401 
2402  ret = reap_filters(1);
2403  if (ret < 0 && ret != AVERROR_EOF) {
2404  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2405  return ret;
2406  }
2407 
2408  ret = configure_filtergraph(fg);
2409  if (ret < 0) {
2410  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2411  return ret;
2412  }
2413  }
2414 
2415  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2416  if (ret < 0) {
2417  if (ret != AVERROR_EOF)
2418  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2419  return ret;
2420  }
2421 
2422  return 0;
2423 }
2424 
2425 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2426 {
2427  int ret = 0;
2428 
2429  ifilter->eof = 1;
2430 
2431  if (ifilter->filter) {
2432 
2433  /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
2434  if (!received_sigterm) {
2435  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2436  }
2437  if (ret < 0)
2438  return ret;
2439  } else {
2440  // the filtergraph was never configured
2441  if (ifilter->format < 0)
2442  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2443  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2444  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2445  return AVERROR_INVALIDDATA;
2446  }
2447  }
2448 
2449  return 0;
2450 }
2451 
2452 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2453 // There is the following difference: if you got a frame, you must call
2454 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2455 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2456 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2457 {
2458  int ret;
2459 
2460  *got_frame = 0;
2461 
2462  if (pkt) {
2463  ret = avcodec_send_packet(avctx, pkt);
2464  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2465  // decoded frames with avcodec_receive_frame() until done.
2466  if (ret < 0 && ret != AVERROR_EOF)
2467  return ret;
2468  }
2469 
2470  ret = avcodec_receive_frame(avctx, frame);
2471  if (ret < 0 && ret != AVERROR(EAGAIN))
2472  return ret;
2473  if (ret >= 0)
2474  *got_frame = 1;
2475 
2476  return 0;
2477 }
2478 
2479 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2480 {
2481  int i, ret;
2482  AVFrame *f;
2483 
2484  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2485  for (i = 0; i < ist->nb_filters; i++) {
2486  if (i < ist->nb_filters - 1) {
2487  f = ist->filter_frame;
2488  ret = av_frame_ref(f, decoded_frame);
2489  if (ret < 0)
2490  break;
2491  } else
2492  f = decoded_frame;
2493  ret = ifilter_send_frame(ist->filters[i], f);
2494  if (ret == AVERROR_EOF)
2495  ret = 0; /* ignore */
2496  if (ret < 0) {
2497  av_log(NULL, AV_LOG_ERROR,
2498  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2499  break;
2500  }
2501  }
2502  return ret;
2503 }
2504 
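/* Audio decode wrapper: runs decode(), rejects invalid sample rates,
 * advances next_pts/next_dts by the decoded sample count, picks a pts source
 * for the frame (frame pts, then packet pts, then stream dts), rescales it
 * with av_rescale_delta() and forwards the frame to the attached filters. */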
2505 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2506  int *decode_failed)
2507 {
2508  AVFrame *decoded_frame;
2509  AVCodecContext *avctx = ist->dec_ctx;
2510  int ret, err = 0;
2511  AVRational decoded_frame_tb;
2512 
2513  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2514  return AVERROR(ENOMEM);
2515  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2516  return AVERROR(ENOMEM);
2517  decoded_frame = ist->decoded_frame;
2518 
2519  update_benchmark(NULL);
2520  ret = decode(avctx, decoded_frame, got_output, pkt);
2521  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2522  if (ret < 0)
2523  *decode_failed = 1;
2524 
2525  if (ret >= 0 && avctx->sample_rate <= 0) {
2526  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2527  ret = AVERROR_INVALIDDATA;
2528  }
2529 
2530  if (ret != AVERROR_EOF)
2531  check_decode_result(ist, got_output, ret);
2532 
2533  if (!*got_output || ret < 0)
2534  return ret;
2535 
2536  ist->samples_decoded += decoded_frame->nb_samples;
2537  ist->frames_decoded++;
2538 
2539  /* increment next_dts to use for the case where the input stream does not
2540  have timestamps or there are multiple frames in the packet */
2541  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2542  avctx->sample_rate;
2543  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2544  avctx->sample_rate;
2545 
2546  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2547  decoded_frame_tb = ist->st->time_base;
2548  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2549  decoded_frame->pts = pkt->pts;
2550  decoded_frame_tb = ist->st->time_base;
2551  }else {
2552  decoded_frame->pts = ist->dts;
2553  decoded_frame_tb = AV_TIME_BASE_Q;
2554  }
2555  if (decoded_frame->pts != AV_NOPTS_VALUE)
2556  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2557  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2558  (AVRational){1, avctx->sample_rate});
2559  ist->nb_samples = decoded_frame->nb_samples;
2560  err = send_frame_to_filters(ist, decoded_frame);
2561 
2562  av_frame_unref(ist->filter_frame);
2563  av_frame_unref(decoded_frame);
2564  return err < 0 ? err : ret;
2565 }
2566 
2567 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2568  int *decode_failed)
2569 {
2570  AVFrame *decoded_frame;
2571  int i, ret = 0, err = 0;
2572  int64_t best_effort_timestamp;
2573  int64_t dts = AV_NOPTS_VALUE;
2574  AVPacket avpkt;
2575 
2576  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2577  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2578  // skip the packet.
2579  if (!eof && pkt && pkt->size == 0)
2580  return 0;
2581 
2582  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2583  return AVERROR(ENOMEM);
2584  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2585  return AVERROR(ENOMEM);
2586  decoded_frame = ist->decoded_frame;
2587  if (ist->dts != AV_NOPTS_VALUE)
2588  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2589  if (pkt) {
2590  avpkt = *pkt;
2591  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2592  }
2593 
2594  // The old code used to set dts on the drain packet, which does not work
2595  // with the new API anymore.
2596  if (eof) {
2597  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2598  if (!new)
2599  return AVERROR(ENOMEM);
2600  ist->dts_buffer = new;
2601  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2602  }
2603 
2604  update_benchmark(NULL);
2605  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2606  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2607  if (ret < 0)
2608  *decode_failed = 1;
2609 
2610  // The following line may be required in some cases where there is no parser
2611  // or the parser does not set has_b_frames correctly
2612  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2613  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2614  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2615  } else
2616  av_log(ist->dec_ctx, AV_LOG_WARNING,
2617  "video_delay is larger in decoder than demuxer %d > %d.\n"
2618  "If you want to help, upload a sample "
2619  "of this file to https://streams.videolan.org/upload/ "
2620  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2621  ist->dec_ctx->has_b_frames,
2622  ist->st->codecpar->video_delay);
2623  }
2624 
2625  if (ret != AVERROR_EOF)
2626  check_decode_result(ist, got_output, ret);
2627 
2628  if (*got_output && ret >= 0) {
2629  if (ist->dec_ctx->width != decoded_frame->width ||
2630  ist->dec_ctx->height != decoded_frame->height ||
2631  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2632  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2633  decoded_frame->width,
2634  decoded_frame->height,
2635  decoded_frame->format,
2636  ist->dec_ctx->width,
2637  ist->dec_ctx->height,
2638  ist->dec_ctx->pix_fmt);
2639  }
2640  }
2641 
2642  if (!*got_output || ret < 0)
2643  return ret;
2644 
2645  if(ist->top_field_first>=0)
2646  decoded_frame->top_field_first = ist->top_field_first;
2647 
2648  ist->frames_decoded++;
2649 
2650  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2651  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2652  if (err < 0)
2653  goto fail;
2654  }
2655  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2656 
2657  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2658  *duration_pts = decoded_frame->pkt_duration;
2659 
2660  if (ist->framerate.num)
2661  best_effort_timestamp = ist->cfr_next_pts++;
2662 
2663  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2664  best_effort_timestamp = ist->dts_buffer[0];
2665 
2666  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2667  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2668  ist->nb_dts_buffer--;
2669  }
2670 
2671  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2672  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2673 
2674  if (ts != AV_NOPTS_VALUE)
2675  ist->next_pts = ist->pts = ts;
2676  }
2677 
2678  if (debug_ts) {
2679  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2680  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2681  ist->st->index, av_ts2str(decoded_frame->pts),
2682  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2683  best_effort_timestamp,
2684  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2685  decoded_frame->key_frame, decoded_frame->pict_type,
2686  ist->st->time_base.num, ist->st->time_base.den);
2687  }
2688 
2689  if (ist->st->sample_aspect_ratio.num)
2690  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2691 
2692  err = send_frame_to_filters(ist, decoded_frame);
2693 
2694 fail:
2695  av_frame_unref(ist->filter_frame);
2696  av_frame_unref(decoded_frame);
2697  return err < 0 ? err : ret;
2698 }
2699 
2700 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2701  int *decode_failed)
2702 {
2703  AVSubtitle subtitle;
2704  int free_sub = 1;
2705  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2706  &subtitle, got_output, pkt);
2707 
2708  check_decode_result(NULL, got_output, ret);
2709 
2710  if (ret < 0 || !*got_output) {
2711  *decode_failed = 1;
2712  if (!pkt->size)
2713  sub2video_flush(ist);
2714  return ret;
2715  }
2716 
2717  if (ist->fix_sub_duration) {
2718  int end = 1;
2719  if (ist->prev_sub.got_output) {
2720  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2721  1000, AV_TIME_BASE);
2722  if (end < ist->prev_sub.subtitle.end_display_time) {
2723  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2724  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2725  ist->prev_sub.subtitle.end_display_time, end,
2726  end <= 0 ? ", dropping it" : "");
2727  ist->prev_sub.subtitle.end_display_time = end;
2728  }
2729  }
2730  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2731  FFSWAP(int, ret, ist->prev_sub.ret);
2732  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2733  if (end <= 0)
2734  goto out;
2735  }
2736 
2737  if (!*got_output)
2738  return ret;
2739 
2740  if (ist->sub2video.frame) {
2741  sub2video_update(ist, INT64_MIN, &subtitle);
2742  } else if (ist->nb_filters) {
2743  if (!ist->sub2video.sub_queue)
2744  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2745  if (!ist->sub2video.sub_queue)
2746  exit_program(1);
2747  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2748  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2749  if (ret < 0)
2750  exit_program(1);
2751  }
2752  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2753  free_sub = 0;
2754  }
2755 
2756  if (!subtitle.num_rects)
2757  goto out;
2758 
2759  ist->frames_decoded++;
2760 
2761  for (i = 0; i < nb_output_streams; i++) {
2762  OutputStream *ost = output_streams[i];
2763 
2764  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2765  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2766  continue;
2767 
2768  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2769  }
2770 
2771 out:
2772  if (free_sub)
2773  avsubtitle_free(&subtitle);
2774  return ret;
2775 }
2776 
2777 static int send_filter_eof(InputStream *ist)
2778 {
2779  int i, ret;
2780  /* TODO keep pts also in stream time base to avoid converting back */
2781  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2782  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2783 
2784  for (i = 0; i < ist->nb_filters; i++) {
2785  ret = ifilter_send_eof(ist->filters[i], pts);
2786  if (ret < 0)
2787  return ret;
2788  }
2789  return 0;
2790 }
2791 
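/* Per-packet dispatcher: while decoding is needed it decodes audio, video or
 * subtitles (one frame per call when flushing at EOF), then sends EOF to the
 * attached filters once the decoder is drained (unless no_eof is set),
 * updates the timestamp estimates used for stream copy, and finally calls
 * do_streamcopy() for every matching output stream. Returns 0 only after EOF
 * has been fully processed. */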
2792 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2793 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2794 {
2795  int ret = 0, i;
2796  int repeating = 0;
2797  int eof_reached = 0;
2798 
2799  AVPacket avpkt;
2800  if (!ist->saw_first_ts) {
2801  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2802  ist->pts = 0;
2803  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2804  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2805  ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2806  }
2807  ist->saw_first_ts = 1;
2808  }
2809 
2810  if (ist->next_dts == AV_NOPTS_VALUE)
2811  ist->next_dts = ist->dts;
2812  if (ist->next_pts == AV_NOPTS_VALUE)
2813  ist->next_pts = ist->pts;
2814 
2815  if (!pkt) {
2816  /* EOF handling */
2817  av_init_packet(&avpkt);
2818  avpkt.data = NULL;
2819  avpkt.size = 0;
2820  } else {
2821  avpkt = *pkt;
2822  }
2823 
2824  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2825  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2826  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2827  ist->next_pts = ist->pts = ist->dts;
2828  }
2829 
2830  // while we have more to decode or while the decoder did output something on EOF
2831  while (ist->decoding_needed) {
2832  int64_t duration_dts = 0;
2833  int64_t duration_pts = 0;
2834  int got_output = 0;
2835  int decode_failed = 0;
2836 
2837  ist->pts = ist->next_pts;
2838  ist->dts = ist->next_dts;
2839 
2840  switch (ist->dec_ctx->codec_type) {
2841  case AVMEDIA_TYPE_AUDIO:
2842  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2843  &decode_failed);
2844  break;
2845  case AVMEDIA_TYPE_VIDEO:
2846  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2847  &decode_failed);
2848  if (!repeating || !pkt || got_output) {
2849  if (pkt && pkt->duration) {
2850  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2851  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2852  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2853  duration_dts = ((int64_t)AV_TIME_BASE *
2854  ist->dec_ctx->framerate.den * ticks) /
2855  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2856  }
2857 
2858  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2859  ist->next_dts += duration_dts;
2860  }else
2861  ist->next_dts = AV_NOPTS_VALUE;
2862  }
2863 
2864  if (got_output) {
2865  if (duration_pts > 0) {
2866  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2867  } else {
2868  ist->next_pts += duration_dts;
2869  }
2870  }
2871  break;
2872  case AVMEDIA_TYPE_SUBTITLE:
2873  if (repeating)
2874  break;
2875  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2876  if (!pkt && ret >= 0)
2877  ret = AVERROR_EOF;
2878  break;
2879  default:
2880  return -1;
2881  }
2882 
2883  if (ret == AVERROR_EOF) {
2884  eof_reached = 1;
2885  break;
2886  }
2887 
2888  if (ret < 0) {
2889  if (decode_failed) {
2890  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2891  ist->file_index, ist->st->index, av_err2str(ret));
2892  } else {
2893  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2894  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2895  }
2896  if (!decode_failed || exit_on_error)
2897  exit_program(1);
2898  break;
2899  }
2900 
2901  if (got_output)
2902  ist->got_output = 1;
2903 
2904  if (!got_output)
2905  break;
2906 
2907  // During draining, we might get multiple output frames in this loop.
2908  // ffmpeg.c does not drain the filter chain on configuration changes,
2909  // which means if we send multiple frames at once to the filters, and
2910  // one of those frames changes configuration, the buffered frames will
2911  // be lost. This can upset certain FATE tests.
2912  // Decode only 1 frame per call on EOF to appease these FATE tests.
2913  // The ideal solution would be to rewrite decoding to use the new
2914  // decoding API in a better way.
2915  if (!pkt)
2916  break;
2917 
2918  repeating = 1;
2919  }
2920 
2921  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2922  /* except when looping we need to flush but not to send an EOF */
2923  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2924  int ret = send_filter_eof(ist);
2925  if (ret < 0) {
2926  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2927  exit_program(1);
2928  }
2929  }
2930 
2931  /* handle stream copy */
2932  if (!ist->decoding_needed && pkt) {
2933  ist->dts = ist->next_dts;
2934  switch (ist->dec_ctx->codec_type) {
2935  case AVMEDIA_TYPE_AUDIO:
2936  av_assert1(pkt->duration >= 0);
2937  if (ist->dec_ctx->sample_rate) {
2938  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2939  ist->dec_ctx->sample_rate;
2940  } else {
2941  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2942  }
2943  break;
2944  case AVMEDIA_TYPE_VIDEO:
2945  if (ist->framerate.num) {
2946  // TODO: Remove work-around for c99-to-c89 issue 7
2947  AVRational time_base_q = AV_TIME_BASE_Q;
2948  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2949  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2950  } else if (pkt->duration) {
2951  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2952  } else if(ist->dec_ctx->framerate.num != 0) {
2953  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2954  ist->next_dts += ((int64_t)AV_TIME_BASE *
2955  ist->dec_ctx->framerate.den * ticks) /
2956  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2957  }
2958  break;
2959  }
2960  ist->pts = ist->dts;
2961  ist->next_pts = ist->next_dts;
2962  }
2963  for (i = 0; i < nb_output_streams; i++) {
2964  OutputStream *ost = output_streams[i];
2965 
2966  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2967  continue;
2968 
2969  do_streamcopy(ist, ost, pkt);
2970  }
2971 
2972  return !eof_reached;
2973 }
2974 
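/* Prints or writes the SDP description once every output file header has
 * been written: collects all "rtp" muxer contexts, generates the SDP with
 * av_sdp_create() and either logs it or stores it in sdp_filename. */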
2975 static void print_sdp(void)
2976 {
2977  char sdp[16384];
2978  int i;
2979  int j;
2980  AVIOContext *sdp_pb;
2981  AVFormatContext **avc;
2982 
2983  for (i = 0; i < nb_output_files; i++) {
2984  if (!output_files[i]->header_written)
2985  return;
2986  }
2987 
2988  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2989  if (!avc)
2990  exit_program(1);
2991  for (i = 0, j = 0; i < nb_output_files; i++) {
2992  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2993  avc[j] = output_files[i]->ctx;
2994  j++;
2995  }
2996  }
2997 
2998  if (!j)
2999  goto fail;
3000 
3001  av_sdp_create(avc, j, sdp, sizeof(sdp));
3002 
3003  if (!sdp_filename) {
3004  av_log(NULL, AV_LOG_STDERR, "SDP:\n%s\n", sdp);
3005  fflush(stdout);
3006  } else {
3007  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
3008  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
3009  } else {
3010  avio_print(sdp_pb, sdp);
3011  avio_closep(&sdp_pb);
3012  av_freep(&sdp_filename);
3013  }
3014  }
3015 
3016 fail:
3017  av_freep(&avc);
3018 }
3019 
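/* get_format() callback installed on decoders: scans the offered pixel
 * formats and keeps the first hardware format whose hwaccel can actually be
 * initialized (via avcodec_get_hw_config() for generic/auto hwaccels, or the
 * legacy hwaccels[] table otherwise); if none can be set up, the first
 * software format in the list is returned. */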
3020 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
3021 {
3022  InputStream *ist = s->opaque;
3023  const enum AVPixelFormat *p;
3024  int ret;
3025 
3026  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
3027  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
3028  const AVCodecHWConfig *config = NULL;
3029  int i;
3030 
3031  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
3032  break;
3033 
3034  if (ist->hwaccel_id == HWACCEL_GENERIC ||
3035  ist->hwaccel_id == HWACCEL_AUTO) {
3036  for (i = 0;; i++) {
3037  config = avcodec_get_hw_config(s->codec, i);
3038  if (!config)
3039  break;
3040  if (!(config->methods &
3041  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
3042  continue;
3043  if (config->pix_fmt == *p)
3044  break;
3045  }
3046  }
3047  if (config) {
3048  if (config->device_type != ist->hwaccel_device_type) {
3049  // Different hwaccel offered, ignore.
3050  continue;
3051  }
3052 
3053  ret = hwaccel_decode_init(s);
3054  if (ret < 0) {
3055  if (ist->hwaccel_id == HWACCEL_GENERIC) {
3056  av_log(NULL, AV_LOG_FATAL,
3057  "%s hwaccel requested for input stream #%d:%d, "
3058  "but cannot be initialized.\n",
3059  av_hwdevice_get_type_name(config->device_type),
3060  ist->file_index, ist->st->index);
3061  return AV_PIX_FMT_NONE;
3062  }
3063  continue;
3064  }
3065  } else {
3066  const HWAccel *hwaccel = NULL;
3067  int i;
3068  for (i = 0; hwaccels[i].name; i++) {
3069  if (hwaccels[i].pix_fmt == *p) {
3070  hwaccel = &hwaccels[i];
3071  break;
3072  }
3073  }
3074  if (!hwaccel) {
3075  // No hwaccel supporting this pixfmt.
3076  continue;
3077  }
3078  if (hwaccel->id != ist->hwaccel_id) {
3079  // Does not match requested hwaccel.
3080  continue;
3081  }
3082 
3083  ret = hwaccel->init(s);
3084  if (ret < 0) {
3085  av_log(NULL, AV_LOG_FATAL,
3086  "%s hwaccel requested for input stream #%d:%d, "
3087  "but cannot be initialized.\n", hwaccel->name,
3088  ist->file_index, ist->st->index);
3089  return AV_PIX_FMT_NONE;
3090  }
3091  }
3092 
3093  if (ist->hw_frames_ctx) {
3094  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
3095  if (!s->hw_frames_ctx)
3096  return AV_PIX_FMT_NONE;
3097  }
3098 
3099  ist->hwaccel_pix_fmt = *p;
3100  break;
3101  }
3102 
3103  return *p;
3104 }
3105 
3106 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
3107 {
3108  InputStream *ist = s->opaque;
3109 
3110  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
3111  return ist->hwaccel_get_buffer(s, frame, flags);
3112 
3113  return avcodec_default_get_buffer2(s, frame, flags);
3114 }
3115 
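/* Opens the decoder for one input stream: installs the get_format/get_buffer
 * callbacks above, applies decoder options (threading, subtitle text format,
 * DVB-subtitle compute_edt), performs hardware device setup and finally
 * calls avcodec_open2(). Streams that are only stream-copied skip all of
 * this. */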
3116 static int init_input_stream(int ist_index, char *error, int error_len)
3117 {
3118  int ret;
3119  InputStream *ist = input_streams[ist_index];
3120 
3121  if (ist->decoding_needed) {
3122  AVCodec *codec = ist->dec;
3123  if (!codec) {
3124  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
3125  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
3126  return AVERROR(EINVAL);
3127  }
3128 
3129  ist->dec_ctx->opaque = ist;
3130  ist->dec_ctx->get_format = get_format;
3131  ist->dec_ctx->get_buffer2 = get_buffer;
3132 #if LIBAVCODEC_VERSION_MAJOR < 60
3133  ist->dec_ctx->thread_safe_callbacks = 1;
3134 #endif
3135 
3136  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
3137  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
3138  (ist->decoding_needed & DECODING_FOR_OST)) {
3139  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
3140  if (ist->decoding_needed & DECODING_FOR_FILTER)
3141  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
3142  }
3143 
3144  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
3145 
3146  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
3147  * audio, and video decoders such as cuvid or mediacodec */
3148  ist->dec_ctx->pkt_timebase = ist->st->time_base;
3149 
3150  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
3151  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
3152  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
3153  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
3154  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
3155 
3156  ret = hw_device_setup_for_decode(ist);
3157  if (ret < 0) {
3158  snprintf(error, error_len, "Device setup failed for "
3159  "decoder on input stream #%d:%d : %s",
3160  ist->file_index, ist->st->index, av_err2str(ret));
3161  return ret;
3162  }
3163 
3164  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
3165  if (ret == AVERROR_EXPERIMENTAL)
3166  abort_codec_experimental(codec, 0);
3167 
3168  snprintf(error, error_len,
3169  "Error while opening decoder for input stream "
3170  "#%d:%d : %s",
3171  ist->file_index, ist->st->index, av_err2str(ret));
3172  return ret;
3173  }
3174  assert_avoptions(ist->decoder_opts);
3175  }
3176 
3177  ist->next_pts = AV_NOPTS_VALUE;
3178  ist->next_dts = AV_NOPTS_VALUE;
3179 
3180  return 0;
3181 }
3182 
3183 static InputStream *get_input_stream(OutputStream *ost)
3184 {
3185  if (ost->source_index >= 0)
3186  return input_streams[ost->source_index];
3187  return NULL;
3188 }
3189 
3190 static int compare_int64(const void *a, const void *b)
3191 {
3192  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
3193 }
3194 
3195 /* open the muxer when all the streams are initialized */
3196 static int check_init_output_file(OutputFile *of, int file_index)
3197 {
3198  int ret, i;
3199 
3200  for (i = 0; i < of->ctx->nb_streams; i++) {
3201  OutputStream *ost = output_streams[of->ost_index + i];
3202  if (!ost->initialized)
3203  return 0;
3204  }
3205 
3206  of->ctx->interrupt_callback = int_cb;
3207 
3208  ret = avformat_write_header(of->ctx, &of->opts);
3209  if (ret < 0) {
3210  av_log(NULL, AV_LOG_ERROR,
3211  "Could not write header for output file #%d "
3212  "(incorrect codec parameters ?): %s\n",
3213  file_index, av_err2str(ret));
3214  return ret;
3215  }
3216  //assert_avoptions(of->opts);
3217  of->header_written = 1;
3218 
3219  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3220  nb_output_dumped++;
3221 
3222  if (sdp_filename || want_sdp)
3223  print_sdp();
3224 
3225  /* flush the muxing queues */
3226  for (i = 0; i < of->ctx->nb_streams; i++) {
3227  OutputStream *ost = output_streams[of->ost_index + i];
3228 
3229  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3230  if (!av_fifo_size(ost->muxing_queue))
3231  ost->mux_timebase = ost->st->time_base;
3232 
3233  while (av_fifo_size(ost->muxing_queue)) {
3234  AVPacket pkt;
3235  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3236  ost->muxing_queue_data_size -= pkt.size;
3237  write_packet(of, &pkt, ost, 1);
3238  }
3239  }
3240 
3241  return 0;
3242 }
3243 
3244 static int init_output_bsfs(OutputStream *ost)
3245 {
3246  AVBSFContext *ctx = ost->bsf_ctx;
3247  int ret;
3248 
3249  if (!ctx)
3250  return 0;
3251 
3252  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3253  if (ret < 0)
3254  return ret;
3255 
3256  ctx->time_base_in = ost->st->time_base;
3257 
3258  ret = av_bsf_init(ctx);
3259  if (ret < 0) {
3260  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3261  ctx->filter->name);
3262  return ret;
3263  }
3264 
3265  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3266  if (ret < 0)
3267  return ret;
3268 
3269  ost->st->time_base = ctx->time_base_out;
3270 
3271  return 0;
3272 }
3273 
3274 static int init_output_stream_streamcopy(OutputStream *ost)
3275 {
3276  OutputFile *of = output_files[ost->file_index];
3277  InputStream *ist = get_input_stream(ost);
3278  AVCodecParameters *par_dst = ost->st->codecpar;
3279  AVCodecParameters *par_src = ost->ref_par;
3280  AVRational sar;
3281  int i, ret;
3282  uint32_t codec_tag = par_dst->codec_tag;
3283 
3284  av_assert0(ist && !ost->filter);
3285 
3286  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3287  if (ret >= 0)
3288  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3289  if (ret < 0) {
3290  av_log(NULL, AV_LOG_FATAL,
3291  "Error setting up codec context options.\n");
3292  return ret;
3293  }
3294 
3295  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3296  if (ret < 0) {
3297  av_log(NULL, AV_LOG_FATAL,
3298  "Error getting reference codec parameters.\n");
3299  return ret;
3300  }
3301 
3302  if (!codec_tag) {
3303  unsigned int codec_tag_tmp;
3304  if (!of->ctx->oformat->codec_tag ||
3305  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3306  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3307  codec_tag = par_src->codec_tag;
3308  }
3309 
3310  ret = avcodec_parameters_copy(par_dst, par_src);
3311  if (ret < 0)
3312  return ret;
3313 
3314  par_dst->codec_tag = codec_tag;
3315 
3316  if (!ost->frame_rate.num)
3317  ost->frame_rate = ist->framerate;
3318  ost->st->avg_frame_rate = ost->frame_rate;
3319 
3320  ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3321  if (ret < 0)
3322  return ret;
3323 
3324  // copy timebase while removing common factors
3325  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3326  ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3327 
3328  // copy estimated duration as a hint to the muxer
3329  if (ost->st->duration <= 0 && ist->st->duration > 0)
3330  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3331 
3332  // copy disposition
3333  ost->st->disposition = ist->st->disposition;
3334 
3335  if (ist->st->nb_side_data) {
3336  for (i = 0; i < ist->st->nb_side_data; i++) {
3337  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3338  uint8_t *dst_data;
3339 
3340  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3341  if (!dst_data)
3342  return AVERROR(ENOMEM);
3343  memcpy(dst_data, sd_src->data, sd_src->size);
3344  }
3345  }
3346 
3347  if (ost->rotate_overridden) {
3348  uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3349  sizeof(int32_t) * 9);
3350  if (sd)
3351  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3352  }
3353 
3354  switch (par_dst->codec_type) {
3355  case AVMEDIA_TYPE_AUDIO:
3356  if (audio_volume != 256) {
3357  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3358  exit_program(1);
3359  }
3360  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3361  par_dst->block_align= 0;
3362  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3363  par_dst->block_align= 0;
3364  break;
3365  case AVMEDIA_TYPE_VIDEO:
3366  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3367  sar =
3368  av_mul_q(ost->frame_aspect_ratio,
3369  (AVRational){ par_dst->height, par_dst->width });
3370  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3371  "with stream copy may produce invalid files\n");
3372  }
3373  else if (ist->st->sample_aspect_ratio.num)
3374  sar = ist->st->sample_aspect_ratio;
3375  else
3376  sar = par_src->sample_aspect_ratio;
3377  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3378  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3379  ost->st->r_frame_rate = ist->st->r_frame_rate;
3380  break;
3381  }
3382 
3383  ost->mux_timebase = ist->st->time_base;
3384 
3385  return 0;
3386 }
3387 
3388 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3389 {
3390  AVDictionaryEntry *e;
3391 
3392  uint8_t *encoder_string;
3393  int encoder_string_len;
3394  int format_flags = 0;
3395  int codec_flags = ost->enc_ctx->flags;
3396 
3397  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3398  return;
3399 
3400  e = av_dict_get(of->opts, "fflags", NULL, 0);
3401  if (e) {
3402  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3403  if (!o)
3404  return;
3405  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3406  }
3407  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3408  if (e) {
3409  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3410  if (!o)
3411  return;
3412  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3413  }
3414 
3415  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3416  encoder_string = av_mallocz(encoder_string_len);
3417  if (!encoder_string)
3418  exit_program(1);
3419 
3420  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3421  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3422  else
3423  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3424  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3425  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3426  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3427 }
3428 
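/* Parses the -force_key_frames time list into a sorted array of pts values
 * in the encoder time base. Entries are comma separated; an entry beginning
 * with "chapters" expands to every chapter start, optionally shifted by the
 * time that follows it (for example "chapters-0.1" would force a key frame
 * 0.1 seconds before each chapter). */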
3429 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3430  AVCodecContext *avctx)
3431 {
3432  char *p;
3433  int n = 1, i, size, index = 0;
3434  int64_t t, *pts;
3435 
3436  for (p = kf; *p; p++)
3437  if (*p == ',')
3438  n++;
3439  size = n;
3440  pts = av_malloc_array(size, sizeof(*pts));
3441  if (!pts) {
3442  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3443  exit_program(1);
3444  }
3445 
3446  p = kf;
3447  for (i = 0; i < n; i++) {
3448  char *next = strchr(p, ',');
3449 
3450  if (next)
3451  *next++ = 0;
3452 
3453  if (!memcmp(p, "chapters", 8)) {
3454 
3455  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3456  int j;
3457 
3458  if (avf->nb_chapters > INT_MAX - size ||
3459  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3460  sizeof(*pts)))) {
3461  av_log(NULL, AV_LOG_FATAL,
3462  "Could not allocate forced key frames array.\n");
3463  exit_program(1);
3464  }
3465  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3466  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3467 
3468  for (j = 0; j < avf->nb_chapters; j++) {
3469  AVChapter *c = avf->chapters[j];
3470  av_assert1(index < size);
3471  pts[index++] = av_rescale_q(c->start, c->time_base,
3472  avctx->time_base) + t;
3473  }
3474 
3475  } else {
3476 
3477  t = parse_time_or_die("force_key_frames", p, 1);
3478  av_assert1(index < size);
3479  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3480 
3481  }
3482 
3483  p = next;
3484  }
3485 
3486  av_assert0(index == size);
3487  qsort(pts, size, sizeof(*pts), compare_int64);
3488  ost->forced_kf_count = size;
3489  ost->forced_kf_pts = pts;
3490 }
3491 
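/* Selects the encoder time base: a positive ost->enc_timebase is used as
 * given, a negative value means "copy the input stream's time base" when an
 * input stream exists, and everything else falls back to the default passed
 * by the caller (1/sample_rate for audio, the inverse frame rate for video,
 * as at the call sites below). */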
3492 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3493 {
3494  InputStream *ist = get_input_stream(ost);
3495  AVCodecContext *enc_ctx = ost->enc_ctx;
3496  AVFormatContext *oc;
3497 
3498  if (ost->enc_timebase.num > 0) {
3499  enc_ctx->time_base = ost->enc_timebase;
3500  return;
3501  }
3502 
3503  if (ost->enc_timebase.num < 0) {
3504  if (ist) {
3505  enc_ctx->time_base = ist->st->time_base;
3506  return;
3507  }
3508 
3509  oc = output_files[ost->file_index]->ctx;
3510  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3511  }
3512 
3513  enc_ctx->time_base = default_time_base;
3514 }
3515 
3516 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3517 {
3518  InputStream *ist = get_input_stream(ost);
3519  AVCodecContext *enc_ctx = ost->enc_ctx;
3520  AVCodecContext *dec_ctx = NULL;
3521  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3522  int j, ret;
3523 
3524  set_encoder_id(output_files[ost->file_index], ost);
3525 
3526  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3527  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3528  // which have to be filtered out to prevent leaking them to output files.
3529  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3530 
3531  if (ist) {
3532  ost->st->disposition = ist->st->disposition;
3533 
3534  dec_ctx = ist->dec_ctx;
3535 
3536  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3537  } else {
3538  for (j = 0; j < oc->nb_streams; j++) {
3539  AVStream *st = oc->streams[j];
3540  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3541  break;
3542  }
3543  if (j == oc->nb_streams)
3544  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3545  ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3546  ost->st->disposition = AV_DISPOSITION_DEFAULT;
3547  }
3548 
3549  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3550  if (!ost->frame_rate.num)
3551  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3552  if (ist && !ost->frame_rate.num)
3553  ost->frame_rate = ist->framerate;
3554  if (ist && !ost->frame_rate.num)
3555  ost->frame_rate = ist->st->r_frame_rate;
3556  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3557  ost->frame_rate = (AVRational){25, 1};
3558  av_log(NULL, AV_LOG_WARNING,
3559  "No information "
3560  "about the input framerate is available. Falling "
3561  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3562  "if you want a different framerate.\n",
3563  ost->file_index, ost->index);
3564  }
3565 
3566  if (ost->max_frame_rate.num &&
3567  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3568  !ost->frame_rate.den))
3569  ost->frame_rate = ost->max_frame_rate;
3570 
3571  if (ost->enc->supported_framerates && !ost->force_fps) {
3572  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3573  ost->frame_rate = ost->enc->supported_framerates[idx];
3574  }
3575  // reduce frame rate for mpeg4 to be within the spec limits
3576  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3577  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3578  ost->frame_rate.num, ost->frame_rate.den, 65535);
3579  }
3580  }
3581 
3582  switch (enc_ctx->codec_type) {
3583  case AVMEDIA_TYPE_AUDIO:
3584  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3585  if (dec_ctx)
3586  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3587  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3588  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3589  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3590  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3591 
3592  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3593  break;
3594 
3595  case AVMEDIA_TYPE_VIDEO:
3596  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3597 
3598  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3599  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3600  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3601  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3602  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3603  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3604  }
3605 
3606  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3607  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3608  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3609  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3610  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3611  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3612 
3613  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3614  if (dec_ctx)
3615  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3616  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3617 
3618  if (frame) {
3619  enc_ctx->color_range = frame->color_range;
3620  enc_ctx->color_primaries = frame->color_primaries;
3621  enc_ctx->color_trc = frame->color_trc;
3622  enc_ctx->colorspace = frame->colorspace;
3623  enc_ctx->chroma_sample_location = frame->chroma_location;
3624  }
3625 
3626  enc_ctx->framerate = ost->frame_rate;
3627 
3628  ost->st->avg_frame_rate = ost->frame_rate;
3629 
3630  if (!dec_ctx ||
3631  enc_ctx->width != dec_ctx->width ||
3632  enc_ctx->height != dec_ctx->height ||
3633  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3634  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3635  }
3636 
3637  if (ost->top_field_first == 0) {
3638  enc_ctx->field_order = AV_FIELD_BB;
3639  } else if (ost->top_field_first == 1) {
3640  enc_ctx->field_order = AV_FIELD_TT;
3641  }
3642 
3643  if (frame) {
3644  if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3645  ost->top_field_first >= 0)
3646  frame->top_field_first = !!ost->top_field_first;
3647 
3648  if (frame->interlaced_frame) {
3649  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3650  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3651  else
3652  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3653  } else
3654  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3655  }
3656 
3657  if (ost->forced_keyframes) {
3658  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3659  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3660  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3661  if (ret < 0) {
3662  av_log(NULL, AV_LOG_ERROR,
3663  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3664  return ret;
3665  }
3666  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3667  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3668  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3669  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3670 
3671  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3672  // parse it only for static kf timings
3673  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3674  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3675  }
3676  }
3677  break;
3678  case AVMEDIA_TYPE_SUBTITLE:
3679  enc_ctx->time_base = AV_TIME_BASE_Q;
3680  if (!enc_ctx->width) {
3681  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3682  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3683  }
3684  break;
3685  case AVMEDIA_TYPE_DATA:
3686  break;
3687  default:
3688  abort();
3689  break;
3690  }
3691 
3692  ost->mux_timebase = enc_ctx->time_base;
3693 
3694  return 0;
3695 }
3696 
3697 static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
3698 {
3699  int ret = 0;
3700 
3701  if (ost->encoding_needed) {
3702  AVCodec *codec = ost->enc;
3703  AVCodecContext *dec = NULL;
3704  InputStream *ist;
3705 
3706  ret = init_output_stream_encode(ost, frame);
3707  if (ret < 0)
3708  return ret;
3709 
3710  if ((ist = get_input_stream(ost)))
3711  dec = ist->dec_ctx;
3712  if (dec && dec->subtitle_header) {
3713  /* ASS code assumes this buffer is null terminated so add extra byte. */
3714  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3715  if (!ost->enc_ctx->subtitle_header)
3716  return AVERROR(ENOMEM);
3717  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3718  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3719  }
3720  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3721  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3722  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3723  !codec->defaults &&
3724  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3725  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3726  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3727 
3728  ret = hw_device_setup_for_encode(ost);
3729  if (ret < 0) {
3730  snprintf(error, error_len, "Device setup failed for "
3731  "encoder on output stream #%d:%d : %s",
3732  ost->file_index, ost->index, av_err2str(ret));
3733  return ret;
3734  }
3735 
3736  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3737  int input_props = 0, output_props = 0;
3738  AVCodecDescriptor const *input_descriptor =
3739  avcodec_descriptor_get(dec->codec_id);
3740  AVCodecDescriptor const *output_descriptor =
3741  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3742  if (input_descriptor)
3743  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3744  if (output_descriptor)
3745  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3746  if (input_props && output_props && input_props != output_props) {
3747  snprintf(error, error_len,
3748  "Subtitle encoding currently only possible from text to text "
3749  "or bitmap to bitmap");
3750  return AVERROR_INVALIDDATA;
3751  }
3752  }
3753 
3754  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3755  if (ret == AVERROR_EXPERIMENTAL)
3756  abort_codec_experimental(codec, 1);
3757  snprintf(error, error_len,
3758  "Error while opening encoder for output stream #%d:%d - "
3759  "maybe incorrect parameters such as bit_rate, rate, width or height",
3760  ost->file_index, ost->index);
3761  return ret;
3762  }
3763  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3764  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3765  av_buffersink_set_frame_size(ost->filter->filter,
3766  ost->enc_ctx->frame_size);
3768  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3769  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3770  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3771  " It takes bits/s as argument, not kbits/s\n");
3772 
3773  ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3774  if (ret < 0) {
3775  av_log(NULL, AV_LOG_FATAL,
3776  "Error initializing the output stream codec context.\n");
3777  exit_program(1);
3778  }
3779 
3780  if (ost->enc_ctx->nb_coded_side_data) {
3781  int i;
3782 
3783  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3784  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3785  uint8_t *dst_data;
3786 
3787  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3788  if (!dst_data)
3789  return AVERROR(ENOMEM);
3790  memcpy(dst_data, sd_src->data, sd_src->size);
3791  }
3792  }
3793 
3794  /*
3795  * Add global input side data. For now this is naive, and copies it
3796  * from the input stream's global side data. All side data should
3797  * really be funneled over AVFrame and libavfilter, then added back to
3798  * packet side data, and then potentially using the first packet for
3799  * global side data.
3800  */
3801  if (ist) {
3802  int i;
3803  for (i = 0; i < ist->st->nb_side_data; i++) {
3804  AVPacketSideData *sd = &ist->st->side_data[i];
3805  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3806  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3807  if (!dst)
3808  return AVERROR(ENOMEM);
3809  memcpy(dst, sd->data, sd->size);
3810  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3811  av_display_rotation_set((uint32_t *)dst, 0);
3812  }
3813  }
3814  }
3815 
3816  // copy timebase while removing common factors
3817  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3818  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3819 
3820  // copy estimated duration as a hint to the muxer
3821  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3822  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3823  } else if (ost->stream_copy) {
3824  ret = init_output_stream_streamcopy(ost);
3825  if (ret < 0)
3826  return ret;
3827  }
3828 
3829  // parse user provided disposition, and update stream values
3830  if (ost->disposition) {
3831  static const AVOption opts[] = {
3832  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, (double)INT64_MAX, .unit = "flags" },
3833  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3834  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3835  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3836  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3837  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3838  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3839  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3840  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3841  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3842  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3843  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3844  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3845  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3846  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3847  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3848  { NULL },
3849  };
3850  static const AVClass class = {
3851  .class_name = "",
3852  .item_name = av_default_item_name,
3853  .option = opts,
3854  .version = LIBAVUTIL_VERSION_INT,
3855  };
3856  const AVClass *pclass = &class;
3857 
3858  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3859  if (ret < 0)
3860  return ret;
3861  }
3862 
3863  /* initialize bitstream filters for the output stream
3864  * needs to be done here, because the codec id for streamcopy is not
3865  * known until now */
3866  ret = init_output_bsfs(ost);
3867  if (ret < 0)
3868  return ret;
3869 
3870  ost->initialized = 1;
3871 
3872  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3873  if (ret < 0)
3874  return ret;
3875 
3876  return ret;
3877 }
3878 
3879 static void report_new_stream(int input_index, AVPacket *pkt)
3880 {
3881  InputFile *file = input_files[input_index];
3882  AVStream *st = file->ctx->streams[pkt->stream_index];
3883 
3884  if (pkt->stream_index < file->nb_streams_warn)
3885  return;
3886  av_log(file->ctx, AV_LOG_WARNING,
3887  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3888  av_get_media_type_string(st->codecpar->codec_type),
3889  input_index, pkt->stream_index,
3890  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3891  file->nb_streams_warn = pkt->stream_index + 1;
3892 }
3893 
3894 static int transcode_init(void)
3895 {
3896  int ret = 0, i, j, k;
3897  AVFormatContext *oc;
3898  OutputStream *ost;
3899  InputStream *ist;
3900  char error[1024] = {0};
3901 
3902  for (i = 0; i < nb_filtergraphs; i++) {
3903  FilterGraph *fg = filtergraphs[i];
3904  for (j = 0; j < fg->nb_outputs; j++) {
3905  OutputFilter *ofilter = fg->outputs[j];
3906  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3907  continue;
3908  if (fg->nb_inputs != 1)
3909  continue;
3910  for (k = nb_input_streams-1; k >= 0 ; k--)
3911  if (fg->inputs[0]->ist == input_streams[k])
3912  break;
3913  ofilter->ost->source_index = k;
3914  }
3915  }
3916 
3917  /* init framerate emulation */
3918  for (i = 0; i < nb_input_files; i++) {
3919  InputFile *ifile = input_files[i];
3920  if (ifile->rate_emu)
3921  for (j = 0; j < ifile->nb_streams; j++)
3922  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3923  }
3924 
3925  /* init input streams */
3926  for (i = 0; i < nb_input_streams; i++)
3927  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3928  for (i = 0; i < nb_output_streams; i++) {
3929  ost = output_streams[i];
3930  avcodec_close(ost->enc_ctx);
3931  }
3932  goto dump_format;
3933  }
3934 
3935  /*
3936  * initialize stream copy and subtitle/data streams.
3937  * Encoded AVFrame based streams will get initialized as follows:
3938  * - when the first AVFrame is received in do_video_out
3939  * - just before the first AVFrame is received in either transcode_step
3940  * or reap_filters due to us requiring the filter chain buffer sink
3941  * to be configured with the correct audio frame size, which is only
3942  * known after the encoder is initialized.
3943  */
3944  for (i = 0; i < nb_output_streams; i++) {
3945  if (!output_streams[i]->stream_copy &&
3946  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3947  output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3948  continue;
3949 
3950  ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3951  if (ret < 0)
3952  goto dump_format;
3953  }
3954 
3955  /* discard unused programs */
3956  for (i = 0; i < nb_input_files; i++) {
3957  InputFile *ifile = input_files[i];
3958  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3959  AVProgram *p = ifile->ctx->programs[j];
3960  int discard = AVDISCARD_ALL;
3961 
3962  for (k = 0; k < p->nb_stream_indexes; k++)
3963  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3964  discard = AVDISCARD_DEFAULT;
3965  break;
3966  }
3967  p->discard = discard;
3968  }
3969  }
3970 
3971  /* write headers for files with no streams */
3972  for (i = 0; i < nb_output_files; i++) {
3973  oc = output_files[i]->ctx;
3974  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3975  ret = check_init_output_file(output_files[i], i);
3976  if (ret < 0)
3977  goto dump_format;
3978  }
3979  }
3980 
3981  dump_format:
3982  /* dump the stream mapping */
3983  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3984  for (i = 0; i < nb_input_streams; i++) {
3985  ist = input_streams[i];
3986 
3987  for (j = 0; j < ist->nb_filters; j++) {
3988  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3989  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3990  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3991  ist->filters[j]->name);
3992  if (nb_filtergraphs > 1)
3993  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3994  av_log(NULL, AV_LOG_INFO, "\n");
3995  }
3996  }
3997  }
3998 
3999  for (i = 0; i < nb_output_streams; i++) {
4000  ost = output_streams[i];
4001 
4002  if (ost->attachment_filename) {
4003  /* an attached file */
4004  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
4005  ost->attachment_filename, ost->file_index, ost->index);
4006  continue;
4007  }
4008 
4009  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
4010  /* output from a complex graph */
4011  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
4012  if (nb_filtergraphs > 1)
4013  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
4014 
4015  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
4016  ost->index, ost->enc ? ost->enc->name : "?");
4017  continue;
4018  }
4019 
4020  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
4021  input_streams[ost->source_index]->file_index,
4022  input_streams[ost->source_index]->st->index,
4023  ost->file_index,
4024  ost->index);
4025  if (ost->sync_ist != input_streams[ost->source_index])
4026  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
4027  ost->sync_ist->file_index,
4028  ost->sync_ist->st->index);
4029  if (ost->stream_copy)
4030  av_log(NULL, AV_LOG_INFO, " (copy)");
4031  else {
4032  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
4033  const AVCodec *out_codec = ost->enc;
4034  const char *decoder_name = "?";
4035  const char *in_codec_name = "?";
4036  const char *encoder_name = "?";
4037  const char *out_codec_name = "?";
4038  const AVCodecDescriptor *desc;
4039 
4040  if (in_codec) {
4041  decoder_name = in_codec->name;
4042  desc = avcodec_descriptor_get(in_codec->id);
4043  if (desc)
4044  in_codec_name = desc->name;
4045  if (!strcmp(decoder_name, in_codec_name))
4046  decoder_name = "native";
4047  }
4048 
4049  if (out_codec) {
4050  encoder_name = out_codec->name;
4051  desc = avcodec_descriptor_get(out_codec->id);
4052  if (desc)
4053  out_codec_name = desc->name;
4054  if (!strcmp(encoder_name, out_codec_name))
4055  encoder_name = "native";
4056  }
4057 
4058  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
4059  in_codec_name, decoder_name,
4060  out_codec_name, encoder_name);
4061  }
4062  av_log(NULL, AV_LOG_INFO, "\n");
4063  }
4064 
4065  if (ret) {
4066  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
4067  return ret;
4068  }
4069 
4070  atomic_store(&transcode_init_done, 1);
4071 
4072  return 0;
4073 }
4074 
4075 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
4076 static int need_output(void)
4077 {
4078  int i;
4079 
4080  for (i = 0; i < nb_output_streams; i++) {
4081  OutputStream *ost = output_streams[i];
4082  OutputFile *of = output_files[ost->file_index];
4083  AVFormatContext *os = output_files[ost->file_index]->ctx;
4084 
4085  if (ost->finished ||
4086  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
4087  continue;
4088  if (ost->frame_number >= ost->max_frames) {
4089  int j;
4090  for (j = 0; j < of->ctx->nb_streams; j++)
4091  close_output_stream(output_streams[of->ost_index + j]);
4092  continue;
4093  }
4094 
4095  return 1;
4096  }
4097 
4098  return 0;
4099 }
4100 
4101 /**
4102  * Select the output stream to process.
4103  *
4104  * @return  selected output stream, or NULL if none available
4105  */
4106 static OutputStream *choose_output(void)
4107 {
4108  int i;
4109  int64_t opts_min = INT64_MAX;
4110  OutputStream *ost_min = NULL;
4111 
4112  for (i = 0; i < nb_output_streams; i++) {
4113  OutputStream *ost = output_streams[i];
4114  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
4115  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
4116  AV_TIME_BASE_Q);
4117  if (ost->st->cur_dts == AV_NOPTS_VALUE)
4118  av_log(NULL, AV_LOG_DEBUG,
4119  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
4120  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
4121 
4122  if (!ost->initialized && !ost->inputs_done)
4123  return ost;
4124 
4125  if (!ost->finished && opts < opts_min) {
4126  opts_min = opts;
4127  ost_min = ost->unavailable ? NULL : ost;
4128  }
4129  }
4130  return ost_min;
4131 }
4132 
4133 static void set_tty_echo(int on)
4134 {
4135 #if HAVE_TERMIOS_H
4136  struct termios tty;
4137  if (tcgetattr(0, &tty) == 0) {
4138  if (on) tty.c_lflag |= ECHO;
4139  else tty.c_lflag &= ~ECHO;
4140  tcsetattr(0, TCSANOW, &tty);
4141  }
4142 #endif
4143 }
4144 
4145 static int check_keyboard_interaction(int64_t cur_time)
4146 {
4147  int i, ret, key;
4148  if (received_nb_signals)
4149  return AVERROR_EXIT;
4150  /* read_key() returns 0 on EOF */
4151  if(cur_time - keyboard_last_time >= 100000 && !run_as_daemon){
4152  key = read_key();
4153  keyboard_last_time = cur_time;
4154  }else
4155  key = -1;
4156  if (key == 'q')
4157  return AVERROR_EXIT;
4158  if (key == '+') av_log_set_level(av_log_get_level()+10);
4159  if (key == '-') av_log_set_level(av_log_get_level()-10);
4160  if (key == 's') qp_hist ^= 1;
4161  if (key == 'h'){
4162  if (do_hex_dump){
4163  do_hex_dump = do_pkt_dump = 0;
4164  } else if(do_pkt_dump){
4165  do_hex_dump = 1;
4166  } else
4167  do_pkt_dump = 1;
4168  av_log_set_level(AV_LOG_DEBUG);
4169  }
4170  if (key == 'c' || key == 'C'){
4171  char buf[4096], target[64], command[256], arg[256] = {0};
4172  double time;
4173  int k, n = 0;
4174  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4175  i = 0;
4176  set_tty_echo(1);
4177  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4178  if (k > 0)
4179  buf[i++] = k;
4180  buf[i] = 0;
4181  set_tty_echo(0);
4182  fprintf(stderr, "\n");
4183  if (k > 0 &&
4184  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4185  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4186  target, time, command, arg);
4187  for (i = 0; i < nb_filtergraphs; i++) {
4188  FilterGraph *fg = filtergraphs[i];
4189  if (fg->graph) {
4190  if (time < 0) {
4191  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4192  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4193  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4194  } else if (key == 'c') {
4195  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4196  ret = AVERROR_PATCHWELCOME;
4197  } else {
4198  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4199  if (ret < 0)
4200  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4201  }
4202  }
4203  }
4204  } else {
4205  av_log(NULL, AV_LOG_ERROR,
4206  "Parse error, at least 3 arguments were expected, "
4207  "only %d given in string '%s'\n", n, buf);
4208  }
4209  }
4210  if (key == 'd' || key == 'D'){
4211  int debug=0;
4212  if(key == 'D') {
4213  debug = input_streams[0]->dec_ctx->debug << 1;
4214  if(!debug) debug = 1;
4215  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4216  debug += debug;
4217  }else{
4218  char buf[32];
4219  int k = 0;
4220  i = 0;
4221  set_tty_echo(1);
4222  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4223  if (k > 0)
4224  buf[i++] = k;
4225  buf[i] = 0;
4226  set_tty_echo(0);
4227  fprintf(stderr, "\n");
4228  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4229  fprintf(stderr,"error parsing debug value\n");
4230  }
4231  for(i=0;i<nb_input_streams;i++) {
4232  input_streams[i]->dec_ctx->debug = debug;
4233  }
4234  for(i=0;i<nb_output_streams;i++) {
4235  OutputStream *ost = output_streams[i];
4236  ost->enc_ctx->debug = debug;
4237  }
4238  if(debug) av_log_set_level(AV_LOG_DEBUG);
4239  fprintf(stderr,"debug=%d\n", debug);
4240  }
4241  if (key == '?'){
4242  fprintf(stderr, "key function\n"
4243  "? show this help\n"
4244  "+ increase verbosity\n"
4245  "- decrease verbosity\n"
4246  "c Send command to first matching filter supporting it\n"
4247  "C Send/Queue command to all matching filters\n"
4248  "D cycle through available debug modes\n"
4249  "h dump packets/hex press to cycle through the 3 states\n"
4250  "q quit\n"
4251  "s Show QP histogram\n"
4252  );
4253  }
4254  return 0;
4255 }
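/*
 * Illustrative fragment (not part of the original sources): the 'c'/'C' keys above
 * forward a "<target> <time> <command> [<argument>]" string to the filter graphs.
 * Programmatically the same effect is one avfilter_graph_send_command() call; the
 * snippet below assumes a configured AVFilterGraph named 'graph' (hypothetical) and
 * a filter instance that understands the "volume" command.
 */
#if 0
    char response[4096];
    int err = avfilter_graph_send_command(graph, "all", "volume", "0.5",
                                          response, sizeof(response), 0);
    if (err < 0)
        av_log(NULL, AV_LOG_ERROR, "Sending the volume command failed: %s\n",
               av_err2str(err));
#endif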
4256 
4257 #if HAVE_THREADS
4258 static void *input_thread(void *arg)
4259 {
4260  InputFile *f = arg;
4261  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4262  int ret = 0;
4263 
4264  while (1) {
4265  AVPacket pkt;
4266  ret = av_read_frame(f->ctx, &pkt);
4267 
4268  if (ret == AVERROR(EAGAIN)) {
4269  av_usleep(10000);
4270  continue;
4271  }
4272  if (ret < 0) {
4273  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4274  break;
4275  }
4276  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4277  if (flags && ret == AVERROR(EAGAIN)) {
4278  flags = 0;
4279  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4280  av_log(f->ctx, AV_LOG_WARNING,
4281  "Thread message queue blocking; consider raising the "
4282  "thread_queue_size option (current value: %d)\n",
4283  f->thread_queue_size);
4284  }
4285  if (ret < 0) {
4286  if (ret != AVERROR_EOF)
4287  av_log(f->ctx, AV_LOG_ERROR,
4288  "Unable to send packet to main thread: %s\n",
4289  av_err2str(ret));
4290  av_packet_unref(&pkt);
4291  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4292  break;
4293  }
4294  }
4295 
4296  return NULL;
4297 }
4298 
4299 static void free_input_thread(int i)
4300 {
4301  InputFile *f = input_files[i];
4302  AVPacket pkt;
4303 
4304  if (!f || !f->in_thread_queue)
4305  return;
4306  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4307  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4308  av_packet_unref(&pkt);
4309 
4310  pthread_join(f->thread, NULL);
4311  f->joined = 1;
4312  av_thread_message_queue_free(&f->in_thread_queue);
4313 }
4314 
4315 static void free_input_threads(void)
4316 {
4317  int i;
4318 
4319  for (i = 0; i < nb_input_files; i++)
4320  free_input_thread(i);
4321 }
4322 
4323 static int init_input_thread(int i)
4324 {
4325  int ret;
4326  InputFile *f = input_files[i];
4327 
4328  if (f->thread_queue_size < 0)
4329  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4330  if (!f->thread_queue_size)
4331  return 0;
4332 
4333  if (f->ctx->pb ? !f->ctx->pb->seekable :
4334  strcmp(f->ctx->iformat->name, "lavfi"))
4335  f->non_blocking = 1;
4336  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4337  f->thread_queue_size, sizeof(AVPacket));
4338  if (ret < 0)
4339  return ret;
4340 
4341  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4342  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4343  av_thread_message_queue_free(&f->in_thread_queue);
4344  return AVERROR(ret);
4345  }
4346 
4347  return 0;
4348 }
4349 
4350 static int init_input_threads(void)
4351 {
4352  int i, ret;
4353 
4354  for (i = 0; i < nb_input_files; i++) {
4355  ret = init_input_thread(i);
4356  if (ret < 0)
4357  return ret;
4358  }
4359  return 0;
4360 }
4361 
4362 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4363 {
4364  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4365  f->non_blocking ?
4366  AV_THREAD_MESSAGE_NONBLOCK : 0);
4367 }
4368 #endif
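/*
 * Illustrative fragment (not part of the original sources): the input thread above
 * is a single-producer/single-consumer pipeline built on libavutil's thread message
 * queue. Stripped of error handling, the pattern it follows is:
 */
#if 0
    AVThreadMessageQueue *queue;
    AVPacket pkt; /* filled by av_read_frame() on the producer side */

    av_thread_message_queue_alloc(&queue, 8, sizeof(AVPacket));            /* bounded queue */
    av_thread_message_queue_send(queue, &pkt, 0);                          /* producer thread */
    av_thread_message_queue_recv(queue, &pkt, AV_THREAD_MESSAGE_NONBLOCK); /* consumer thread */
    av_thread_message_queue_set_err_send(queue, AVERROR_EOF);              /* signal shutdown */
    av_thread_message_queue_free(&queue);
#endif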
4369 
4370 static int get_input_packet(InputFile *f, AVPacket *pkt)
4371 {
4372  if (f->rate_emu) {
4373  int i;
4374  for (i = 0; i < f->nb_streams; i++) {
4375  InputStream *ist = input_streams[f->ist_index + i];
4376  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4377  int64_t now = av_gettime_relative() - ist->start;
4378  if (pts > now)
4379  return AVERROR(EAGAIN);
4380  }
4381  }
4382 
4383 #if HAVE_THREADS
4384  if (f->thread_queue_size)
4385  return get_input_packet_mt(f, pkt);
4386 #endif
4387  return av_read_frame(f->ctx, pkt);
4388 }
4389 
4390 static int got_eagain(void)
4391 {
4392  int i;
4393  for (i = 0; i < nb_output_streams; i++)
4394  if (output_streams[i]->unavailable)
4395  return 1;
4396  return 0;
4397 }
4398 
4399 static void reset_eagain(void)
4400 {
4401  int i;
4402  for (i = 0; i < nb_input_files; i++)
4403  input_files[i]->eagain = 0;
4404  for (i = 0; i < nb_output_streams; i++)
4405  output_streams[i]->unavailable = 0;
4406 }
4407 
4408 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4409 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4410  AVRational time_base)
4411 {
4412  int ret;
4413 
4414  if (!*duration) {
4415  *duration = tmp;
4416  return tmp_time_base;
4417  }
4418 
4419  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4420  if (ret < 0) {
4421  *duration = tmp;
4422  return tmp_time_base;
4423  }
4424 
4425  return time_base;
4426 }
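/*
 * Illustrative fragment (not part of the original sources): duration_max() keeps the
 * larger of two durations that live in different time bases, relying on
 * av_compare_ts() so no precision is lost to an intermediate conversion. A small
 * worked example of that comparison:
 */
#if 0
    /* 3000 ticks in a 1/1000 time base and 270000 ticks in a 1/90000 time base both
     * represent exactly 3 seconds, so av_compare_ts() returns 0 here; it returns a
     * negative value when the first timestamp is smaller and a positive one when it
     * is larger. */
    int cmp = av_compare_ts(3000, av_make_q(1, 1000),
                            270000, av_make_q(1, 90000));
#endif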
4427 
4428 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4429 {
4430  InputStream *ist;
4431  AVCodecContext *avctx;
4432  int i, ret, has_audio = 0;
4433  int64_t duration = 0;
4434 
4435  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4436  if (ret < 0)
4437  return ret;
4438 
4439  for (i = 0; i < ifile->nb_streams; i++) {
4440  ist = input_streams[ifile->ist_index + i];
4441  avctx = ist->dec_ctx;
4442 
4443  /* duration is the length of the last frame in a stream
4444  * when audio stream is present we don't care about
4445  * last video frame length because it's not defined exactly */
4446  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4447  has_audio = 1;
4448  }
4449 
4450  for (i = 0; i < ifile->nb_streams; i++) {
4451  ist = input_streams[ifile->ist_index + i];
4452  avctx = ist->dec_ctx;
4453 
4454  if (has_audio) {
4455  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4456  AVRational sample_rate = {1, avctx->sample_rate};
4457 
4458  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4459  } else {
4460  continue;
4461  }
4462  } else {
4463  if (ist->framerate.num) {
4464  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4465  } else if (ist->st->avg_frame_rate.num) {
4466  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4467  } else {
4468  duration = 1;
4469  }
4470  }
4471  if (!ifile->duration)
4472  ifile->time_base = ist->st->time_base;
4473  /* the total duration of the stream, max_pts - min_pts is
4474  * the duration of the stream without the last frame */
4475  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4476  duration += ist->max_pts - ist->min_pts;
4477  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4478  ifile->time_base);
4479  }
4480 
4481  if (ifile->loop > 0)
4482  ifile->loop--;
4483 
4484  return ret;
4485 }
4486 
4487 /*
4488  * Return
4489  * - 0 -- one packet was read and processed
4490  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4491  * this function should be called again
4492  * - AVERROR_EOF -- this function should not be called again
4493  */
4494 static int process_input(int file_index)
4495 {
4496  InputFile *ifile = input_files[file_index];
4497  AVFormatContext *is;
4498  InputStream *ist;
4499  AVPacket pkt;
4500  int ret, thread_ret, i, j;
4501  int64_t duration;
4502  int64_t pkt_dts;
4503  int disable_discontinuity_correction = copy_ts;
4504 
4505  is = ifile->ctx;
4506  ret = get_input_packet(ifile, &pkt);
4507 
4508  if (ret == AVERROR(EAGAIN)) {
4509  ifile->eagain = 1;
4510  return ret;
4511  }
4512  if (ret < 0 && ifile->loop) {
4513  AVCodecContext *avctx;
4514  for (i = 0; i < ifile->nb_streams; i++) {
4515  ist = input_streams[ifile->ist_index + i];
4516  avctx = ist->dec_ctx;
4517  if (ist->decoding_needed) {
4518  ret = process_input_packet(ist, NULL, 1);
4519  if (ret>0)
4520  return 0;
4521  avcodec_flush_buffers(avctx);
4522  }
4523  }
4524 #if HAVE_THREADS
4525  free_input_thread(file_index);
4526 #endif
4527  ret = seek_to_start(ifile, is);
4528 #if HAVE_THREADS
4529  thread_ret = init_input_thread(file_index);
4530  if (thread_ret < 0)
4531  return thread_ret;
4532 #endif
4533  if (ret < 0)
4534  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4535  else
4536  ret = get_input_packet(ifile, &pkt);
4537  if (ret == AVERROR(EAGAIN)) {
4538  ifile->eagain = 1;
4539  return ret;
4540  }
4541  }
4542  if (ret < 0) {
4543  if (ret != AVERROR_EOF) {
4544  print_error(is->url, ret);
4545  if (exit_on_error)
4546  exit_program(1);
4547  }
4548 
4549  for (i = 0; i < ifile->nb_streams; i++) {
4550  ist = input_streams[ifile->ist_index + i];
4551  if (ist->decoding_needed) {
4552  ret = process_input_packet(ist, NULL, 0);
4553  if (ret>0)
4554  return 0;
4555  }
4556 
4557  /* mark all outputs that don't go through lavfi as finished */
4558  for (j = 0; j < nb_output_streams; j++) {
4559  OutputStream *ost = output_streams[j];
4560 
4561  if (ost->source_index == ifile->ist_index + i &&
4562  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4563  finish_output_stream(ost);
4564  }
4565  }
4566 
4567  ifile->eof_reached = 1;
4568  return AVERROR(EAGAIN);
4569  }
4570 
4571  reset_eagain();
4572 
4573  if (do_pkt_dump) {
4574  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4575  is->streams[pkt.stream_index]);
4576  }
4577  /* the following test is needed in case new streams appear
4578  dynamically in stream : we ignore them */
4579  if (pkt.stream_index >= ifile->nb_streams) {
4580  report_new_stream(file_index, &pkt);
4581  goto discard_packet;
4582  }
4583 
4584  ist = input_streams[ifile->ist_index + pkt.stream_index];
4585 
4586  ist->data_size += pkt.size;
4587  ist->nb_packets++;
4588 
4589  if (ist->discard)
4590  goto discard_packet;
4591 
4592  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4593  av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4594  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4595  if (exit_on_error)
4596  exit_program(1);
4597  }
4598 
4599  if (debug_ts) {
4600  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4601  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4602  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4603  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4604  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4605  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4606  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4607  av_ts2str(input_files[ist->file_index]->ts_offset),
4608  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4609  }
4610 
4611  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4612  int64_t stime, stime2;
4613  // Correcting starttime based on the enabled streams
4614  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4615  // so we instead do it here as part of discontinuity handling
4616  if ( ist->next_dts == AV_NOPTS_VALUE
4617  && ifile->ts_offset == -is->start_time
4618  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4619  int64_t new_start_time = INT64_MAX;
4620  for (i=0; i<is->nb_streams; i++) {
4621  AVStream *st = is->streams[i];
4622  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4623  continue;
4624  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4625  }
4626  if (new_start_time > is->start_time) {
4627  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4628  ifile->ts_offset = -new_start_time;
4629  }
4630  }
4631 
4632  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4633  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4634  ist->wrap_correction_done = 1;
4635 
4636  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4637  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4638  ist->wrap_correction_done = 0;
4639  }
4640  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4641  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4642  ist->wrap_correction_done = 0;
4643  }
4644  }
4645 
4646  /* add the stream-global side data to the first packet */
4647  if (ist->nb_packets == 1) {
4648  for (i = 0; i < ist->st->nb_side_data; i++) {
4649  AVPacketSideData *src_sd = &ist->st->side_data[i];
4650  uint8_t *dst_data;
4651 
4652  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4653  continue;
4654 
4655  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4656  continue;
4657 
4658  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4659  if (!dst_data)
4660  exit_program(1);
4661 
4662  memcpy(dst_data, src_sd->data, src_sd->size);
4663  }
4664  }
4665 
4666  if (pkt.dts != AV_NOPTS_VALUE)
4667  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4668  if (pkt.pts != AV_NOPTS_VALUE)
4669  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4670 
4671  if (pkt.pts != AV_NOPTS_VALUE)
4672  pkt.pts *= ist->ts_scale;
4673  if (pkt.dts != AV_NOPTS_VALUE)
4674  pkt.dts *= ist->ts_scale;
4675 
4676  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4677  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4678  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4679  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4680  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4681  int64_t delta = pkt_dts - ifile->last_ts;
4682  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4683  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4684  ifile->ts_offset -= delta;
4685  av_log(NULL, AV_LOG_DEBUG,
4686  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4687  delta, ifile->ts_offset);
4688  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4689  if (pkt.pts != AV_NOPTS_VALUE)
4690  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4691  }
4692  }
4693 
4694  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4695  if (pkt.pts != AV_NOPTS_VALUE) {
4696  pkt.pts += duration;
4697  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4698  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4699  }
4700 
4701  if (pkt.dts != AV_NOPTS_VALUE)
4702  pkt.dts += duration;
4703 
4704  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4705 
4706  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4707  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4708  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4709  ist->st->time_base, AV_TIME_BASE_Q,
4710  AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4711  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4712  disable_discontinuity_correction = 0;
4713  }
4714 
4715  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4716  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4717  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4718  !disable_discontinuity_correction) {
4719  int64_t delta = pkt_dts - ist->next_dts;
4720  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4721  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4722  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4723  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4724  ifile->ts_offset -= delta;
4725  av_log(NULL, AV_LOG_DEBUG,
4726  "timestamp discontinuity for stream #%d:%d "
4727  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4728  ist->file_index, ist->st->index, ist->st->id,
4729  av_get_media_type_string(ist->dec_ctx->codec_type),
4730  delta, ifile->ts_offset);
4731  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4732  if (pkt.pts != AV_NOPTS_VALUE)
4733  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4734  }
4735  } else {
4736  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4737  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4738  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4739  pkt.dts = AV_NOPTS_VALUE;
4740  }
4741  if (pkt.pts != AV_NOPTS_VALUE){
4742  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4743  delta = pkt_pts - ist->next_dts;
4744  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4745  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4746  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4747  pkt.pts = AV_NOPTS_VALUE;
4748  }
4749  }
4750  }
4751  }
4752 
4753  if (pkt.dts != AV_NOPTS_VALUE)
4754  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4755 
4756  if (debug_ts) {
4757  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4758  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4759  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4760  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4761  av_ts2str(input_files[ist->file_index]->ts_offset),
4762  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4763  }
4764 
4765  sub2video_heartbeat(ist, pkt.pts);
4766 
4767  process_input_packet(ist, &pkt, 0);
4768 
4769 discard_packet:
4770  av_packet_unref(&pkt);
4771 
4772  return 0;
4773 }
4774 
4782 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4783 {
4784  int i, ret;
4785  int nb_requests, nb_requests_max = 0;
4786  InputFilter *ifilter;
4787  InputStream *ist;
4788 
4789  *best_ist = NULL;
4790  ret = avfilter_graph_request_oldest(graph->graph);
4791  if (ret >= 0)
4792  return reap_filters(0);
4793 
4794  if (ret == AVERROR_EOF) {
4795  ret = reap_filters(1);
4796  for (i = 0; i < graph->nb_outputs; i++)
4797  close_output_stream(graph->outputs[i]->ost);
4798  return ret;
4799  }
4800  if (ret != AVERROR(EAGAIN))
4801  return ret;
4802 
4803  for (i = 0; i < graph->nb_inputs; i++) {
4804  ifilter = graph->inputs[i];
4805  ist = ifilter->ist;
4806  if (input_files[ist->file_index]->eagain ||
4807  input_files[ist->file_index]->eof_reached)
4808  continue;
4809  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4810  if (nb_requests > nb_requests_max) {
4811  nb_requests_max = nb_requests;
4812  *best_ist = ist;
4813  }
4814  }
4815 
4816  if (!*best_ist)
4817  for (i = 0; i < graph->nb_outputs; i++)
4818  graph->outputs[i]->ost->unavailable = 1;
4819 
4820  return 0;
4821 }
4822 
4828 static int transcode_step(void)
4829 {
4830  OutputStream *ost;
4831  InputStream *ist = NULL;
4832  int ret;
4833 
4834  ost = choose_output();
4835  if (!ost) {
4836  if (got_eagain()) {
4837  reset_eagain();
4838  av_usleep(10000);
4839  return 0;
4840  }
4841  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4842  return AVERROR_EOF;
4843  }
4844 
4845  if (ost->filter && !ost->filter->graph->graph) {
4846  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4847  ret = configure_filtergraph(ost->filter->graph);
4848  if (ret < 0) {
4849  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4850  return ret;
4851  }
4852  }
4853  }
4854 
4855  if (ost->filter && ost->filter->graph->graph) {
4856  /*
4857  * Similar case to the early audio initialization in reap_filters.
4858  * Audio is special in ffmpeg.c currently as we depend on lavfi's
4859  * audio frame buffering/creation to get the output audio frame size
4860  * in samples correct. The audio frame size for the filter chain is
4861  * configured during the output stream initialization.
4862  *
4863  * Apparently avfilter_graph_request_oldest (called in
4864  * transcode_from_filter just down the line) peeks. Peeking already
4865  * puts one frame "ready to be given out", which means that any
4866  * update in filter buffer sink configuration afterwards will not
4867  * help us. And yes, even if it would be utilized,
4868  * av_buffersink_get_samples is affected, as it internally utilizes
4869  * the same early exit for peeked frames.
4870  *
4871  * In other words, if avfilter_graph_request_oldest would not make
4872  * further filter chain configuration or usage of
4873  * av_buffersink_get_samples useless (by just causing the return
4874  * of the peeked AVFrame as-is), we could get rid of this additional
4875  * early encoder initialization.
4876  */
4877  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4878  init_output_stream_wrapper(ost, NULL, 1);
4879 
4880  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4881  return ret;
4882  if (!ist)
4883  return 0;
4884  } else if (ost->filter) {
4885  int i;
4886  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4887  InputFilter *ifilter = ost->filter->graph->inputs[i];
4888  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4889  ist = ifilter->ist;
4890  break;
4891  }
4892  }
4893  if (!ist) {
4894  ost->inputs_done = 1;
4895  return 0;
4896  }
4897  } else {
4898  av_assert0(ost->source_index >= 0);
4899  ist = input_streams[ost->source_index];
4900  }
4901 
4902  ret = process_input(ist->file_index);
4903  if (ret == AVERROR(EAGAIN)) {
4904  if (input_files[ist->file_index]->eagain)
4905  ost->unavailable = 1;
4906  return 0;
4907  }
4908 
4909  if (ret < 0)
4910  return ret == AVERROR_EOF ? 0 : ret;
4911 
4912  return reap_filters(0);
4913 }
4914 
4915 /*
4916  * The following code is the main loop of the file converter
4917  */
4918 static int transcode(void)
4919 {
4920  int ret, i;
4921  AVFormatContext *os;
4922  OutputStream *ost;
4923  InputStream *ist;
4924  int64_t timer_start;
4925  int64_t total_packets_written = 0;
4926 
4927  ret = transcode_init();
4928  if (ret < 0)
4929  goto fail;
4930 
4931  if (stdin_interaction) {
4932  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4933  }
4934 
4935  timer_start = av_gettime_relative();
4936 
4937 #if HAVE_THREADS
4938  if ((ret = init_input_threads()) < 0)
4939  goto fail;
4940 #endif
4941 
4942  while (!received_sigterm) {
4943  int64_t cur_time= av_gettime_relative();
4944 
4945  /* if 'q' pressed, exits */
4946  if (stdin_interaction)
4947  if (check_keyboard_interaction(cur_time) < 0)
4948  break;
4949 
4950  /* check if there's any stream where output is still needed */
4951  if (!need_output()) {
4952  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4953  break;
4954  }
4955 
4956  ret = transcode_step();
4957  if (ret < 0 && ret != AVERROR_EOF) {
4958  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4959  break;
4960  }
4961 
4962  /* dump report by using the output first video and audio streams */
4963  print_report(0, timer_start, cur_time);
4964  }
4965 #if HAVE_THREADS
4966  free_input_threads();
4967 #endif
4968 
4969  /* at the end of stream, we must flush the decoder buffers */
4970  for (i = 0; i < nb_input_streams; i++) {
4971  ist = input_streams[i];
4972  if (!input_files[ist->file_index]->eof_reached) {
4973  process_input_packet(ist, NULL, 0);
4974  }
4975  }
4976  flush_encoders();
4977 
4978  term_exit();
4979 
4980  /* write the trailer if needed and close file */
4981  for (i = 0; i < nb_output_files; i++) {
4982  os = output_files[i]->ctx;
4983  if (!output_files[i]->header_written) {
4984  av_log(NULL, AV_LOG_ERROR,
4985  "Nothing was written into output file %d (%s), because "
4986  "at least one of its streams received no packets.\n",
4987  i, os->url);
4988  continue;
4989  }
4990  if ((ret = av_write_trailer(os)) < 0) {
4991  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4992  if (exit_on_error)
4993  exit_program(1);
4994  }
4995  }
4996 
4997  /* dump report by using the first video and audio streams */
4998  print_report(1, timer_start, av_gettime_relative());
4999 
5000  /* close each encoder */
5001  for (i = 0; i < nb_output_streams; i++) {
5002  ost = output_streams[i];
5003  if (ost->encoding_needed) {
5004  av_freep(&ost->enc_ctx->stats_in);
5005  }
5006  total_packets_written += ost->packets_written;
5008  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
5009  exit_program(1);
5010  }
5011  }
5012 
5013  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
5014  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
5015  exit_program(1);
5016  }
5017 
5018  /* close each decoder */
5019  for (i = 0; i < nb_input_streams; i++) {
5020  ist = input_streams[i];
5021  if (ist->decoding_needed) {
5022  avcodec_close(ist->dec_ctx);
5023  if (ist->hwaccel_uninit)
5024  ist->hwaccel_uninit(ist->dec_ctx);
5025  }
5026  }
5027 
5028  hw_device_free_all();
5029 
5030  /* finished ! */
5031  ret = 0;
5032 
5033  fail:
5034 #if HAVE_THREADS
5035  free_input_threads();
5036 #endif
5037 
5038  if (output_streams) {
5039  for (i = 0; i < nb_output_streams; i++) {
5040  ost = output_streams[i];
5041  if (ost) {
5042  if (ost->logfile) {
5043  if (fclose(ost->logfile))
5044  av_log(NULL, AV_LOG_ERROR,
5045  "Error closing logfile, loss of information possible: %s\n",
5046  av_err2str(AVERROR(errno)));
5047  ost->logfile = NULL;
5048  }
5049  av_freep(&ost->forced_kf_pts);
5050  av_freep(&ost->apad);
5051  av_freep(&ost->disposition);
5052  av_dict_free(&ost->encoder_opts);
5053  av_dict_free(&ost->sws_dict);
5054  av_dict_free(&ost->swr_opts);
5055  av_dict_free(&ost->resample_opts);
5056  }
5057  }
5058  }
5059  return ret;
5060 }
5061 
5062 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
5063 {
5064  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
5065 #if HAVE_GETRUSAGE
5066  struct rusage rusage;
5067 
5068  getrusage(RUSAGE_SELF, &rusage);
5069  time_stamps.user_usec =
5070  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
5071  time_stamps.sys_usec =
5072  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
5073 #elif HAVE_GETPROCESSTIMES
5074  HANDLE proc;
5075  FILETIME c, e, k, u;
5076  proc = GetCurrentProcess();
5077  GetProcessTimes(proc, &c, &e, &k, &u);
5078  time_stamps.user_usec =
5079  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
5080  time_stamps.sys_usec =
5081  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
5082 #else
5083  time_stamps.user_usec = time_stamps.sys_usec = 0;
5084 #endif
5085  return time_stamps;
5086 }
5087 
5088 static int64_t getmaxrss(void)
5089 {
5090 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
5091  struct rusage rusage;
5092  getrusage(RUSAGE_SELF, &rusage);
5093  return (int64_t)rusage.ru_maxrss * 1024;
5094 #elif HAVE_GETPROCESSMEMORYINFO
5095  HANDLE proc;
5096  PROCESS_MEMORY_COUNTERS memcounters;
5097  proc = GetCurrentProcess();
5098  memcounters.cb = sizeof(memcounters);
5099  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
5100  return memcounters.PeakPagefileUsage;
5101 #else
5102  return 0;
5103 #endif
5104 }
5105 
5106 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
5107 {
5108 }
5109 
5110 
5111 void ffmpeg_var_cleanup() {
5112  longjmp_value = 0;
5113  received_sigterm = 0;
5114  received_nb_signals = 0;
5115  ffmpeg_exited = 0;
5116  copy_ts_first_pts = AV_NOPTS_VALUE;
5117 
5118  run_as_daemon = 0;
5119  nb_frames_dup = 0;
5120  dup_warning = 1000;
5121  nb_frames_drop = 0;
5122  nb_output_dumped = 0;
5123 
5124  want_sdp = 1;
5125 
5126  progress_avio = NULL;
5127 
5128  input_streams = NULL;
5129  nb_input_streams = 0;
5130  input_files = NULL;
5131  nb_input_files = 0;
5132 
5133  output_streams = NULL;
5134  nb_output_streams = 0;
5135  output_files = NULL;
5136  nb_output_files = 0;
5137 
5138  filtergraphs = NULL;
5139  nb_filtergraphs = 0;
5140 
5141  last_time = -1;
5142  keyboard_last_time = 0;
5143  first_report = 1;
5144 }
5145 
5146 void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double))
5147 {
5148  report_callback = callback;
5149 }
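/*
 * Illustrative fragment (not part of the original sources): set_report_callback()
 * lets a host application receive the statistics that forward_report() derives from
 * print_report(). The parameter names below are assumptions chosen for readability;
 * only the types are taken from the setter's signature above.
 */
#if 0
static void example_stats_callback(int frame_number, float fps, float quality,
                                   int64_t size, int time, double bitrate, double speed)
{
    /* forward the values to the host application's progress reporting */
}

/* e.g. registered once by the host before ffmpeg_execute() runs:
 *     set_report_callback(example_stats_callback);
 */
#endif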
5150 
5151 void cancel_operation(long id)
5152 {
5153  if (id == 0) {
5154  sigterm_handler(SIGINT);
5155  } else {
5156  cancelSession(id);
5157  }
5158 }
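/*
 * Illustrative note (not part of the original sources): cancel_operation() is the
 * entry point a host application calls to stop work. An id of 0 behaves like a
 * Ctrl+C (SIGINT) for whatever is currently running, while a non-zero id cancels
 * only the matching session, e.g.:
 *
 *     cancel_operation(0);      // interrupt the currently running execution
 *     cancel_operation(12345);  // cancel only session 12345 (hypothetical id)
 */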
5159 
5160 __thread OptionDef *ffmpeg_options = NULL;
5161 
5162 int ffmpeg_execute(int argc, char **argv)
5163 {
5164  char _program_name[] = "ffmpeg";
5165  program_name = (char*)&_program_name;
5166  program_birth_year = 2000;
5167 
5168  #define OFFSET(x) offsetof(OptionsContext, x)
5169  OptionDef options[] = {
5170 
5171  /* main options */
5172  { "L", OPT_EXIT, { .func_arg = show_license }, "show license" },
5173  { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5174  { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5175  { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5176  { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5177  { "version", OPT_EXIT, { .func_arg = show_version }, "show version" },
5178  { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" },
5179  { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" },
5180  { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" },
5181  { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" },
5182  { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" },
5183  { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" },
5184  { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" },
5185  { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" },
5186  { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" },
5187  { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" },
5188  { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" },
5189  { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" },
5190  { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" },
5191  { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" },
5192  { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" },
5193  { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5194  { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5195  { "report", 0, { .func_arg = opt_report }, "generate a report" },
5196  { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" },
5197  { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
5198  { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
5199 
5200  #if CONFIG_AVDEVICE
5201  { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
5202  "list sources of the input device", "device" },
5203  { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
5204  "list sinks of the output device", "device" },
5205  #endif
5206 
5207  { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
5208  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
5209  "force format", "fmt" },
5210  { "y", OPT_BOOL, { &file_overwrite },
5211  "overwrite output files" },
5212  { "n", OPT_BOOL, { &no_file_overwrite },
5213  "never overwrite output files" },
5214  { "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
5215  "Ignore unknown stream types" },
5216  { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
5217  "Copy unknown stream types" },
5218  { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
5219  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5220  "codec name", "codec" },
5221  { "codec", HAS_ARG | OPT_STRING | OPT_SPEC |
5222  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5223  "codec name", "codec" },
5224  { "pre", HAS_ARG | OPT_STRING | OPT_SPEC |
5225  OPT_OUTPUT, { .off = OFFSET(presets) },
5226  "preset name", "preset" },
5227  { "map", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5228  OPT_OUTPUT, { .func_arg = opt_map },
5229  "set input stream mapping",
5230  "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
5231  { "map_channel", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_map_channel },
5232  "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
5233  { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC |
5234  OPT_OUTPUT, { .off = OFFSET(metadata_map) },
5235  "set metadata information of outfile from infile",
5236  "outfile[,metadata]:infile[,metadata]" },
5237  { "map_chapters", HAS_ARG | OPT_INT | OPT_EXPERT | OPT_OFFSET |
5238  OPT_OUTPUT, { .off = OFFSET(chapters_input_file) },
5239  "set chapters mapping", "input_file_index" },
5240  { "t", HAS_ARG | OPT_TIME | OPT_OFFSET |
5241  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(recording_time) },
5242  "record or transcode \"duration\" seconds of audio/video",
5243  "duration" },
5244  { "to", HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(stop_time) },
5245  "record or transcode stop time", "time_stop" },
5246  { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(limit_filesize) },
5247  "set the limit file size in bytes", "limit_size" },
5248  { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
5249  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
5250  "set the start time offset", "time_off" },
5251  { "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
5252  OPT_INPUT, { .off = OFFSET(start_time_eof) },
5253  "set the start time offset relative to EOF", "time_off" },
5254  { "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
5255  OPT_INPUT, { .off = OFFSET(seek_timestamp) },
5256  "enable/disable seeking by timestamp with -ss" },
5257  { "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
5258  OPT_INPUT, { .off = OFFSET(accurate_seek) },
5259  "enable/disable accurate seeking with -ss" },
5260  { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET |
5261  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_ts_offset) },
5262  "set the input ts offset", "time_off" },
5263  { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
5264  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
5265  "set the input ts scale", "scale" },
5266  { "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
5267  "set the recording timestamp ('now' to set the current time)", "time" },
5268  { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
5269  "add metadata", "string=string" },
5270  { "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
5271  "add program with specified streams", "title=string:st=number..." },
5272  { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5273  OPT_OUTPUT, { .func_arg = opt_data_frames },
5274  "set the number of data frames to output", "number" },
5275  { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
5276  "add timings for benchmarking" },
5277  { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
5278  "add timings for each task" },
5279  { "progress", HAS_ARG | OPT_EXPERT, { .func_arg = opt_progress },
5280  "write program-readable progress information", "url" },
5281  { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
5282  "enable or disable interaction on standard input" },
5283  { "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
5284  "set max runtime in seconds in CPU user time", "limit" },
5285  { "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
5286  "dump each input packet" },
5287  { "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
5288  "when dumping packets, also dump the payload" },
5289  { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5290  OPT_INPUT, { .off = OFFSET(rate_emu) },
5291  "read input at native frame rate", "" },
5292  { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
5293  "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
5294  "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
5295  { "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
5296  "video sync method", "" },
5297  { "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
5298  "frame drop threshold", "" },
5299  { "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
5300  "audio sync method", "" },
5301  { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
5302  "audio drift threshold", "threshold" },
5303  { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
5304  "copy timestamps" },
5305  { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5306  "shift input timestamps to start at 0 when using copyts" },
5307  { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5308  "copy input stream time base when stream copying", "mode" },
5313  { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5314  OPT_OUTPUT, { .off = OFFSET(shortest) },
5315  "finish encoding within shortest input" },
5316  { "bitexact", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5317  OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(bitexact) },
5318  "bitexact mode" },
5319  { "apad", OPT_STRING | HAS_ARG | OPT_SPEC |
5320  OPT_OUTPUT, { .off = OFFSET(apad) },
5321  "audio pad", "" },
5322  { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_delta_threshold },
5323  "timestamp discontinuity delta threshold", "threshold" },
5324  { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_error_threshold },
5325  "timestamp error delta threshold", "threshold" },
5326  { "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
5327  "exit on error", "error" },
5328  { "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
5329  "abort on the specified condition flags", "flags" },
5330  { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5331  OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
5332  "copy initial non-keyframes" },
5333  { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
5334  "copy or discard frames before start time" },
5335  { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
5336  "set the number of frames to output", "number" },
5337  { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
5338  OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
5339  "force codec tag/fourcc", "fourcc/tag" },
5340  { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
5341  OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
5342  "use fixed quality scale (VBR)", "q" },
5343  { "qscale", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5344  OPT_OUTPUT, { .func_arg = opt_qscale },
5345  "use fixed quality scale (VBR)", "q" },
5346  { "profile", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_profile },
5347  "set profile", "profile" },
5348  { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
5349  "set stream filtergraph", "filter_graph" },
5350  { "filter_threads", HAS_ARG | OPT_INT, { &filter_nbthreads },
5351  "number of non-complex filter threads" },
5352  { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
5353  "read stream filtergraph description from a file", "filename" },
5354  { "reinit_filter", HAS_ARG | OPT_INT | OPT_SPEC | OPT_INPUT, { .off = OFFSET(reinit_filters) },
5355  "reinit filtergraph on input parameter changes", "" },
5356  { "filter_complex", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5357  "create a complex filtergraph", "graph_description" },
5358  { "filter_complex_threads", HAS_ARG | OPT_INT, { &filter_complex_nbthreads },
5359  "number of threads for -filter_complex" },
5360  { "lavfi", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5361  "create a complex filtergraph", "graph_description" },
5362  { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
5363  "read complex filtergraph description from a file", "filename" },
5364  { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
5365  "enable automatic conversion filters globally" },
5366  { "stats", OPT_BOOL, { &print_stats },
5367  "print progress report during encoding", },
5368  { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
5369  "set the period at which ffmpeg updates stats and -progress output", "time" },
5370  { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5371  OPT_OUTPUT, { .func_arg = opt_attach },
5372  "add an attachment to the output file", "filename" },
5373  { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
5374  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(dump_attachment) },
5375  "extract an attachment into a file", "filename" },
5376  { "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
5377  OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
5378  { "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
5379  "print timestamp debugging info" },
5380  { "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
5381  "ratio of errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success.", "maximum error rate" },
5382  { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
5383  OPT_INPUT, { .off = OFFSET(discard) },
5384  "discard", "" },
5385  { "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
5386  OPT_OUTPUT, { .off = OFFSET(disposition) },
5387  "disposition", "" },
5388  { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
5389  { .off = OFFSET(thread_queue_size) },
5390  "set the maximum number of queued packets from the demuxer" },
5391  { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
5392  "read and decode the streams to fill missing information with heuristics" },
5393 
5394  /* video options */
5395  { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
5396  "set the number of video frames to output", "number" },
5397  { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5398  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
5399  "set frame rate (Hz value, fraction or abbreviation)", "rate" },
5400  { "s", OPT_VIDEO | HAS_ARG | OPT_SUBTITLE | OPT_STRING | OPT_SPEC |
5401  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_sizes) },
5402  "set frame size (WxH or abbreviation)", "size" },
5403  { "aspect", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5404  OPT_OUTPUT, { .off = OFFSET(frame_aspect_ratios) },
5405  "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
5406  { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5407  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
5408  "set pixel format", "format" },
5409  { "bits_per_raw_sample", OPT_VIDEO | OPT_INT | HAS_ARG, { &frame_bits_per_raw_sample },
5410  "set the number of bits per raw sample", "number" },
5411  { "intra", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &intra_only },
5412  "deprecated use -g 1" },
5413  { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
5414  "disable video" },
5415  { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5416  OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
5417  "rate control override for specific intervals", "override" },
5418  { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
5419  OPT_OUTPUT, { .func_arg = opt_video_codec },
5420  "force video codec ('copy' to copy stream)", "codec" },
5421  { "sameq", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5422  "Removed" },
5423  { "same_quant", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5424  "Removed" },
5425  { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
5426  "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
5427  { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
5428  "select the pass number (1 to 3)", "n" },
5429  { "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
5430  OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
5431  "select two pass log file name prefix", "prefix" },
5432  { "deinterlace", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_deinterlace },
5433  "this option is deprecated, use the yadif filter instead" },
5434  { "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
5435  "calculate PSNR of compressed frames" },
5436  { "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
5437  "dump video coding statistics to file" },
5438  { "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
5439  "dump video coding statistics to file", "file" },
5440  { "vstats_version", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &vstats_version },
5441  "Version of the vstats format to use."},
5442  { "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
5443  "set video filters", "filter_graph" },
5444  { "intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5445  OPT_OUTPUT, { .off = OFFSET(intra_matrices) },
5446  "specify intra matrix coeffs", "matrix" },
5447  { "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5448  OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
5449  "specify inter matrix coeffs", "matrix" },
5450  { "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5451  OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
5452  "specify intra matrix coeffs", "matrix" },
5453  { "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
5454  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
5455  "top=1/bottom=0/auto=-1 field first", "" },
5456  { "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5457  OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
5458  "force video tag/fourcc", "fourcc/tag" },
5459  { "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
5460  "show QP histogram" },
5461  { "force_fps", OPT_VIDEO | OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5462  OPT_OUTPUT, { .off = OFFSET(force_fps) },
5463  "force the selected framerate, disable the best supported framerate selection" },
5464  { "streamid", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5465  OPT_OUTPUT, { .func_arg = opt_streamid },
5466  "set the value of an outfile streamid", "streamIndex:value" },
5467  { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5468  OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
5469  "force key frames at specified timestamps", "timestamps" },
5470  { "ab", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5471  "audio bitrate (please use -b:a)", "bitrate" },
5472  { "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5473  "video bitrate (please use -b:v)", "bitrate" },
5474  { "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5475  OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
5476  "use HW accelerated decoding", "hwaccel name" },
5477  { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5478  OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
5479  "select a device for HW acceleration", "devicename" },
5480  { "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5481  OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
5482  "select output format used with HW accelerated decoding", "format" },
5483  #if CONFIG_VIDEOTOOLBOX
5484  { "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
5485  #endif
5486  { "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
5487  "show available HW acceleration methods" },
5488  { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
5489  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
5490  "automatically insert correct rotate filters" },
5491  { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
5492  OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
5493  "automatically insert a scale filter at the end of the filter graph" },
5494 
5495  /* audio options */
5496  { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
5497  "set the number of audio frames to output", "number" },
5498  { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
5499  "set audio quality (codec-specific)", "quality", },
5500  { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5501  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_sample_rate) },
5502  "set audio sampling rate (in Hz)", "rate" },
5503  { "ac", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5504  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_channels) },
5505  "set number of audio channels", "channels" },
5506  { "an", OPT_AUDIO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(audio_disable) },
5507  "disable audio" },
5508  { "acodec", OPT_AUDIO | HAS_ARG | OPT_PERFILE |
5509  OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_audio_codec },
5510  "force audio codec ('copy' to copy stream)", "codec" },
5511  { "atag", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5512  OPT_OUTPUT, { .func_arg = opt_old2new },
5513  "force audio tag/fourcc", "fourcc/tag" },
5514  { "vol", OPT_AUDIO | HAS_ARG | OPT_INT, { &audio_volume },
5515  "change audio volume (256=normal)" , "volume" },
5516  { "sample_fmt", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5517  OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(sample_fmts) },
5518  "set sample format", "format" },
5519  { "channel_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5520  OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_channel_layout },
5521  "set channel layout", "layout" },
5522  { "af", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_filters },
5523  "set audio filters", "filter_graph" },
5524  { "guess_layout_max", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(guess_layout_max) },
5525  "set the maximum number of channels to try to guess the channel layout" },
5526 
5527  /* subtitle options */
5528  { "sn", OPT_SUBTITLE | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(subtitle_disable) },
5529  "disable subtitle" },
5530  { "scodec", OPT_SUBTITLE | HAS_ARG | OPT_PERFILE | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_subtitle_codec },
5531  "force subtitle codec ('copy' to copy stream)", "codec" },
5532  { "stag", OPT_SUBTITLE | HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new }
5533  , "force subtitle tag/fourcc", "fourcc/tag" },
5534  { "fix_sub_duration", OPT_BOOL | OPT_EXPERT | OPT_SUBTITLE | OPT_SPEC | OPT_INPUT, { .off = OFFSET(fix_sub_duration) },
5535  "fix subtitles duration" },
5536  { "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
5537  "set canvas size (WxH or abbreviation)", "size" },
5538 
5539  /* grab options */
5540  { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_channel },
5541  "deprecated, use -channel", "channel" },
5542  { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_standard },
5543  "deprecated, use -standard", "standard" },
5544  { "isync", OPT_BOOL | OPT_EXPERT, { &input_sync }, "this option is deprecated and does nothing", "" },
5545 
5546  /* muxer options */
5547  { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
5548  "set the maximum demux-decode delay", "seconds" },
5549  { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_preload) },
5550  "set the initial demux-decode delay", "seconds" },
5551  { "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
5552  "specify a file in which to print sdp information", "file" },
5553 
5554  { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
5555  "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
5556  { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
5557  "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
5558  "two special values are defined - "
5559  "0 = use frame rate (video) or sample rate (audio),"
5560  "-1 = match source time base", "ratio" },
5561 
5562  { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
5563  "A comma-separated list of bitstream filters", "bitstream_filters" },
5564  { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5565  "deprecated", "audio bitstream_filters" },
5566  { "vbsf", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5567  "deprecated", "video bitstream_filters" },
5568 
5569  { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5570  "set the audio options to the indicated preset", "preset" },
5571  { "vpre", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5572  "set the video options to the indicated preset", "preset" },
5573  { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5574  "set the subtitle options to the indicated preset", "preset" },
5575  { "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5576  "set options from indicated preset file", "filename" },
5577 
5578  { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
5579  "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
5580  { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) },
5581  "set the threshold after which max_muxing_queue_size is taken into account", "bytes" },
5582 
5583  /* data codec support */
5584  { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
5585  "force data codec ('copy' to copy stream)", "codec" },
5586  { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
5587  "disable data" },
5588 
5589  #if CONFIG_VAAPI
5590  { "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
5591  "set VAAPI hardware device (DRM path or X11 display name)", "device" },
5592  #endif
5593 
5594  #if CONFIG_QSV
5595  { "qsv_device", HAS_ARG | OPT_STRING | OPT_EXPERT, { &qsv_device },
5596  "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
5597  #endif
5598 
5599  { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
5600  "initialise hardware device", "args" },
5601  { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
5602  "set hardware device used when filtering", "device" },
5603 
5604  { NULL, },
5605  };
5606 
5607  ffmpeg_options = options;
5608 
5609  int i, ret;
5610  BenchmarkTimeStamps ti;
5611 
5612  int savedCode = setjmp(ex_buf__);
5613  if (savedCode == 0) {
5614 
5615  ffmpeg_var_cleanup();
5616 
5617  init_dynload();
5618 
5619  register_exit(ffmpeg_cleanup);
5620 
5621  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
5622 
5623  av_log_set_flags(AV_LOG_SKIP_REPEATED);
5624  parse_loglevel(argc, argv, options);
5625 
5626  if(argc>1 && !strcmp(argv[1], "-d")){
5627  run_as_daemon=1;
5628  av_log_set_callback(log_callback_null);
5629  argc--;
5630  argv++;
5631  }
5632 
5633  #if CONFIG_AVDEVICE
5634  avdevice_register_all();
5635  #endif
5636  avformat_network_init();
5637 
5638  show_banner(argc, argv, options);
5639 
5640  /* parse options and open all input/output files */
5641  ret = ffmpeg_parse_options(argc, argv);
5642  if (ret < 0)
5643  exit_program(1);
5644 
5645  if (nb_output_files <= 0 && nb_input_files == 0) {
5646  show_usage();
5647  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5648  exit_program(1);
5649  }
5650 
5651  /* file converter / grab */
5652  if (nb_output_files <= 0) {
5653  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
5654  exit_program(1);
5655  }
5656 
5657  for (i = 0; i < nb_output_files; i++) {
5658  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
5659  want_sdp = 0;
5660  }
5661 
5662  current_time = ti = get_benchmark_time_stamps();
5663  if (transcode() < 0)
5664  exit_program(1);
5665  if (do_benchmark) {
5666  int64_t utime, stime, rtime;
5667  current_time = get_benchmark_time_stamps();
5668  utime = current_time.user_usec - ti.user_usec;
5669  stime = current_time.sys_usec - ti.sys_usec;
5670  rtime = current_time.real_usec - ti.real_usec;
5671  av_log(NULL, AV_LOG_INFO,
5672  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5673  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5674  }
5675  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5676  decode_error_stat[0], decode_error_stat[1]);
5677  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
5678  exit_program(69);
5679 
5680  exit_program(received_nb_signals ? 255 : main_ffmpeg_return_code);
5681 
5682  } else {
5683  main_ffmpeg_return_code = longjmp_value;
5684  }
5685 
5686  return main_ffmpeg_return_code;
5687 }
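The functions above — set_report_callback(), ffmpeg_execute() and cancel_operation() — are the entry points an embedding application drives instead of a command-line main(). The fragment below is a minimal host-side sketch, not part of this file: the argument vector, the callback body and the helper names (my_stats_callback, run_transcode) are illustrative assumptions, and only the three entry-point signatures are taken from the listing above.

#include <stdint.h>
#include <stdio.h>

/* Signatures copied from the listing above. */
extern void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double));
extern int  ffmpeg_execute(int argc, char **argv);
extern void cancel_operation(long id);

/* Illustrative callback; the meaning of each parameter is an assumption here. */
static void my_stats_callback(int frame, float fps, float quality,
                              int64_t size, int time, double bitrate, double speed)
{
    fprintf(stderr, "frame=%d fps=%.1f size=%lld\n", frame, fps, (long long)size);
}

static int run_transcode(void)
{
    char *argv[] = { "ffmpeg", "-i", "input.mp4", "-c:v", "mpeg4", "output.mp4" };

    set_report_callback(my_stats_callback);

    /* Returns main_ffmpeg_return_code instead of terminating the process;
     * cancel_operation(0) from another thread would stop the run early. */
    return ffmpeg_execute(sizeof(argv) / sizeof(argv[0]), argv);
}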