FFmpegKit iOS / macOS / tvOS API 6.0
Loading...
Searching...
No Matches
fftools_ffmpeg.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 * Copyright (c) 2018-2022 Taner Sener
4 * Copyright (c) 2023 ARTHENICA LTD
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
28/*
29 * This file is the modified version of ffmpeg.c file living in ffmpeg source code under the fftools folder. We
30 * manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
31 * by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
32 *
33 * ffmpeg-kit changes by ARTHENICA LTD
34 *
35 * 07.2023
36 * --------------------------------------------------------
37 * - FFmpeg 6.0 changes migrated
38 * - cherry-picked commit 7357012bb5205e0d03634aff48fc0167a9248190
39 * - vstats_file, received_sigterm and received_nb_signals updated as thread-local
40 * - forward_report method signature updated
41 * - time field in report_callback/forward_report/set_report_callback updated as double
42 *
43 * mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
44 *
45 * 09.2022
46 * --------------------------------------------------------
47 * - added opt_common.h include
48 * - volatile dropped from thread local variables
49 * - setvbuf call dropped
50 * - flushing stderr dropped
51 * - muxing overhead printed in single line
52 *
53 * 08.2020
54 * --------------------------------------------------------
55 * - OptionDef defines combined
56 *
57 * 06.2020
58 * --------------------------------------------------------
59 * - ignoring signals implemented
60 * - cancel_operation() method signature updated with id
61 * - cancel by execution id implemented
62 * - volatile modifier added to critical variables
63 *
64 * 01.2020
65 * --------------------------------------------------------
66 * - ffprobe support (added ffmpeg_ prefix to methods and variables defined for both ffmpeg and ffprobe)
67 *
68 * 12.2019
69 * --------------------------------------------------------
70 * - concurrent execution support ("__thread" specifier added to variables used by multiple threads,
71 * extern signatures of ffmpeg_opt.c methods called by both ffmpeg and ffprobe added, copied options from
72 * ffmpeg_opt.c and defined them as inline in execute method)
73 *
74 * 08.2018
75 * --------------------------------------------------------
76 * - fftools_ prefix added to file name and parent headers
77 * - forward_report() method, report_callback function pointer and set_report_callback() setter
78 * method added to forward stats
79 * - forward_report() call added from print_report()
80 * - cancel_operation() method added to trigger sigterm_handler
81 * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation
82 *
83 * 07.2018
84 * --------------------------------------------------------
85 * - main() function renamed as execute()
86 * - exit_program() implemented with setjmp
87 * - extern longjmp_value added to access exit code stored in exit_program()
88 * - cleanup() method added
89 */
90
91#include "config.h"
92#include <ctype.h>
93#include <string.h>
94#include <math.h>
95#include <stdlib.h>
96#include <errno.h>
97#include <limits.h>
98#include <stdatomic.h>
99#include <stdint.h>
100
101#include "ffmpegkit_exception.h"
102#include "fftools_opt_common.h"
103
104#if HAVE_IO_H
105#include <io.h>
106#endif
107#if HAVE_UNISTD_H
108#include <unistd.h>
109#endif
110
111#include "libavformat/avformat.h"
112#include "libavdevice/avdevice.h"
113#include "libswresample/swresample.h"
114#include "libavutil/opt.h"
115#include "libavutil/channel_layout.h"
116#include "libavutil/parseutils.h"
117#include "libavutil/samplefmt.h"
118#include "libavutil/fifo.h"
119#include "libavutil/hwcontext.h"
120#include "libavutil/internal.h"
121#include "libavutil/intreadwrite.h"
122#include "libavutil/dict.h"
123#include "libavutil/display.h"
124#include "libavutil/mathematics.h"
125#include "libavutil/pixdesc.h"
126#include "libavutil/avstring.h"
127#include "libavutil/libm.h"
128#include "libavutil/imgutils.h"
129#include "libavutil/timestamp.h"
130#include "libavutil/bprint.h"
131#include "libavutil/time.h"
132#include "libavutil/thread.h"
133#include "libavutil/threadmessage.h"
134#include "libavcodec/mathops.h"
135#include "libavformat/os_support.h"
136
137# include "libavfilter/avfilter.h"
138# include "libavfilter/buffersrc.h"
139# include "libavfilter/buffersink.h"
140
141#if HAVE_SYS_RESOURCE_H
142#include <sys/time.h>
143#include <sys/types.h>
144#include <sys/resource.h>
145#elif HAVE_GETPROCESSTIMES
146#include <windows.h>
147#endif
148#if HAVE_GETPROCESSMEMORYINFO
149#include <windows.h>
150#include <psapi.h>
151#endif
152#if HAVE_SETCONSOLECTRLHANDLER
153#include <windows.h>
154#endif
155
156
157#if HAVE_SYS_SELECT_H
158#include <sys/select.h>
159#endif
160
161#if HAVE_TERMIOS_H
162#include <fcntl.h>
163#include <sys/ioctl.h>
164#include <sys/time.h>
165#include <termios.h>
166#elif HAVE_KBHIT
167#include <conio.h>
168#endif
169
170#include <time.h>
171
172#include "fftools_ffmpeg.h"
173#include "fftools_cmdutils.h"
174#include "fftools_sync_queue.h"
175
176#include "libavutil/avassert.h"
177
178static __thread FILE *vstats_file;
179
// optionally attached as opaque_ref to decoded AVFrames
typedef struct FrameData {
    uint64_t idx;
    int64_t  pts;
    AVRational tb;
    /* NOTE(review): the closing "} FrameData;" line appears to be missing
     * from this extraction — confirm against the original file. */
186
/* Snapshot of time usage, in microseconds, used by -benchmark reporting. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;
    int64_t user_usec;
    int64_t sys_usec;
    /* NOTE(review): the closing "} BenchmarkTimeStamps;" line appears to be
     * missing from this extraction — confirm against the original file. */
192
193static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt);
195static int64_t getmaxrss(void);
197
198__thread int64_t nb_frames_dup = 0;
199__thread uint64_t dup_warning = 1000;
200__thread int64_t nb_frames_drop = 0;
201__thread int64_t decode_error_stat[2];
202__thread unsigned nb_output_dumped = 0;
203
205__thread AVIOContext *progress_avio = NULL;
206
207__thread InputFile **input_files = NULL;
208__thread int nb_input_files = 0;
209
210__thread OutputFile **output_files = NULL;
211__thread int nb_output_files = 0;
212
214__thread int nb_filtergraphs;
215
216__thread int64_t last_time = -1;
217__thread int64_t keyboard_last_time = 0;
218__thread int first_report = 1;
219__thread int qp_histogram[52];
220
221void (*report_callback)(int, float, float, int64_t, double, double, double) = NULL;
222
223extern int opt_map(void *optctx, const char *opt, const char *arg);
224extern int opt_map_channel(void *optctx, const char *opt, const char *arg);
225extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg);
226extern int opt_data_frames(void *optctx, const char *opt, const char *arg);
227extern int opt_progress(void *optctx, const char *opt, const char *arg);
228extern int opt_target(void *optctx, const char *opt, const char *arg);
229extern int opt_vsync(void *optctx, const char *opt, const char *arg);
230extern int opt_abort_on(void *optctx, const char *opt, const char *arg);
231extern int opt_stats_period(void *optctx, const char *opt, const char *arg);
232extern int opt_qscale(void *optctx, const char *opt, const char *arg);
233extern int opt_profile(void *optctx, const char *opt, const char *arg);
234extern int opt_filter_complex(void *optctx, const char *opt, const char *arg);
235extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg);
236extern int opt_attach(void *optctx, const char *opt, const char *arg);
237extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
238extern int opt_video_codec(void *optctx, const char *opt, const char *arg);
239extern int opt_sameq(void *optctx, const char *opt, const char *arg);
240extern int opt_timecode(void *optctx, const char *opt, const char *arg);
241extern int opt_vstats_file(void *optctx, const char *opt, const char *arg);
242extern int opt_vstats(void *optctx, const char *opt, const char *arg);
243extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
244extern int opt_old2new(void *optctx, const char *opt, const char *arg);
245extern int opt_streamid(void *optctx, const char *opt, const char *arg);
246extern int opt_bitrate(void *optctx, const char *opt, const char *arg);
247extern int show_hwaccels(void *optctx, const char *opt, const char *arg);
248extern int opt_video_filters(void *optctx, const char *opt, const char *arg);
249extern int opt_audio_frames(void *optctx, const char *opt, const char *arg);
250extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg);
251extern int opt_audio_codec(void *optctx, const char *opt, const char *arg);
252extern int opt_channel_layout(void *optctx, const char *opt, const char *arg);
253extern int opt_preset(void *optctx, const char *opt, const char *arg);
254extern int opt_audio_filters(void *optctx, const char *opt, const char *arg);
255extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg);
256extern int opt_video_channel(void *optctx, const char *opt, const char *arg);
257extern int opt_video_standard(void *optctx, const char *opt, const char *arg);
258extern int opt_sdp_file(void *optctx, const char *opt, const char *arg);
259#if CONFIG_VAAPI
260extern int opt_vaapi_device(void *optctx, const char *opt, const char *arg);
261#endif
262#if CONFIG_QSV
263extern int opt_qsv_device(void *optctx, const char *opt, const char *arg);
264#endif
265extern int opt_data_codec(void *optctx, const char *opt, const char *arg);
266extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg);
267extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg);
268extern int opt_filter_threads(void *optctx, const char *opt, const char *arg);
269extern __thread int file_overwrite;
270extern __thread int no_file_overwrite;
271extern __thread int do_psnr;
272extern __thread int ignore_unknown_streams;
273extern __thread int copy_unknown_streams;
274extern __thread int recast_media;
275
276#if HAVE_TERMIOS_H
277
278/* init terminal so that we can grab keys */
279__thread struct termios oldtty;
280__thread int restore_tty;
281#endif
282
283extern volatile int handleSIGQUIT;
284extern volatile int handleSIGINT;
285extern volatile int handleSIGTERM;
286extern volatile int handleSIGXCPU;
287extern volatile int handleSIGPIPE;
288
289extern __thread long globalSessionId;
290extern void cancelSession(long sessionId);
291extern int cancelRequested(long sessionId);
292
293/* sub2video hack:
294 Convert subtitles to video with alpha to insert them in filter graphs.
295 This is a temporary solution until libavfilter gets real subtitles support.
296 */
297
/* Allocate a fresh, fully transparent RGB32 canvas for this stream's
 * sub2video frame. Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): the signature line is missing from this extraction
 * (upstream: static int sub2video_get_blank_frame(InputStream *ist)) —
 * confirm against the original file. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Prefer the decoder's dimensions once known; fall back to the
     * configured sub2video canvas size. */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* All-zero RGB32 is fully transparent black. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
312
313static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
314 AVSubtitleRect *r)
315{
316 uint32_t *pal, *dst2;
317 uint8_t *src, *src2;
318 int x, y;
319
320 if (r->type != SUBTITLE_BITMAP) {
321 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
322 return;
323 }
324 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
325 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
326 r->x, r->y, r->w, r->h, w, h
327 );
328 return;
329 }
330
331 dst += r->y * dst_linesize + r->x * 4;
332 src = r->data[0];
333 pal = (uint32_t *)r->data[1];
334 for (y = 0; y < r->h; y++) {
335 dst2 = (uint32_t *)dst;
336 src2 = src;
337 for (x = 0; x < r->w; x++)
338 *(dst2++) = pal[*(src2++)];
339 dst += dst_linesize;
340 src += r->linesize[0];
341 }
342}
343
344static void sub2video_push_ref(InputStream *ist, int64_t pts)
345{
346 AVFrame *frame = ist->sub2video.frame;
347 int i;
348 int ret;
349
350 av_assert1(frame->data[0]);
351 ist->sub2video.last_pts = frame->pts = pts;
352 for (i = 0; i < ist->nb_filters; i++) {
353 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
354 AV_BUFFERSRC_FLAG_KEEP_REF |
355 AV_BUFFERSRC_FLAG_PUSH);
356 if (ret != AVERROR_EOF && ret < 0)
357 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
358 av_err2str(ret));
359 }
360}
361
362void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
363{
364 AVFrame *frame = ist->sub2video.frame;
365 int8_t *dst;
366 int dst_linesize;
367 int num_rects, i;
368 int64_t pts, end_pts;
369
370 if (!frame)
371 return;
372 if (sub) {
373 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
374 AV_TIME_BASE_Q, ist->st->time_base);
375 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
376 AV_TIME_BASE_Q, ist->st->time_base);
377 num_rects = sub->num_rects;
378 } else {
379 /* If we are initializing the system, utilize current heartbeat
380 PTS as the start time, and show until the following subpicture
381 is received. Otherwise, utilize the previous subpicture's end time
382 as the fall-back value. */
383 pts = ist->sub2video.initialize ?
384 heartbeat_pts : ist->sub2video.end_pts;
385 end_pts = INT64_MAX;
386 num_rects = 0;
387 }
388 if (sub2video_get_blank_frame(ist) < 0) {
389 av_log(NULL, AV_LOG_ERROR,
390 "Impossible to get a blank canvas.\n");
391 return;
392 }
393 dst = frame->data [0];
394 dst_linesize = frame->linesize[0];
395 for (i = 0; i < num_rects; i++)
396 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
397 sub2video_push_ref(ist, pts);
398 ist->sub2video.end_pts = end_pts;
399 ist->sub2video.initialize = 0;
400}
401
/* Re-push the current sub2video frame for every subtitle stream in the same
 * file as ist, so filters waiting on a subtitle input do not stall between
 * subtitle packets. Called whenever a frame is read from the file; pts is in
 * ist's stream timebase. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = infile->streams[i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* Only re-push the existing frame if some filter actually failed a
         * frame request since the last push. */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
433
/* Flush the sub2video machinery at end of stream: emit one final blank frame
 * covering the remaining time range, then send EOF to every buffer source.
 * NOTE(review): the signature line is missing from this extraction
 * (upstream: static void sub2video_flush(InputStream *ist)) — confirm
 * against the original file. */
{
    int i;
    int ret;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);
    for (i = 0; i < ist->nb_filters; i++) {
        /* A NULL frame signals EOF to the buffer source. */
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
447
448/* end of sub2video hack */
449
/* Async-signal-safe part of terminal restoration: put the tty back into the
 * state saved by term_init(). No-op unless term_init() changed the tty. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
457
void term_exit(void)
{
    /* Silence any further logging during shutdown. */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* NOTE(review): upstream also calls term_exit_sigsafe() here; that line
     * appears to have been lost in this extraction — confirm against the
     * original file. */
}
463
464static volatile int received_sigterm = 0;
465static volatile int received_nb_signals = 0;
466__thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
467__thread int ffmpeg_exited = 0;
468__thread int main_ffmpeg_return_code = 0;
469__thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
470extern __thread int longjmp_value;
471extern __thread int want_sdp;
472struct EncStatsFile;
473extern __thread struct EncStatsFile *enc_stats_files;
474extern __thread int nb_enc_stats_files;
475
/* Signal handler: records the received signal number so the main loop can
 * terminate gracefully. The upstream "hard exit after >3 signals" escape
 * hatch is deliberately disabled for ffmpeg-kit (library embedding).
 * NOTE(review): the handler's name/parameter line (upstream:
 * sigterm_handler(int sig)) and the received_nb_signals update appear to be
 * missing from this extraction — confirm against the original file. */
static void
{
    // int ret;
    received_sigterm = sig;
    // FFmpegKit - Hard Exit Disabled
    // if(received_nb_signals > 3) {
    //     ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
    //                 strlen("Received > 3 system signals, hard exiting\n"));
    //     if (ret < 0) { /* Do nothing */ };
    //     exit(123);
    // }
}
491
492#if HAVE_SETCONSOLECTRLHANDLER
493static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
494{
495 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
496
497 switch (fdwCtrlType)
498 {
499 case CTRL_C_EVENT:
500 case CTRL_BREAK_EVENT:
501 sigterm_handler(SIGINT);
502 return TRUE;
503
504 case CTRL_CLOSE_EVENT:
505 case CTRL_LOGOFF_EVENT:
506 case CTRL_SHUTDOWN_EVENT:
507 sigterm_handler(SIGTERM);
508 /* Basically, with these 3 events, when we return from this method the
509 process is hard terminated, so stall as long as we need to
510 to try and let the main thread(s) clean up and gracefully terminate
511 (we have at most 5 seconds, but should be done far before that). */
512 while (!ffmpeg_exited) {
513 Sleep(0);
514 }
515 return TRUE;
516
517 default:
518 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
519 return FALSE;
520 }
521}
522#endif
523
524#ifdef __linux__
525#define SIGNAL(sig, func) \
526 do { \
527 action.sa_handler = func; \
528 sigaction(sig, &action, NULL); \
529 } while (0)
530#else
531#define SIGNAL(sig, func) \
532 signal(sig, func)
533#endif
534
/* Install signal handlers and, where termios is available, switch the
 * controlling terminal to raw single-keypress input. Installation of each
 * handler is gated on the handleSIG* flags so the embedding application
 * (ffmpeg-kit) can opt out of intercepting individual signals. */
void term_init(void)
{
#if defined __linux__
    #if defined __aarch64__ || defined __amd64__ || defined __x86_64__
    struct sigaction action = {0};
    #else
    /* NOTE(review): the doubled braces are presumably required by ABIs whose
     * sigaction starts with an aggregate member — confirm. */
    struct sigaction action = {{0}};
    #endif

    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* Save current settings so term_exit_sigsafe() can restore them. */
            oldtty = tty;
            restore_tty = 1;

            /* Raw-ish mode: no echo, no canonical line buffering, no flow
             * control; deliver each byte immediately (VMIN=1, VTIME=0). */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        if (handleSIGQUIT == 1) {
            SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
        }
    }
#endif

    if (handleSIGINT == 1) {
        SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    }
    if (handleSIGTERM == 1) {
        SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    }
#ifdef SIGXCPU
    if (handleSIGXCPU == 1) {
        SIGNAL(SIGXCPU, sigterm_handler);
    }
#endif
#ifdef SIGPIPE
    if (handleSIGPIPE == 1) {
        SIGNAL(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
    }
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
597
598/* read a key without blocking */
/* read a key without blocking */
/* Returns the byte read, 0/-1 from read() on EOF/error, or -1 when no input
 * is pending. Platform-specific: select()+read() with termios, PeekNamedPipe
 * or kbhit() on Windows, and a stub returning -1 elsewhere. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* Poll stdin with a zero timeout so this never blocks. */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        /* Pass read()'s 0 (EOF) or -1 (error) through to the caller. */
        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
649
int decode_interrupt_cb(void *ctx);

/* AVIOInterruptCB callback: tells libavformat to abort blocking I/O once more
 * signals have been received than transcode initializations completed.
 * NOTE(review): the definition's signature line is missing from this
 * extraction (upstream: int decode_interrupt_cb(void *ctx)) — confirm. */
{
    return received_nb_signals > atomic_load(&transcode_init_done);
}
656
657__thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
658
/* Release everything allocated for a transcode run: filter graphs with their
 * queued frames/subtitles, the input/output file arrays, the vstats file and
 * option state. Runs at the end of execute() whether the run succeeded,
 * failed or was cancelled; ret is the pending exit code. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* Drain and free any frames still queued for this input filter. */
            if (ifilter->frame_queue) {
                AVFrame *frame;
                while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
                    av_frame_free(&frame);
                av_fifo_freep2(&ifilter->frame_queue);
            }
            av_freep(&ifilter->displaymatrix);
            /* Likewise for subtitles still queued for sub2video. */
            if (ist->sub2video.sub_queue) {
                AVSubtitle sub;
                while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
                    avsubtitle_free(&sub);
                av_fifo_freep2(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_channel_layout_uninit(&ofilter->ch_layout);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    /* NOTE(review): the bodies of these two loops (the calls that close each
     * output/input file) appear to have been lost in this extraction — as
     * written, the second for-loop parses as the body of the first. Confirm
     * against the original file. */
    for (i = 0; i < nb_output_files; i++)

    for (i = 0; i < nb_input_files; i++)

    if (vstats_file) {
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&filter_nbthreads);

    av_freep(&input_files);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    /* Report the most specific reason for termination. */
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (cancelRequested(globalSessionId)) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
744
/* iterate over all output streams in all output files;
 * pass NULL to start iteration */
/* Returns the next OutputStream after prev, or NULL when exhausted.
 * NOTE(review): the signature line is missing from this extraction
 * (upstream: OutputStream *ost_iter(OutputStream *prev)) — confirm. */
{
    /* Resume right after prev, or at file 0 / stream 0 when prev == NULL. */
    int of_idx  = prev ? prev->file_index : 0;
    int ost_idx = prev ? prev->index + 1  : 0;

    for (; of_idx < nb_output_files; of_idx++) {
        OutputFile *of = output_files[of_idx];
        if (ost_idx < of->nb_streams)
            return of->streams[ost_idx];

        /* Exhausted this file: continue from stream 0 of the next. */
        ost_idx = 0;
    }

    return NULL;
}
762
/* Iterate over all input streams in all input files; pass NULL to start.
 * Returns the next InputStream after prev, or NULL when exhausted.
 * NOTE(review): the signature line is missing from this extraction
 * (upstream: InputStream *ist_iter(InputStream *prev)) — confirm. */
{
    /* Resume right after prev, or at file 0 / stream 0 when prev == NULL. */
    int if_idx  = prev ? prev->file_index : 0;
    int ist_idx = prev ? prev->st->index + 1 : 0;

    for (; if_idx < nb_input_files; if_idx++) {
        InputFile *f = input_files[if_idx];
        if (ist_idx < f->nb_streams)
            return f->streams[ist_idx];

        /* Exhausted this file: continue from stream 0 of the next. */
        ist_idx = 0;
    }

    return NULL;
}
778
779void remove_avoptions(AVDictionary **a, AVDictionary *b)
780{
781 const AVDictionaryEntry *t = NULL;
782
783 while ((t = av_dict_iterate(b, t))) {
784 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
785 }
786}
787
788void assert_avoptions(AVDictionary *m)
789{
790 const AVDictionaryEntry *t;
791 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
792 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
793 exit_program(1);
794 }
795}
796
/* Terminate when an experimental codec was requested without the required
 * strictness level. NOTE(review): both parameters are unused in this body —
 * presumably kept for call-site uniformity; confirm against callers. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
801
/* With -benchmark_all, log the time consumed since the previous call under a
 * label formatted from fmt; a NULL fmt only resets the reference timestamp.
 * NOTE(review): the lines declaring/obtaining the timestamp `t` and the
 * av_log argument list appear to be missing from this extraction (the code
 * as shown does not compile) — confirm against the original file. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            av_log(NULL, AV_LOG_INFO,
                   "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
        }
        current_time = t;
    }
}
822
/* Signal end-of-stream for this output stream's encoder sync queue so muxing
 * can finish. NOTE(review): the signature line and the line declaring `of`
 * are missing from this extraction (upstream: static void
 * close_output_stream(OutputStream *ost)) — confirm against the original. */
{

    if (ost->sq_idx_encode >= 0)
        sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
}
831
/* Return 1 while ts (expressed in timebase tb) is still inside the output
 * file's -t recording window, 0 once the limit has been reached.
 * NOTE(review): the line declaring `of` (upstream: OutputFile *of =
 * output_files[ost->file_index];) and the close_output_stream() call inside
 * the if-body appear to be missing from this extraction — confirm. */
static int check_recording_time(OutputStream *ost, int64_t ts, AVRational tb)
{

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ts, tb, of->recording_time, AV_TIME_BASE_Q) >= 0) {
        return 0;
    }
    return 1;
}
843
/* Rescale frame->pts from the filter timebase to the encoder timebase,
 * subtracting the output file's start time, and return the same pts as a
 * high-precision double (AV_NOPTS_VALUE passes through unchanged).
 * NOTE(review): the first line of the signature is missing from this
 * extraction (upstream: static double adjust_frame_pts_to_encoder_tb(
 * OutputFile *of, OutputStream *ost, AVFrame *frame)) — confirm. */
                                              AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    const int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ?
                               0 : of->start_time;

    AVCodecContext *const enc = ost->enc_ctx;

    AVRational        tb = enc->time_base;
    AVRational filter_tb = frame->time_base;
    /* Extra fractional bits keep sub-tick precision in the double result. */
    const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

    if (frame->pts == AV_NOPTS_VALUE)
        goto early_exit;

    tb.den <<= extra_bits;
    float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
    float_pts /= 1 << extra_bits;
    // avoid exact midpoints to reduce the chance of rounding differences, this
    // can be removed in case the fps code is changed to work with integers
    float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

    frame->pts = av_rescale_q(frame->pts, filter_tb, enc->time_base) -
                 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    frame->time_base = enc->time_base;

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
885
886static int init_output_stream(OutputStream *ost, AVFrame *frame,
887 char *error, int error_len);
888
889static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
890 unsigned int fatal)
891{
892 int ret = AVERROR_BUG;
893 char error[1024] = {0};
894
895 if (ost->initialized)
896 return 0;
897
898 ret = init_output_stream(ost, frame, error, sizeof(error));
899 if (ret < 0) {
900 av_log(ost, AV_LOG_ERROR, "Error initializing output stream: %s\n",
901 error);
902
903 if (fatal)
904 exit_program(1);
905 }
906
907 return ret;
908}
909
910static double psnr(double d)
911{
912 return -10.0 * log10(d);
913}
914
/* Pull encoder quality metrics (quality value, picture type, per-plane error)
 * out of the packet's AV_PKT_DATA_QUALITY_STATS side data and, when
 * write_vstats is set, append one line for this packet to the -vstats file
 * (opened lazily on first use). */
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
{
    const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                                NULL);
    AVCodecContext *enc = ost->enc_ctx;
    int64_t frame_number;
    double ti1, bitrate, avg_bitrate;

    /* Side-data layout used below: 32-bit quality at offset 0, picture type
     * byte at 4, error count at 5, 64-bit errors from offset 8.
     * NOTE(review): inferred from these accesses — confirm against the
     * libavcodec AV_PKT_DATA_QUALITY_STATS documentation. */
    ost->quality   = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;
    }

    if (!write_vstats)
        return;

    /* this is executed just the first time update_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    frame_number = ost->packets_encoded;
    /* vstats_version 2 additionally records file and stream indices. */
    if (vstats_version <= 1) {
        fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    } else {
        fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    }

    if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

    fprintf(vstats_file,"f_size= %6d ", pkt->size);
    /* compute pts value */
    ti1 = pkt->dts * av_q2d(pkt->time_base);
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate     = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
    avg_bitrate = (double)(ost->data_size_enc * 8) / ti1 / 1000.0;
    fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)ost->data_size_enc / 1024, ti1, bitrate, avg_bitrate);
    fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
}
969
/* Write one formatted line of encoding statistics (one component per field,
 * as configured in es->components) for either a frame (pre-encode) or a
 * packet (post-encode), then flush.
 * NOTE(review): the first line of the signature is missing from this
 * extraction (upstream: void enc_stats_write(OutputStream *ost, EncStats *es,
 * ...)) — confirm against the original file. */
                     const AVFrame *frame, const AVPacket *pkt,
                     uint64_t frame_num)
{
    AVIOContext *io = es->io;
    /* Exactly one of frame/pkt is expected; take timing from whichever is
     * present. */
    AVRational   tb = frame ? frame->time_base : pkt->time_base;
    int64_t     pts = frame ? frame->pts : pkt->pts;

    AVRational  tbi = (AVRational){ 0, 1};
    int64_t    ptsi = INT64_MAX;

    const FrameData *fd;

    /* Input-side timing travels with the frame/packet as opaque_ref. */
    if ((frame && frame->opaque_ref) || (pkt && pkt->opaque_ref)) {
        fd   = (const FrameData*)(frame ? frame->opaque_ref->data : pkt->opaque_ref->data);
        tbi  = fd->tb;
        ptsi = fd->pts;
    }

    for (size_t i = 0; i < es->nb_components; i++) {
        const EncStatsComponent *c = &es->components[i];

        /* Components valid for both frames and packets. */
        switch (c->type) {
        case ENC_STATS_LITERAL:         avio_write (io, c->str,     c->str_len);                    continue;
        case ENC_STATS_FILE_IDX:        avio_printf(io, "%d",       ost->file_index);               continue;
        case ENC_STATS_STREAM_IDX:      avio_printf(io, "%d",       ost->index);                    continue;
        case ENC_STATS_TIMEBASE:        avio_printf(io, "%d/%d",    tb.num, tb.den);                continue;
        case ENC_STATS_TIMEBASE_IN:     avio_printf(io, "%d/%d",    tbi.num, tbi.den);              continue;
        case ENC_STATS_PTS:             avio_printf(io, "%"PRId64,  pts);                           continue;
        case ENC_STATS_PTS_IN:          avio_printf(io, "%"PRId64,  ptsi);                          continue;
        case ENC_STATS_PTS_TIME:        avio_printf(io, "%g",       pts * av_q2d(tb));              continue;
        case ENC_STATS_PTS_TIME_IN:     avio_printf(io, "%g",       ptsi == INT64_MAX ?
                                                                    INFINITY : ptsi * av_q2d(tbi)); continue;
        case ENC_STATS_FRAME_NUM:       avio_printf(io, "%"PRIu64,  frame_num);                     continue;
        case ENC_STATS_FRAME_NUM_IN:    avio_printf(io, "%"PRIu64,  fd ? fd->idx : -1);             continue;
        }

        if (frame) {
            /* Frame-only components. */
            switch (c->type) {
            case ENC_STATS_SAMPLE_NUM:  avio_printf(io, "%"PRIu64,  ost->samples_encoded);          continue;
            case ENC_STATS_NB_SAMPLES:  avio_printf(io, "%d",       frame->nb_samples);             continue;
            default: av_assert0(0);
            }
        } else {
            /* Packet-only components. */
            switch (c->type) {
            case ENC_STATS_DTS:         avio_printf(io, "%"PRId64,  pkt->dts);                      continue;
            case ENC_STATS_DTS_TIME:    avio_printf(io, "%g",       pkt->dts * av_q2d(tb));         continue;
            case ENC_STATS_PKT_SIZE:    avio_printf(io, "%d",       pkt->size);                     continue;
            case ENC_STATS_BITRATE: {
                double duration = FFMAX(pkt->duration, 1) * av_q2d(tb);
                avio_printf(io, "%g",  8.0 * pkt->size / duration);
                continue;
            }
            case ENC_STATS_AVG_BITRATE: {
                double duration = pkt->dts * av_q2d(tb);
                avio_printf(io, "%g",  duration > 0 ? 8.0 * ost->data_size_enc / duration : -1.);
                continue;
            }
            default: av_assert0(0);
            }
        }
    }
    avio_w8(io, '\n');
    avio_flush(io);
}
1035
/*
 * Feed one frame to the encoder of 'ost' (frame == NULL requests a flush)
 * and drain every packet the encoder has ready, forwarding each one to the
 * muxer through of_output_packet().
 *
 * Returns 0 when the encoder wants more input (EAGAIN), AVERROR_EOF once
 * flushing is complete, or a negative error code on failure.
 */
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    const char *type_desc = av_get_media_type_string(enc->codec_type);
    const char *action = frame ? "encode" : "flush"; // label for benchmark output only
    int ret;

    if (frame) {
        // Optional per-frame statistics written before the frame enters the encoder.
        if (ost->enc_stats_pre.io)
            enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL,
                            ost->frames_encoded);

        ost->frames_encoded++;
        ost->samples_encoded += frame->nb_samples;

        if (debug_ts) {
            av_log(ost, AV_LOG_INFO, "encoder <- type:%s "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   type_desc,
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }
    }

    update_benchmark(NULL);

    ret = avcodec_send_frame(enc, frame);
    // AVERROR_EOF in response to a flush request (frame == NULL) is expected.
    if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
        av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
               type_desc);
        return ret;
    }

    while (1) {
        ret = avcodec_receive_packet(enc, pkt);
        update_benchmark("%s_%s %d.%d", action, type_desc,
                         ost->file_index, ost->index);

        pkt->time_base = enc->time_base;

        /* if two pass, output log on success and EOF */
        if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
            fprintf(ost->logfile, "%s", enc->stats_out);

        if (ret == AVERROR(EAGAIN)) {
            av_assert0(frame); // should never happen during flushing
            return 0;
        } else if (ret == AVERROR_EOF) {
            // Signal end-of-stream to the muxer (eof flag set).
            of_output_packet(of, pkt, ost, 1);
            return ret;
        } else if (ret < 0) {
            av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
            return ret;
        }

        /* NOTE(review): upstream FFmpeg 6.0 calls update_video_stats() as the
         * body of this 'if'; that line appears to have been dropped in this
         * extraction, leaving the following 'if' as its body — verify against
         * upstream fftools/ffmpeg.c. */
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
        if (ost->enc_stats_post.io)
            enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt,
                            ost->packets_encoded);

        if (debug_ts) {
            av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
                   av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
        }

        // Convert packet timestamps from the encoder to the muxer time base.
        av_packet_rescale_ts(pkt, pkt->time_base, ost->mux_timebase);
        pkt->time_base = ost->mux_timebase;

        if (debug_ts) {
            av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
                   av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
        }

        if ((ret = trigger_fix_sub_duration_heartbeat(ost, pkt)) < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Subtitle heartbeat logic failed in %s! (%s)\n",
                   __func__, av_err2str(ret));
            exit_program(1);
        }

        ost->data_size_enc += pkt->size;

        ost->packets_encoded++;

        of_output_packet(of, pkt, ost, 0);
    }

    av_assert0(0); // loop only exits via return
}
1137
                               AVFrame *frame)
{
    /* NOTE(review): the opening line of this definition (upstream:
     * "static int submit_encode_frame(OutputFile *of, OutputStream *ost,")
     * is missing from this extraction — confirm against upstream. */
    int ret;

    // Streams not managed by a sync queue are encoded immediately.
    if (ost->sq_idx_encode < 0)
        return encode_frame(of, ost, frame);

    if (frame) {
        // Hand the sync queue its own reference; the caller keeps 'frame'.
        ret = av_frame_ref(ost->sq_frame, frame);
        if (ret < 0)
            return ret;
        frame = ost->sq_frame;
    }

    ret = sq_send(of->sq_encode, ost->sq_idx_encode,
                  SQFRAME(frame));
    if (ret < 0) {
        if (frame)
            av_frame_unref(frame);
        if (ret != AVERROR_EOF)
            return ret;
    }

    // Drain whatever the sync queue is ready to release and encode it.
    while (1) {
        AVFrame *enc_frame = ost->sq_frame;

        /* NOTE(review): the receiving call (upstream: "ret =
         * sq_receive(of->sq_encode, ost->sq_idx_encode,") appears to have
         * been dropped in extraction; only its continuation line remains. */
                         SQFRAME(enc_frame));
        if (ret == AVERROR_EOF) {
            enc_frame = NULL; // propagate the flush to the encoder
        } else if (ret < 0) {
            return (ret == AVERROR(EAGAIN)) ? 0 : ret;
        }

        ret = encode_frame(of, ost, enc_frame);
        if (enc_frame)
            av_frame_unref(enc_frame);
        if (ret < 0) {
            /* NOTE(review): upstream closes the output stream on EOF and then
             * returns ret unconditionally here; lines appear to have been
             * dropped in extraction — verify against upstream. */
            if (ret == AVERROR_EOF)
                return ret;
        }
    }
}
1183
                         AVFrame *frame)
{
    /* NOTE(review): the first line of this definition (upstream:
     * "static void do_audio_out(OutputFile *of, OutputStream *ost,")
     * is missing from this extraction — confirm against upstream. */
    AVCodecContext *enc = ost->enc_ctx;
    int ret;

    /* Frames without a timestamp continue from the running next_pts counter;
     * otherwise rescale the frame's pts into the encoder time base, shifted
     * by the output file's start time. */
    if (frame->pts == AV_NOPTS_VALUE)
        frame->pts = ost->next_pts;
    else {
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        frame->pts =
            av_rescale_q(frame->pts, frame->time_base, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }
    frame->time_base = enc->time_base;

    // Stop submitting once the recording-time limit is reached.
    if (!check_recording_time(ost, frame->pts, frame->time_base))
        return;

    /* Advance by the sample count — assumes the audio encoder time base is
     * 1/sample_rate (TODO confirm against encoder setup). */
    ost->next_pts = frame->pts + frame->nb_samples;

    ret = submit_encode_frame(of, ost, frame);
    if (ret < 0 && ret != AVERROR_EOF)
        exit_program(1);
}
1209
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    /* NOTE(review): the first line of this definition (upstream:
     * "static void do_subtitle_out(OutputFile *of,") is missing from this
     * extraction — confirm against upstream. */
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i, ret;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    /* NOTE(review): upstream subtracts the output file's start_time from
     * 'pts' as the body of this 'if'; that line appears to have been dropped
     * in extraction, making the 'for' below the body — verify upstream. */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        if (!check_recording_time(ost, pts, AV_TIME_BASE_Q))
            return;

        ret = av_new_packet(pkt, subtitle_out_max_size);
        if (ret < 0)
            report_and_exit(AVERROR(ENOMEM));

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        // Second DVB packet clears the display: encode with no rectangles.
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_shrink_packet(pkt, subtitle_out_size);
        pkt->time_base = ost->mux_timebase;
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, pkt->time_base);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
        }
        pkt->dts = pkt->pts;

        of_output_packet(of, pkt, ost, 0);
    }
}
1286
1287/* Convert frame timestamps to the encoder timebase and decide how many times
1288 * should this (and possibly previous) frame be repeated in order to conform to
1289 * desired target framerate (if any).
1290 */
                               AVFrame *next_picture, double duration,
                               int64_t *nb_frames, int64_t *nb_frames_prev)
{
    /* NOTE(review): the first line of this definition (upstream:
     * "static void video_sync_process(OutputFile *of, OutputStream *ost,")
     * is missing from this extraction — confirm against upstream. */
    double delta0, delta;

    double sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
    /* delta0 is the "drift" between the input frame (next_picture) and
     * where it would fall in the output. */
    delta0 = sync_ipts - ost->next_pts;
    delta = delta0 + duration;

    // tracks the number of times the PREVIOUS frame should be duplicated,
    // mostly for variable framerate (VFR)
    *nb_frames_prev = 0;
    /* by default, we output a single frame */
    *nb_frames = 1;

    /* NOTE(review): upstream's condition also contains
     * "ost->vsync_method != VSYNC_PASSTHROUGH &&"; that line appears to have
     * been dropped in extraction — verify against upstream. */
    if (delta0 < 0 &&
        delta > 0 &&
        ost->vsync_method != VSYNC_DROP) {
        if (delta0 < -0.6) {
            av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
        } else
            av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        // Snap the frame forward onto the next output slot.
        sync_ipts = ost->next_pts;
        duration += delta0;
        delta0 = 0;
    }

    switch (ost->vsync_method) {
    case VSYNC_VSCFR:
        // Drop initial frames instead of duplicating into a leading gap.
        if (ost->vsync_frame_number == 0 && delta0 >= 0.5) {
            av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            delta = duration;
            delta0 = 0;
            ost->next_pts = llrint(sync_ipts);
        }
        /* fallthrough — after the initial adjustment VSCFR behaves like CFR */
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) {
            *nb_frames = 0;
        } else if (delta < -1.1)
            *nb_frames = 0;
        else if (delta > 1.1) {
            *nb_frames = llrintf(delta);
            if (delta0 > 1.1)
                *nb_frames_prev = llrintf(delta0 - 0.6);
        }
        next_picture->duration = 1;
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
            *nb_frames = 0;
        else if (delta > 0.6)
            ost->next_pts = llrint(sync_ipts);
        next_picture->duration = duration;
        break;
    case VSYNC_DROP:
    case VSYNC_PASSTHROUGH:
        next_picture->duration = duration;
        ost->next_pts = llrint(sync_ipts);
        break;
    default:
        av_assert0(0);
    }
}
1359
/*
 * Decide whether the frame about to be encoded must be forced into a
 * keyframe according to the -force_key_frames configuration in 'kf'.
 * Returns AV_PICTURE_TYPE_I to force a keyframe, AV_PICTURE_TYPE_NONE
 * otherwise.
 */
enum AVPictureType forced_kf_apply(void *logctx, KeyframeForceCtx *kf,
                                   AVRational tb, const AVFrame *in_picture,
                                   int dup_idx)
{
    double pts_time;

    // First frame seen becomes the reference point for relative times.
    if (kf->ref_pts == AV_NOPTS_VALUE)
        kf->ref_pts = in_picture->pts;

    pts_time = (in_picture->pts - kf->ref_pts) * av_q2d(tb);
    if (kf->index < kf->nb_pts &&
        av_compare_ts(in_picture->pts, tb, kf->pts[kf->index], AV_TIME_BASE_Q) >= 0) {
        // Explicit timestamp list: the next configured keyframe time was reached.
        kf->index++;
        goto force_keyframe;
    } else if (kf->pexpr) {
        // Expression mode: evaluate the user-supplied force_key_frames expr.
        double res;
        kf->expr_const_values[FKF_T] = pts_time;
        res = av_expr_eval(kf->pexpr,
                           kf->expr_const_values, NULL);
        /* NOTE(review): upstream passes the n / n_forced / prev_forced_n / t /
         * prev_forced_t constant values here; those argument lines appear to
         * have been dropped in extraction (the format string expects six
         * values but only 'res' remains) — verify against upstream. */
        ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                res);

        kf->expr_const_values[FKF_N] += 1;

        if (res) {
            /* NOTE(review): upstream updates FKF_N_FORCED, FKF_PREV_FORCED_N
             * and FKF_PREV_FORCED_T here; those lines appear to have been
             * dropped in extraction — verify against upstream. */
            goto force_keyframe;
        }
    } else if (kf->type == KF_FORCE_SOURCE &&
               in_picture->key_frame == 1 && !dup_idx) {
        // Mirror source keyframes, but never on duplicated frames.
        goto force_keyframe;
    } else if (kf->type == KF_FORCE_SOURCE_NO_DROP && !dup_idx) {
        kf->dropped_keyframe = 0;
        if ((in_picture->key_frame == 1) || kf->dropped_keyframe)
            goto force_keyframe;
    }

    return AV_PICTURE_TYPE_NONE;

force_keyframe:
    av_log(logctx, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
    return AV_PICTURE_TYPE_I;
}
1410
1411/* May modify/reset next_picture */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    /* Apply video sync (frame duplication/dropping) and send the resulting
     * frame(s) to the encoder. next_picture == NULL flushes the stream. */
    int ret;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int64_t nb_frames, nb_frames_prev, i;
    double duration = 0;
    InputStream *ist = ost->ist;
    AVFilterContext *filter = ost->filter->filter;

    init_output_stream_wrapper(ost, next_picture, 1);

    // Nominal frame duration in encoder time-base units from the sink rate.
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    // Without any filters in play, trust the input frame's own duration.
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        // Estimate remaining duplicates from the recent history (median of 3).
        nb_frames_prev = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                              ost->last_nb0_frames[1],
                                              ost->last_nb0_frames[2]);
    } else {
        video_sync_process(of, ost, next_picture, duration,
                           &nb_frames, &nb_frames_prev);
    }

    // Shift the duplication history and record the newest value.
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb_frames_prev;

    if (nb_frames_prev == 0 && ost->last_dropped) {
        /* NOTE(review): upstream increments nb_frames_drop here; the line
         * appears to have been dropped in extraction — verify upstream. */
        av_log(ost, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" at ts %"PRId64"\n",
               ost->vsync_frame_number, ost->last_frame->pts);
    }
    if (nb_frames > (nb_frames_prev && ost->last_dropped) + (nb_frames > nb_frames_prev)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
            /* NOTE(review): upstream increments nb_frames_drop before this
             * return; the line appears to have been dropped in extraction. */
            return;
        }
        nb_frames_dup += nb_frames - (nb_frames_prev && ost->last_dropped) - (nb_frames > nb_frames_prev);
        av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb_frames_prev && next_picture;
    ost->kf.dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;

        // While filling a duplication gap, re-send the previous frame.
        if (i < nb_frames_prev && ost->last_frame->buf[0]) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->next_pts;

        if (!check_recording_time(ost, in_picture->pts, ost->enc_ctx->time_base))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = forced_kf_apply(ost, &ost->kf, enc->time_base, in_picture, i);

        ret = submit_encode_frame(of, ost, in_picture);
        if (ret == AVERROR_EOF)
            break;
        else if (ret < 0)
            exit_program(1);

        ost->next_pts++;
        ost->vsync_frame_number++;
    }

    // Keep the frame just sent in case it must be duplicated later.
    av_frame_unref(ost->last_frame);
    if (next_picture)
        av_frame_move_ref(ost->last_frame, next_picture);
}
1513
1520static int reap_filters(int flush)
1521{
1522 AVFrame *filtered_frame = NULL;
1523
1524 /* Reap all buffers present in the buffer sinks */
1525 for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1526 OutputFile *of = output_files[ost->file_index];
1527 AVFilterContext *filter;
1528 AVCodecContext *enc = ost->enc_ctx;
1529 int ret = 0;
1530
1531 if (!ost->filter || !ost->filter->graph->graph)
1532 continue;
1533 filter = ost->filter->filter;
1534
1535 /*
1536 * Unlike video, with audio the audio frame size matters.
1537 * Currently we are fully reliant on the lavfi filter chain to
1538 * do the buffering deed for us, and thus the frame size parameter
1539 * needs to be set accordingly. Where does one get the required
1540 * frame size? From the initialized AVCodecContext of an audio
1541 * encoder. Thus, if we have gotten to an audio stream, initialize
1542 * the encoder earlier than receiving the first AVFrame.
1543 */
1544 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1545 init_output_stream_wrapper(ost, NULL, 1);
1546
1547 filtered_frame = ost->filtered_frame;
1548
1549 while (1) {
1550 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1551 AV_BUFFERSINK_FLAG_NO_REQUEST);
1552 if (ret < 0) {
1553 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1554 av_log(NULL, AV_LOG_WARNING,
1555 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1556 } else if (flush && ret == AVERROR_EOF) {
1557 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1558 do_video_out(of, ost, NULL);
1559 }
1560 break;
1561 }
1562 if (ost->finished) {
1563 av_frame_unref(filtered_frame);
1564 continue;
1565 }
1566
1567 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1568 AVRational tb = av_buffersink_get_time_base(filter);
1569 ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb,
1570 AV_TIME_BASE_Q);
1571 filtered_frame->time_base = tb;
1572
1573 if (debug_ts)
1574 av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
1575 av_ts2str(filtered_frame->pts),
1576 av_ts2timestr(filtered_frame->pts, &tb),
1577 tb.num, tb.den);
1578 }
1579
1580 switch (av_buffersink_get_type(filter)) {
1581 case AVMEDIA_TYPE_VIDEO:
1582 if (!ost->frame_aspect_ratio.num)
1583 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1584
1585 do_video_out(of, ost, filtered_frame);
1586 break;
1587 case AVMEDIA_TYPE_AUDIO:
1588 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1589 enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
1590 av_log(NULL, AV_LOG_ERROR,
1591 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1592 break;
1593 }
1594 do_audio_out(of, ost, filtered_frame);
1595 break;
1596 default:
1597 // TODO support subtitle filters
1598 av_assert0(0);
1599 }
1600
1601 av_frame_unref(filtered_frame);
1602 }
1603 }
1604
1605 return 0;
1606}
1607
1608static void print_final_stats(int64_t total_size)
1609{
1610 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1611 uint64_t subtitle_size = 0;
1612 uint64_t data_size = 0;
1613 float percent = -1.0;
1614 int i, j;
1615 int pass1_used = 1;
1616
1617 for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1618 AVCodecParameters *par = ost->st->codecpar;
1619 const uint64_t s = ost->data_size_mux;
1620
1621 switch (par->codec_type) {
1622 case AVMEDIA_TYPE_VIDEO: video_size += s; break;
1623 case AVMEDIA_TYPE_AUDIO: audio_size += s; break;
1624 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += s; break;
1625 default: other_size += s; break;
1626 }
1627 extra_size += par->extradata_size;
1628 data_size += s;
1629 if (ost->enc_ctx &&
1630 (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1631 != AV_CODEC_FLAG_PASS1)
1632 pass1_used = 0;
1633 }
1634
1635 if (data_size && total_size>0 && total_size >= data_size)
1636 percent = 100.0 * (total_size - data_size) / data_size;
1637
1638 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1639 video_size / 1024.0,
1640 audio_size / 1024.0,
1641 subtitle_size / 1024.0,
1642 other_size / 1024.0,
1643 extra_size / 1024.0);
1644 if (percent >= 0.0)
1645 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1646 else
1647 av_log(NULL, AV_LOG_INFO, "unknown");
1648 av_log(NULL, AV_LOG_INFO, "\n");
1649
1650 /* print verbose per-stream stats */
1651 for (i = 0; i < nb_input_files; i++) {
1652 InputFile *f = input_files[i];
1653 uint64_t total_packets = 0, total_size = 0;
1654
1655 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1656 i, f->ctx->url);
1657
1658 for (j = 0; j < f->nb_streams; j++) {
1659 InputStream *ist = f->streams[j];
1660 enum AVMediaType type = ist->par->codec_type;
1661
1662 total_size += ist->data_size;
1663 total_packets += ist->nb_packets;
1664
1665 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1666 i, j, av_get_media_type_string(type));
1667 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1668 ist->nb_packets, ist->data_size);
1669
1670 if (ist->decoding_needed) {
1671 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1672 ist->frames_decoded);
1673 if (type == AVMEDIA_TYPE_AUDIO)
1674 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1675 av_log(NULL, AV_LOG_VERBOSE, "; ");
1676 }
1677
1678 av_log(NULL, AV_LOG_VERBOSE, "\n");
1679 }
1680
1681 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1682 total_packets, total_size);
1683 }
1684
1685 for (i = 0; i < nb_output_files; i++) {
1686 OutputFile *of = output_files[i];
1687 uint64_t total_packets = 0, total_size = 0;
1688
1689 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1690 i, of->url);
1691
1692 for (j = 0; j < of->nb_streams; j++) {
1693 OutputStream *ost = of->streams[j];
1694 enum AVMediaType type = ost->st->codecpar->codec_type;
1695
1696 total_size += ost->data_size_mux;
1697 total_packets += atomic_load(&ost->packets_written);
1698
1699 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1700 i, j, av_get_media_type_string(type));
1701 if (ost->enc_ctx) {
1702 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1703 ost->frames_encoded);
1704 if (type == AVMEDIA_TYPE_AUDIO)
1705 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1706 av_log(NULL, AV_LOG_VERBOSE, "; ");
1707 }
1708
1709 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1710 atomic_load(&ost->packets_written), ost->data_size_mux);
1711
1712 av_log(NULL, AV_LOG_VERBOSE, "\n");
1713 }
1714
1715 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1716 total_packets, total_size);
1717 }
1718 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1719 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1720 if (pass1_used) {
1721 av_log(NULL, AV_LOG_WARNING, "\n");
1722 } else {
1723 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1724 }
1725 }
1726}
1727
1728static void forward_report(uint64_t frame_number, float fps, float quality, int64_t total_size, int seconds, int microseconds, double bitrate, double speed)
1729{
1730 // FORWARD DATA
1731 if (report_callback != NULL) {
1732 report_callback(frame_number, fps, quality, total_size, ((double)seconds*1000) + ((double)microseconds)/1000, bitrate, speed);
1733 }
1734}
1735
/*
 * Build and emit one progress report: forwards it to the FFmpegKit callback,
 * prints the classic "frame= fps= q= ..." status line, and appends
 * key=value pairs to the -progress output, when each is enabled.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    int64_t total_size = of_filesize(output_files[0]);
    int vid;
    double bitrate;
    double speed;
    int64_t pts = AV_NOPTS_VALUE;
    int mins, secs, us;
    int64_t hours;
    const char *hours_sign;
    int ret;
    float t;

    // FFmpegKit field declarations
    int local_print_stats = 1;
    uint64_t frame_number = 0;
    float fps = 0;
    float q = 0;

    if (!print_stats && !is_last_report && !progress_avio)
        local_print_stats = 0;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        }
        /* NOTE(review): upstream's condition continues with
         * "(first_report && nb_output_dumped < nb_output_files))"; that line
         * appears to have been dropped in extraction, leaving this 'if'
         * syntactically incomplete — verify against upstream. */
        if (((cur_time - last_time) < stats_period && !first_report) ||
            return;
        last_time = cur_time;
    }

    // Wall-clock seconds elapsed since transcoding started.
    t = (cur_time-timer_start) / 1000000.0;

    vid = 0;
    if (local_print_stats) {
        av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
        av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    }
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        const AVCodecContext * const enc = ost->enc_ctx;
        q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1;

        // Additional video streams only contribute their quantizer value.
        if (local_print_stats && vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        // First video stream drives frame count, fps, and PSNR reporting.
        if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            frame_number = atomic_load(&ost->packets_written);
            fps = t > 1 ? frame_number / t : 0;
            if (local_print_stats) {
                av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
                           frame_number, fps < 9.95, fps, q);
                av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
                av_bprintf(&buf_script, "fps=%.2f\n", fps);
                av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                           ost->file_index, ost->index, q);
            }
            if (local_print_stats && is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                if (local_print_stats) {
                    for (j = 0; j < 32; j++)
                        av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
                }
            }

            if (local_print_stats && enc && (enc->flags & AV_CODEC_FLAG_PSNR) &&
                (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        // Cumulative error over the whole run.
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    // Chroma planes are subsampled: quarter the scale.
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (ost->last_mux_dts != AV_NOPTS_VALUE) {
            if (pts == AV_NOPTS_VALUE || ost->last_mux_dts > pts)
                pts = ost->last_mux_dts;
            if (copy_ts) {
                /* NOTE(review): upstream assigns "copy_ts_first_pts = pts;"
                 * and "pts -= copy_ts_first_pts;" as the bodies of these two
                 * 'if's; both lines appear to have been dropped in
                 * extraction — verify against upstream. */
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
            }
        }

        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    // Split the most advanced mux dts into sign + h:m:s.us components.
    us = FFABS64U(pts) % AV_TIME_BASE;
    secs = FFABS64U(pts) / AV_TIME_BASE % 60;
    mins = FFABS64U(pts) / AV_TIME_BASE / 60 % 60;
    hours = FFABS64U(pts) / AV_TIME_BASE / 3600;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts != AV_NOPTS_VALUE && pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = pts != AV_NOPTS_VALUE && t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    // FFmpegKit forward report
    if (pts == AV_NOPTS_VALUE) {
        forward_report(frame_number, fps, q, total_size, 0, 0, bitrate, speed);
    } else {
        forward_report(frame_number, fps, q, total_size, secs, us, bitrate, speed);
    }

    if (local_print_stats) {
        if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
        else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
        if (pts == AV_NOPTS_VALUE) {
            av_bprintf(&buf, "N/A ");
        } else {
            av_bprintf(&buf, "%s%02"PRId64":%02d:%02d.%02d ",
                       hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
        }

        if (bitrate < 0) {
            av_bprintf(&buf, "bitrate=N/A");
            av_bprintf(&buf_script, "bitrate=N/A\n");
        }else{
            av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
            av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
        }

        if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
        else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
        if (pts == AV_NOPTS_VALUE) {
            av_bprintf(&buf_script, "out_time_us=N/A\n");
            av_bprintf(&buf_script, "out_time_ms=N/A\n");
            av_bprintf(&buf_script, "out_time=N/A\n");
        } else {
            av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
            av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
            av_bprintf(&buf_script, "out_time=%s%02"PRId64":%02d:%02d.%06d\n",
                       hours_sign, hours, mins, secs, us);
        }

        /* NOTE(review): upstream guards the next line with
         * "if (nb_frames_dup || nb_frames_drop)"; the guard line appears to
         * have been dropped in extraction — verify against upstream. */
        av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
        av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
        av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);

        if (speed < 0) {
            av_bprintf(&buf, " speed=N/A");
            av_bprintf(&buf_script, "speed=N/A\n");
        } else {
            av_bprintf(&buf, " speed=%4.3gx", speed);
            av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
        }

        if (print_stats || is_last_report) {
            // '\r' keeps rewriting the same console line until the last report.
            const char end = is_last_report ? '\n' : '\r';
            if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
                av_log(NULL, AV_LOG_STDERR, "%s %c", buf.str, end);
            } else
                av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
        }
        av_bprint_finalize(&buf, NULL);

        if (progress_avio) {
            av_bprintf(&buf_script, "progress=%s\n",
                       is_last_report ? "end" : "continue");
            avio_write(progress_avio, buf_script.str,
                       FFMIN(buf_script.len, buf_script.size - 1));
            avio_flush(progress_avio);
            av_bprint_finalize(&buf_script, NULL);
            if (is_last_report) {
                if ((ret = avio_closep(&progress_avio)) < 0)
                    av_log(NULL, AV_LOG_ERROR,
                           "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
            }
        }

        first_report = 0;

        if (is_last_report)
            print_final_stats(total_size);
    }
}
1946
1947static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1948{
1949 int ret;
1950
1951 // We never got any input. Set a fake format, which will
1952 // come from libavformat.
1953 ifilter->format = par->format;
1954 ifilter->sample_rate = par->sample_rate;
1955 ifilter->width = par->width;
1956 ifilter->height = par->height;
1957 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1958 ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout);
1959 if (ret < 0)
1960 return ret;
1961
1962 return 0;
1963}
1964
/*
 * Signal EOF to every encoder (and its sync queue, if any) and drain all
 * pending packets so the muxers receive complete streams.
 */
static void flush_encoders(void)
{
    int ret;

    // First pass: push EOF into each stream's sync queue.
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        OutputFile *of = output_files[ost->file_index];
        if (ost->sq_idx_encode >= 0)
            sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
    }

    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        // Streamcopy outputs have no encoder to flush.
        if (!enc)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;

            av_log(ost, AV_LOG_WARNING,
                   "Finishing stream without any data written to it.\n");

            if (ost->filter && !fg->graph) {
                int x;
                // Fall back to demuxer codec parameters for inputs that were
                // never fed a frame.
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0 &&
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par) < 0) {
                        av_log(ost, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
                        exit_program(1);
                    }
                }

                /* NOTE(review): lines appear to have been dropped here in
                 * extraction — upstream checks
                 * ifilter_has_all_input_formats(fg) before this 'continue'
                 * and assigns "ret = configure_filtergraph(fg);" before the
                 * error check below; verify against upstream. */
                    continue;

                if (ret < 0) {
                    av_log(ost, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                of_output_packet(of, ost->pkt, ost, 1);
            }

            init_output_stream_wrapper(ost, NULL, 1);
        }

        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        // NULL frame == flush request; AVERROR_EOF is the expected outcome.
        ret = submit_encode_frame(of, ost, NULL);
        if (ret != AVERROR_EOF)
            exit_program(1);
    }
}
2024
2025/*
2026 * Check whether a packet from ist should be written into ost at this time
2027 */
2029{
2031
2032 if (ost->ist != ist)
2033 return 0;
2034
2035 if (ost->finished & MUXER_FINISHED)
2036 return 0;
2037
2038 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2039 return 0;
2040
2041 return 1;
2042}
2043
/**
 * Copy one demuxed packet from input stream @p ist to the streamcopied
 * output stream @p ost without decoding/encoding.  pkt == NULL means EOF
 * and only flushes the output bitstream filters.  Timestamps are rescaled
 * from the input stream time base into ost->mux_timebase and shifted by the
 * output start time.
 *
 * NOTE(review): the doc extraction dropped several original lines in this
 * function (the `OutputFile *of = ...` declaration, condition
 * continuations, `close_output_stream(ost);` calls, and the
 * `trigger_fix_sub_duration_heartbeat` call) — verify against the full
 * fftools source before modifying.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;

    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
    if (!pkt) {
        of_output_packet(of, opkt, ost, 1);
        return;
    }

    /* Before the copy has started, skip non-keyframe packets (condition
     * continuation elided by extraction; upstream also tests
     * !ost->copy_initial_nonkeyframes). */
    if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
        return;

    /* Honor -copyts-style start offsets: drop packets before ts_copy_start
     * unless copying prior-to-start packets was requested. */
    if (!ost->streamcopy_started && !ost->copy_prior_start) {
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < ost->ts_copy_start :
            pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* Past the output file's -t limit: stop (upstream closes the stream
     * here; that call was elided by extraction). */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        return;
    }

    /* Past the input file's -t limit. */
    if (f->recording_time != INT64_MAX) {
        start_time = 0;
        if (copy_ts) {
            start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
            start_time += start_at_zero ? 0 : f->start_time_effective;
        }
        if (ist->pts >= f->recording_time + start_time) {
            return;
        }
    }

    if (av_packet_ref(opkt, pkt) < 0)
        exit_program(1);

    opkt->time_base = ost->mux_timebase;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, opkt->time_base) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        /* No packet dts: fall back to the input stream's running dts. */
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, opkt->time_base);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* Audio: derive a smoothed dts from the sample count to avoid
         * rounding drift across packets. */
        int duration = av_get_audio_frame_duration2(ist->par, pkt->size);
        if(!duration)
            duration = ist->par->frame_size;
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                     (AVRational){1, ist->par->sample_rate}, duration,
                                     &ist->filter_in_rescale_delta_last, opkt->time_base);
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
    } else
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, opkt->time_base);
    opkt->dts -= ost_tb_start_time;

    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, opkt->time_base);

    {
        /* NOTE(review): extraction dropped the line defining `ret` here
         * (upstream: `int ret = trigger_fix_sub_duration_heartbeat(ost, pkt);`). */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Subtitle heartbeat logic failed in %s! (%s)\n",
                   __func__, av_err2str(ret));
            exit_program(1);
        }
    }

    of_output_packet(of, opkt, ost, 0);

    ost->streamcopy_started = 1;
}
2127
2128static void check_decode_result(InputStream *ist, int *got_output, int ret)
2129{
2130 if (*got_output || ret<0)
2131 decode_error_stat[ret<0] ++;
2132
2133 if (ret < 0 && exit_on_error)
2134 exit_program(1);
2135
2136 if (*got_output && ist) {
2137 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2138 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2139 "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2140 if (exit_on_error)
2141 exit_program(1);
2142 }
2143 }
2144}
2145
// Filters can be configured only if the formats of all inputs are known.
// Returns 1 when every audio/video input filter has a resolved format,
// 0 otherwise.
// NOTE(review): the doc extraction dropped this function's signature line
// (upstream: `static int ifilter_has_all_input_formats(FilterGraph *fg)`).
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        /* format < 0 means "not yet known"; only audio/video need one. */
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2157
/**
 * Feed one decoded frame into an input of a filtergraph, reconfiguring the
 * graph first if the frame's parameters (format, size, sample rate, channel
 * layout, hw frames context, display matrix) changed since the last frame.
 * If the graph cannot be configured yet, the frame is buffered in
 * ifilter->frame_queue.  Returns 0 on success or a negative AVERROR.
 *
 * NOTE(review): the doc extraction dropped two original lines here (the
 * `if (!ifilter_has_all_input_formats(fg)) {` guard and the
 * `ret = configure_filtergraph(fg);` call) — verify against the full source.
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
{
    FilterGraph *fg = ifilter->graph;
    AVFrameSideData *sd;
    int need_reinit, ret;
    int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;

    if (keep_reference)
        buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->par->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* -reinit_filter 0: keep the existing graph even if parameters drift. */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* A change in hw frames context always forces a reinit. */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    /* So does a changed/removed display-matrix side data (rotation). */
    if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
        if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
            need_reinit = 1;
    } else if (ifilter->displaymatrix)
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        /* NOTE(review): extraction dropped the guard that opens this inner
         * block (upstream: `if (!ifilter_has_all_input_formats(fg)) {`):
         * when not all inputs are known yet, clone and queue the frame. */
        AVFrame *tmp = av_frame_clone(frame);
        if (!tmp)
            return AVERROR(ENOMEM);

        ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
        if (ret < 0)
            av_frame_free(&tmp);

        return ret;
        }

        /* Drain already-filtered frames before tearing the graph down. */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        /* NOTE(review): extraction dropped the line assigning `ret` here
         * (upstream: `ret = configure_filtergraph(fg);`). */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2237
/**
 * Mark an input filter as finished and propagate EOF into the filtergraph.
 * @param pts  last timestamp, passed to av_buffersrc_close().
 * Returns 0 on success or a negative AVERROR (AVERROR_INVALIDDATA when the
 * graph was never configured and the input format cannot be determined).
 *
 * NOTE(review): the doc extraction dropped one line here (ffmpeg-kit adds
 * `if (!received_sigterm) {` before the av_buffersrc_close() call, which
 * the dangling `}` below closes) — verify against the full source.
 */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
{
    int ret = 0;

    ifilter->eof = 1;

    if (ifilter->filter) {

        /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
            ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        }
        if (ret < 0)
            return ret;
    } else {
        // the filtergraph was never configured
        if (ifilter->format < 0) {
            /* Fall back to the input stream's codec parameters. */
            ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
            if (ret < 0)
                return ret;
        }
        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
2267
2268// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2269// There is the following difference: if you got a frame, you must call
2270// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2271// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2272static int decode(InputStream *ist, AVCodecContext *avctx,
2273 AVFrame *frame, int *got_frame, AVPacket *pkt)
2274{
2275 int ret;
2276
2277 *got_frame = 0;
2278
2279 if (pkt) {
2280 ret = avcodec_send_packet(avctx, pkt);
2281 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2282 // decoded frames with avcodec_receive_frame() until done.
2283 if (ret < 0 && ret != AVERROR_EOF)
2284 return ret;
2285 }
2286
2287 ret = avcodec_receive_frame(avctx, frame);
2288 if (ret < 0 && ret != AVERROR(EAGAIN))
2289 return ret;
2290 if (ret >= 0) {
2291 if (ist->want_frame_data) {
2292 FrameData *fd;
2293
2294 av_assert0(!frame->opaque_ref);
2295 frame->opaque_ref = av_buffer_allocz(sizeof(*fd));
2296 if (!frame->opaque_ref) {
2297 av_frame_unref(frame);
2298 return AVERROR(ENOMEM);
2299 }
2300 fd = (FrameData*)frame->opaque_ref->data;
2301 fd->pts = frame->pts;
2302 fd->tb = avctx->pkt_timebase;
2303 fd->idx = avctx->frame_num - 1;
2304 }
2305
2306 *got_frame = 1;
2307 }
2308
2309 return 0;
2310}
2311
2313{
2314 int i, ret;
2315
2316 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2317 for (i = 0; i < ist->nb_filters; i++) {
2318 ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2319 if (ret == AVERROR_EOF)
2320 ret = 0; /* ignore */
2321 if (ret < 0) {
2322 av_log(NULL, AV_LOG_ERROR,
2323 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2324 break;
2325 }
2326 }
2327 return ret;
2328}
2329
/**
 * Decode one audio packet (or drain with pkt == NULL), fix up the frame's
 * timestamps, and advance the stream's next_pts/next_dts estimates by the
 * decoded sample count.  *got_output is set when a frame was produced;
 * *decode_failed flags a decoder (as opposed to filtering) error.
 *
 * NOTE(review): the doc extraction dropped three original lines here
 * (`check_decode_result(...)`, the `&ist->filter_in_rescale_delta_last,`
 * argument line, and `err = send_frame_to_filters(...)`) — verify against
 * the full source.
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame = ist->decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    update_benchmark(NULL);
    ret = decode(ist, avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    if (ret != AVERROR_EOF)

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     decoded_frame->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     decoded_frame->sample_rate;

    /* Pick the time base the frame's pts is expressed in, preferring the
     * decoder's own pts, then the packet pts, then the stream dts. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* A gap larger than the packet duration invalidates the sample-accurate
     * rescale state. */
    if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
        ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
    if (pkt)
        ist->prev_pkt_pts = pkt->pts;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, decoded_frame->sample_rate},
                                              decoded_frame->nb_samples,
                                              (AVRational){1, decoded_frame->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;

    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2386
/**
 * Decode one video packet (or drain on @p eof), compute a best-effort
 * timestamp for the frame, and update the stream's pts/dts estimates.
 * *duration_pts receives the frame duration in stream time base;
 * *got_output / *decode_failed as in decode_audio().
 *
 * NOTE(review): the doc extraction dropped three original lines here
 * (`check_decode_result(...)`, the `err = ist->hwaccel_retrieve_data(...)`
 * call, and `err = send_frame_to_filters(...)`) — verify against the full
 * source.
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame = ist->decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        pkt->dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* Buffer the dts so drained frames can still get a timestamp. */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist, ist->dec_ctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->par->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->par->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to https://streams.videolan.org/upload/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->par->video_delay);
    }

    if (ret != AVERROR_EOF)

    if (*got_output && ret >= 0) {
        /* Informational only: the decoder may legitimately emit frames with
         * parameters differing from its context. */
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        /* NOTE(review): extraction dropped the line assigning `err` here
         * (upstream: `err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);`). */
        if (err < 0)
            goto fail;
    }

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->duration;

    /* Forced CFR (-r on input): synthesize monotonically increasing pts. */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* During draining, reuse the buffered packet dts values (FIFO order). */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;


fail:
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2510
/**
 * Post-process one decoded subtitle: optionally fix its duration against
 * the previously buffered subtitle (-fix_sub_duration), feed it to
 * sub2video, and send it to every subtitle encoder it maps to.
 *
 * With fix_sub_duration enabled this function operates one subtitle behind:
 * the FFSWAP calls exchange the incoming subtitle with the buffered
 * previous one (ist->prev_sub), so the subtitle actually emitted below is
 * the *previous* one, now with a corrected end_display_time.  Takes
 * ownership of *subtitle unless it is queued for sub2video (free_sub = 0).
 */
static int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output)
{
    int ret = 0;
    int free_sub = 1;

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* Clip the previous subtitle's duration (ms) to the gap between
             * the two subtitles; end <= 0 means it should be dropped. */
            end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(NULL, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* Swap current state with the buffered previous subtitle; from here
         * on, *subtitle / *got_output / ret refer to the previous one. */
        FFSWAP(int, *got_output, ist->prev_sub.got_output);
        FFSWAP(int, ret, ist->prev_sub.ret);
        FFSWAP(AVSubtitle, *subtitle, ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, subtitle);
    } else if (ist->nb_filters) {
        /* Filters exist but sub2video is not set up yet: queue the subtitle
         * (ownership transfers to the queue, hence free_sub = 0). */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW);
        if (!ist->sub2video.sub_queue)
            report_and_exit(AVERROR(ENOMEM));

        ret = av_fifo_write(ist->sub2video.sub_queue, subtitle, 1);
        if (ret < 0)
            exit_program(1);
        free_sub = 0;
    }

    if (!subtitle->num_rects)
        goto out;

    /* Encode into every eligible subtitle output stream. */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (!check_output_constraints(ist, ost) || !ost->enc_ctx
            || ost->enc_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(subtitle);
    return ret;
}
2569
2570static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src)
2571{
2572 int ret = AVERROR_BUG;
2573 AVSubtitle tmp = {
2574 .format = src->format,
2575 .start_display_time = src->start_display_time,
2576 .end_display_time = src->end_display_time,
2577 .num_rects = 0,
2578 .rects = NULL,
2579 .pts = src->pts
2580 };
2581
2582 if (!src->num_rects)
2583 goto success;
2584
2585 if (!(tmp.rects = av_calloc(src->num_rects, sizeof(*tmp.rects))))
2586 return AVERROR(ENOMEM);
2587
2588 for (int i = 0; i < src->num_rects; i++) {
2589 AVSubtitleRect *src_rect = src->rects[i];
2590 AVSubtitleRect *dst_rect;
2591
2592 if (!(dst_rect = tmp.rects[i] = av_mallocz(sizeof(*tmp.rects[0])))) {
2593 ret = AVERROR(ENOMEM);
2594 goto cleanup;
2595 }
2596
2597 tmp.num_rects++;
2598
2599 dst_rect->type = src_rect->type;
2600 dst_rect->flags = src_rect->flags;
2601
2602 dst_rect->x = src_rect->x;
2603 dst_rect->y = src_rect->y;
2604 dst_rect->w = src_rect->w;
2605 dst_rect->h = src_rect->h;
2606 dst_rect->nb_colors = src_rect->nb_colors;
2607
2608 if (src_rect->text)
2609 if (!(dst_rect->text = av_strdup(src_rect->text))) {
2610 ret = AVERROR(ENOMEM);
2611 goto cleanup;
2612 }
2613
2614 if (src_rect->ass)
2615 if (!(dst_rect->ass = av_strdup(src_rect->ass))) {
2616 ret = AVERROR(ENOMEM);
2617 goto cleanup;
2618 }
2619
2620 for (int j = 0; j < 4; j++) {
2621 // SUBTITLE_BITMAP images are special in the sense that they
2622 // are like PAL8 images. first pointer to data, second to
2623 // palette. This makes the size calculation match this.
2624 size_t buf_size = src_rect->type == SUBTITLE_BITMAP && j == 1 ?
2625 AVPALETTE_SIZE :
2626 src_rect->h * src_rect->linesize[j];
2627
2628 if (!src_rect->data[j])
2629 continue;
2630
2631 if (!(dst_rect->data[j] = av_memdup(src_rect->data[j], buf_size))) {
2632 ret = AVERROR(ENOMEM);
2633 goto cleanup;
2634 }
2635 dst_rect->linesize[j] = src_rect->linesize[j];
2636 }
2637 }
2638
2639success:
2640 *dst = tmp;
2641
2642 return 0;
2643
2644cleanup:
2645 avsubtitle_free(&tmp);
2646
2647 return ret;
2648}
2649
2650static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts)
2651{
2652 int ret = AVERROR_BUG;
2653 int got_output = 1;
2654 AVSubtitle *prev_subtitle = &ist->prev_sub.subtitle;
2655 AVSubtitle subtitle;
2656
2657 if (!ist->fix_sub_duration || !prev_subtitle->num_rects ||
2658 signal_pts <= prev_subtitle->pts)
2659 return 0;
2660
2661 if ((ret = copy_av_subtitle(&subtitle, prev_subtitle)) < 0)
2662 return ret;
2663
2664 subtitle.pts = signal_pts;
2665
2666 return process_subtitle(ist, &subtitle, &got_output);
2667}
2668
/**
 * On a keyframe packet of @p ost, send a fix_sub_duration heartbeat to all
 * decoded subtitle streams feeding the same output file.
 *
 * NOTE(review): the doc extraction dropped the line declaring `of`
 * (upstream: `OutputFile *of = output_files[ost->file_index];`) — verify
 * against the full source.
 */
static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt)
{
    int64_t signal_pts = av_rescale_q(pkt->pts, pkt->time_base,
                                      AV_TIME_BASE_Q);

    if (!ost->fix_sub_duration_heartbeat || !(pkt->flags & AV_PKT_FLAG_KEY))
        // we are only interested in heartbeats on streams configured, and
        // only on random access points.
        return 0;

    for (int i = 0; i < of->nb_streams; i++) {
        OutputStream *iter_ost = of->streams[i];
        InputStream *ist = iter_ost->ist;
        int ret = AVERROR_BUG;

        if (iter_ost == ost || !ist || !ist->decoding_needed ||
            ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)
            // We wish to skip the stream that causes the heartbeat,
            // output streams without an input stream, streams not decoded
            // (as fix_sub_duration is only done for decoded subtitles) as
            // well as non-subtitle streams.
            continue;

        if ((ret = fix_sub_duration_heartbeat(ist, signal_pts)) < 0)
            return ret;
    }

    return 0;
}
2699
/**
 * Decode one subtitle packet and hand the result to process_subtitle().
 * An empty packet flushes sub2video.  *decode_failed is set on decoder
 * error or when no subtitle was produced.
 *
 * NOTE(review): the doc extraction dropped two original lines here (the
 * remaining avcodec_decode_subtitle2() arguments `&subtitle, got_output,
 * pkt);` and the `check_decode_result(NULL, got_output, ret);` call) —
 * verify against the full source.
 */
static int transcode_subtitles(InputStream *ist, const AVPacket *pkt,
                               int *got_output, int *decode_failed)
{
    AVSubtitle subtitle;
    int ret = avcodec_decode_subtitle2(ist->dec_ctx,


    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    ist->frames_decoded++;

    return process_subtitle(ist, &subtitle, got_output);
}
2720
2722{
2723 int i, ret;
2724 /* TODO keep pts also in stream time base to avoid converting back */
2725 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2726 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2727
2728 for (i = 0; i < ist->nb_filters; i++) {
2729 ret = ifilter_send_eof(ist->filters[i], pts);
2730 if (ret < 0)
2731 return ret;
2732 }
2733 return 0;
2734}
2735
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatch: maintains the input stream's dts/pts
 * estimates, decodes (audio/video/subtitle) when decoding is needed —
 * looping to drain multiple frames on EOF — sends filter EOF when the
 * decoder is exhausted, and performs streamcopy for non-decoded outputs.
 * Returns 0 once EOF has been fully processed, non-zero otherwise.
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    const AVCodecParameters *par = ist->par;
    int ret = 0;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket *avpkt = ist->pkt;

    /* Establish initial timestamps on the very first packet. */
    if (!ist->saw_first_ts) {
        ist->first_dts =
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->first_dts =
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    /* Keep a private reference; the decode loop may be re-entered. */
    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (par->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            /* repeating: pass NULL to drain further frames from the decoder */
            ret = decode_audio    (ist, repeating ? NULL : avpkt, &got_output,
                                   &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    /* No packet duration: derive one from the decoder's
                     * frame rate and repeat_pict. */
                    int ticks = ist->last_pkt_repeat_pict >= 0 ?
                                ist->last_pkt_repeat_pict + 1  :
                                ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        /* Advance next_dts by the packet's duration (sample- or
         * framerate-derived when the container doesn't say). */
        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (par->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
                                  par->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks = ist->last_pkt_repeat_pict >= 0 ?
                            ist->last_pkt_repeat_pict + 1  :
                            ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    } else if (!ist->decoding_needed)
        eof_reached = 1;

    /* Finally, copy the packet to every eligible streamcopy output. */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (!check_output_constraints(ist, ost) || ost->enc_ctx ||
            (!pkt && no_eof))
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2928
/**
 * AVCodecContext.get_format callback: pick the first pixel format from
 * @p pix_fmts that either is a software format or a hwaccel format the
 * requested/auto-detected hardware device can be initialized for.
 *
 * NOTE(review): the doc extraction dropped the line assigning `ret` inside
 * the device branch (upstream performs the hwaccel device setup there) —
 * verify against the full source.
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* First software format in the list: accept it. */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* Find a decoder hw config matching this pixel format that
             * supports the hw-device-ctx method. */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config && config->device_type == ist->hwaccel_device_type) {
            if (ret < 0) {
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    /* Explicitly requested hwaccel failed: fatal. */
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                /* AUTO mode: try the next candidate format. */
                continue;
            }

            ist->hwaccel_pix_fmt = *p;
            break;
        }
    }

    return *p;
}
2977
/**
 * Open the decoder for an input stream (if decoding is needed): set the
 * get_format callback, packet time base and threading options, then call
 * avcodec_open2().  On failure a human-readable message is written into
 * @p error (at most @p error_len bytes) and a negative AVERROR returned.
 *
 * NOTE(review): the doc extraction dropped several lines here (the
 * DVB-subtitle condition continuation, an `if` guarding the warning, the
 * hw-device setup call assigning `ret`, and the trailing
 * `assert_avoptions(...)`) — verify against the full source.
 */
static int init_input_stream(InputStream *ist, char *error, int error_len)
{
    int ret;

    if (ist->decoding_needed) {
        const AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;

        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        /* NOTE(review): extraction dropped the line assigning `ret` here
         * (upstream: hardware device setup for the decoder). */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
3036
3038{
3040 InputStream *ist = ost->ist;
3041 InputFile *ifile = input_files[ist->file_index];
3042 AVCodecParameters *par = ost->st->codecpar;
3043 AVCodecContext *codec_ctx;
3044 AVRational sar;
3045 int i, ret;
3046 uint32_t codec_tag = par->codec_tag;
3047
3048 av_assert0(ist && !ost->filter);
3049
3050 codec_ctx = avcodec_alloc_context3(NULL);
3051 if (!codec_ctx)
3052 return AVERROR(ENOMEM);
3053
3054 ret = avcodec_parameters_to_context(codec_ctx, ist->par);
3055 if (ret >= 0)
3056 ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts);
3057 if (ret < 0) {
3058 av_log(ost, AV_LOG_FATAL,
3059 "Error setting up codec context options.\n");
3060 avcodec_free_context(&codec_ctx);
3061 return ret;
3062 }
3063
3064 ret = avcodec_parameters_from_context(par, codec_ctx);
3065 avcodec_free_context(&codec_ctx);
3066 if (ret < 0) {
3067 av_log(ost, AV_LOG_FATAL,
3068 "Error getting reference codec parameters.\n");
3069 return ret;
3070 }
3071
3072 if (!codec_tag) {
3073 unsigned int codec_tag_tmp;
3074 if (!of->format->codec_tag ||
3075 av_codec_get_id (of->format->codec_tag, par->codec_tag) == par->codec_id ||
3076 !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp))
3077 codec_tag = par->codec_tag;
3078 }
3079
3080 par->codec_tag = codec_tag;
3081
3082 if (!ost->frame_rate.num)
3083 ost->frame_rate = ist->framerate;
3084
3085 if (ost->frame_rate.num)
3086 ost->st->avg_frame_rate = ost->frame_rate;
3087 else
3088 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3089
3090 ret = avformat_transfer_internal_stream_timing_info(of->format, ost->st, ist->st, copy_tb);
3091 if (ret < 0)
3092 return ret;
3093
3094 // copy timebase while removing common factors
3095 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3096 if (ost->frame_rate.num)
3097 ost->st->time_base = av_inv_q(ost->frame_rate);
3098 else
3099 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3100 }
3101
3102 // copy estimated duration as a hint to the muxer
3103 if (ost->st->duration <= 0 && ist->st->duration > 0)
3104 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3105
3106 if (!ost->copy_prior_start) {
3107 ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ?
3108 0 : of->start_time;
3109 if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) {
3110 ost->ts_copy_start = FFMAX(ost->ts_copy_start,
3111 ifile->start_time + ifile->ts_offset);
3112 }
3113 }
3114
3115 if (ist->st->nb_side_data) {
3116 for (i = 0; i < ist->st->nb_side_data; i++) {
3117 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3118 uint8_t *dst_data;
3119
3120 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3121 if (!dst_data)
3122 return AVERROR(ENOMEM);
3123 memcpy(dst_data, sd_src->data, sd_src->size);
3124 }
3125 }
3126
3127#if FFMPEG_ROTATION_METADATA
3128 if (ost->rotate_overridden) {
3129 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3130 sizeof(int32_t) * 9);
3131 if (sd)
3132 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3133 }
3134#endif
3135
3136 switch (par->codec_type) {
3137 case AVMEDIA_TYPE_AUDIO:
3138 if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) &&
3139 par->codec_id == AV_CODEC_ID_MP3)
3140 par->block_align = 0;
3141 if (par->codec_id == AV_CODEC_ID_AC3)
3142 par->block_align = 0;
3143 break;
3144 case AVMEDIA_TYPE_VIDEO:
3145 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3146 sar =
3147 av_mul_q(ost->frame_aspect_ratio,
3148 (AVRational){ par->height, par->width });
3149 av_log(ost, AV_LOG_WARNING, "Overriding aspect ratio "
3150 "with stream copy may produce invalid files\n");
3151 }
3152 else if (ist->st->sample_aspect_ratio.num)
3153 sar = ist->st->sample_aspect_ratio;
3154 else
3155 sar = par->sample_aspect_ratio;
3156 ost->st->sample_aspect_ratio = par->sample_aspect_ratio = sar;
3157 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3158 ost->st->r_frame_rate = ist->st->r_frame_rate;
3159 break;
3160 }
3161
3162 ost->mux_timebase = ist->st->time_base;
3163
3164 return 0;
3165}
3166
3168{
3169 const char *cname = ost->enc_ctx->codec->name;
3170 uint8_t *encoder_string;
3171 int encoder_string_len;
3172
3173 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3174 return;
3175
3176 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
3177 encoder_string = av_mallocz(encoder_string_len);
3178 if (!encoder_string)
3179 report_and_exit(AVERROR(ENOMEM));
3180
3181 if (!of->bitexact && !ost->bitexact)
3182 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3183 else
3184 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3185 av_strlcat(encoder_string, cname, encoder_string_len);
3186 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3187 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3188}
3189
3190static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3191{
3192 InputStream *ist = ost->ist;
3193 AVCodecContext *enc_ctx = ost->enc_ctx;
3194
3195 if (ost->enc_timebase.num > 0) {
3196 enc_ctx->time_base = ost->enc_timebase;
3197 return;
3198 }
3199
3200 if (ost->enc_timebase.num < 0) {
3201 if (ist) {
3202 enc_ctx->time_base = ist->st->time_base;
3203 return;
3204 }
3205
3206 av_log(ost, AV_LOG_WARNING,
3207 "Input stream data not available, using default time base\n");
3208 }
3209
3210 enc_ctx->time_base = default_time_base;
3211}
3212
/*
 * Configure the encoder context of an output stream fed from a
 * filtergraph: frame rate, time base, sample/pixel format, dimensions,
 * color properties and field order.
 *
 * NOTE(review): two source lines are not visible in this rendering
 * (the declaration of "of" used below and, presumably, a
 * set_encoder_id() call) — verify against the complete file.
 *
 * @param ost   output stream whose enc_ctx is being initialized
 * @param frame first filtered frame, may be NULL; used to propagate
 *              color properties and detect interlacing
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    InputStream *ist = ost->ist;
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    int ret;


    if (ist) {
        dec_ctx = ist->dec_ctx;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* frame rate priority: stream option -> filtergraph -> 25 fps */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (!ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(ost, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps. Use the -r option "
                   "if you want a different framerate.\n");
        }

        /* clamp to -fpsmax (also applied when frame_rate is invalid) */
        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        /* snap to the nearest encoder-supported rate unless -force_fps */
        if (enc_ctx->codec->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, enc_ctx->codec->supported_framerates);
            ost->frame_rate = enc_ctx->codec->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* audio parameters come from the buffersink output */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
        if (ret < 0)
            return ret;

        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            /* metadata-only graph: raw sample depth survives filtering,
             * capped by the bytes-per-sample of the encoder format */
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        /* warn on very fine time bases when the muxer wants CFR output */
        if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
           && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
               (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
            av_log(ost, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                        "Please consider specifying a lower framerate, a different muxer or "
                                        "setting vsync/fps_mode to vfr\n");
        }

        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);

        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            /* metadata-only graph: cap by the pixel component depth */
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* propagate color properties from the first filtered frame */
        if (frame) {
            enc_ctx->color_range = frame->color_range;
            enc_ctx->color_primaries = frame->color_primaries;
            enc_ctx->color_trc = frame->color_trc;
            enc_ctx->colorspace = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                /* MJPEG stores fields per-frame (TT/BB); other codecs use
                 * the interleaved orders (TB/BT) */
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            enc_ctx->width = ost->ist->par->width;
            enc_ctx->height = ost->ist->par->height;
        }
        if (dec_ctx && dec_ctx->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec_ctx->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec_ctx->subtitle_header,
                   dec_ctx->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec_ctx->subtitle_header_size;
        }
        /* text<->bitmap subtitle conversion is not supported */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE &&
            enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(ist->dec->id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                av_log(ost, AV_LOG_ERROR,
                       "Subtitle encoding currently only possible from text to text "
                       "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    if (ost->bitexact)
        enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT;

    /* keep the sync queue's time base in step with the encoder */
    if (ost->sq_idx_encode >= 0)
        sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base);

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3387
/*
 * Finish setting up one output stream: on the encoding path open the
 * encoder and propagate its parameters/side data to the muxer stream;
 * otherwise fall through to the streamcopy path.
 *
 * NOTE(review): several source lines are not visible in this rendering
 * (the call whose result feeds the "Device setup failed" check, an
 * options-consumption check after avcodec_open2, and the calls on the
 * streamcopy / muxer-init paths near the end) — verify against the
 * complete file.
 *
 * @param ost       stream to initialize
 * @param frame     first frame for encoder setup, may be NULL
 * @param error     buffer receiving a human-readable error message
 * @param error_len size of the error buffer
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_output_stream(OutputStream *ost, AVFrame *frame,
                              char *error, int error_len)
{
    int ret = 0;

    if (ost->enc_ctx) {
        const AVCodec *codec = ost->enc_ctx->codec;
        InputStream *ist = ost->ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);

        /* let the encoder pass opaque frame data through to packets */
        if (codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE) {
            ret = av_dict_set(&ost->encoder_opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
            if (ret < 0)
                return ret;
        }

        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size encoders need the buffersink to emit exactly
         * frame_size samples per frame */
        if (codec->type == AVMEDIA_TYPE_AUDIO &&
            !(codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low."
                   " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(ost, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* copy side data the encoder produced to the muxer stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* with autorotate the rotation is applied by a filter,
                     * so neutralize the copied display matrix */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((int32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->ist) {
        if (ret < 0)
            return ret;
    }

    if (ret < 0)
        return ret;

    return ret;
}
3499
/*
 * One-time setup before the main transcode loop: start framerate
 * emulation clocks, open all input decoders, pre-initialize output
 * streams that do not wait for a first frame, discard unused programs,
 * and log the stream mapping.
 *
 * @return 0 on success, a negative error code otherwise (the message
 *         collected in "error" is logged before returning)
 */
static int transcode_init(void)
{
    int ret = 0;
    char error[1024] = {0};

    /* init framerate emulation */
    for (int i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->readrate || ifile->rate_emu)
            for (int j = 0; j < ifile->nb_streams; j++)
                ifile->streams[j]->start = av_gettime_relative();
    }

    /* init input streams */
    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
        if ((ret = init_input_stream(ist, error, sizeof(error))) < 0)
            goto dump_format;

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (ost->enc_ctx &&
            (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
             ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(ost, NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (int i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (int j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep a program if at least one of its streams is used */
            for (int k = 0; k < p->nb_stream_indexes; k++)
                if (!ifile->streams[p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
        for (int j = 0; j < ist->nb_filters; j++) {
            /* inputs feeding complex filtergraphs */
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc_ctx->codec->name);
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               ost->ist->file_index,
               ost->ist->st->index,
               ost->file_index,
               ost->index);
        if (ost->enc_ctx) {
            const AVCodec *in_codec = ost->ist->dec;
            const AVCodec *out_codec = ost->enc_ctx->codec;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            /* report "native" when the implementation name matches the
             * generic codec descriptor name */
            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        } else
            av_log(NULL, AV_LOG_INFO, " (copy)");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
3638
3639/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3640static int need_output(void)
3641{
3642 for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3643 if (ost->finished)
3644 continue;
3645
3646 return 1;
3647 }
3648
3649 return 0;
3650}
3651
{
    /* NOTE(review): signature/doc lines are above this chunk. Selects
     * the output stream with the smallest timestamp so muxed output
     * stays interleaved; returns NULL when the best candidate is
     * currently unavailable. */
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        int64_t opts;

        if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) {
            opts = ost->last_filter_pts;
        } else {
            /* streams with no muxed dts yet sort first (INT64_MIN) */
            opts = ost->last_mux_dts == AV_NOPTS_VALUE ?
                   INT64_MIN : ost->last_mux_dts;
            if (ost->last_mux_dts == AV_NOPTS_VALUE)
                av_log(ost, AV_LOG_DEBUG,
                       "cur_dts is invalid [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                       ost->initialized, ost->inputs_done, ost->finished);
        }

        /* an uninitialized stream must be served first so it can init */
        if (!ost->initialized && !ost->inputs_done)
            return ost->unavailable ? NULL : ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3686
/* Enable (on != 0) or disable terminal echo on stdin; no-op without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return;

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &attr);
#endif
}
3698
/*
 * Poll the controlling terminal for interactive commands
 * (q, +, -, s, c/C, d/D, ?) at most once per 100 ms.
 *
 * NOTE(review): the condition guarding the first "return AVERROR_EXIT"
 * below is on a line not visible in this rendering (presumably the
 * received-signal check) — verify against the complete file.
 *
 * @param cur_time current time in microseconds
 * @return AVERROR_EXIT when the user requested quit, 0 otherwise
 */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - keyboard_last_time >= 100000){
        key = read_key();
        keyboard_last_time = cur_time;
    }else
        key = -1;
    if (key == 'q') {
        av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
        return AVERROR_EXIT;
    }
    /* '+'/'-' adjust log verbosity; 's' toggles the QP histogram */
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'c' || key == 'C'){
        /* read one command line of the form: <target> <time> <cmd> [arg]
         * and dispatch it to every active filtergraph */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        av_log(NULL, AV_LOG_STDERR, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        av_log(NULL, AV_LOG_STDERR, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    /* time < 0: send immediately; otherwise queue */
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        av_log(NULL, AV_LOG_STDERR, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        av_log(NULL, AV_LOG_STDERR, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            av_log(NULL, AV_LOG_STDERR, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D' cycles the first decoder's debug flags upward */
            InputStream *ist = ist_iter(NULL);

            if (ist)
                debug = ist->dec_ctx->debug << 1;

            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd' prompts for an explicit numeric debug value */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            av_log(NULL, AV_LOG_STDERR, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                av_log(NULL, AV_LOG_STDERR,"error parsing debug value\n");
        }
        /* apply the new flags to every decoder and encoder */
        for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
            ist->dec_ctx->debug = debug;
        for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
            if (ost->enc_ctx)
                ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        av_log(NULL, AV_LOG_STDERR,"debug=%d\n", debug);
    }
    if (key == '?'){
        av_log(NULL, AV_LOG_STDERR, "key function\n"
               "? show this help\n"
               "+ increase verbosity\n"
               "- decrease verbosity\n"
               "c Send command to first matching filter supporting it\n"
               "C Send/Queue command to all matching filters\n"
               "D cycle through available debug modes\n"
               "h dump packets/hex press to cycle through the 3 states\n"
               "q quit\n"
               "s Show QP histogram\n"
        );
    }
    return 0;
}
3806
3807static int got_eagain(void)
3808{
3809 for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
3810 if (ost->unavailable)
3811 return 1;
3812 return 0;
3813}
3814
3815static void reset_eagain(void)
3816{
3817 int i;
3818 for (i = 0; i < nb_input_files; i++)
3819 input_files[i]->eagain = 0;
3820 for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
3821 ost->unavailable = 0;
3822}
3823
/*
 * Flush the decoders of one input file after a loop point
 * (-stream_loop): drain each stream's decoder, report decoded audio
 * duration back to the demuxer thread, and reset decoder state.
 *
 * NOTE(review): the declaration of "dur" sits on a line not visible in
 * this rendering — verify against the complete file.
 */
static void decode_flush(InputFile *ifile)
{
    for (int i = 0; i < ifile->nb_streams; i++) {
        InputStream *ist = ifile->streams[i];
        int ret;

        if (!ist->processing_needed)
            continue;

        /* drain: feed NULL packets until the decoder is empty */
        do {
            ret = process_input_packet(ist, NULL, 1);
        } while (ret > 0);

        if (ist->decoding_needed) {
            /* report last frame duration to the demuxer thread */
            if (ist->par->codec_type == AVMEDIA_TYPE_AUDIO) {

                dur.stream_idx = i;
                dur.duration = av_rescale_q(ist->nb_samples,
                                            (AVRational){ 1, ist->dec_ctx->sample_rate},
                                            ist->st->time_base);

                av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0);
            }

            avcodec_flush_buffers(ist->dec_ctx);
        }
    }
}
3854
                                    AVPacket *pkt)
{
    /* NOTE(review): the first line of this function's signature is
     * above this chunk. Detects DTS/PTS discontinuities against the
     * stream's predicted next_dts and updates the file-wide
     * ts_offset_discont / last_ts accordingly. */
    const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
    int disable_discontinuity_correction = copy_ts;
    int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q,
                                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    /* with -copyts still correct when the jump looks like a pts wrap */
    if (copy_ts && ist->next_dts != AV_NOPTS_VALUE &&
        fmt_is_discont && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (fmt_is_discont) {
            /* discontinuous formats (e.g. MPEG-TS): shift timestamps by
             * the detected delta when it exceeds dts_delta_threshold */
            if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset_discont -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->par->codec_type),
                       delta, ifile->ts_offset_discont);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* continuous formats: drop wildly out-of-range timestamps */
            if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
               fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) {
        /* first packet of a stream: compare against file-wide last_ts */
        int64_t delta = pkt_dts - ifile->last_ts;
        if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
            ifile->ts_offset_discont -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset_discont);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
}
3918
                                     AVPacket *pkt)
{
    /* NOTE(review): the first line of this function's signature is
     * above this chunk. Applies the accumulated discontinuity offset to
     * every packet, then runs discontinuity detection for audio/video. */
    int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q,
                                  ist->st->time_base);

    // apply previously-detected timestamp-discontinuity offset
    // (to all streams, not just audio/video)
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += offset;
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += offset;

    // detect timestamp discontinuities for audio/video
    if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->par->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt->dts != AV_NOPTS_VALUE)
        ts_discontinuity_detect(ifile, ist, pkt);
}
3938
3939/*
3940 * Return
3941 * - 0 -- one packet was read and processed
3942 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3943 * this function should be called again
3944 * - AVERROR_EOF -- this function should not be called again
3945 */
static int process_input(int file_index)
{
    /* See the return-value contract in the comment above this function. */
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket *pkt;
    int ret, i;

    is = ifile->ctx;
    ret = ifile_get_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret == 1) {
        /* the input file is looped: flush the decoders */
        decode_flush(ifile);
        return AVERROR(EAGAIN);
    }
    if (ret < 0) {
        /* EOF or read error: drain decoders and finish affected outputs */
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = ifile->streams[i];
            if (ist->processing_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
                if (ost->ist == ist &&
                    (!ost->enc_ctx || ost->enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
                    OutputFile *of = output_files[ost->file_index];
                    of_output_packet(of, ost->pkt, ost, 1);
                }
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    ist = ifile->streams[pkt->stream_index];

    /* per-stream statistics */
    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* don't overwrite side data already on the packet */
            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                report_and_exit(AVERROR(ENOMEM));

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    // detect and try to correct for timestamp discontinuities
    ts_discontinuity_process(ifile, ist, pkt);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               ifile->index, pkt->stream_index,
               av_get_media_type_string(ist->par->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* keep sub2video streams in sync with this packet's pts */
    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
    av_packet_free(&pkt);

    return 0;
}
4048
4056static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4057{
4058 int i, ret;
4059 int nb_requests, nb_requests_max = 0;
4060 InputFilter *ifilter;
4061 InputStream *ist;
4062
4063 *best_ist = NULL;
4064 ret = avfilter_graph_request_oldest(graph->graph);
4065 if (ret >= 0)
4066 return reap_filters(0);
4067
4068 if (ret == AVERROR_EOF) {
4069 ret = reap_filters(1);
4070 for (i = 0; i < graph->nb_outputs; i++)
4071 close_output_stream(graph->outputs[i]->ost);
4072 return ret;
4073 }
4074 if (ret != AVERROR(EAGAIN))
4075 return ret;
4076
4077 for (i = 0; i < graph->nb_inputs; i++) {
4078 ifilter = graph->inputs[i];
4079 ist = ifilter->ist;
4080 if (input_files[ist->file_index]->eagain ||
4082 continue;
4083 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4084 if (nb_requests > nb_requests_max) {
4085 nb_requests_max = nb_requests;
4086 *best_ist = ist;
4087 }
4088 }
4089
4090 if (!*best_ist)
4091 for (i = 0; i < graph->nb_outputs; i++)
4092 graph->outputs[i]->ost->unavailable = 1;
4093
4094 return 0;
4095}
4096
4102static int transcode_step(void)
4103{
4104 OutputStream *ost;
4105 InputStream *ist = NULL;
4106 int ret;
4107
4108 ost = choose_output();
4109 if (!ost) {
4110 if (got_eagain()) {
4111 reset_eagain();
4112 av_usleep(10000);
4113 return 0;
4114 }
4115 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4116 return AVERROR_EOF;
4117 }
4118
4119 if (ost->filter && !ost->filter->graph->graph) {
4121 ret = configure_filtergraph(ost->filter->graph);
4122 if (ret < 0) {
4123 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4124 return ret;
4125 }
4126 }
4127 }
4128
4129 if (ost->filter && ost->filter->graph->graph) {
4130 /*
4131 * Similar case to the early audio initialization in reap_filters.
4132 * Audio is special in ffmpeg.c currently as we depend on lavfi's
4133 * audio frame buffering/creation to get the output audio frame size
4134 * in samples correct. The audio frame size for the filter chain is
4135 * configured during the output stream initialization.
4136 *
4137 * Apparently avfilter_graph_request_oldest (called in
4138 * transcode_from_filter just down the line) peeks. Peeking already
4139 * puts one frame "ready to be given out", which means that any
4140 * update in filter buffer sink configuration afterwards will not
4141 * help us. And yes, even if it would be utilized,
4142 * av_buffersink_get_samples is affected, as it internally utilizes
4143 * the same early exit for peeked frames.
4144 *
4145 * In other words, if avfilter_graph_request_oldest would not make
4146 * further filter chain configuration or usage of
4147 * av_buffersink_get_samples useless (by just causing the return
4148 * of the peeked AVFrame as-is), we could get rid of this additional
4149 * early encoder initialization.
4150 */
4151 if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4152 init_output_stream_wrapper(ost, NULL, 1);
4153
4154 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4155 return ret;
4156 if (!ist)
4157 return 0;
4158 } else if (ost->filter) {
4159 int i;
4160 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4161 InputFilter *ifilter = ost->filter->graph->inputs[i];
4162 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4163 ist = ifilter->ist;
4164 break;
4165 }
4166 }
4167 if (!ist) {
4168 ost->inputs_done = 1;
4169 return 0;
4170 }
4171 } else {
4172 ist = ost->ist;
4173 av_assert0(ist);
4174 }
4175
4176 ret = process_input(ist->file_index);
4177 if (ret == AVERROR(EAGAIN)) {
4178 if (input_files[ist->file_index]->eagain)
4179 ost->unavailable = 1;
4180 return 0;
4181 }
4182
4183 if (ret < 0)
4184 return ret == AVERROR_EOF ? 0 : ret;
4185
4186 return reap_filters(0);
4187}
4188
4189/*
4190 * The following code is the main loop of the file converter
4191 */
4192static int transcode(void)
4193{
4194 int ret, i;
4195 InputStream *ist;
4196 int64_t timer_start;
4197 int64_t total_packets_written = 0;
4198
4199 ret = transcode_init();
4200 if (ret < 0)
4201 goto fail;
4202
4203 if (stdin_interaction) {
4204 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4205 }
4206
4207 timer_start = av_gettime_relative();
4208
4210 int64_t cur_time= av_gettime_relative();
4211
4212 /* if 'q' pressed, exits */
4214 if (check_keyboard_interaction(cur_time) < 0)
4215 break;
4216
4217 /* check if there's any stream where output is still needed */
4218 if (!need_output()) {
4219 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4220 break;
4221 }
4222
4223 ret = transcode_step();
4224 if (ret < 0 && ret != AVERROR_EOF) {
4225 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4226 break;
4227 }
4228
4229 /* dump report by using the output first video and audio streams */
4230 print_report(0, timer_start, cur_time);
4231 }
4232
4233 /* at the end of stream, we must flush the decoder buffers */
4234 for (ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
4235 if (!input_files[ist->file_index]->eof_reached) {
4236 process_input_packet(ist, NULL, 0);
4237 }
4238 }
4240
4241 term_exit();
4242
4243 /* write the trailer if needed */
4244 for (i = 0; i < nb_output_files; i++) {
4246 if (ret < 0 && exit_on_error)
4247 exit_program(1);
4248 }
4249
4250 /* dump report by using the first video and audio streams */
4251 print_report(1, timer_start, av_gettime_relative());
4252
4253 /* close each encoder */
4254 for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
4255 uint64_t packets_written;
4256 packets_written = atomic_load(&ost->packets_written);
4257 total_packets_written += packets_written;
4258 if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4259 av_log(ost, AV_LOG_FATAL, "Empty output\n");
4260 exit_program(1);
4261 }
4262 }
4263
4264 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4265 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4266 exit_program(1);
4267 }
4268
4270
4271 /* finished ! */
4272 ret = 0;
4273
4274 fail:
4275 return ret;
4276}
4277
4279{
4280 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4281#if HAVE_GETRUSAGE
4282 struct rusage rusage;
4283
4284 getrusage(RUSAGE_SELF, &rusage);
4285 time_stamps.user_usec =
4286 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4287 time_stamps.sys_usec =
4288 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4289#elif HAVE_GETPROCESSTIMES
4290 HANDLE proc;
4291 FILETIME c, e, k, u;
4292 proc = GetCurrentProcess();
4293 GetProcessTimes(proc, &c, &e, &k, &u);
4294 time_stamps.user_usec =
4295 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4296 time_stamps.sys_usec =
4297 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4298#else
4299 time_stamps.user_usec = time_stamps.sys_usec = 0;
4300#endif
4301 return time_stamps;
4302}
4303
/**
 * Query the peak memory usage of the current process.
 *
 * @return peak usage in bytes, or 0 when no platform query API is available
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage_info;
    getrusage(RUSAGE_SELF, &usage_info);
    /* ru_maxrss is reported in kilobytes; convert to bytes */
    return (int64_t)usage_info.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();
    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    /* no supported query API on this platform */
    return 0;
#endif
}
4321
4323 received_sigterm = 0;
4325 transcode_init_done = ATOMIC_VAR_INIT(0);
4326 ffmpeg_exited = 0;
4328 copy_ts_first_pts = AV_NOPTS_VALUE;
4329 longjmp_value = 0;
4330 want_sdp = 1;
4331 enc_stats_files = NULL;
4333
4334 vstats_file = NULL;
4335
4336 nb_frames_dup = 0;
4337 dup_warning = 1000;
4338 nb_frames_drop = 0;
4339 decode_error_stat[0] = 0;
4340 decode_error_stat[1] = 0;
4341 nb_output_dumped = 0;
4342
4343 progress_avio = NULL;
4344
4345 input_files = NULL;
4346 nb_input_files = 0;
4347
4348 output_files = NULL;
4349 nb_output_files = 0;
4350
4351 filtergraphs = NULL;
4352 nb_filtergraphs = 0;
4353
4354 last_time = -1;
4356 first_report = 1;
4357 for(int i = 0; i < FF_ARRAY_ELEMS(qp_histogram); i++) {
4358 qp_histogram[i] = 0;
4359 }
4360}
4361
/* Registers the statistics callback stored in report_callback; pass NULL to
 * clear it. Per the file header notes, the time argument was changed to
 * double by ffmpeg-kit. NOTE(review): parameter meanings are presumably
 * (frame, fps, quality, size, time, bitrate, speed) — confirm against
 * forward_report(), which is outside this view. */
void set_report_callback(void (*callback)(int, float, float, int64_t, double, double, double))
{
    report_callback = callback;
}
4366
4367void cancel_operation(long id)
4368{
4369 if (id == 0) {
4370 sigterm_handler(SIGINT);
4371 } else {
4372 cancelSession(id);
4373 }
4374}
4375
/* Thread-local pointer to the option table, so concurrent ffmpeg-kit sessions
 * each see their own table; presumably assigned inside ffmpeg_execute() —
 * TODO(review): confirm in the full file. */
__thread OptionDef *ffmpeg_options = NULL;
4377
4378int ffmpeg_execute(int argc, char **argv)
4379{
4380 char _program_name[] = "ffmpeg";
4381 program_name = (char*)&_program_name;
4382 program_birth_year = 2000;
4383
4384 #define OFFSET(x) offsetof(OptionsContext, x)
4385 OptionDef options[] = {
4386
4387 /* main options */
4388 { "L", OPT_EXIT, { .func_arg = show_license }, "show license" },
4389 { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4390 { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4391 { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4392 { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
4393 { "version", OPT_EXIT, { .func_arg = show_version }, "show version" },
4394 { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" },
4395 { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" },
4396 { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" },
4397 { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" },
4398 { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" },
4399 { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" },
4400 { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" },
4401 { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" },
4402 { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" },
4403 { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" },
4404 { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" },
4405 { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" },
4406 { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" },
4407 { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" },
4408 { "dispositions", OPT_EXIT, { .func_arg = show_dispositions}, "show available stream dispositions" },
4409 { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" },
4410 { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
4411 { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
4412 { "report", 0, { .func_arg = opt_report }, "generate a report" },
4413 { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" },
4414 { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
4415 { "cpucount", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpucount }, "force specific cpu count", "count" },
4416 { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
4417
4418 #if CONFIG_AVDEVICE
4419 { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
4420 "list sources of the input device", "device" },
4421 { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
4422 "list sinks of the output device", "device" },
4423 #endif
4424
4425 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
4426 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
4427 "force format", "fmt" },
4428 { "y", OPT_BOOL, { &file_overwrite },
4429 "overwrite output files" },
4430 { "n", OPT_BOOL, { &no_file_overwrite },
4431 "never overwrite output files" },
4432 { "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
4433 "Ignore unknown stream types" },
4434 { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
4435 "Copy unknown stream types" },
4436 { "recast_media", OPT_BOOL | OPT_EXPERT, { &recast_media },
4437 "allow recasting stream type in order to force a decoder of different media type" },
4438 { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
4439 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
4440 "codec name", "codec" },
4441 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC |
4442 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
4443 "codec name", "codec" },
4444 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC |
4445 OPT_OUTPUT, { .off = OFFSET(presets) },
4446 "preset name", "preset" },
4447 { "map", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4448 OPT_OUTPUT, { .func_arg = opt_map },
4449 "set input stream mapping",
4450 "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4451 #if FFMPEG_OPT_MAP_CHANNEL
4452 { "map_channel", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_map_channel },
4453 "map an audio channel from one stream to another (deprecated)", "file.stream.channel[:syncfile.syncstream]" },
4454 #endif
4455 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC |
4456 OPT_OUTPUT, { .off = OFFSET(metadata_map) },
4457 "set metadata information of outfile from infile",
4458 "outfile[,metadata]:infile[,metadata]" },
4459 { "map_chapters", HAS_ARG | OPT_INT | OPT_EXPERT | OPT_OFFSET |
4460 OPT_OUTPUT, { .off = OFFSET(chapters_input_file) },
4461 "set chapters mapping", "input_file_index" },
4462 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET |
4463 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(recording_time) },
4464 "record or transcode \"duration\" seconds of audio/video",
4465 "duration" },
4466 { "to", HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(stop_time) },
4467 "record or transcode stop time", "time_stop" },
4468 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(limit_filesize) },
4469 "set the limit file size in bytes", "limit_size" },
4470 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
4471 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
4472 "set the start time offset", "time_off" },
4473 { "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
4474 OPT_INPUT, { .off = OFFSET(start_time_eof) },
4475 "set the start time offset relative to EOF", "time_off" },
4476 { "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
4477 OPT_INPUT, { .off = OFFSET(seek_timestamp) },
4478 "enable/disable seeking by timestamp with -ss" },
4479 { "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
4480 OPT_INPUT, { .off = OFFSET(accurate_seek) },
4481 "enable/disable accurate seeking with -ss" },
4482 { "isync", HAS_ARG | OPT_INT | OPT_OFFSET |
4483 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_sync_ref) },
4484 "Indicate the input index for sync reference", "sync ref" },
4485 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET |
4486 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_ts_offset) },
4487 "set the input ts offset", "time_off" },
4488 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
4489 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
4490 "set the input ts scale", "scale" },
4491 { "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
4492 "set the recording timestamp ('now' to set the current time)", "time" },
4493 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
4494 "add metadata", "string=string" },
4495 { "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
4496 "add program with specified streams", "title=string:st=number..." },
4497 { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
4498 OPT_OUTPUT, { .func_arg = opt_data_frames },
4499 "set the number of data frames to output", "number" },
4500 { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
4501 "add timings for benchmarking" },
4502 { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
4503 "add timings for each task" },
4504 { "progress", HAS_ARG | OPT_EXPERT, { .func_arg = opt_progress },
4505 "write program-readable progress information", "url" },
4506 { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
4507 "enable or disable interaction on standard input" },
4508 { "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
4509 "set max runtime in seconds in CPU user time", "limit" },
4510 { "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
4511 "dump each input packet" },
4512 { "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
4513 "when dumping packets, also dump the payload" },
4514 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
4515 OPT_INPUT, { .off = OFFSET(rate_emu) },
4516 "read input at native frame rate; equivalent to -readrate 1", "" },
4517 { "readrate", HAS_ARG | OPT_FLOAT | OPT_OFFSET |
4518 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(readrate) },
4519 "read input at specified rate", "speed" },
4520 { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
4521 "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
4522 "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
4523 { "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
4524 "set video sync method globally; deprecated, use -fps_mode", "" },
4525 { "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
4526 "frame drop threshold", "" },
4527 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
4528 "audio drift threshold", "threshold" },
4529 { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
4530 "copy timestamps" },
4531 { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
4532 "shift input timestamps to start at 0 when using copyts" },
4533 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
4534 "copy input stream time base when stream copying", "mode" },
4535 { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
4536 OPT_OUTPUT, { .off = OFFSET(shortest) },
4537 "finish encoding within shortest input" },
4538 { "shortest_buf_duration", HAS_ARG | OPT_FLOAT | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(shortest_buf_duration) },
4539 "maximum buffering duration (in seconds) for the -shortest option" },
4540 { "bitexact", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
4541 OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(bitexact) },
4542 "bitexact mode" },
4543 { "apad", OPT_STRING | HAS_ARG | OPT_SPEC |
4544 OPT_OUTPUT, { .off = OFFSET(apad) },
4545 "audio pad", "" },
4546 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_delta_threshold },
4547 "timestamp discontinuity delta threshold", "threshold" },
4548 { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_error_threshold },
4549 "timestamp error delta threshold", "threshold" },
4550 { "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
4551 "exit on error", "error" },
4552 { "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
4553 "abort on the specified condition flags", "flags" },
4554 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
4555 OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
4556 "copy initial non-keyframes" },
4557 { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
4558 "copy or discard frames before start time" },
4559 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
4560 "set the number of frames to output", "number" },
4561 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
4562 OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
4563 "force codec tag/fourcc", "fourcc/tag" },
4564 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
4565 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
4566 "use fixed quality scale (VBR)", "q" },
4567 { "qscale", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4568 OPT_OUTPUT, { .func_arg = opt_qscale },
4569 "use fixed quality scale (VBR)", "q" },
4570 { "profile", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_profile },
4571 "set profile", "profile" },
4572 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
4573 "set stream filtergraph", "filter_graph" },
4574 { "filter_threads", HAS_ARG, { .func_arg = opt_filter_threads },
4575 "number of non-complex filter threads" },
4576 { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
4577 "read stream filtergraph description from a file", "filename" },
4578 { "reinit_filter", HAS_ARG | OPT_INT | OPT_SPEC | OPT_INPUT, { .off = OFFSET(reinit_filters) },
4579 "reinit filtergraph on input parameter changes", "" },
4580 { "filter_complex", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
4581 "create a complex filtergraph", "graph_description" },
4582 { "filter_complex_threads", HAS_ARG | OPT_INT, { &filter_complex_nbthreads },
4583 "number of threads for -filter_complex" },
4584 { "lavfi", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
4585 "create a complex filtergraph", "graph_description" },
4586 { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
4587 "read complex filtergraph description from a file", "filename" },
4588 { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
4589 "enable automatic conversion filters globally" },
4590 { "stats", OPT_BOOL, { &print_stats },
4591 "print progress report during encoding", },
4592 { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
4593 "set the period at which ffmpeg updates stats and -progress output", "time" },
4594 { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
4595 OPT_OUTPUT, { .func_arg = opt_attach },
4596 "add an attachment to the output file", "filename" },
4597 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
4599 "extract an attachment into a file", "filename" },
4600 { "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
4601 OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
4602 { "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
4603 "print timestamp debugging info" },
4604 { "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
4605 "ratio of decoding errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success.", "maximum error rate" },
4606 { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
4607 OPT_INPUT, { .off = OFFSET(discard) },
4608 "discard", "" },
4609 { "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
4610 OPT_OUTPUT, { .off = OFFSET(disposition) },
4611 "disposition", "" },
4612 { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT,
4613 { .off = OFFSET(thread_queue_size) },
4614 "set the maximum number of queued packets from the demuxer" },
4615 { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT | OPT_OFFSET, { .off = OFFSET(find_stream_info) },
4616 "read and decode the streams to fill missing information with heuristics" },
4617 { "bits_per_raw_sample", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT,
4618 { .off = OFFSET(bits_per_raw_sample) },
4619 "set the number of bits per raw sample", "number" },
4620
4621 { "stats_enc_pre", HAS_ARG | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT | OPT_STRING, { .off = OFFSET(enc_stats_pre) },
4622 "write encoding stats before encoding" },
4623 { "stats_enc_post", HAS_ARG | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT | OPT_STRING, { .off = OFFSET(enc_stats_post) },
4624 "write encoding stats after encoding" },
4625 { "stats_mux_pre", HAS_ARG | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT | OPT_STRING, { .off = OFFSET(mux_stats) },
4626 "write packets stats before muxing" },
4627 { "stats_enc_pre_fmt", HAS_ARG | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT | OPT_STRING, { .off = OFFSET(enc_stats_pre_fmt) },
4628 "format of the stats written with -stats_enc_pre" },
4629 { "stats_enc_post_fmt", HAS_ARG | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT | OPT_STRING, { .off = OFFSET(enc_stats_post_fmt) },
4630 "format of the stats written with -stats_enc_post" },
4631 { "stats_mux_pre_fmt", HAS_ARG | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT | OPT_STRING, { .off = OFFSET(mux_stats_fmt) },
4632 "format of the stats written with -stats_mux_pre" },
4633
4634 /* video options */
4635 { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
4636 "set the number of video frames to output", "number" },
4637 { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
4638 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
4639 "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4640 { "fpsmax", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
4641 OPT_OUTPUT, { .off = OFFSET(max_frame_rates) },
4642 "set max frame rate (Hz value, fraction or abbreviation)", "rate" },
4644 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_sizes) },
4645 "set frame size (WxH or abbreviation)", "size" },
4646 { "aspect", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
4647 OPT_OUTPUT, { .off = OFFSET(frame_aspect_ratios) },
4648 "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4649 { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
4650 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
4651 "set pixel format", "format" },
4652 { "display_rotation", OPT_VIDEO | HAS_ARG | OPT_DOUBLE | OPT_SPEC |
4653 OPT_INPUT, { .off = OFFSET(display_rotations) },
4654 "set pure counter-clockwise rotation in degrees for stream(s)",
4655 "angle" },
4656 { "display_hflip", OPT_VIDEO | OPT_BOOL | OPT_SPEC | OPT_INPUT, { .off = OFFSET(display_hflips) },
4657 "set display horizontal flip for stream(s) "
4658 "(overrides any display rotation if it is not set)"},
4659 { "display_vflip", OPT_VIDEO | OPT_BOOL | OPT_SPEC | OPT_INPUT, { .off = OFFSET(display_vflips) },
4660 "set display vertical flip for stream(s) "
4661 "(overrides any display rotation if it is not set)"},
4662 { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
4663 "disable video" },
4664 { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
4665 OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
4666 "rate control override for specific intervals", "override" },
4667 { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
4668 OPT_OUTPUT, { .func_arg = opt_video_codec },
4669 "force video codec ('copy' to copy stream)", "codec" },
4670 { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
4671 "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
4672 { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
4673 "select the pass number (1 to 3)", "n" },
4674 { "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
4675 OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
4676 "select two pass log file name prefix", "prefix" },
4677 #if FFMPEG_OPT_PSNR
4678 { "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
4679 "calculate PSNR of compressed frames (deprecated, use -flags +psnr)" },
4680 #endif
4681 { "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
4682 "dump video coding statistics to file" },
4683 { "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
4684 "dump video coding statistics to file", "file" },
4685 { "vstats_version", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &vstats_version },
4686 "Version of the vstats format to use."},
4687 { "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
4688 "set video filters", "filter_graph" },
4689 { "intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
4690 OPT_OUTPUT, { .off = OFFSET(intra_matrices) },
4691 "specify intra matrix coeffs", "matrix" },
4692 { "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
4693 OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
4694 "specify inter matrix coeffs", "matrix" },
4695 { "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
4696 OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
4697 "specify intra matrix coeffs", "matrix" },
4698 { "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
4699 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
4700 "top=1/bottom=0/auto=-1 field first", "" },
4701 { "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4702 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
4703 "force video tag/fourcc", "fourcc/tag" },
4704 { "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
4705 "show QP histogram" },
4706 { "fps_mode", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT |
4707 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(fps_mode) },
4708 "set framerate mode for matching video streams; overrides vsync" },
4709 { "force_fps", OPT_VIDEO | OPT_BOOL | OPT_EXPERT | OPT_SPEC |
4710 OPT_OUTPUT, { .off = OFFSET(force_fps) },
4711 "force the selected framerate, disable the best supported framerate selection" },
4712 { "streamid", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4713 OPT_OUTPUT, { .func_arg = opt_streamid },
4714 "set the value of an outfile streamid", "streamIndex:value" },
4715 { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
4716 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
4717 "force key frames at specified timestamps", "timestamps" },
4718 { "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
4719 "video bitrate (please use -b:v)", "bitrate" },
4720 { "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
4721 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
4722 "use HW accelerated decoding", "hwaccel name" },
4723 { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
4724 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
4725 "select a device for HW acceleration", "devicename" },
4726 { "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
4727 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
4728 "select output format used with HW accelerated decoding", "format" },
4729 { "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
4730 "show available HW acceleration methods" },
4731 { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
4732 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
4733 "automatically insert correct rotate filters" },
4734 { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
4735 OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
4736 "automatically insert a scale filter at the end of the filter graph" },
4737 { "fix_sub_duration_heartbeat", OPT_VIDEO | OPT_BOOL | OPT_EXPERT |
4739 "set this video output stream to be a heartbeat stream for "
4740 "fix_sub_duration, according to which subtitles should be split at "
4741 "random access points" },
4742
4743 /* audio options */
4744 { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
4745 "set the number of audio frames to output", "number" },
4746 { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
4747 "set audio quality (codec-specific)", "quality", },
4748 { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
4749 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_sample_rate) },
4750 "set audio sampling rate (in Hz)", "rate" },
4751 { "ac", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
4752 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_channels) },
4753 "set number of audio channels", "channels" },
4754 { "an", OPT_AUDIO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(audio_disable) },
4755 "disable audio" },
4756 { "acodec", OPT_AUDIO | HAS_ARG | OPT_PERFILE |
4757 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_audio_codec },
4758 "force audio codec ('copy' to copy stream)", "codec" },
4759 { "ab", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
4760 "audio bitrate (please use -b:a)", "bitrate" },
4761 { "atag", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
4762 OPT_OUTPUT, { .func_arg = opt_old2new },
4763 "force audio tag/fourcc", "fourcc/tag" },
4764 { "sample_fmt", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
4765 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(sample_fmts) },
4766 "set sample format", "format" },
4767 { "channel_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
4768 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_ch_layouts) },
4769 "set channel layout", "layout" },
4770 { "ch_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
4771 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_ch_layouts) },
4772 "set channel layout", "layout" },
4773 { "af", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_filters },
4774 "set audio filters", "filter_graph" },
4775 { "guess_layout_max", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(guess_layout_max) },
4776 "set the maximum number of channels to try to guess the channel layout" },
4777
4778 /* subtitle options */
4779 { "sn", OPT_SUBTITLE | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(subtitle_disable) },
4780 "disable subtitle" },
4781 { "scodec", OPT_SUBTITLE | HAS_ARG | OPT_PERFILE | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_subtitle_codec },
4782 "force subtitle codec ('copy' to copy stream)", "codec" },
4783 { "stag", OPT_SUBTITLE | HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new }
4784 , "force subtitle tag/fourcc", "fourcc/tag" },
4785 { "fix_sub_duration", OPT_BOOL | OPT_EXPERT | OPT_SUBTITLE | OPT_SPEC | OPT_INPUT, { .off = OFFSET(fix_sub_duration) },
4786 "fix subtitles duration" },
4787 { "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
4788 "set canvas size (WxH or abbreviation)", "size" },
4789
4790 /* muxer options */
4791 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
4792 "set the maximum demux-decode delay", "seconds" },
4793 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_preload) },
4794 "set the initial demux-decode delay", "seconds" },
4795 { "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
4796 "specify a file in which to print sdp information", "file" },
4797
4798 { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
4799 "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
4800 { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
4801 "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
4802 "two special values are defined - "
4803 "0 = use frame rate (video) or sample rate (audio),"
4804 "-1 = match source time base", "ratio" },
4805
4806 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
4807 "A comma-separated list of bitstream filters", "bitstream_filters" },
4808 { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
4809 "deprecated", "audio bitstream_filters" },
4810 { "vbsf", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
4811 "deprecated", "video bitstream_filters" },
4812
4813 { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
4814 "set the audio options to the indicated preset", "preset" },
4815 { "vpre", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
4816 "set the video options to the indicated preset", "preset" },
4817 { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
4818 "set the subtitle options to the indicated preset", "preset" },
4819 { "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
4820 "set options from indicated preset file", "filename" },
4821
4822 { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
4823 "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
4824 { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) },
4825 "set the threshold after which max_muxing_queue_size is taken into account", "bytes" },
4826
4827 /* data codec support */
4828 { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
4829 "force data codec ('copy' to copy stream)", "codec" },
4830 { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
4831 "disable data" },
4832
4833 #if CONFIG_VAAPI
4834 { "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
4835 "set VAAPI hardware device (DRM path or X11 display name)", "device" },
4836 #endif
4837
4838 #if CONFIG_QSV
4839 { "qsv_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_qsv_device },
4840 "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
4841 #endif
4842
4843 { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
4844 "initialise hardware device", "args" },
4845 { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
4846 "set hardware device used when filtering", "device" },
4847
4848 { NULL, },
4849 };
4850
4851 ffmpeg_options = options;
4852
4853 int ret;
4855
4856 int savedCode = setjmp(ex_buf__);
4857 if (savedCode == 0) {
4858
4860
4861 init_dynload();
4862
4864
4865 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4866 parse_loglevel(argc, argv, options);
4867
4868 #if CONFIG_AVDEVICE
4869 avdevice_register_all();
4870 #endif
4871 avformat_network_init();
4872
4873 show_banner(argc, argv, options);
4874
4875 /* parse options and open all input/output files */
4876 ret = ffmpeg_parse_options(argc, argv);
4877 if (ret < 0)
4878 exit_program(1);
4879
4880 if (nb_output_files <= 0 && nb_input_files == 0) {
4881 show_usage();
4882 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4883 exit_program(1);
4884 }
4885
4886 /* file converter / grab */
4887 if (nb_output_files <= 0) {
4888 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4889 exit_program(1);
4890 }
4891
4893 if (transcode() < 0)
4894 exit_program(1);
4895 if (do_benchmark) {
4896 int64_t utime, stime, rtime;
4898 utime = current_time.user_usec - ti.user_usec;
4899 stime = current_time.sys_usec - ti.sys_usec;
4900 rtime = current_time.real_usec - ti.real_usec;
4901 av_log(NULL, AV_LOG_INFO,
4902 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4903 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4904 }
4905 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4908 exit_program(69);
4909
4911
4912 } else {
4914 }
4915
4917}
__thread jmp_buf ex_buf__
void exit_program(int ret)
void init_dynload(void)
void print_error(const char *filename, int err)
void report_and_exit(int ret)
__thread char * program_name
void parse_loglevel(int argc, char **argv, const OptionDef *options)
__thread int program_birth_year
void register_exit(void(*cb)(int ret))
void uninit_opts(void)
__thread int hide_banner
#define OPT_VIDEO
#define OPT_SPEC
#define OPT_BOOL
#define OPT_INT64
#define OPT_PERFILE
#define OPT_INT
#define OPT_FLOAT
#define AV_LOG_STDERR
#define OPT_INPUT
#define OPT_DOUBLE
#define OPT_STRING
__thread int find_stream_info
void show_banner(int argc, char **argv, const OptionDef *options)
int opt_timelimit(void *optctx, const char *opt, const char *arg)
#define OPT_AUDIO
#define OPT_DATA
#define OPT_SUBTITLE
#define OPT_EXPERT
#define OPT_EXIT
#define OPT_OUTPUT
#define OPT_TIME
#define OPT_OFFSET
#define HAS_ARG
int opt_channel_layout(void *optctx, const char *opt, const char *arg)
static OutputStream * ost_iter(OutputStream *prev)
int opt_sdp_file(void *optctx, const char *opt, const char *arg)
static int transcode(void)
int opt_timecode(void *optctx, const char *opt, const char *arg)
__thread OptionDef * ffmpeg_options
int opt_vstats_file(void *optctx, const char *opt, const char *arg)
static void set_tty_echo(int on)
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
__thread const AVIOInterruptCB int_cb
static int check_keyboard_interaction(int64_t cur_time)
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
int opt_data_codec(void *optctx, const char *opt, const char *arg)
int opt_streamid(void *optctx, const char *opt, const char *arg)
static int need_output(void)
void term_exit(void)
static volatile int received_sigterm
void cancelSession(long sessionId)
int opt_qscale(void *optctx, const char *opt, const char *arg)
int opt_sameq(void *optctx, const char *opt, const char *arg)
__thread OutputFile ** output_files
int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
int opt_filter_complex(void *optctx, const char *opt, const char *arg)
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
int opt_vsync(void *optctx, const char *opt, const char *arg)
static void sub2video_push_ref(InputStream *ist, int64_t pts)
static void forward_report(uint64_t frame_number, float fps, float quality, int64_t total_size, int seconds, int microseconds, double bitrate, double speed)
static int reap_filters(int flush)
static void ts_discontinuity_process(InputFile *ifile, InputStream *ist, AVPacket *pkt)
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
__thread BenchmarkTimeStamps current_time
__thread int nb_input_files
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
static void print_final_stats(int64_t total_size)
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
__thread int first_report
__thread int nb_enc_stats_files
static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt)
__thread int recast_media
__thread int nb_output_files
static double psnr(double d)
static __thread FILE * vstats_file
volatile int handleSIGINT
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
int opt_subtitle_codec(void *optctx, const char *opt, const char *arg)
__thread int ffmpeg_exited
static int check_recording_time(OutputStream *ost, int64_t ts, AVRational tb)
volatile int handleSIGTERM
void set_report_callback(void(*callback)(int, float, float, int64_t, double, double, double))
int opt_video_standard(void *optctx, const char *opt, const char *arg)
__thread int64_t nb_frames_drop
static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src)
static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts)
int opt_profile(void *optctx, const char *opt, const char *arg)
static int64_t getmaxrss(void)
int opt_abort_on(void *optctx, const char *opt, const char *arg)
static void reset_eagain(void)
__thread int copy_unknown_streams
int opt_video_codec(void *optctx, const char *opt, const char *arg)
int opt_data_frames(void *optctx, const char *opt, const char *arg)
int opt_video_filters(void *optctx, const char *opt, const char *arg)
int opt_attach(void *optctx, const char *opt, const char *arg)
int opt_filter_hw_device(void *optctx, const char *opt, const char *arg)
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
__thread atomic_int transcode_init_done
static int init_input_stream(InputStream *ist, char *error, int error_len)
static void check_decode_result(InputStream *ist, int *got_output, int ret)
__thread int64_t keyboard_last_time
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
__thread int64_t nb_frames_dup
int opt_audio_frames(void *optctx, const char *opt, const char *arg)
static int process_input(int file_index)
__thread int no_file_overwrite
int opt_target(void *optctx, const char *opt, const char *arg)
__thread int qp_histogram[52]
__thread struct EncStatsFile * enc_stats_files
__thread int longjmp_value
static void video_sync_process(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double duration, int64_t *nb_frames, int64_t *nb_frames_prev)
static int check_output_constraints(InputStream *ist, OutputStream *ost)
static void decode_flush(InputFile *ifile)
__thread int file_overwrite
int opt_map(void *optctx, const char *opt, const char *arg)
void cancel_operation(long id)
static void set_encoder_id(OutputFile *of, OutputStream *ost)
__thread AVIOContext * progress_avio
__thread int64_t copy_ts_first_pts
__thread InputFile ** input_files
static int read_key(void)
static void close_output_stream(OutputStream *ost)
#define SIGNAL(sig, func)
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
int opt_vstats(void *optctx, const char *opt, const char *arg)
int opt_video_channel(void *optctx, const char *opt, const char *arg)
__thread FilterGraph ** filtergraphs
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
static int got_eagain(void)
int opt_video_frames(void *optctx, const char *opt, const char *arg)
static void ts_discontinuity_detect(InputFile *ifile, InputStream *ist, AVPacket *pkt)
__thread unsigned nb_output_dumped
static int send_filter_eof(InputStream *ist)
__thread int main_ffmpeg_return_code
int decode_interrupt_cb(void *ctx)
int opt_audio_filters(void *optctx, const char *opt, const char *arg)
static void term_exit_sigsafe(void)
static void sub2video_flush(InputStream *ist)
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
void remove_avoptions(AVDictionary **a, AVDictionary *b)
static int transcode_init(void)
static volatile int received_nb_signals
int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
__thread long globalSessionId
int opt_filter_threads(void *optctx, const char *opt, const char *arg)
static int decode(InputStream *ist, AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
__thread uint64_t dup_warning
__thread int64_t decode_error_stat[2]
static void abort_codec_experimental(const AVCodec *c, int encoder)
void(* report_callback)(int, float, float, int64_t, double, double, double)
int show_hwaccels(void *optctx, const char *opt, const char *arg)
static void update_benchmark(const char *fmt,...)
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
static void ffmpeg_cleanup(int ret)
int opt_preset(void *optctx, const char *opt, const char *arg)
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
static int transcode_step(void)
static int ifilter_has_all_input_formats(FilterGraph *fg)
static OutputStream * choose_output(void)
static int submit_encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
int opt_old2new(void *optctx, const char *opt, const char *arg)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
__thread int nb_filtergraphs
static int init_output_stream_streamcopy(OutputStream *ost)
void term_init(void)
int opt_stats_period(void *optctx, const char *opt, const char *arg)
int cancelRequested(long sessionId)
__thread int64_t last_time
volatile int handleSIGPIPE
#define OFFSET(x)
static int transcode_subtitles(InputStream *ist, const AVPacket *pkt, int *got_output, int *decode_failed)
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
enum AVPictureType forced_kf_apply(void *logctx, KeyframeForceCtx *kf, AVRational tb, const AVFrame *in_picture, int dup_idx)
int opt_bitrate(void *optctx, const char *opt, const char *arg)
volatile int handleSIGXCPU
static int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output)
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
static int sub2video_get_blank_frame(InputStream *ist)
static void flush_encoders(void)
__thread int do_psnr
int opt_map_channel(void *optctx, const char *opt, const char *arg)
int opt_progress(void *optctx, const char *opt, const char *arg)
int opt_audio_codec(void *optctx, const char *opt, const char *arg)
InputStream * ist_iter(InputStream *prev)
int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
void assert_avoptions(AVDictionary *m)
__thread int want_sdp
void ffmpeg_var_cleanup()
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
int ffmpeg_execute(int argc, char **argv)
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
__thread int ignore_unknown_streams
static void sigterm_handler(int sig)
void enc_stats_write(OutputStream *ost, EncStats *es, const AVFrame *frame, const AVPacket *pkt, uint64_t frame_num)
volatile int handleSIGQUIT
int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
__thread float dts_delta_threshold
int hw_device_setup_for_encode(OutputStream *ost)
__thread int copy_tb
@ HWACCEL_GENERIC
@ HWACCEL_AUTO
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
void of_close(OutputFile **pof)
int64_t of_filesize(OutputFile *of)
__thread int64_t stats_period
void ifile_close(InputFile **f)
__thread int print_stats
__thread int filter_complex_nbthreads
__thread int abort_on_flags
__thread float max_error_rate
__thread int copy_ts
__thread int stdin_interaction
__thread float dts_error_threshold
int hwaccel_decode_init(AVCodecContext *avctx)
@ ENCODER_FINISHED
@ MUXER_FINISHED
void show_usage(void)
void of_output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
int ifile_get_packet(InputFile *f, AVPacket **pkt)
#define DECODING_FOR_FILTER
__thread char * filter_nbthreads
__thread int do_benchmark
__thread float frame_drop_threshold
__thread int vstats_version
__thread char * vstats_filename
int hw_device_setup_for_decode(InputStream *ist)
void hw_device_free_all(void)
__thread float audio_drift_threshold
void of_enc_stats_close(void)
int of_write_trailer(OutputFile *of)
__thread int do_benchmark_all
__thread int start_at_zero
@ KF_FORCE_SOURCE
@ KF_FORCE_SOURCE_NO_DROP
@ FKF_PREV_FORCED_N
@ FKF_T
@ FKF_PREV_FORCED_T
@ FKF_N_FORCED
@ FKF_N
int of_stream_init(OutputFile *of, OutputStream *ost)
__thread int exit_on_error
int ffmpeg_parse_options(int argc, char **argv)
@ VSYNC_VFR
@ VSYNC_AUTO
@ VSYNC_PASSTHROUGH
@ VSYNC_CFR
@ VSYNC_DROP
@ VSYNC_VSCFR
#define ABORT_ON_FLAG_EMPTY_OUTPUT
#define DECODING_FOR_OST
__thread int qp_hist
int filtergraph_is_simple(FilterGraph *fg)
int configure_filtergraph(FilterGraph *fg)
__thread int do_hex_dump
__thread int do_pkt_dump
__thread int debug_ts
__thread int auto_conversion_filters
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
@ ENC_STATS_STREAM_IDX
@ ENC_STATS_PTS_TIME
@ ENC_STATS_SAMPLE_NUM
@ ENC_STATS_AVG_BITRATE
@ ENC_STATS_LITERAL
@ ENC_STATS_TIMEBASE
@ ENC_STATS_DTS_TIME
@ ENC_STATS_PKT_SIZE
@ ENC_STATS_FRAME_NUM_IN
@ ENC_STATS_PTS
@ ENC_STATS_FRAME_NUM
@ ENC_STATS_FILE_IDX
@ ENC_STATS_DTS
@ ENC_STATS_BITRATE
@ ENC_STATS_PTS_IN
@ ENC_STATS_TIMEBASE_IN
@ ENC_STATS_PTS_TIME_IN
@ ENC_STATS_NB_SAMPLES
static void dump_attachment(AVStream *st, const char *filename)
__thread int nb_streams
int show_decoders(void *optctx, const char *opt, const char *arg)
int opt_loglevel(void *optctx, const char *opt, const char *arg)
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
int show_help(void *optctx, const char *opt, const char *arg)
int show_filters(void *optctx, const char *opt, const char *arg)
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
int show_muxers(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
int show_dispositions(void *optctx, const char *opt, const char *arg)
int show_layouts(void *optctx, const char *opt, const char *arg)
int show_encoders(void *optctx, const char *opt, const char *arg)
int show_version(void *optctx, const char *opt, const char *arg)
int opt_cpucount(void *optctx, const char *opt, const char *arg)
int show_license(void *optctx, const char *opt, const char *arg)
int show_codecs(void *optctx, const char *opt, const char *arg)
int show_buildconf(void *optctx, const char *opt, const char *arg)
int show_devices(void *optctx, const char *opt, const char *arg)
int show_formats(void *optctx, const char *opt, const char *arg)
int show_protocols(void *optctx, const char *opt, const char *arg)
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
int opt_report(void *optctx, const char *opt, const char *arg)
int show_colors(void *optctx, const char *opt, const char *arg)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
int show_demuxers(void *optctx, const char *opt, const char *arg)
int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
void sq_set_tb(SyncQueue *sq, unsigned int stream_idx, AVRational tb)
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
#define SQFRAME(frame)
enum EncStatsType type
AVIOContext * io
EncStatsComponent * components
OutputFilter ** outputs
const char * graph_desc
AVFilterGraph * graph
InputFilter ** inputs
uint64_t idx
AVRational tb
int64_t ts_offset
int64_t ts_offset_discont
AVFormatContext * ctx
AVThreadMessageQueue * audio_duration_queue
int64_t recording_time
int64_t last_ts
int64_t start_time_effective
InputStream ** streams
int64_t start_time
AVBufferRef * hw_frames_ctx
uint8_t * name
struct InputStream * ist
int32_t * displaymatrix
AVFifo * frame_queue
AVFilterContext * filter
AVChannelLayout ch_layout
enum AVMediaType type
struct FilterGraph * graph
AVRational sample_aspect_ratio
unsigned int initialize
marks if sub2video_update should force an initialization
AVFifo * sub_queue
queue of AVSubtitle* before filter init
enum AVPixelFormat hwaccel_pix_fmt
AVFrame * decoded_frame
int64_t * dts_buffer
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
int64_t cfr_next_pts
AVCodecContext * dec_ctx
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
enum HWAccelID hwaccel_id
uint64_t data_size
int64_t filter_in_rescale_delta_last
AVCodecParameters * par
AVPacket * pkt
int64_t first_dts
dts of the first packet read for this stream (in AV_TIME_BASE units)
uint64_t samples_decoded
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
struct InputStream::@3 prev_sub
uint64_t frames_decoded
struct InputStream::sub2video sub2video
AVStream * st
InputFilter ** filters
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
uint64_t nb_packets
AVSubtitle subtitle
int64_t prev_pkt_pts
AVDictionary * decoder_opts
const AVCodec * dec
enum AVHWDeviceType hwaccel_device_type
int64_t nb_samples
AVRational framerate
double expr_const_values[FKF_NB]
const AVOutputFormat * format
SyncQueue * sq_encode
const char * url
OutputStream ** streams
int64_t start_time
start time in microseconds == AV_TIME_BASE units
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
AVFilterInOut * out_tmp
struct OutputStream * ost
AVFilterContext * filter
struct FilterGraph * graph
AVChannelLayout ch_layout
uint64_t data_size_mux
unsigned int fix_sub_duration_heartbeat
int copy_initial_nonkeyframes
AVRational mux_timebase
int64_t vsync_frame_number
OSTFinished finished
AVPacket * pkt
AVRational frame_aspect_ratio
double rotate_override_value
int64_t last_filter_pts
uint64_t data_size_enc
int64_t ts_copy_start
AVFrame * last_frame
EncStats enc_stats_pre
int64_t error[4]
uint64_t frames_encoded
enum VideoSyncMethod vsync_method
InputStream * ist
KeyframeForceCtx kf
AVRational max_frame_rate
AVRational enc_timebase
AVFrame * sq_frame
AVRational frame_rate
int64_t last_nb0_frames[3]
AVCodecContext * enc_ctx
AVDictionary * encoder_opts
AVStream * st
char * filters
filtergraph associated to the -filter option
atomic_uint_least64_t packets_written
EncStats enc_stats_post
uint64_t samples_encoded
char * filters_script
filtergraph script associated to the -filter_script option
OutputFilter * filter
uint64_t packets_encoded
int64_t last_dropped