diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c
index 0b90194..8f9df9e 100644
--- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c
+++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c
@@ -83,9 +83,6 @@
 #include "libavutil/ffversion.h"
 #include "libavutil/version.h"
 #include "fftools_cmdutils.h"
-#if CONFIG_NETWORK
-#include "libavformat/network.h"
-#endif
 #if HAVE_SYS_RESOURCE_H
 #include <sys/time.h>
 #include <sys/resource.h>
@@ -152,7 +149,7 @@ void log_callback_report(void *ptr, int level, const char *fmt, va_list vl)
 void init_dynload(void)
 {
-#if HAVE_SETDLLDIRECTORY
+#if HAVE_SETDLLDIRECTORY && defined(_WIN32)
     /* Calling SetDllDirectory with the empty string (but not NULL) removes the
      * current working directory from the DLL search path as a security pre-caution. */
     SetDllDirectory("");
@@ -218,7 +215,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
     first = 1;
     for (po = options; po->name; po++) {
-        char buf[64];
+        char buf[128];
         if (((po->flags & req_flags) != req_flags) ||
             (alt_flags && !(po->flags & alt_flags)) ||
@@ -241,13 +238,14 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
 void show_help_children(const AVClass *class, int flags)
 {
-    const AVClass *child = NULL;
+    void *iter = NULL;
+    const AVClass *child;
     if (class->option) {
         av_opt_show2(&class, NULL, flags, 0);
         av_log(NULL, AV_LOG_STDERR, "\n");
     }
-    while ((child = av_opt_child_class_next(class, child)))
+    while ((child = av_opt_child_class_iterate(class, &iter)))
         show_help_children(child, flags);
 }
@@ -510,7 +508,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
     return 0;
 }
-void dump_argument(const char *a)
+static void dump_argument(const char *a)
 {
     const unsigned char *p;
@@ -1020,7 +1018,7 @@ static void expand_filename_template(AVBPrint *bp, const char *template,
     }
 }
-int init_report(const char *env)
+static int init_report(const char *env)
 {
     char *filename_template = NULL;
     char *key, *val;
@@ -1464,10 +1462,6 @@ static void print_codec(const AVCodec *c)
         av_log(NULL, AV_LOG_STDERR, "threads ");
     if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING)
         av_log(NULL, AV_LOG_STDERR, "avoidprobe ");
-    if (c->capabilities & AV_CODEC_CAP_INTRA_ONLY)
-        av_log(NULL, AV_LOG_STDERR, "intraonly ");
-    if (c->capabilities & AV_CODEC_CAP_LOSSLESS)
-        av_log(NULL, AV_LOG_STDERR, "lossless ");
     if (c->capabilities & AV_CODEC_CAP_HARDWARE)
         av_log(NULL, AV_LOG_STDERR, "hardware ");
     if (c->capabilities & AV_CODEC_CAP_HYBRID)
@@ -1541,13 +1535,14 @@ static char get_media_type_char(enum AVMediaType type)
     }
 }
-static const AVCodec *next_codec_for_id(enum AVCodecID id, const AVCodec *prev,
+static const AVCodec *next_codec_for_id(enum AVCodecID id, void **iter,
                                         int encoder)
 {
-    while ((prev = av_codec_next(prev))) {
-        if (prev->id == id &&
-            (encoder ? av_codec_is_encoder(prev) : av_codec_is_decoder(prev)))
-            return prev;
+    const AVCodec *c;
+    while ((c = av_codec_iterate(iter))) {
+        if (c->id == id &&
+            (encoder ? av_codec_is_encoder(c) : av_codec_is_decoder(c)))
+            return c;
     }
     return NULL;
 }
@@ -1584,11 +1579,12 @@ static unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs)
 static void print_codecs_for_id(enum AVCodecID id, int encoder)
 {
-    const AVCodec *codec = NULL;
+    void *iter = NULL;
+    const AVCodec *codec;
     av_log(NULL, AV_LOG_STDERR, " (%s: ", encoder ?
"encoders" : "decoders"); - while ((codec = next_codec_for_id(id, codec, encoder))) + while ((codec = next_codec_for_id(id, &iter, encoder))) av_log(NULL, AV_LOG_STDERR, "%s ", codec->name); av_log(NULL, AV_LOG_STDERR, ")"); @@ -1611,7 +1607,8 @@ int show_codecs(void *optctx, const char *opt, const char *arg) " -------\n"); for (i = 0; i < nb_codecs; i++) { const AVCodecDescriptor *desc = codecs[i]; - const AVCodec *codec = NULL; + const AVCodec *codec; + void *iter = NULL; if (strstr(desc->name, "_deprecated")) continue; @@ -1629,14 +1626,14 @@ int show_codecs(void *optctx, const char *opt, const char *arg) /* print decoders/encoders when there's more than one or their * names are different from codec name */ - while ((codec = next_codec_for_id(desc->id, codec, 0))) { + while ((codec = next_codec_for_id(desc->id, &iter, 0))) { if (strcmp(codec->name, desc->name)) { print_codecs_for_id(desc->id, 0); break; } } - codec = NULL; - while ((codec = next_codec_for_id(desc->id, codec, 1))) { + iter = NULL; + while ((codec = next_codec_for_id(desc->id, &iter, 1))) { if (strcmp(codec->name, desc->name)) { print_codecs_for_id(desc->id, 1); break; @@ -1667,9 +1664,10 @@ static void print_codecs(int encoder) encoder ? "Encoders" : "Decoders"); for (i = 0; i < nb_codecs; i++) { const AVCodecDescriptor *desc = codecs[i]; - const AVCodec *codec = NULL; + const AVCodec *codec; + void *iter = NULL; - while ((codec = next_codec_for_id(desc->id, codec, encoder))) { + while ((codec = next_codec_for_id(desc->id, &iter, encoder))) { av_log(NULL, AV_LOG_STDERR, " %c", get_media_type_char(desc->type)); av_log(NULL, AV_LOG_STDERR, (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) ? "F" : "."); av_log(NULL, AV_LOG_STDERR, (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) ? 
"S" : "."); @@ -1874,9 +1872,10 @@ static void show_help_codec(const char *name, int encoder) if (codec) print_codec(codec); else if ((desc = avcodec_descriptor_get_by_name(name))) { + void *iter = NULL; int printed = 0; - while ((codec = next_codec_for_id(desc->id, codec, encoder))) { + while ((codec = next_codec_for_id(desc->id, &iter, encoder))) { printed = 1; print_codec(codec); } @@ -1911,6 +1910,24 @@ static void show_help_demuxer(const char *name) show_help_children(fmt->priv_class, AV_OPT_FLAG_DECODING_PARAM); } +static void show_help_protocol(const char *name) +{ + const AVClass *proto_class; + + if (!name) { + av_log(NULL, AV_LOG_ERROR, "No protocol name specified.\n"); + return; + } + + proto_class = avio_protocol_get_class(name); + if (!proto_class) { + av_log(NULL, AV_LOG_ERROR, "Unknown protocol '%s'.\n", name); + return; + } + + show_help_children(proto_class, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM); +} + static void show_help_muxer(const char *name) { const AVCodecDescriptor *desc; @@ -2044,6 +2061,8 @@ int show_help(void *optctx, const char *opt, const char *arg) show_help_demuxer(par); } else if (!strcmp(topic, "muxer")) { show_help_muxer(par); + } else if (!strcmp(topic, "protocol")) { + show_help_protocol(par); #if CONFIG_AVFILTER } else if (!strcmp(topic, "filter")) { show_help_filter(par); @@ -2087,7 +2106,7 @@ FILE *get_preset_file(char *filename, size_t filename_size, av_strlcpy(filename, preset_name, filename_size); f = fopen(filename, "r"); } else { -#if HAVE_GETMODULEHANDLE +#if HAVE_GETMODULEHANDLE && defined(_WIN32) char datadir[MAX_PATH], *ls; base[2] = NULL; @@ -2240,7 +2259,7 @@ double get_rotation(AVStream *st) if (fabs(theta - 90*round(theta/90)) > 2) av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n" "If you want to help, upload a sample " - "of this file to ftp://upload.ffmpeg.org/incoming/ " + "of this file to https://streams.videolan.org/upload/ " "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)"); return theta; @@ -2337,7 +2356,7 @@ int show_sources(void *optctx, const char *opt, const char *arg) int ret = 0; int error_level = av_log_get_level(); - av_log_set_level(AV_LOG_ERROR); + av_log_set_level(AV_LOG_WARNING); if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0) goto fail; @@ -2375,7 +2394,7 @@ int show_sinks(void *optctx, const char *opt, const char *arg) int ret = 0; int error_level = av_log_get_level(); - av_log_set_level(AV_LOG_ERROR); + av_log_set_level(AV_LOG_WARNING); if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0) goto fail; diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c index 0840f9d..9520fd6 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c @@ -167,6 +167,7 @@ __thread int nb_frames_dup = 0; __thread unsigned dup_warning = 1000; __thread int nb_frames_drop = 0; __thread int64_t decode_error_stat[2]; +__thread unsigned nb_output_dumped = 0; __thread int want_sdp = 1; @@ -188,6 +189,11 @@ __thread int nb_output_files = 0; __thread FilterGraph **filtergraphs; __thread int nb_filtergraphs; +__thread int64_t last_time = -1; +__thread int64_t keyboard_last_time = 0; +__thread int first_report = 1; +__thread int qp_histogram[52]; + void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL; extern __thread int file_overwrite; @@ -202,6 +208,7 @@ extern int opt_progress(void *optctx, const char *opt, const char *arg); extern int opt_target(void *optctx, const char *opt, const char *arg); extern int opt_vsync(void *optctx, const char *opt, const char *arg); extern int opt_abort_on(void *optctx, const char *opt, const char *arg); +extern int opt_stats_period(void *optctx, const char *opt, const char *arg); extern int opt_qscale(void *optctx, const char *opt, const char *arg); extern int opt_profile(void *optctx, const char *opt, const char *arg); extern int opt_filter_complex(void *optctx, const char *opt, const char *arg); @@ -271,7 +278,7 @@ static int sub2video_get_blank_frame(InputStream *ist) ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; ist->sub2video.frame->format = AV_PIX_FMT_RGB32; - if ((ret = av_frame_get_buffer(frame, 32)) < 0) + if ((ret = av_frame_get_buffer(frame, 0)) < 0) return ret; memset(frame->data[0], 0, frame->height * frame->linesize[0]); return 0; @@ -326,7 +333,7 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts) } } -void sub2video_update(InputStream *ist, AVSubtitle *sub) +void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) { AVFrame *frame = ist->sub2video.frame; int8_t *dst; @@ -343,7 +350,12 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub) AV_TIME_BASE_Q, ist->st->time_base); num_rects = sub->num_rects; } else { - pts = ist->sub2video.end_pts; + /* If we are initializing the system, utilize current heartbeat + PTS as the start time, and show until the following subpicture + is received. Otherwise, utilize the previous subpicture's end time + as the fall-back value. */ + pts = ist->sub2video.initialize ? 
+ heartbeat_pts : ist->sub2video.end_pts; end_pts = INT64_MAX; num_rects = 0; } @@ -358,6 +370,7 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub) sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); sub2video_push_ref(ist, pts); ist->sub2video.end_pts = end_pts; + ist->sub2video.initialize = 0; } static void sub2video_heartbeat(InputStream *ist, int64_t pts) @@ -380,9 +393,11 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts) /* do not send the heartbeat frame if the subtitle is already ahead */ if (pts2 <= ist2->sub2video.last_pts) continue; - if (pts2 >= ist2->sub2video.end_pts || - (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX)) - sub2video_update(ist2, NULL); + if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) + /* if we have hit the end of the current displayed subpicture, + or if we need to initialize the system, update the + overlayed subpicture and its start/end times */ + sub2video_update(ist2, pts2 + 1, NULL); for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); if (nb_reqs) @@ -396,7 +411,7 @@ static void sub2video_flush(InputStream *ist) int ret; if (ist->sub2video.end_pts < INT64_MAX) - sub2video_update(ist, NULL); + sub2video_update(ist, INT64_MAX, NULL); for (i = 0; i < ist->nb_filters; i++) { ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); if (ret != AVERROR_EOF && ret < 0) @@ -425,6 +440,7 @@ static volatile int received_nb_signals = 0; __thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); __thread volatile int ffmpeg_exited = 0; __thread volatile int main_ffmpeg_return_code = 0; +__thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE; extern __thread volatile int longjmp_value; static void @@ -474,8 +490,30 @@ static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) } #endif +#ifdef __linux__ +#define SIGNAL(sig, func) \ + do { \ + action.sa_handler = func; \ + sigaction(sig, &action, NULL); \ + } while (0) +#else +#define SIGNAL(sig, func) \ + signal(sig, func) +#endif + void term_init(void) { +#if defined __linux__ + struct sigaction action = {0}; + action.sa_handler = sigterm_handler; + + /* block other interrupts while processing this one */ + sigfillset(&action.sa_mask); + + /* restart interruptible functions (i.e. 
don't fail with EINTR) */ + action.sa_flags = SA_RESTART; +#endif + #if HAVE_TERMIOS_H if (!run_as_daemon && stdin_interaction) { struct termios tty; @@ -595,32 +633,38 @@ static void ffmpeg_cleanup(int ret) FilterGraph *fg = filtergraphs[i]; avfilter_graph_free(&fg->graph); for (j = 0; j < fg->nb_inputs; j++) { - while (av_fifo_size(fg->inputs[j]->frame_queue)) { + InputFilter *ifilter = fg->inputs[j]; + struct InputStream *ist = ifilter->ist; + + while (av_fifo_size(ifilter->frame_queue)) { AVFrame *frame; - av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame, + av_fifo_generic_read(ifilter->frame_queue, &frame, sizeof(frame), NULL); av_frame_free(&frame); } - av_fifo_freep(&fg->inputs[j]->frame_queue); - if (fg->inputs[j]->ist->sub2video.sub_queue) { - while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) { + av_fifo_freep(&ifilter->frame_queue); + if (ist->sub2video.sub_queue) { + while (av_fifo_size(ist->sub2video.sub_queue)) { AVSubtitle sub; - av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue, + av_fifo_generic_read(ist->sub2video.sub_queue, &sub, sizeof(sub), NULL); avsubtitle_free(&sub); } - av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue); + av_fifo_freep(&ist->sub2video.sub_queue); } - av_buffer_unref(&fg->inputs[j]->hw_frames_ctx); - av_freep(&fg->inputs[j]->name); + av_buffer_unref(&ifilter->hw_frames_ctx); + av_freep(&ifilter->name); av_freep(&fg->inputs[j]); } av_freep(&fg->inputs); for (j = 0; j < fg->nb_outputs; j++) { - av_freep(&fg->outputs[j]->name); - av_freep(&fg->outputs[j]->formats); - av_freep(&fg->outputs[j]->channel_layouts); - av_freep(&fg->outputs[j]->sample_rates); + OutputFilter *ofilter = fg->outputs[j]; + + avfilter_inout_free(&ofilter->out_tmp); + av_freep(&ofilter->name); + av_freep(&ofilter->formats); + av_freep(&ofilter->channel_layouts); + av_freep(&ofilter->sample_rates); av_freep(&fg->outputs[j]); } av_freep(&fg->outputs); @@ -652,9 +696,7 @@ static void ffmpeg_cleanup(int ret) if (!ost) continue; - for (j = 0; j < ost->nb_bitstream_filters; j++) - av_bsf_free(&ost->bsf_ctx[j]); - av_freep(&ost->bsf_ctx); + av_bsf_free(&ost->bsf_ctx); av_frame_free(&ost->filtered_frame); av_frame_free(&ost->last_frame); @@ -730,7 +772,7 @@ static void ffmpeg_cleanup(int ret) av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n", (int) received_sigterm); } else if (cancelRequested(sessionId)) { - av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel signal.\n"); + av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n"); } else if (ret && atomic_load(&transcode_init_done)) { av_log(NULL, AV_LOG_INFO, "Conversion failed!\n"); } @@ -817,8 +859,13 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u AVPacket tmp_pkt = {0}; /* the muxer is not initialized yet, buffer the packet */ if (!av_fifo_space(ost->muxing_queue)) { - int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue), - ost->max_muxing_queue_size); + unsigned int are_we_over_size = + (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold; + int new_size = are_we_over_size ? 
+ FFMIN(2 * av_fifo_size(ost->muxing_queue), + ost->max_muxing_queue_size) : + 2 * av_fifo_size(ost->muxing_queue); + if (new_size <= av_fifo_size(ost->muxing_queue)) { av_log(NULL, AV_LOG_ERROR, "Too many packets buffered for output stream %d:%d.\n", @@ -833,6 +880,7 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u if (ret < 0) exit_program(1); av_packet_move_ref(&tmp_pkt, pkt); + ost->muxing_queue_data_size += tmp_pkt.size; av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL); return; } @@ -884,6 +932,8 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT); if (pkt->dts < max) { int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG; + if (exit_on_error) + loglevel = AV_LOG_ERROR; av_log(s, loglevel, "Non-monotonous DTS in output stream " "%d:%d; previous: %"PRId64", current: %"PRId64"; ", ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts); @@ -953,40 +1003,15 @@ static void output_packet(OutputFile *of, AVPacket *pkt, { int ret = 0; - /* apply the output bitstream filters, if any */ - if (ost->nb_bitstream_filters) { - int idx; - - ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt); - if (ret < 0) - goto finish; - - eof = 0; - idx = 1; - while (idx) { - /* get a packet from the previous filter up the chain */ - ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt); - if (ret == AVERROR(EAGAIN)) { - ret = 0; - idx--; - continue; - } else if (ret == AVERROR_EOF) { - eof = 1; - } else if (ret < 0) - goto finish; - - /* send it to the next filter down the chain or to the muxer */ - if (idx < ost->nb_bitstream_filters) { - ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt); + /* apply the output bitstream filters */ + if (ost->bsf_ctx) { + ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt); if (ret < 0) goto finish; - idx++; - eof = 0; - } else if (eof) - goto finish; - else + while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0) write_packet(of, pkt, ost, 0); - } + if (ret == AVERROR(EAGAIN)) + ret = 0; } else if (!eof) write_packet(of, pkt, ost, 0); @@ -1012,6 +1037,71 @@ static int check_recording_time(OutputStream *ost) return 1; } +static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame) +{ + double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision + AVCodecContext *enc = ost->enc_ctx; + if (!frame || frame->pts == AV_NOPTS_VALUE || + !enc || !ost->filter || !ost->filter->graph->graph) + goto early_exit; + + { + AVFilterContext *filter = ost->filter->filter; + + int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 
0 : of->start_time; + AVRational filter_tb = av_buffersink_get_time_base(filter); + AVRational tb = enc->time_base; + int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); + + tb.den <<= extra_bits; + float_pts = + av_rescale_q(frame->pts, filter_tb, tb) - + av_rescale_q(start_time, AV_TIME_BASE_Q, tb); + float_pts /= 1 << extra_bits; + // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers + float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); + + frame->pts = + av_rescale_q(frame->pts, filter_tb, enc->time_base) - + av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); + } + +early_exit: + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", + frame ? av_ts2str(frame->pts) : "NULL", + frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL", + float_pts, + enc ? enc->time_base.num : -1, + enc ? enc->time_base.den : -1); + } + + return float_pts; +} + +static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len); + +static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal) +{ + int ret = AVERROR_BUG; + char error[1024] = {0}; + + if (ost->initialized) + return 0; + + ret = init_output_stream(ost, frame, error, sizeof(error)); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", + ost->file_index, ost->index, error); + + if (fatal) + exit_program(1); + } + + return ret; +} + static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame) { @@ -1023,6 +1113,8 @@ static void do_audio_out(OutputFile *of, OutputStream *ost, pkt.data = NULL; pkt.size = 0; + adjust_frame_pts_to_encoder_tb(of, ost, frame); + if (!check_recording_time(ost)) return; @@ -1157,21 +1249,23 @@ static void do_subtitle_out(OutputFile *of, static void do_video_out(OutputFile *of, OutputStream *ost, - AVFrame *next_picture, - double sync_ipts) + AVFrame *next_picture) { int ret, format_video_sync; AVPacket pkt; AVCodecContext *enc = ost->enc_ctx; - AVCodecParameters *mux_par = ost->st->codecpar; AVRational frame_rate; int nb_frames, nb0_frames, i; double delta, delta0; double duration = 0; + double sync_ipts = AV_NOPTS_VALUE; int frame_size = 0; InputStream *ist = NULL; AVFilterContext *filter = ost->filter->filter; + init_output_stream_wrapper(ost, next_picture, 1); + sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture); + if (ost->source_index >= 0) ist = input_streams[ost->source_index]; @@ -1241,7 +1335,7 @@ static void do_video_out(OutputFile *of, av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0)); delta = duration; delta0 = 0; - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); } case VSYNC_CFR: // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c @@ -1252,18 +1346,18 @@ static void do_video_out(OutputFile *of, else if (delta > 1.1) { nb_frames = lrintf(delta); if (delta0 > 1.1) - nb0_frames = lrintf(delta0 - 0.6); + nb0_frames = llrintf(delta0 - 0.6); } break; case VSYNC_VFR: if (delta <= -0.6) nb_frames = 0; else if (delta > 0.6) - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); break; case VSYNC_DROP: case VSYNC_PASSTHROUGH: - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); break; default: av_assert0(0); @@ -1321,18 +1415,6 @@ static void do_video_out(OutputFile *of, if (!check_recording_time(ost)) return; - if 
(enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && - ost->top_field_first >= 0) - in_picture->top_field_first = !!ost->top_field_first; - - if (in_picture->interlaced_frame) { - if (enc->codec->id == AV_CODEC_ID_MJPEG) - mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; - else - mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT; - } else - mux_par->field_order = AV_FIELD_PROGRESSIVE; - in_picture->quality = enc->global_quality; in_picture->pict_type = 0; @@ -1370,7 +1452,8 @@ static void do_video_out(OutputFile *of, ost->forced_keyframes_expr_const_values[FKF_N] += 1; } else if ( ost->forced_keyframes && !strncmp(ost->forced_keyframes, "source", 6) - && in_picture->key_frame==1) { + && in_picture->key_frame==1 + && !i) { forced_keyframe = 1; } @@ -1504,8 +1587,6 @@ static void do_video_stats(OutputStream *ost, int frame_size) } } -static int init_output_stream(OutputStream *ost, char *error, int error_len); - static void finish_output_stream(OutputStream *ost) { OutputFile *of = output_files[ost->file_index]; @@ -1542,15 +1623,17 @@ static int reap_filters(int flush) continue; filter = ost->filter->filter; - if (!ost->initialized) { - char error[1024] = ""; - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); - } - } + /* + * Unlike video, with audio the audio frame size matters. + * Currently we are fully reliant on the lavfi filter chain to + * do the buffering deed for us, and thus the frame size parameter + * needs to be set accordingly. Where does one get the required + * frame size? From the initialized AVCodecContext of an audio + * encoder. Thus, if we have gotten to an audio stream, initialize + * the encoder earlier than receiving the first AVFrame. + */ + if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO) + init_output_stream_wrapper(ost, NULL, 1); if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) { return AVERROR(ENOMEM); @@ -1558,7 +1641,6 @@ static int reap_filters(int flush) filtered_frame = ost->filtered_frame; while (1) { - double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision ret = av_buffersink_get_frame_flags(filter, filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST); if (ret < 0) { @@ -1567,7 +1649,7 @@ static int reap_filters(int flush) "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret)); } else if (flush && ret == AVERROR_EOF) { if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO) - do_video_out(of, ost, NULL, AV_NOPTS_VALUE); + do_video_out(of, ost, NULL); } break; } @@ -1575,38 +1657,13 @@ static int reap_filters(int flush) av_frame_unref(filtered_frame); continue; } - if (filtered_frame->pts != AV_NOPTS_VALUE) { - int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 
0 : of->start_time; - AVRational filter_tb = av_buffersink_get_time_base(filter); - AVRational tb = enc->time_base; - int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); - - tb.den <<= extra_bits; - float_pts = - av_rescale_q(filtered_frame->pts, filter_tb, tb) - - av_rescale_q(start_time, AV_TIME_BASE_Q, tb); - float_pts /= 1 << extra_bits; - // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers - float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); - - filtered_frame->pts = - av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) - - av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); - } switch (av_buffersink_get_type(filter)) { case AVMEDIA_TYPE_VIDEO: if (!ost->frame_aspect_ratio.num) enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio; - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", - av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base), - float_pts, - enc->time_base.num, enc->time_base.den); - } - - do_video_out(of, ost, filtered_frame, float_pts); + do_video_out(of, ost, filtered_frame); break; case AVMEDIA_TYPE_AUDIO: if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) && @@ -1746,13 +1803,11 @@ static void print_final_stats(int64_t total_size) } } - static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time) { AVFormatContext *oc = NULL; AVCodecContext *enc = NULL; OutputStream *ost = NULL; - static int64_t last_time = -1; int64_t pts = INT64_MIN + 1; int vid, i; @@ -1764,22 +1819,11 @@ static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_ double bitrate = 0.0; double speed = 0.0; - // 1. calculate operation duration - if (!is_last_report) { - if (last_time == -1) { - last_time = cur_time; - return; - } - if ((cur_time - last_time) < 500000) { - return; - } - last_time = cur_time; - } float t = (cur_time-timer_start) / 1000000.0; oc = output_files[0]->ctx; - // 2. calculate size + // 1. calculate size total_size = avio_size(oc->pb); if (total_size <= 0) { total_size = avio_tell(oc->pb); @@ -1792,20 +1836,20 @@ static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_ if (!ost->stream_copy) { - // 3. extract quality + // 2. extract quality quality = ost->quality / (float) FF_QP2LAMBDA; } if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { - // 4. extract frame number + // 3. extract frame number frame_number = ost->frame_number; - // 5. calculate fps + // 4. calculate fps fps = t > 1 ? frame_number / t : 0; } - // 6. calculate time + // 5. calculate time if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st), ost->st->time_base, AV_TIME_BASE_Q)); @@ -1813,10 +1857,10 @@ static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_ vid = 1; } - // 7. calculate time, with microseconds to milliseconds conversion + // 6. calculate time, with microseconds to milliseconds conversion seconds = FFABS(pts) / 1000; - // 8. calculating kbit/s value + // 7. calculating kbit/s value bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1; // 9. 
calculate processing speed = processed stream duration/operation duration @@ -1839,8 +1883,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti double bitrate; double speed; int64_t pts = INT64_MIN + 1; - static int64_t last_time = -1; - static int qp_histogram[52]; int hours, mins, secs, us; const char *hours_sign; int ret; @@ -1849,9 +1891,9 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti if (!is_last_report) { if (last_time == -1) { last_time = cur_time; - return; } - if ((cur_time - last_time) < 500000) + if (((cur_time - last_time) < stats_period && !first_report) || + (first_report && nb_output_dumped < nb_output_files)) return; last_time = cur_time; } @@ -1939,9 +1981,17 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti vid = 1; } /* compute min output value */ - if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) + if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) { pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st), ost->st->time_base, AV_TIME_BASE_Q)); + if (copy_ts) { + if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) + copy_ts_first_pts = pts; + if (copy_ts_first_pts != AV_NOPTS_VALUE) + pts -= copy_ts_first_pts; + } + } + if (is_last_report) nb_frames_drop += ost->last_dropped; } @@ -2025,6 +2075,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti } } + first_report = 0; + if (is_last_report) print_final_stats(total_size); } @@ -2058,7 +2110,6 @@ static void flush_encoders(void) // Maybe we should just let encoding fail instead. if (!ost->initialized) { FilterGraph *fg = ost->filter->graph; - char error[1024] = ""; av_log(NULL, AV_LOG_WARNING, "Finishing stream %d:%d without any data written to it.\n", @@ -2084,16 +2135,8 @@ static void flush_encoders(void) finish_output_stream(ost); } - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); + init_output_stream_wrapper(ost, NULL, 1); } - } - - if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1) - continue; if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) continue; @@ -2235,20 +2278,20 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; - if (pkt->dts == AV_NOPTS_VALUE) + if (pkt->dts == AV_NOPTS_VALUE) { opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); - else - opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); - opkt.dts -= ost_tb_start_time; - - if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) { + } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size); if(!duration) duration = ist->dec_ctx->frame_size; - opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts, - (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last, - ost->mux_timebase) - ost_tb_start_time; - } + opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts, + (AVRational){1, ist->dec_ctx->sample_rate}, duration, + &ist->filter_in_rescale_delta_last, ost->mux_timebase); + /* dts will be set immediately afterwards to what pts is now */ + opkt.pts = 
opkt.dts - ost_tb_start_time; + } else + opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); + opkt.dts -= ost_tb_start_time; opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); @@ -2576,7 +2619,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ av_log(ist->dec_ctx, AV_LOG_WARNING, "video_delay is larger in decoder than demuxer %d > %d.\n" "If you want to help, upload a sample " - "of this file to ftp://upload.ffmpeg.org/incoming/ " + "of this file to https://streams.videolan.org/upload/ " "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n", ist->dec_ctx->has_b_frames, ist->st->codecpar->video_delay); @@ -2698,7 +2741,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, return ret; if (ist->sub2video.frame) { - sub2video_update(ist, &subtitle); + sub2video_update(ist, INT64_MIN, &subtitle); } else if (ist->nb_filters) { if (!ist->sub2video.sub_queue) ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle)); @@ -2967,7 +3010,7 @@ static void print_sdp(void) if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename); } else { - avio_printf(sdp_pb, "SDP:\n%s", sdp); + avio_print(sdp_pb, sdp); avio_closep(&sdp_pb); av_freep(&sdp_filename); } @@ -3089,7 +3132,9 @@ static int init_input_stream(int ist_index, char *error, int error_len) ist->dec_ctx->opaque = ist; ist->dec_ctx->get_format = get_format; ist->dec_ctx->get_buffer2 = get_buffer; +#if LIBAVCODEC_VERSION_MAJOR < 60 ist->dec_ctx->thread_safe_callbacks = 1; +#endif av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0); if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE && @@ -3175,6 +3220,7 @@ static int check_init_output_file(OutputFile *of, int file_index) of->header_written = 1; av_dump_format(of->ctx, file_index, of->ctx->url, 1); + nb_output_dumped++; if (sdp_filename || want_sdp) print_sdp(); @@ -3190,6 +3236,7 @@ static int check_init_output_file(OutputFile *of, int file_index) while (av_fifo_size(ost->muxing_queue)) { AVPacket pkt; av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL); + ost->muxing_queue_data_size -= pkt.size; write_packet(of, &pkt, ost, 1); } } @@ -3199,31 +3246,25 @@ static int check_init_output_file(OutputFile *of, int file_index) static int init_output_bsfs(OutputStream *ost) { - AVBSFContext *ctx; - int i, ret; + AVBSFContext *ctx = ost->bsf_ctx; + int ret; - if (!ost->nb_bitstream_filters) + if (!ctx) return 0; - for (i = 0; i < ost->nb_bitstream_filters; i++) { - ctx = ost->bsf_ctx[i]; - - ret = avcodec_parameters_copy(ctx->par_in, - i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar); + ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar); if (ret < 0) return ret; - ctx->time_base_in = i ? 
ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base; + ctx->time_base_in = ost->st->time_base; ret = av_bsf_init(ctx); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n", - ost->bsf_ctx[i]->filter->name); + ctx->filter->name); return ret; } - } - ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1]; ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out); if (ret < 0) return ret; @@ -3475,7 +3516,7 @@ static void init_encoder_time_base(OutputStream *ost, AVRational default_time_ba enc_ctx->time_base = default_time_base; } -static int init_output_stream_encode(OutputStream *ost) +static int init_output_stream_encode(OutputStream *ost, AVFrame *frame) { InputStream *ist = get_input_stream(ost); AVCodecContext *enc_ctx = ost->enc_ctx; @@ -3559,10 +3600,6 @@ static int init_output_stream_encode(OutputStream *ost) av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n" "Please consider specifying a lower framerate, a different muxer or -vsync 2\n"); } - for (j = 0; j < ost->forced_kf_count; j++) - ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j], - AV_TIME_BASE_Q, - enc_ctx->time_base); enc_ctx->width = av_buffersink_get_w(ost->filter->filter); enc_ctx->height = av_buffersink_get_h(ost->filter->filter); @@ -3576,6 +3613,14 @@ static int init_output_stream_encode(OutputStream *ost) enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); + if (frame) { + enc_ctx->color_range = frame->color_range; + enc_ctx->color_primaries = frame->color_primaries; + enc_ctx->color_trc = frame->color_trc; + enc_ctx->colorspace = frame->colorspace; + enc_ctx->chroma_sample_location = frame->chroma_location; + } + enc_ctx->framerate = ost->frame_rate; ost->st->avg_frame_rate = ost->frame_rate; @@ -3593,6 +3638,20 @@ static int init_output_stream_encode(OutputStream *ost) enc_ctx->field_order = AV_FIELD_TT; } + if (frame) { + if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && + ost->top_field_first >= 0) + frame->top_field_first = !!ost->top_field_first; + + if (frame->interlaced_frame) { + if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG) + enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; + else + enc_ctx->field_order = frame->top_field_first ? 
AV_FIELD_TB:AV_FIELD_BT; + } else + enc_ctx->field_order = AV_FIELD_PROGRESSIVE; + } + if (ost->forced_keyframes) { if (!strncmp(ost->forced_keyframes, "expr:", 5)) { ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5, @@ -3633,7 +3692,7 @@ static int init_output_stream_encode(OutputStream *ost) return 0; } -static int init_output_stream(OutputStream *ost, char *error, int error_len) +static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len) { int ret = 0; @@ -3642,7 +3701,7 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) AVCodecContext *dec = NULL; InputStream *ist; - ret = init_output_stream_encode(ost); + ret = init_output_stream_encode(ost, frame); if (ret < 0) return ret; @@ -3664,21 +3723,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) !av_dict_get(ost->encoder_opts, "ab", NULL, 0)) av_dict_set(&ost->encoder_opts, "b", "128000", 0); - if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) && - ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format == - av_buffersink_get_format(ost->filter->filter)) { - ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter)); - if (!ost->enc_ctx->hw_frames_ctx) - return AVERROR(ENOMEM); - } else { - ret = hw_device_setup_for_encode(ost); - if (ret < 0) { - snprintf(error, error_len, "Device setup failed for " - "encoder on output stream #%d:%d : %s", - ost->file_index, ost->index, av_err2str(ret)); - return ret; - } + ret = hw_device_setup_for_encode(ost); + if (ret < 0) { + snprintf(error, error_len, "Device setup failed for " + "encoder on output stream #%d:%d : %s", + ost->file_index, ost->index, av_err2str(ret)); + return ret; } + if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) { int input_props = 0, output_props = 0; AVCodecDescriptor const *input_descriptor = @@ -3722,12 +3774,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) "Error initializing the output stream codec context.\n"); exit_program(1); } - /* - * FIXME: ost->st->codec should't be needed here anymore. 
- */ - ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx); - if (ret < 0) - return ret; if (ost->enc_ctx->nb_coded_side_data) { int i; @@ -3754,12 +3800,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) int i; for (i = 0; i < ist->st->nb_side_data; i++) { AVPacketSideData *sd = &ist->st->side_data[i]; - uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); - if (!dst) - return AVERROR(ENOMEM); - memcpy(dst, sd->data, sd->size); - if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) - av_display_rotation_set((uint32_t *)dst, 0); + if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { + uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); + if (!dst) + return AVERROR(ENOMEM); + memcpy(dst, sd->data, sd->size); + if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) + av_display_rotation_set((uint32_t *)dst, 0); + } } } @@ -3770,8 +3818,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) // copy estimated duration as a hint to the muxer if (ost->st->duration <= 0 && ist && ist->st->duration > 0) ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); - - ost->st->codec->codec= ost->enc_ctx->codec; } else if (ost->stream_copy) { ret = init_output_stream_streamcopy(ost); if (ret < 0) @@ -3884,13 +3930,22 @@ static int transcode_init(void) goto dump_format; } - /* open each encoder */ + /* + * initialize stream copy and subtitle/data streams. + * Encoded AVFrame based streams will get initialized as follows: + * - when the first AVFrame is received in do_video_out + * - just before the first AVFrame is received in either transcode_step + * or reap_filters due to us requiring the filter chain buffer sink + * to be configured with the correct audio frame size, which is only + * known after the encoder is initialized. 
+ */ for (i = 0; i < nb_output_streams; i++) { - // skip streams fed from filtergraphs until we have a frame for them - if (output_streams[i]->filter) + if (!output_streams[i]->stream_copy && + (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO || + output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) continue; - ret = init_output_stream(output_streams[i], error, sizeof(error)); + ret = init_output_stream_wrapper(output_streams[i], NULL, 0); if (ret < 0) goto dump_format; } @@ -4088,13 +4143,12 @@ static void set_tty_echo(int on) static int check_keyboard_interaction(int64_t cur_time) { int i, ret, key; - static int64_t last_time; if (received_nb_signals) return AVERROR_EXIT; /* read_key() returns 0 on EOF */ - if(cur_time - last_time >= 100000 && !run_as_daemon){ + if(cur_time - keyboard_last_time >= 100000 && !run_as_daemon){ key = read_key(); - last_time = cur_time; + keyboard_last_time = cur_time; }else key = -1; if (key == 'q') @@ -4154,13 +4208,9 @@ static int check_keyboard_interaction(int64_t cur_time) if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { - debug = input_streams[0]->st->codec->debug<<1; + debug = input_streams[0]->dec_ctx->debug << 1; if(!debug) debug = 1; - while(debug & (FF_DEBUG_DCT_COEFF -#if FF_API_DEBUG_MV - |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE -#endif - )) //unsupported, would just crash + while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash debug += debug; }else{ char buf[32]; @@ -4177,7 +4227,7 @@ static int check_keyboard_interaction(int64_t cur_time) fprintf(stderr,"error parsing debug value\n"); } for(i=0;ist->codec->debug = debug; + input_streams[i]->dec_ctx->debug = debug; } for(i=0;ithread_queue_size < 0) + f->thread_queue_size = (nb_input_files > 1 ? 8 : 0); + if (!f->thread_queue_size) return 0; if (f->ctx->pb ? 
!f->ctx->pb->seekable :
@@ -4327,7 +4379,7 @@
     }
 #if HAVE_THREADS
-    if (nb_input_files > 1)
+    if (f->thread_queue_size)
         return get_input_packet_mt(f, pkt);
 #endif
     return av_read_frame(f->ctx, pkt);
@@ -4418,6 +4470,7 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
             ifile->time_base = ist->st->time_base;
         /* the total duration of the stream, max_pts - min_pts is
          * the duration of the stream without the last frame */
+        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
             duration += ist->max_pts - ist->min_pts;
         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base, ifile->time_base);
@@ -4445,6 +4498,7 @@ static int process_input(int file_index)
     int ret, thread_ret, i, j;
     int64_t duration;
     int64_t pkt_dts;
+    int disable_discontinuity_correction = copy_ts;
     is = ifile->ctx;
     ret = get_input_packet(ifile, &pkt);
@@ -4646,10 +4700,20 @@ static int process_input(int file_index)
             pkt.dts += duration;
         pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+
+        if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+            (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
+            int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
+                                                ist->st->time_base, AV_TIME_BASE_Q,
+                                                AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+            if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
+                disable_discontinuity_correction = 0;
+        }
+
         if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
              ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
             pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
-            !copy_ts) {
+            !disable_discontinuity_correction) {
             int64_t delta = pkt_dts - ist->next_dts;
             if (is->iformat->flags & AVFMT_TS_DISCONT) {
                 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
@@ -4787,15 +4851,30 @@ static int transcode_step(void)
     }
     if (ost->filter && ost->filter->graph->graph) {
-        if (!ost->initialized) {
-            char error[1024] = {0};
-            ret = init_output_stream(ost, error, sizeof(error));
-            if (ret < 0) {
-                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
-                       ost->file_index, ost->index, error);
-                exit_program(1);
-            }
-        }
+        /*
+         * Similar case to the early audio initialization in reap_filters.
+         * Audio is special in ffmpeg.c currently as we depend on lavfi's
+         * audio frame buffering/creation to get the output audio frame size
+         * in samples correct. The audio frame size for the filter chain is
+         * configured during the output stream initialization.
+         *
+         * Apparently avfilter_graph_request_oldest (called in
+         * transcode_from_filter just down the line) peeks. Peeking already
+         * puts one frame "ready to be given out", which means that any
+         * update in filter buffer sink configuration afterwards will not
+         * help us. And yes, even if it would be utilized,
+         * av_buffersink_get_samples is affected, as it internally utilizes
+         * the same early exit for peeked frames.
+         *
+         * In other words, if avfilter_graph_request_oldest would not make
+         * further filter chain configuration or usage of
+         * av_buffersink_get_samples useless (by just causing the return
+         * of the peeked AVFrame as-is), we could get rid of this additional
+         * early encoder initialization.
+         */
+        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
+            init_output_stream_wrapper(ost, NULL, 1);
+
         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
             return ret;
         if (!ist)
@@ -4923,6 +5002,10 @@ static int transcode(void)
             av_freep(&ost->enc_ctx->stats_in);
         }
         total_packets_written += ost->packets_written;
+        if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
+            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
+            exit_program(1);
+        }
     }
     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
@@ -4940,7 +5023,6 @@
         }
     }
-    av_buffer_unref(&hw_device_ctx);
     hw_device_free_all();
     /* finished ! */
@@ -5029,11 +5111,13 @@ void ffmpeg_var_cleanup() {
     received_sigterm = 0;
     received_nb_signals = 0;
     ffmpeg_exited = 0;
+    copy_ts_first_pts = AV_NOPTS_VALUE;
     run_as_daemon = 0;
     nb_frames_dup = 0;
     dup_warning = 1000;
     nb_frames_drop = 0;
+    nb_output_dumped = 0;
     want_sdp = 1;
@@ -5051,6 +5135,10 @@ void ffmpeg_var_cleanup() {
     filtergraphs = NULL;
     nb_filtergraphs = 0;
+
+    last_time = -1;
+    keyboard_last_time = 0;
+    first_report = 1;
 }
 void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double))
@@ -5216,6 +5304,10 @@ int ffmpeg_execute(int argc, char **argv)
       "shift input timestamps to start at 0 when using copyts" },
     { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
       "copy input stream time base when stream copying", "mode" },
+    { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
+      "shift input timestamps to start at 0 when using copyts" },
+    { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
+      "copy input stream time base when stream copying", "mode" },
     { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(shortest) },
       "finish encoding within shortest input" },
@@ -5267,8 +5359,12 @@ int ffmpeg_execute(int argc, char **argv)
       "create a complex filtergraph", "graph_description" },
     { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
       "read complex filtergraph description from a file", "filename" },
+    { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
+      "enable automatic conversion filters globally" },
     { "stats", OPT_BOOL, { &print_stats },
       "print progress report during encoding", },
+    { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
+      "set the period at which ffmpeg updates stats and -progress output", "time" },
     { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_attach },
       "add an attachment to the output file", "filename" },
@@ -5390,6 +5486,9 @@ int ffmpeg_execute(int argc, char **argv)
     { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
                     OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
       "automatically insert correct rotate filters" },
+    { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
+                   OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
+      "automatically insert a scale filter at the end of the filter graph" },
     /* audio options */
     { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
       "set the number of audio frames to output", "number" },
@@ -5476,6 +5575,8 @@ int ffmpeg_execute(int argc, char **argv)
     { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
       "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
+    { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT
| OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) }, + "set the threshold after which max_muxing_queue_size is taken into account", "bytes" }, /* data codec support */ { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec }, diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.h b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.h index 0c82515..599d7c7 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.h +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.h @@ -84,7 +84,6 @@ enum HWAccelID { HWACCEL_GENERIC, HWACCEL_VIDEOTOOLBOX, HWACCEL_QSV, - HWACCEL_CUVID, }; typedef struct HWAccel { @@ -239,6 +238,8 @@ typedef struct OptionsContext { int nb_passlogfiles; SpecifierOpt *max_muxing_queue_size; int nb_max_muxing_queue_size; + SpecifierOpt *muxing_queue_data_threshold; + int nb_muxing_queue_data_threshold; SpecifierOpt *guess_layout_max; int nb_guess_layout_max; SpecifierOpt *apad; @@ -253,6 +254,8 @@ typedef struct OptionsContext { int nb_time_bases; SpecifierOpt *enc_time_bases; int nb_enc_time_bases; + SpecifierOpt *autoscale; + int nb_autoscale; } OptionsContext; typedef struct InputFilter { @@ -372,6 +375,7 @@ typedef struct InputStream { AVFifoBuffer *sub_queue; ///< queue of AVSubtitle* before filter init AVFrame *frame; int w, h; + unsigned int initialize; ///< marks if sub2video_update should force an initialization } sub2video; int dr1; @@ -454,6 +458,7 @@ enum forced_keyframes_const { }; #define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0) +#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM (1 << 1) extern const char *const forced_keyframes_const_names[]; @@ -482,8 +487,7 @@ typedef struct OutputStream { AVRational mux_timebase; AVRational enc_timebase; - int nb_bitstream_filters; - AVBSFContext **bsf_ctx; + AVBSFContext *bsf_ctx; AVCodecContext *enc_ctx; AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */ @@ -502,6 +506,7 @@ typedef struct OutputStream { int force_fps; int top_field_first; int rotate_overridden; + int autoscale; double rotate_override_value; AVRational frame_aspect_ratio; @@ -567,6 +572,15 @@ typedef struct OutputStream { /* the packets are buffered here until the muxer is ready to be initialized */ AVFifoBuffer *muxing_queue; + /* + * The size of the AVPackets' buffers in queue. + * Updated when a packet is either pushed or pulled from the queue. 
+ */ + size_t muxing_queue_data_size; + + /* Threshold after which max_muxing_queue_size will be in effect */ + size_t muxing_queue_data_threshold; + /* packet picture type */ int pict_type; @@ -623,6 +637,7 @@ extern __thread int debug_ts; extern __thread int exit_on_error; extern __thread int abort_on_flags; extern __thread int print_stats; +extern __thread int64_t stats_period; extern __thread int qp_hist; extern __thread int stdin_interaction; extern __thread int frame_bits_per_raw_sample; @@ -633,11 +648,11 @@ extern __thread char *videotoolbox_pixfmt; extern __thread int filter_nbthreads; extern __thread int filter_complex_nbthreads; extern __thread int vstats_version; +extern __thread int auto_conversion_filters; extern __thread const AVIOInterruptCB int_cb; extern const HWAccel hwaccels[]; -extern __thread AVBufferRef *hw_device_ctx; #if CONFIG_QSV extern __thread char *qsv_device; #endif @@ -656,8 +671,8 @@ void assert_avoptions(AVDictionary *m); int guess_input_channel_layout(InputStream *ist); -enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *avctx, AVCodec *codec, enum AVPixelFormat target); -void choose_sample_fmt(AVStream *st, AVCodec *codec); +enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *avctx, const AVCodec *codec, enum AVPixelFormat target); +void choose_sample_fmt(AVStream *st, const AVCodec *codec); int configure_filtergraph(FilterGraph *fg); int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out); @@ -667,7 +682,7 @@ int filtergraph_is_simple(FilterGraph *fg); int init_simple_filtergraph(InputStream *ist, OutputStream *ost); int init_complex_filtergraph(FilterGraph *fg); -void sub2video_update(InputStream *ist, AVSubtitle *sub); +void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub); int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame); @@ -675,7 +690,6 @@ int ffmpeg_parse_options(int argc, char **argv); int videotoolbox_init(AVCodecContext *s); int qsv_init(AVCodecContext *s); -int cuvid_init(AVCodecContext *s); HWDevice *hw_device_get_by_name(const char *name); int hw_device_init_from_string(const char *arg, HWDevice **dev); @@ -683,6 +697,7 @@ void hw_device_free_all(void); int hw_device_setup_for_decode(InputStream *ist); int hw_device_setup_for_encode(OutputStream *ost); +int hw_device_setup_for_filter(FilterGraph *fg); int hwaccel_decode_init(AVCodecContext *avctx); @@ -698,6 +713,7 @@ int opt_progress(void *optctx, const char *opt, const char *arg); int opt_target(void *optctx, const char *opt, const char *arg); int opt_vsync(void *optctx, const char *opt, const char *arg); int opt_abort_on(void *optctx, const char *opt, const char *arg); +int opt_stats_period(void *optctx, const char *opt, const char *arg); int opt_qscale(void *optctx, const char *opt, const char *arg); int opt_profile(void *optctx, const char *opt, const char *arg); int opt_filter_complex(void *optctx, const char *opt, const char *arg); diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_filter.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_filter.c index 5877d76..70a4e98 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_filter.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_filter.c @@ -68,7 +68,7 @@ static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodec } } -enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat 
target) +enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const AVCodec *codec, enum AVPixelFormat target) { if (codec && codec->pix_fmts) { const enum AVPixelFormat *p = codec->pix_fmts; @@ -98,7 +98,7 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCod return target; } -void choose_sample_fmt(AVStream *st, AVCodec *codec) +void choose_sample_fmt(AVStream *st, const AVCodec *codec) { if (codec && codec->sample_fmts) { const enum AVSampleFormat *p = codec->sample_fmts; @@ -107,7 +107,8 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec) break; } if (*p == -1) { - if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0])) + const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id); + if(desc && (desc->props & AV_CODEC_PROP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0])) av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n"); if(av_get_sample_fmt_name(st->codecpar->format)) av_log(NULL, AV_LOG_WARNING, @@ -477,7 +478,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, if (ret < 0) return ret; - if (ofilter->width || ofilter->height) { + if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) { char args[255]; AVFilterContext *filter; AVDictionaryEntry *e = NULL; @@ -748,6 +749,12 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter) return AVERROR(ENOMEM); ist->sub2video.last_pts = INT64_MIN; ist->sub2video.end_pts = INT64_MIN; + + /* sub2video structure has been (re-)initialized. + Mark it as such so that the system will be + initialized with the first received heartbeat. */ + ist->sub2video.initialize = 1; + return 0; } @@ -794,10 +801,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC); av_bprintf(&args, "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:" - "pixel_aspect=%d/%d:sws_param=flags=%d", + "pixel_aspect=%d/%d", ifilter->width, ifilter->height, ifilter->format, - tb.num, tb.den, sar.num, sar.den, - SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0)); + tb.num, tb.den, sar.num, sar.den); if (fr.num && fr.den) av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den); snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index, @@ -1064,17 +1070,9 @@ int configure_filtergraph(FilterGraph *fg) if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0) goto fail; - if (filter_hw_device || hw_device_ctx) { - AVBufferRef *device = filter_hw_device ? 
filter_hw_device->device_ref - : hw_device_ctx; - for (i = 0; i < fg->graph->nb_filters; i++) { - fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device); - if (!fg->graph->filters[i]->hw_device_ctx) { - ret = AVERROR(ENOMEM); - goto fail; - } - } - } + ret = hw_device_setup_for_filter(fg); + if (ret < 0) + goto fail; if (simple && (!inputs || inputs->next || !outputs || outputs->next)) { const char *num_inputs; @@ -1114,6 +1112,8 @@ int configure_filtergraph(FilterGraph *fg) configure_output_filter(fg, fg->outputs[i], cur); avfilter_inout_free(&outputs); + if (!auto_conversion_filters) + avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE); if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0) goto fail; @@ -1177,7 +1177,7 @@ int configure_filtergraph(FilterGraph *fg) while (av_fifo_size(ist->sub2video.sub_queue)) { AVSubtitle tmp; av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL); - sub2video_update(ist, &tmp); + sub2video_update(ist, INT64_MIN, &tmp); avsubtitle_free(&tmp); } } diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_hw.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_hw.c index 019ee17..283474c 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_hw.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_hw.c @@ -28,6 +28,8 @@ #include #include "libavutil/avstring.h" +#include "libavutil/pixdesc.h" +#include "libavfilter/buffersink.h" #include "fftools_ffmpeg.h" @@ -425,18 +427,57 @@ int hw_device_setup_for_decode(InputStream *ist) int hw_device_setup_for_encode(OutputStream *ost) { - HWDevice *dev; + const AVCodecHWConfig *config; + HWDevice *dev = NULL; + AVBufferRef *frames_ref = NULL; + int i; + + if (ost->filter) { + frames_ref = av_buffersink_get_hw_frames_ctx(ost->filter->filter); + if (frames_ref && + ((AVHWFramesContext*)frames_ref->data)->format == + ost->enc_ctx->pix_fmt) { + // Matching format, will try to use hw_frames_ctx. + } else { + frames_ref = NULL; + } + } + + for (i = 0;; i++) { + config = avcodec_get_hw_config(ost->enc, i); + if (!config) + break; + + if (frames_ref && + config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX && + (config->pix_fmt == AV_PIX_FMT_NONE || + config->pix_fmt == ost->enc_ctx->pix_fmt)) { + av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input " + "frames context (format %s) with %s encoder.\n", + av_get_pix_fmt_name(ost->enc_ctx->pix_fmt), + ost->enc->name); + ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref); + if (!ost->enc_ctx->hw_frames_ctx) + return AVERROR(ENOMEM); + return 0; + } + + if (!dev && + config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) + dev = hw_device_get_by_type(config->device_type); + } - dev = hw_device_match_by_codec(ost->enc); if (dev) { + av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s " + "(type %s) with %s encoder.\n", dev->name, + av_hwdevice_get_type_name(dev->type), ost->enc->name); ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref); if (!ost->enc_ctx->hw_device_ctx) return AVERROR(ENOMEM); - return 0; } else { // No device required, or no device available. - return 0; } + return 0; } static int hwaccel_retrieve_data(AVCodecContext *avctx, AVFrame *input) @@ -489,3 +530,31 @@ int hwaccel_decode_init(AVCodecContext *avctx) return 0; } + +int hw_device_setup_for_filter(FilterGraph *fg) +{ + HWDevice *dev; + int i; + + // If the user has supplied exactly one hardware device then just + // give it straight to every filter for convenience. 
If more than + one device is available then the user needs to pick one explicitly + with the filter_hw_device option. + if (filter_hw_device) + dev = filter_hw_device; + else if (nb_hw_devices == 1) + dev = hw_devices[0]; + else + dev = NULL; + + if (dev) { + for (i = 0; i < fg->graph->nb_filters; i++) { + fg->graph->filters[i]->hw_device_ctx = + av_buffer_ref(dev->device_ref); + if (!fg->graph->filters[i]->hw_device_ctx) + return AVERROR(ENOMEM); + } + } + + return 0; +} diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_opt.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_opt.c index 35d8ac5..5618a96 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_opt.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg_opt.c @@ -62,16 +62,82 @@ #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass" +#define SPECIFIER_OPT_FMT_str "%s" +#define SPECIFIER_OPT_FMT_i "%i" +#define SPECIFIER_OPT_FMT_i64 "%"PRId64 +#define SPECIFIER_OPT_FMT_ui64 "%"PRIu64 +#define SPECIFIER_OPT_FMT_f "%f" +#define SPECIFIER_OPT_FMT_dbl "%lf" + +static const char *const opt_name_codec_names[] = {"c", "codec", "acodec", "vcodec", "scodec", "dcodec", NULL}; +static const char *const opt_name_audio_channels[] = {"ac", NULL}; +static const char *const opt_name_audio_sample_rate[] = {"ar", NULL}; +static const char *const opt_name_frame_rates[] = {"r", NULL}; +static const char *const opt_name_frame_sizes[] = {"s", NULL}; +static const char *const opt_name_frame_pix_fmts[] = {"pix_fmt", NULL}; +static const char *const opt_name_ts_scale[] = {"itsscale", NULL}; +static const char *const opt_name_hwaccels[] = {"hwaccel", NULL}; +static const char *const opt_name_hwaccel_devices[] = {"hwaccel_device", NULL}; +static const char *const opt_name_hwaccel_output_formats[] = {"hwaccel_output_format", NULL}; +static const char *const opt_name_autorotate[] = {"autorotate", NULL}; +static const char *const opt_name_autoscale[] = {"autoscale", NULL}; +static const char *const opt_name_max_frames[] = {"frames", "aframes", "vframes", "dframes", NULL}; +static const char *const opt_name_bitstream_filters[] = {"bsf", "absf", "vbsf", NULL}; +static const char *const opt_name_codec_tags[] = {"tag", "atag", "vtag", "stag", NULL}; +static const char *const opt_name_sample_fmts[] = {"sample_fmt", NULL}; +static const char *const opt_name_qscale[] = {"q", "qscale", NULL}; +static const char *const opt_name_forced_key_frames[] = {"forced_key_frames", NULL}; +static const char *const opt_name_force_fps[] = {"force_fps", NULL}; +static const char *const opt_name_frame_aspect_ratios[] = {"aspect", NULL}; +static const char *const opt_name_rc_overrides[] = {"rc_override", NULL}; +static const char *const opt_name_intra_matrices[] = {"intra_matrix", NULL}; +static const char *const opt_name_inter_matrices[] = {"inter_matrix", NULL}; +static const char *const opt_name_chroma_intra_matrices[] = {"chroma_intra_matrix", NULL}; +static const char *const opt_name_top_field_first[] = {"top", NULL}; +static const char *const opt_name_presets[] = {"pre", "apre", "vpre", "spre", NULL}; +static const char *const opt_name_copy_initial_nonkeyframes[] = {"copyinkfr", NULL}; +static const char *const opt_name_copy_prior_start[] = {"copypriorss", NULL}; +static const char *const opt_name_filters[] = {"filter", "af", "vf", NULL}; +static const char *const opt_name_filter_scripts[] = {"filter_script", NULL}; +static const char *const opt_name_reinit_filters[] = {"reinit_filter", NULL}; +static const 
char *const opt_name_fix_sub_duration[] = {"fix_sub_duration", NULL}; +static const char *const opt_name_canvas_sizes[] = {"canvas_size", NULL}; +static const char *const opt_name_pass[] = {"pass", NULL}; +static const char *const opt_name_passlogfiles[] = {"passlogfile", NULL}; +static const char *const opt_name_max_muxing_queue_size[] = {"max_muxing_queue_size", NULL}; +static const char *const opt_name_muxing_queue_data_threshold[] = {"muxing_queue_data_threshold", NULL}; +static const char *const opt_name_guess_layout_max[] = {"guess_layout_max", NULL}; +static const char *const opt_name_apad[] = {"apad", NULL}; +static const char *const opt_name_discard[] = {"discard", NULL}; +static const char *const opt_name_disposition[] = {"disposition", NULL}; +static const char *const opt_name_time_bases[] = {"time_base", NULL}; +static const char *const opt_name_enc_time_bases[] = {"enc_time_base", NULL}; + +#define WARN_MULTIPLE_OPT_USAGE(name, type, so, st)\ +{\ + char namestr[128] = "";\ + const char *spec = so->specifier && so->specifier[0] ? so->specifier : "";\ + for (i = 0; opt_name_##name[i]; i++)\ + av_strlcatf(namestr, sizeof(namestr), "-%s%s", opt_name_##name[i], opt_name_##name[i+1] ? (opt_name_##name[i+2] ? ", " : " or ") : "");\ + av_log(NULL, AV_LOG_WARNING, "Multiple %s options specified for stream %d, only the last option '-%s%s%s "SPECIFIER_OPT_FMT_##type"' will be used.\n",\ + namestr, st->index, opt_name_##name[0], spec[0] ? ":" : "", spec, so->u.type);\ +} + #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\ {\ - int i, ret;\ + int i, ret, matches = 0;\ + SpecifierOpt *so;\ for (i = 0; i < o->nb_ ## name; i++) {\ char *spec = o->name[i].specifier;\ - if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\ + if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0) {\ outvar = o->name[i].u.type;\ - else if (ret < 0)\ + so = &o->name[i];\ + matches++;\ + } else if (ret < 0)\ exit_program(1);\ }\ + if (matches > 1)\ + WARN_MULTIPLE_OPT_USAGE(name, type, so, st);\ } #define MATCH_PER_TYPE_OPT(name, type, outvar, fmtctx, mediatype)\ @@ -90,13 +156,9 @@ const HWAccel hwaccels[] = { #endif #if CONFIG_LIBMFX { "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV }, -#endif -#if CONFIG_CUVID - { "cuvid", cuvid_init, HWACCEL_CUVID, AV_PIX_FMT_CUDA }, #endif { 0 }, }; -__thread AVBufferRef *hw_device_ctx; __thread HWDevice *filter_hw_device; __thread char *vstats_filename; @@ -129,6 +191,8 @@ __thread float max_error_rate = 2.0/3; __thread int filter_nbthreads = 0; __thread int filter_complex_nbthreads = 0; __thread int vstats_version = 2; +__thread int auto_conversion_filters = 1; +__thread int64_t stats_period = 500000; __thread int intra_only = 0; @@ -187,19 +251,17 @@ void init_options(OptionsContext *o) o->limit_filesize = UINT64_MAX; o->chapters_input_file = INT_MAX; o->accurate_seek = 1; + o->thread_queue_size = -1; } int show_hwaccels(void *optctx, const char *opt, const char *arg) { enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE; - int i; av_log(NULL, AV_LOG_STDERR, "Hardware acceleration methods:\n"); while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE) av_log(NULL, AV_LOG_STDERR, "%s\n", av_hwdevice_get_type_name(type)); - for (i = 0; hwaccels[i].name; i++) - av_log(NULL, AV_LOG_STDERR, "%s\n", hwaccels[i].name); av_log(NULL, AV_LOG_STDERR, "\n"); return 0; } @@ -227,6 +289,7 @@ int opt_abort_on(void *optctx, const char *opt, const char *arg) const AVOption opts[] = { { "abort_on" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, 
(double)INT64_MAX, .unit = "flags" }, { "empty_output" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT }, .unit = "flags" }, + { "empty_output_stream", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM }, .unit = "flags" }, { NULL }, }; const AVClass class = { @@ -240,6 +303,21 @@ int opt_abort_on(void *optctx, const char *opt, const char *arg) return av_opt_eval_flags(&pclass, &opts[0], arg, &abort_on_flags); } +int opt_stats_period(void *optctx, const char *opt, const char *arg) +{ + int64_t user_stats_period = parse_time_or_die(opt, arg, 1); + + if (user_stats_period <= 0) { + av_log(NULL, AV_LOG_ERROR, "stats_period %s must be positive.\n", arg); + return AVERROR(EINVAL); + } + + stats_period = user_stats_period; + av_log(NULL, AV_LOG_INFO, "ffmpeg stats and -progress period set to %s.\n", arg); + + return 0; +} + int opt_sameq(void *optctx, const char *opt, const char *arg) { av_log(NULL, AV_LOG_ERROR, "Option '%s' was removed. " @@ -498,21 +576,15 @@ int opt_sdp_file(void *optctx, const char *opt, const char *arg) #if CONFIG_VAAPI int opt_vaapi_device(void *optctx, const char *opt, const char *arg) { - HWDevice *dev; const char *prefix = "vaapi:"; char *tmp; int err; tmp = av_asprintf("%s%s", prefix, arg); if (!tmp) return AVERROR(ENOMEM); - err = hw_device_init_from_string(tmp, &dev); + err = hw_device_init_from_string(tmp, NULL); av_free(tmp); - if (err < 0) return err; - hw_device_ctx = av_buffer_ref(dev->device_ref); - if (!hw_device_ctx) - return AVERROR(ENOMEM); - return 0; } #endif @@ -815,15 +887,6 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) case AVMEDIA_TYPE_VIDEO: if(!ist->dec) ist->dec = avcodec_find_decoder(par->codec_id); -#if FF_API_LOWRES - if (st->codec->lowres) { - ist->dec_ctx->lowres = st->codec->lowres; - ist->dec_ctx->width = st->codec->width; - ist->dec_ctx->height = st->codec->height; - ist->dec_ctx->coded_width = st->codec->coded_width; - ist->dec_ctx->coded_height = st->codec->coded_height; - } -#endif // avformat_find_stream_info() doesn't set this for us anymore. ist->dec_ctx->framerate = st->avg_frame_rate; @@ -840,9 +903,28 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st); MATCH_PER_STREAM_OPT(hwaccels, str, hwaccel, ic, st); + MATCH_PER_STREAM_OPT(hwaccel_output_formats, str, + hwaccel_output_format, ic, st); + + if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "cuvid")) { + av_log(NULL, AV_LOG_WARNING, + "WARNING: defaulting hwaccel_output_format to cuda for compatibility " + "with old commandlines. This behaviour is DEPRECATED and will be removed " + "in the future. Please explicitly set \"-hwaccel_output_format cuda\".\n"); + ist->hwaccel_output_format = AV_PIX_FMT_CUDA; + } else if (hwaccel_output_format) { + ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format); + if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) { + av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output " + "format: %s", hwaccel_output_format); + } + } else { + ist->hwaccel_output_format = AV_PIX_FMT_NONE; + } + if (hwaccel) { // The NVDEC hwaccels use a CUDA device, so remap the name here. 
- if (!strcmp(hwaccel, "nvdec")) + if (!strcmp(hwaccel, "nvdec") || !strcmp(hwaccel, "cuvid")) hwaccel = "cuda"; if (!strcmp(hwaccel, "none")) @@ -876,8 +958,6 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) AV_HWDEVICE_TYPE_NONE) av_log(NULL, AV_LOG_FATAL, "%s ", av_hwdevice_get_type_name(type)); - for (i = 0; hwaccels[i].name; i++) - av_log(NULL, AV_LOG_FATAL, "%s ", hwaccels[i].name); av_log(NULL, AV_LOG_FATAL, "\n"); exit_program(1); } @@ -891,18 +971,6 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) exit_program(1); } - MATCH_PER_STREAM_OPT(hwaccel_output_formats, str, - hwaccel_output_format, ic, st); - if (hwaccel_output_format) { - ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format); - if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) { - av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output " - "format: %s", hwaccel_output_format); - } - } else { - ist->hwaccel_output_format = AV_PIX_FMT_NONE; - } - ist->hwaccel_pix_fmt = AV_PIX_FMT_NONE; break; @@ -1232,7 +1300,7 @@ int open_input_file(OptionsContext *o, const char *filename) f->duration = 0; f->time_base = (AVRational){ 1, 1 }; #if HAVE_THREADS - f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8; + f->thread_queue_size = o->thread_queue_size; #endif /* check if all codec options have been used */ @@ -1425,6 +1493,8 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM ost->encoder_opts = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc); MATCH_PER_STREAM_OPT(presets, str, preset, oc, st); + ost->autoscale = 1; + MATCH_PER_STREAM_OPT(autoscale, i, ost->autoscale, oc, st); if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) { do { buf = get_line(s); @@ -1492,55 +1562,13 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st); MATCH_PER_STREAM_OPT(bitstream_filters, str, bsfs, oc, st); - while (bsfs && *bsfs) { - const AVBitStreamFilter *filter; - char *bsf, *bsf_options_str, *bsf_name; - - bsf = av_get_token(&bsfs, ","); - if (!bsf) - exit_program(1); - bsf_name = av_strtok(bsf, "=", &bsf_options_str); - if (!bsf_name) - exit_program(1); - - filter = av_bsf_get_by_name(bsf_name); - if (!filter) { - av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf_name); - exit_program(1); - } - - ost->bsf_ctx = av_realloc_array(ost->bsf_ctx, - ost->nb_bitstream_filters + 1, - sizeof(*ost->bsf_ctx)); - if (!ost->bsf_ctx) - exit_program(1); - - ret = av_bsf_alloc(filter, &ost->bsf_ctx[ost->nb_bitstream_filters]); + if (bsfs && *bsfs) { + ret = av_bsf_list_parse_str(bsfs, &ost->bsf_ctx); if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n"); - exit_program(1); - } - - ost->nb_bitstream_filters++; - - if (bsf_options_str && filter->priv_class) { - const AVOption *opt = av_opt_next(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, NULL); - const char * shorthand[2] = {NULL}; - - if (opt) - shorthand[0] = opt->name; - - ret = av_opt_set_from_string(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, bsf_options_str, shorthand, "=", ":"); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error parsing options for bitstream filter %s\n", bsf_name); + av_log(NULL, AV_LOG_ERROR, "Error parsing bitstream filter sequence '%s': %s\n", bsfs, av_err2str(ret)); exit_program(1); } } - av_freep(&bsf); - - if (*bsfs) - bsfs++; - } MATCH_PER_STREAM_OPT(codec_tags, 
str, codec_tag, oc, st); if (codec_tag) { @@ -1564,6 +1592,11 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st); ost->max_muxing_queue_size *= sizeof(AVPacket); + ost->muxing_queue_data_size = 0; + + ost->muxing_queue_data_threshold = 50*1024*1024; + MATCH_PER_STREAM_OPT(muxing_queue_data_threshold, i, ost->muxing_queue_data_threshold, oc, st); + if (oc->oformat->flags & AVFMT_GLOBALHEADER) ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; @@ -1702,8 +1735,6 @@ OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, int sourc MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st); MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st); - if (o->nb_filters > 1) - av_log(NULL, AV_LOG_ERROR, "Only '-vf %s' read, ignoring remaining -vf options: Use ',' to separate filters\n", ost->filters); if (!ost->stream_copy) { const char *p = NULL; @@ -1885,8 +1916,6 @@ OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, int sourc MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st); MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st); - if (o->nb_filters > 1) - av_log(NULL, AV_LOG_ERROR, "Only '-af %s' read, ignoring remaining -af options: Use ',' to separate filters\n", ost->filters); if (!ost->stream_copy) { char *sample_fmt = NULL; @@ -2214,22 +2243,23 @@ int open_output_file(OptionsContext *o, const char *filename) /* video: highest resolution */ if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) { - int area = 0, idx = -1; + int best_score = 0, idx = -1; int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0); for (i = 0; i < nb_input_streams; i++) { - int new_area; + int score; ist = input_streams[i]; - new_area = ist->st->codecpar->width * ist->st->codecpar->height + 100000000*!!ist->st->codec_info_nb_frames + score = ist->st->codecpar->width * ist->st->codecpar->height + + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS) + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT); if (ist->user_set_discard == AVDISCARD_ALL) continue; if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) - new_area = 1; + score = 1; if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && - new_area > area) { + score > best_score) { if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) continue; - area = new_area; + best_score = score; idx = i; } } @@ -2393,12 +2423,14 @@ loop_end: o->attachments[i]); exit_program(1); } - if (!(attachment = av_malloc(len))) { - av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n", + if (len > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE || + !(attachment = av_malloc(len + AV_INPUT_BUFFER_PADDING_SIZE))) { + av_log(NULL, AV_LOG_FATAL, "Attachment %s too large.\n", o->attachments[i]); exit_program(1); } avio_read(pb, attachment, len); + memset(attachment + len, 0, AV_INPUT_BUFFER_PADDING_SIZE); ost = new_attachment_stream(o, oc, -1); ost->stream_copy = 0; @@ -3217,7 +3249,7 @@ void show_help_default_ffmpeg(const char *opt, const char *arg) " -h -- print basic options\n" " -h long -- print more options\n" " -h full -- print all options (including all format and codec specific options, very long)\n" - " -h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter/bsf\n" + " 
-h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter/bsf/protocol\n" " See man %s for detailed description of the options.\n" "\n", program_name); @@ -3225,7 +3257,7 @@ void show_help_default_ffmpeg(const char *opt, const char *arg) OPT_EXIT, 0, 0); show_help_options(options, "Global options (affect whole program " - "instead of just one file:", + "instead of just one file):", 0, per_file | OPT_EXIT | OPT_EXPERT, 0); if (show_advanced) show_help_options(options, "Advanced global options:", OPT_EXPERT, @@ -3301,6 +3333,7 @@ int open_files(OptionGroupList *l, const char *inout, if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error parsing options for %s file " "%s.\n", inout, g->arg); + uninit_options(&o); return ret; } diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffprobe.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffprobe.c index b1b4d10..d38ec22 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffprobe.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffprobe.c @@ -42,7 +42,9 @@ #include "libavutil/bprint.h" #include "libavutil/display.h" #include "libavutil/hash.h" +#include "libavutil/hdr_dynamic_metadata.h" #include "libavutil/mastering_display_metadata.h" +#include "libavutil/dovi_meta.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/spherical.h" @@ -257,6 +259,7 @@ __thread OptionDef *ffprobe_options = NULL; /* FFprobe context */ __thread const char *input_filename; +__thread const char *print_input_filename; __thread AVInputFormat *iformat = NULL; __thread struct AVHashContext *hash; @@ -1089,12 +1092,12 @@ typedef struct CompactContext { #define OFFSET(x) offsetof(CompactContext, x) static const AVOption compact_options[]= { - {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, CHAR_MIN, CHAR_MAX }, - {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, CHAR_MIN, CHAR_MAX }, + {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, 0, 0 }, + {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, 0, 0 }, {"nokey", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 }, {"nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 }, - {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, CHAR_MIN, CHAR_MAX }, - {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, CHAR_MIN, CHAR_MAX }, + {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, 0, 0 }, + {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, 0, 0 }, {"print_section", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"p", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {NULL}, @@ -1205,12 +1208,12 @@ static const Writer compact_writer = { #define OFFSET(x) offsetof(CompactContext, x) static const AVOption csv_options[] = { - {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, CHAR_MIN, CHAR_MAX }, - {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, CHAR_MIN, CHAR_MAX }, + {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, 0, 0 }, + {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, 0, 0 }, {"nokey", "force no key printing", 
OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, - {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, CHAR_MIN, CHAR_MAX }, - {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, CHAR_MIN, CHAR_MAX }, + {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, 0, 0 }, + {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, 0, 0 }, {"print_section", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"p", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {NULL}, @@ -1243,8 +1246,8 @@ typedef struct FlatContext { #define OFFSET(x) offsetof(FlatContext, x) static const AVOption flat_options[]= { - {"sep_char", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, CHAR_MIN, CHAR_MAX }, - {"s", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, CHAR_MIN, CHAR_MAX }, + {"sep_char", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, 0, 0 }, + {"s", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, 0, 0 }, {"hierarchical", "specify if the section specification should be hierarchical", OFFSET(hierarchical), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"h", "specify if the section specification should be hierarchical", OFFSET(hierarchical), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {NULL}, @@ -1859,6 +1862,105 @@ static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id return ret; } +static void print_dynamic_hdr10_plus(WriterContext *w, const AVDynamicHDRPlus *metadata) +{ + if (!metadata) + return; + print_int("application version", metadata->application_version); + print_int("num_windows", metadata->num_windows); + for (int n = 1; n < metadata->num_windows; n++) { + const AVHDRPlusColorTransformParams *params = &metadata->params[n]; + print_q("window_upper_left_corner_x", + params->window_upper_left_corner_x,'/'); + print_q("window_upper_left_corner_y", + params->window_upper_left_corner_y,'/'); + print_q("window_lower_right_corner_x", + params->window_lower_right_corner_x,'/'); + print_q("window_lower_right_corner_y", + params->window_lower_right_corner_y,'/'); + print_q("window_upper_left_corner_x", + params->window_upper_left_corner_x,'/'); + print_q("window_upper_left_corner_y", + params->window_upper_left_corner_y,'/'); + print_int("center_of_ellipse_x", + params->center_of_ellipse_x ) ; + print_int("center_of_ellipse_y", + params->center_of_ellipse_y ); + print_int("rotation_angle", + params->rotation_angle); + print_int("semimajor_axis_internal_ellipse", + params->semimajor_axis_internal_ellipse); + print_int("semimajor_axis_external_ellipse", + params->semimajor_axis_external_ellipse); + print_int("semiminor_axis_external_ellipse", + params->semiminor_axis_external_ellipse); + print_int("overlap_process_option", + params->overlap_process_option); + } + print_q("targeted_system_display_maximum_luminance", + metadata->targeted_system_display_maximum_luminance,'/'); + if (metadata->targeted_system_display_actual_peak_luminance_flag) { + print_int("num_rows_targeted_system_display_actual_peak_luminance", + metadata->num_rows_targeted_system_display_actual_peak_luminance); + print_int("num_cols_targeted_system_display_actual_peak_luminance", + metadata->num_cols_targeted_system_display_actual_peak_luminance); + for (int i = 0; i < 
metadata->num_rows_targeted_system_display_actual_peak_luminance; i++) { + for (int j = 0; j < metadata->num_cols_targeted_system_display_actual_peak_luminance; j++) { + print_q("targeted_system_display_actual_peak_luminance", + metadata->targeted_system_display_actual_peak_luminance[i][j],'/'); + } + } + } + for (int n = 0; n < metadata->num_windows; n++) { + const AVHDRPlusColorTransformParams *params = &metadata->params[n]; + for (int i = 0; i < 3; i++) { + print_q("maxscl",params->maxscl[i],'/'); + } + print_q("average_maxrgb", + params->average_maxrgb,'/'); + print_int("num_distribution_maxrgb_percentiles", + params->num_distribution_maxrgb_percentiles); + for (int i = 0; i < params->num_distribution_maxrgb_percentiles; i++) { + print_int("distribution_maxrgb_percentage", + params->distribution_maxrgb[i].percentage); + print_q("distribution_maxrgb_percentile", + params->distribution_maxrgb[i].percentile,'/'); + } + print_q("fraction_bright_pixels", + params->fraction_bright_pixels,'/'); + } + if (metadata->mastering_display_actual_peak_luminance_flag) { + print_int("num_rows_mastering_display_actual_peak_luminance", + metadata->num_rows_mastering_display_actual_peak_luminance); + print_int("num_cols_mastering_display_actual_peak_luminance", + metadata->num_cols_mastering_display_actual_peak_luminance); + for (int i = 0; i < metadata->num_rows_mastering_display_actual_peak_luminance; i++) { + for (int j = 0; j < metadata->num_cols_mastering_display_actual_peak_luminance; j++) { + print_q("mastering_display_actual_peak_luminance", + metadata->mastering_display_actual_peak_luminance[i][j],'/'); + } + } + } + + for (int n = 0; n < metadata->num_windows; n++) { + const AVHDRPlusColorTransformParams *params = &metadata->params[n]; + if (params->tone_mapping_flag) { + print_q("knee_point_x", params->knee_point_x,'/'); + print_q("knee_point_y", params->knee_point_y,'/'); + print_int("num_bezier_curve_anchors", + params->num_bezier_curve_anchors ); + for (int i = 0; i < params->num_bezier_curve_anchors; i++) { + print_q("bezier_curve_anchors", + params->bezier_curve_anchors[i],'/'); + } + } + if (params->color_saturation_mapping_flag) { + print_q("color_saturation_weight", + params->color_saturation_weight,'/'); + } + } +} + static void print_pkt_side_data(WriterContext *w, AVCodecParameters *par, const AVPacketSideData *side_data, @@ -1928,6 +2030,16 @@ static void print_pkt_side_data(WriterContext *w, AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data; print_int("max_content", metadata->MaxCLL); print_int("max_average", metadata->MaxFALL); + } else if (sd->type == AV_PKT_DATA_DOVI_CONF) { + AVDOVIDecoderConfigurationRecord *dovi = (AVDOVIDecoderConfigurationRecord *)sd->data; + print_int("dv_version_major", dovi->dv_version_major); + print_int("dv_version_minor", dovi->dv_version_minor); + print_int("dv_profile", dovi->dv_profile); + print_int("dv_level", dovi->dv_level); + print_int("rpu_present_flag", dovi->rpu_present_flag); + print_int("el_present_flag", dovi->el_present_flag); + print_int("bl_present_flag", dovi->bl_present_flag); + print_int("dv_bl_signal_compatibility_id", dovi->dv_bl_signal_compatibility_id); } writer_print_section_footer(w); } @@ -2214,7 +2326,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream, writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST); for (int j = 1; j <= m ; j++) { char tcbuf[AV_TIMECODE_STR_SIZE]; - av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0); + 
av_timecode_make_smpte_tc_string2(tcbuf, stream->avg_frame_rate, tc[j], 0, 0); writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE); print_str("value", tcbuf); writer_print_section_footer(w); @@ -2239,6 +2351,9 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream, print_q("min_luminance", metadata->min_luminance, '/'); print_q("max_luminance", metadata->max_luminance, '/'); } + } else if (sd->type == AV_FRAME_DATA_DYNAMIC_HDR_PLUS) { + AVDynamicHDRPlus *metadata = (AVDynamicHDRPlus *)sd->data; + print_dynamic_hdr10_plus(w, metadata); } else if (sd->type == AV_FRAME_DATA_CONTENT_LIGHT_LEVEL) { AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data; print_int("max_content", metadata->MaxCLL); @@ -2539,6 +2654,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id if (dec_ctx) { print_int("coded_width", dec_ctx->coded_width); print_int("coded_height", dec_ctx->coded_height); + print_int("closed_captions", !!(dec_ctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)); } #endif print_int("has_b_frames", par->video_delay); @@ -2840,11 +2956,11 @@ static void show_error(WriterContext *w, int err) writer_print_section_footer(w); } -static int open_input_file(InputFile *ifile, const char *filename) +static int open_input_file(InputFile *ifile, const char *filename, const char *print_filename) { int err, i; AVFormatContext *fmt_ctx = NULL; - AVDictionaryEntry *t; + AVDictionaryEntry *t = NULL; int scan_all_pmts_set = 0; fmt_ctx = avformat_alloc_context(); @@ -2862,13 +2978,15 @@ static int open_input_file(InputFile *ifile, const char *filename) print_error(filename, err); return err; } + if (print_filename) { + av_freep(&fmt_ctx->url); + fmt_ctx->url = av_strdup(print_filename); + } ifile->fmt_ctx = fmt_ctx; if (scan_all_pmts_set) av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE); - if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { - av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); - return AVERROR_OPTION_NOT_FOUND; - } + while ((t = av_dict_get(format_opts, "", t, AV_DICT_IGNORE_SUFFIX))) + av_log(NULL, AV_LOG_WARNING, "Option %s skipped - not known to demuxer.\n", t->key); if (find_stream_info) { AVDictionary **opts = setup_find_stream_info_opts(fmt_ctx, codec_opts); @@ -2938,6 +3056,7 @@ static int open_input_file(InputFile *ifile, const char *filename) ist->dec_ctx->pkt_timebase = stream->time_base; ist->dec_ctx->framerate = stream->avg_frame_rate; #if FF_API_LAVF_AVCTX + ist->dec_ctx->properties = stream->codec->properties; ist->dec_ctx->coded_width = stream->codec->coded_width; ist->dec_ctx->coded_height = stream->codec->coded_height; #endif @@ -2975,7 +3094,8 @@ static void close_input_file(InputFile *ifile) avformat_close_input(&ifile->fmt_ctx); } -static int probe_file(WriterContext *wctx, const char *filename) +static int probe_file(WriterContext *wctx, const char *filename, + const char *print_filename) { InputFile ifile = { 0 }; int ret, i; @@ -2984,7 +3104,7 @@ static int probe_file(WriterContext *wctx, const char *filename) do_read_frames = do_show_frames || do_count_frames; do_read_packets = do_show_packets || do_count_packets; - ret = open_input_file(&ifile, filename); + ret = open_input_file(&ifile, filename, print_filename); if (ret < 0) goto end; @@ -3289,6 +3409,12 @@ static int opt_input_file_i(void *optctx, const char *opt, const char *arg) return 0; } +static int opt_print_filename(void *optctx, const char *opt, const char *arg) +{ + 
print_input_filename = arg; + return 0; +} + void show_help_default_ffprobe(const char *opt, const char *arg) { show_usage(); @@ -3563,10 +3689,12 @@ void ffprobe_var_cleanup() { read_intervals = NULL; read_intervals_nb = 0; + find_stream_info = 1; ffprobe_options = NULL; input_filename = NULL; + print_input_filename = NULL; iformat = NULL; hash = NULL; @@ -3633,13 +3761,13 @@ int ffprobe_execute(int argc, char **argv) "use sexagesimal format HOURS:MM:SS.MICROSECONDS for time units" }, { "pretty", 0, {.func_arg = opt_pretty}, "prettify the format of displayed values, make it more human readable" }, - { "print_format", OPT_STRING | HAS_ARG, {(void*)&print_format}, + { "print_format", OPT_STRING | HAS_ARG, { &print_format }, "set the output printing format (available formats are: default, compact, csv, flat, ini, json, xml)", "format" }, - { "of", OPT_STRING | HAS_ARG, {(void*)&print_format}, "alias for -print_format", "format" }, - { "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" }, + { "of", OPT_STRING | HAS_ARG, { &print_format }, "alias for -print_format", "format" }, + { "select_streams", OPT_STRING | HAS_ARG, { &stream_specifier }, "select the specified streams", "stream_specifier" }, { "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" }, - { "show_data", OPT_BOOL, {(void*)&do_show_data}, "show packets data" }, - { "show_data_hash", OPT_STRING | HAS_ARG, {(void*)&show_data_hash}, "show packets data hash" }, + { "show_data", OPT_BOOL, { &do_show_data }, "show packets data" }, + { "show_data_hash", OPT_STRING | HAS_ARG, { &show_data_hash }, "show packets data hash" }, { "show_error", 0, { .func_arg = &opt_show_error }, "show probing error" }, { "show_format", 0, { .func_arg = &opt_show_format }, "show format/container info" }, { "show_frames", 0, { .func_arg = &opt_show_frames }, "show frames info" }, @@ -3648,24 +3776,25 @@ int ffprobe_execute(int argc, char **argv) { "show_entries", HAS_ARG, {.func_arg = opt_show_entries}, "show a set of specified entries", "entry_list" }, #if HAVE_THREADS - { "show_log", OPT_INT|HAS_ARG, {(void*)&do_show_log}, "show log" }, + { "show_log", OPT_INT|HAS_ARG, { &do_show_log }, "show log" }, #endif { "show_packets", 0, { .func_arg = &opt_show_packets }, "show packets info" }, { "show_programs", 0, { .func_arg = &opt_show_programs }, "show programs info" }, { "show_streams", 0, { .func_arg = &opt_show_streams }, "show streams info" }, { "show_chapters", 0, { .func_arg = &opt_show_chapters }, "show chapters info" }, - { "count_frames", OPT_BOOL, {(void*)&do_count_frames}, "count the number of frames per stream" }, - { "count_packets", OPT_BOOL, {(void*)&do_count_packets}, "count the number of packets per stream" }, + { "count_frames", OPT_BOOL, { &do_count_frames }, "count the number of frames per stream" }, + { "count_packets", OPT_BOOL, { &do_count_packets }, "count the number of packets per stream" }, { "show_program_version", 0, { .func_arg = &opt_show_program_version }, "show ffprobe version" }, { "show_library_versions", 0, { .func_arg = &opt_show_library_versions }, "show library versions" }, { "show_versions", 0, { .func_arg = &opt_show_versions }, "show program and library versions" }, { "show_pixel_formats", 0, { .func_arg = &opt_show_pixel_formats }, "show pixel format descriptions" }, - { "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" }, - { "private", OPT_BOOL, 
{(void*)&show_private_data}, "same as show_private_data" }, + { "show_private_data", OPT_BOOL, { &show_private_data }, "show private data" }, + { "private", OPT_BOOL, { &show_private_data }, "same as show_private_data" }, { "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" }, { "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" }, { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" }, { "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"}, + { "print_filename", HAS_ARG, {.func_arg = opt_print_filename}, "override the printed input filename", "print_file"}, { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info }, "read and decode the streams to fill missing information with heuristics" }, { NULL, }, @@ -3800,7 +3929,7 @@ int ffprobe_execute(int argc, char **argv) av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name); ret = AVERROR(EINVAL); } else if (input_filename) { - ret = probe_file(wctx, input_filename); + ret = probe_file(wctx, input_filename, print_input_filename); if (ret < 0 && do_show_error) show_error(wctx, ret); } diff --git a/apple/src/ArchDetect.h b/apple/src/ArchDetect.h index 3f91b84..cc935f9 100644 --- a/apple/src/ArchDetect.h +++ b/apple/src/ArchDetect.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/ArchDetect.m b/apple/src/ArchDetect.m index ceb322e..128606b 100644 --- a/apple/src/ArchDetect.m +++ b/apple/src/ArchDetect.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/AtomicLong.h b/apple/src/AtomicLong.h index 0e348ad..2bb1622 100644 --- a/apple/src/AtomicLong.h +++ b/apple/src/AtomicLong.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/AtomicLong.m b/apple/src/AtomicLong.m index ad03c96..f084fe2 100644 --- a/apple/src/AtomicLong.m +++ b/apple/src/AtomicLong.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/ExecuteDelegate.h b/apple/src/ExecuteDelegate.h index 98f5026..e7cfff1 100644 --- a/apple/src/ExecuteDelegate.h +++ b/apple/src/ExecuteDelegate.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFmpegExecution.h b/apple/src/FFmpegExecution.h index f2c83b5..c7b047c 100644 --- a/apple/src/FFmpegExecution.h +++ b/apple/src/FFmpegExecution.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFmpegExecution.m b/apple/src/FFmpegExecution.m index af2f9a4..a314e5d 100644 --- a/apple/src/FFmpegExecution.m +++ b/apple/src/FFmpegExecution.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. 
* diff --git a/apple/src/FFmpegKit.h b/apple/src/FFmpegKit.h index f92b7a6..d18c53d 100644 --- a/apple/src/FFmpegKit.h +++ b/apple/src/FFmpegKit.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFmpegKit.m b/apple/src/FFmpegKit.m index b6bafed..0f503e7 100644 --- a/apple/src/FFmpegKit.m +++ b/apple/src/FFmpegKit.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFmpegKitConfig.h b/apple/src/FFmpegKitConfig.h index af9a98e..f457298 100644 --- a/apple/src/FFmpegKitConfig.h +++ b/apple/src/FFmpegKitConfig.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFmpegKitConfig.m b/apple/src/FFmpegKitConfig.m index 4acdcdd..25da595 100644 --- a/apple/src/FFmpegKitConfig.m +++ b/apple/src/FFmpegKitConfig.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFprobeKit.h b/apple/src/FFprobeKit.h index 8d58507..b6215c4 100644 --- a/apple/src/FFprobeKit.h +++ b/apple/src/FFprobeKit.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/FFprobeKit.m b/apple/src/FFprobeKit.m index 5a34ed4..9eda150 100644 --- a/apple/src/FFprobeKit.m +++ b/apple/src/FFprobeKit.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Taner Sener + * Copyright (c) 2020-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/LogDelegate.h b/apple/src/LogDelegate.h index 072d7be..18a0c36 100644 --- a/apple/src/LogDelegate.h +++ b/apple/src/LogDelegate.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/MediaInformation.h b/apple/src/MediaInformation.h index 7b597db..164c925 100644 --- a/apple/src/MediaInformation.h +++ b/apple/src/MediaInformation.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/MediaInformation.m b/apple/src/MediaInformation.m index f370d23..a44f0b6 100644 --- a/apple/src/MediaInformation.m +++ b/apple/src/MediaInformation.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/MediaInformationParser.h b/apple/src/MediaInformationParser.h index cc5e9a9..271551d 100644 --- a/apple/src/MediaInformationParser.h +++ b/apple/src/MediaInformationParser.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/MediaInformationParser.m b/apple/src/MediaInformationParser.m index e916634..2405832 100644 --- a/apple/src/MediaInformationParser.m +++ b/apple/src/MediaInformationParser.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. 
* diff --git a/apple/src/Statistics.h b/apple/src/Statistics.h index ef5d738..5fab2af 100644 --- a/apple/src/Statistics.h +++ b/apple/src/Statistics.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/Statistics.m b/apple/src/Statistics.m index 5a351cf..87b68e7 100644 --- a/apple/src/Statistics.m +++ b/apple/src/Statistics.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/StatisticsDelegate.h b/apple/src/StatisticsDelegate.h index 32754e5..b366f05 100644 --- a/apple/src/StatisticsDelegate.h +++ b/apple/src/StatisticsDelegate.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/StreamInformation.h b/apple/src/StreamInformation.h index 4010aa4..59657f1 100644 --- a/apple/src/StreamInformation.h +++ b/apple/src/StreamInformation.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/StreamInformation.m b/apple/src/StreamInformation.m index 77ee4b0..861d1e7 100644 --- a/apple/src/StreamInformation.m +++ b/apple/src/StreamInformation.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/ffmpegkit_exception.h b/apple/src/ffmpegkit_exception.h index d58b77e..daf3acc 100644 --- a/apple/src/ffmpegkit_exception.h +++ b/apple/src/ffmpegkit_exception.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/ffmpegkit_exception.m b/apple/src/ffmpegkit_exception.m index a1209de..a13a9ab 100644 --- a/apple/src/ffmpegkit_exception.m +++ b/apple/src/ffmpegkit_exception.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Taner Sener + * Copyright (c) 2018-2021 Taner Sener * * This file is part of FFmpegKit. * diff --git a/apple/src/fftools_cmdutils.c b/apple/src/fftools_cmdutils.c index 6f1013b..c1f6325 100644 --- a/apple/src/fftools_cmdutils.c +++ b/apple/src/fftools_cmdutils.c @@ -30,17 +30,17 @@ * * CHANGES 08.2018 * -------------------------------------------------------- - * - fftools_ prefix added to file name and parent header + * - fftools_ prefix added to the file name and parent header * * CHANGES 07.2018 * -------------------------------------------------------- * - Unused headers removed - * - Parentheses placed around assignments in condition to prevent -Wparentheses warning + * - Parentheses placed around assignments in conditions to prevent -Wparentheses warning * - exit_program updated with longjmp, disabling exit * - longjmp_value added to store exit code * - (optindex < argc) validation added before accessing argv[optindex] inside split_commandline() * and parse_options() - * - All av_log_set_callback invocations updated to set ffmpegkit_log_callback_function from Config.m. Unused + * - All av_log_set_callback invocations updated to set ffmpegkit_log_callback_function from FFmpegKitConfig.m. 
Unused * log_callback_help and log_callback_help methods removed * - (idx + 1 < argc) validation added in parse_loglevel() */ @@ -80,9 +80,6 @@ #include "libavutil/ffversion.h" #include "libavutil/version.h" #include "fftools_cmdutils.h" -#if CONFIG_NETWORK -#include "libavformat/network.h" -#endif #if HAVE_SYS_RESOURCE_H #include #include @@ -149,7 +146,7 @@ void log_callback_report(void *ptr, int level, const char *fmt, va_list vl) void init_dynload(void) { -#if HAVE_SETDLLDIRECTORY +#if HAVE_SETDLLDIRECTORY && defined(_WIN32) /* Calling SetDllDirectory with the empty string (but not NULL) removes the * current working directory from the DLL search path as a security pre-caution. */ SetDllDirectory(""); @@ -215,7 +212,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags, first = 1; for (po = options; po->name; po++) { - char buf[64]; + char buf[128]; if (((po->flags & req_flags) != req_flags) || (alt_flags && !(po->flags & alt_flags)) || @@ -238,13 +235,14 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags, void show_help_children(const AVClass *class, int flags) { - const AVClass *child = NULL; + void *iter = NULL; + const AVClass *child; if (class->option) { av_opt_show2(&class, NULL, flags, 0); av_log(NULL, AV_LOG_STDERR, "\n"); } - while ((child = av_opt_child_class_next(class, child))) + while ((child = av_opt_child_class_iterate(class, &iter))) show_help_children(child, flags); } @@ -358,7 +356,7 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt, } else if (po->flags & OPT_BOOL || po->flags & OPT_INT) { *(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX); } else if (po->flags & OPT_INT64) { - *(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX); + *(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, (double)INT64_MAX); } else if (po->flags & OPT_TIME) { *(int64_t *)dst = parse_time_or_die(opt, arg, 1); } else if (po->flags & OPT_FLOAT) { @@ -507,7 +505,7 @@ int locate_option(int argc, char **argv, const OptionDef *options, return 0; } -void dump_argument(const char *a) +static void dump_argument(const char *a) { const unsigned char *p; @@ -1017,7 +1015,7 @@ static void expand_filename_template(AVBPrint *bp, const char *template, } } -int init_report(const char *env) +static int init_report(const char *env) { char *filename_template = NULL; char *key, *val; @@ -1461,10 +1459,6 @@ static void print_codec(const AVCodec *c) av_log(NULL, AV_LOG_STDERR, "threads "); if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING) av_log(NULL, AV_LOG_STDERR, "avoidprobe "); - if (c->capabilities & AV_CODEC_CAP_INTRA_ONLY) - av_log(NULL, AV_LOG_STDERR, "intraonly "); - if (c->capabilities & AV_CODEC_CAP_LOSSLESS) - av_log(NULL, AV_LOG_STDERR, "lossless "); if (c->capabilities & AV_CODEC_CAP_HARDWARE) av_log(NULL, AV_LOG_STDERR, "hardware "); if (c->capabilities & AV_CODEC_CAP_HYBRID) @@ -1538,13 +1532,14 @@ static char get_media_type_char(enum AVMediaType type) } } -static const AVCodec *next_codec_for_id(enum AVCodecID id, const AVCodec *prev, +static const AVCodec *next_codec_for_id(enum AVCodecID id, void **iter, int encoder) { - while ((prev = av_codec_next(prev))) { - if (prev->id == id && - (encoder ? av_codec_is_encoder(prev) : av_codec_is_decoder(prev))) - return prev; + const AVCodec *c; + while ((c = av_codec_iterate(iter))) { + if (c->id == id && + (encoder ? 
av_codec_is_encoder(c) : av_codec_is_decoder(c))) + return c; } return NULL; } @@ -1581,11 +1576,12 @@ static unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs) static void print_codecs_for_id(enum AVCodecID id, int encoder) { - const AVCodec *codec = NULL; + void *iter = NULL; + const AVCodec *codec; av_log(NULL, AV_LOG_STDERR, " (%s: ", encoder ? "encoders" : "decoders"); - while ((codec = next_codec_for_id(id, codec, encoder))) + while ((codec = next_codec_for_id(id, &iter, encoder))) av_log(NULL, AV_LOG_STDERR, "%s ", codec->name); av_log(NULL, AV_LOG_STDERR, ")"); @@ -1608,7 +1604,8 @@ int show_codecs(void *optctx, const char *opt, const char *arg) " -------\n"); for (i = 0; i < nb_codecs; i++) { const AVCodecDescriptor *desc = codecs[i]; - const AVCodec *codec = NULL; + const AVCodec *codec; + void *iter = NULL; if (strstr(desc->name, "_deprecated")) continue; @@ -1626,14 +1623,14 @@ int show_codecs(void *optctx, const char *opt, const char *arg) /* print decoders/encoders when there's more than one or their * names are different from codec name */ - while ((codec = next_codec_for_id(desc->id, codec, 0))) { + while ((codec = next_codec_for_id(desc->id, &iter, 0))) { if (strcmp(codec->name, desc->name)) { print_codecs_for_id(desc->id, 0); break; } } - codec = NULL; - while ((codec = next_codec_for_id(desc->id, codec, 1))) { + iter = NULL; + while ((codec = next_codec_for_id(desc->id, &iter, 1))) { if (strcmp(codec->name, desc->name)) { print_codecs_for_id(desc->id, 1); break; @@ -1664,9 +1661,10 @@ static void print_codecs(int encoder) encoder ? "Encoders" : "Decoders"); for (i = 0; i < nb_codecs; i++) { const AVCodecDescriptor *desc = codecs[i]; - const AVCodec *codec = NULL; + const AVCodec *codec; + void *iter = NULL; - while ((codec = next_codec_for_id(desc->id, codec, encoder))) { + while ((codec = next_codec_for_id(desc->id, &iter, encoder))) { av_log(NULL, AV_LOG_STDERR, " %c", get_media_type_char(desc->type)); av_log(NULL, AV_LOG_STDERR, (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) ? "F" : "."); av_log(NULL, AV_LOG_STDERR, (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) ? 
"S" : "."); @@ -1871,9 +1869,10 @@ static void show_help_codec(const char *name, int encoder) if (codec) print_codec(codec); else if ((desc = avcodec_descriptor_get_by_name(name))) { + void *iter = NULL; int printed = 0; - while ((codec = next_codec_for_id(desc->id, codec, encoder))) { + while ((codec = next_codec_for_id(desc->id, &iter, encoder))) { printed = 1; print_codec(codec); } @@ -1908,6 +1907,24 @@ static void show_help_demuxer(const char *name) show_help_children(fmt->priv_class, AV_OPT_FLAG_DECODING_PARAM); } +static void show_help_protocol(const char *name) +{ + const AVClass *proto_class; + + if (!name) { + av_log(NULL, AV_LOG_ERROR, "No protocol name specified.\n"); + return; + } + + proto_class = avio_protocol_get_class(name); + if (!proto_class) { + av_log(NULL, AV_LOG_ERROR, "Unknown protocol '%s'.\n", name); + return; + } + + show_help_children(proto_class, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM); +} + static void show_help_muxer(const char *name) { const AVCodecDescriptor *desc; @@ -2041,6 +2058,8 @@ int show_help(void *optctx, const char *opt, const char *arg) show_help_demuxer(par); } else if (!strcmp(topic, "muxer")) { show_help_muxer(par); + } else if (!strcmp(topic, "protocol")) { + show_help_protocol(par); #if CONFIG_AVFILTER } else if (!strcmp(topic, "filter")) { show_help_filter(par); @@ -2084,7 +2103,7 @@ FILE *get_preset_file(char *filename, size_t filename_size, av_strlcpy(filename, preset_name, filename_size); f = fopen(filename, "r"); } else { -#if HAVE_GETMODULEHANDLE +#if HAVE_GETMODULEHANDLE && defined(_WIN32) char datadir[MAX_PATH], *ls; base[2] = NULL; @@ -2237,7 +2256,7 @@ double get_rotation(AVStream *st) if (fabs(theta - 90*round(theta/90)) > 2) av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n" "If you want to help, upload a sample " - "of this file to ftp://upload.ffmpeg.org/incoming/ " + "of this file to https://streams.videolan.org/upload/ " "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)"); return theta; @@ -2334,7 +2353,7 @@ int show_sources(void *optctx, const char *opt, const char *arg) int ret = 0; int error_level = av_log_get_level(); - av_log_set_level(AV_LOG_ERROR); + av_log_set_level(AV_LOG_WARNING); if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0) goto fail; @@ -2372,7 +2391,7 @@ int show_sinks(void *optctx, const char *opt, const char *arg) int ret = 0; int error_level = av_log_get_level(); - av_log_set_level(AV_LOG_ERROR); + av_log_set_level(AV_LOG_WARNING); if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0) goto fail; diff --git a/apple/src/fftools_ffmpeg.c b/apple/src/fftools_ffmpeg.c index 71fe688..2d2ca9f 100644 --- a/apple/src/fftools_ffmpeg.c +++ b/apple/src/fftools_ffmpeg.c @@ -164,6 +164,7 @@ __thread int nb_frames_dup = 0; __thread unsigned dup_warning = 1000; __thread int nb_frames_drop = 0; __thread int64_t decode_error_stat[2]; +__thread unsigned nb_output_dumped = 0; __thread int want_sdp = 1; @@ -185,6 +186,11 @@ __thread int nb_output_files = 0; __thread FilterGraph **filtergraphs; __thread int nb_filtergraphs; +__thread int64_t last_time = -1; +__thread int64_t keyboard_last_time = 0; +__thread int first_report = 1; +__thread int qp_histogram[52]; + void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL; extern __thread int file_overwrite; @@ -199,6 +205,7 @@ extern int opt_progress(void *optctx, const char *opt, const char *arg); extern int opt_target(void *optctx, const char *opt, const char *arg); extern int opt_vsync(void *optctx, const char *opt, const char *arg); extern int opt_abort_on(void *optctx, const char *opt, const char *arg); +extern int opt_stats_period(void *optctx, const char *opt, const char *arg); extern int opt_qscale(void *optctx, const char *opt, const char *arg); extern int opt_profile(void *optctx, const char *opt, const char *arg); extern int opt_filter_complex(void *optctx, const char *opt, const char *arg); @@ -268,7 +275,7 @@ static int sub2video_get_blank_frame(InputStream *ist) ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; ist->sub2video.frame->format = AV_PIX_FMT_RGB32; - if ((ret = av_frame_get_buffer(frame, 32)) < 0) + if ((ret = av_frame_get_buffer(frame, 0)) < 0) return ret; memset(frame->data[0], 0, frame->height * frame->linesize[0]); return 0; @@ -323,7 +330,7 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts) } } -void sub2video_update(InputStream *ist, AVSubtitle *sub) +void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) { AVFrame *frame = ist->sub2video.frame; int8_t *dst; @@ -340,7 +347,12 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub) AV_TIME_BASE_Q, ist->st->time_base); num_rects = sub->num_rects; } else { - pts = ist->sub2video.end_pts; + /* If we are initializing the system, utilize current heartbeat + PTS as the start time, and show until the following subpicture + is received. Otherwise, utilize the previous subpicture's end time + as the fall-back value. */ + pts = ist->sub2video.initialize ? 
+ heartbeat_pts : ist->sub2video.end_pts; end_pts = INT64_MAX; num_rects = 0; } @@ -355,6 +367,7 @@ void sub2video_update(InputStream *ist, AVSubtitle *sub) sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); sub2video_push_ref(ist, pts); ist->sub2video.end_pts = end_pts; + ist->sub2video.initialize = 0; } static void sub2video_heartbeat(InputStream *ist, int64_t pts) @@ -377,9 +390,11 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts) /* do not send the heartbeat frame if the subtitle is already ahead */ if (pts2 <= ist2->sub2video.last_pts) continue; - if (pts2 >= ist2->sub2video.end_pts || - (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX)) - sub2video_update(ist2, NULL); + if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) + /* if we have hit the end of the current displayed subpicture, + or if we need to initialize the system, update the + overlayed subpicture and its start/end times */ + sub2video_update(ist2, pts2 + 1, NULL); for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); if (nb_reqs) @@ -393,7 +408,7 @@ static void sub2video_flush(InputStream *ist) int ret; if (ist->sub2video.end_pts < INT64_MAX) - sub2video_update(ist, NULL); + sub2video_update(ist, INT64_MAX, NULL); for (i = 0; i < ist->nb_filters; i++) { ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); if (ret != AVERROR_EOF && ret < 0) @@ -422,6 +437,7 @@ static volatile int received_nb_signals = 0; __thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); __thread volatile int ffmpeg_exited = 0; __thread volatile int main_ffmpeg_return_code = 0; +__thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE; extern __thread volatile int longjmp_value; static void @@ -471,8 +487,30 @@ static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) } #endif +#ifdef __linux__ +#define SIGNAL(sig, func) \ + do { \ + action.sa_handler = func; \ + sigaction(sig, &action, NULL); \ + } while (0) +#else +#define SIGNAL(sig, func) \ + signal(sig, func) +#endif + void term_init(void) { +#if defined __linux__ + struct sigaction action = {0}; + action.sa_handler = sigterm_handler; + + /* block other interrupts while processing this one */ + sigfillset(&action.sa_mask); + + /* restart interruptible functions (i.e. 
don't fail with EINTR) */ + action.sa_flags = SA_RESTART; +#endif + #if HAVE_TERMIOS_H if (!run_as_daemon && stdin_interaction) { struct termios tty; @@ -592,32 +630,38 @@ static void ffmpeg_cleanup(int ret) FilterGraph *fg = filtergraphs[i]; avfilter_graph_free(&fg->graph); for (j = 0; j < fg->nb_inputs; j++) { - while (av_fifo_size(fg->inputs[j]->frame_queue)) { + InputFilter *ifilter = fg->inputs[j]; + struct InputStream *ist = ifilter->ist; + + while (av_fifo_size(ifilter->frame_queue)) { AVFrame *frame; - av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame, + av_fifo_generic_read(ifilter->frame_queue, &frame, sizeof(frame), NULL); av_frame_free(&frame); } - av_fifo_freep(&fg->inputs[j]->frame_queue); - if (fg->inputs[j]->ist->sub2video.sub_queue) { - while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) { + av_fifo_freep(&ifilter->frame_queue); + if (ist->sub2video.sub_queue) { + while (av_fifo_size(ist->sub2video.sub_queue)) { AVSubtitle sub; - av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue, + av_fifo_generic_read(ist->sub2video.sub_queue, &sub, sizeof(sub), NULL); avsubtitle_free(&sub); } - av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue); + av_fifo_freep(&ist->sub2video.sub_queue); } - av_buffer_unref(&fg->inputs[j]->hw_frames_ctx); - av_freep(&fg->inputs[j]->name); + av_buffer_unref(&ifilter->hw_frames_ctx); + av_freep(&ifilter->name); av_freep(&fg->inputs[j]); } av_freep(&fg->inputs); for (j = 0; j < fg->nb_outputs; j++) { - av_freep(&fg->outputs[j]->name); - av_freep(&fg->outputs[j]->formats); - av_freep(&fg->outputs[j]->channel_layouts); - av_freep(&fg->outputs[j]->sample_rates); + OutputFilter *ofilter = fg->outputs[j]; + + avfilter_inout_free(&ofilter->out_tmp); + av_freep(&ofilter->name); + av_freep(&ofilter->formats); + av_freep(&ofilter->channel_layouts); + av_freep(&ofilter->sample_rates); av_freep(&fg->outputs[j]); } av_freep(&fg->outputs); @@ -649,9 +693,7 @@ static void ffmpeg_cleanup(int ret) if (!ost) continue; - for (j = 0; j < ost->nb_bitstream_filters; j++) - av_bsf_free(&ost->bsf_ctx[j]); - av_freep(&ost->bsf_ctx); + av_bsf_free(&ost->bsf_ctx); av_frame_free(&ost->filtered_frame); av_frame_free(&ost->last_frame); @@ -727,7 +769,7 @@ static void ffmpeg_cleanup(int ret) av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n", (int) received_sigterm); } else if (cancelRequested(executionId)) { - av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel signal.\n"); + av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n"); } else if (ret && atomic_load(&transcode_init_done)) { av_log(NULL, AV_LOG_INFO, "Conversion failed!\n"); } @@ -814,8 +856,13 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u AVPacket tmp_pkt = {0}; /* the muxer is not initialized yet, buffer the packet */ if (!av_fifo_space(ost->muxing_queue)) { - int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue), - ost->max_muxing_queue_size); + unsigned int are_we_over_size = + (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold; + int new_size = are_we_over_size ? 
+ FFMIN(2 * av_fifo_size(ost->muxing_queue), + ost->max_muxing_queue_size) : + 2 * av_fifo_size(ost->muxing_queue); + if (new_size <= av_fifo_size(ost->muxing_queue)) { av_log(NULL, AV_LOG_ERROR, "Too many packets buffered for output stream %d:%d.\n", @@ -830,6 +877,7 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u if (ret < 0) exit_program(1); av_packet_move_ref(&tmp_pkt, pkt); + ost->muxing_queue_data_size += tmp_pkt.size; av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL); return; } @@ -881,6 +929,8 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT); if (pkt->dts < max) { int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG; + if (exit_on_error) + loglevel = AV_LOG_ERROR; av_log(s, loglevel, "Non-monotonous DTS in output stream " "%d:%d; previous: %"PRId64", current: %"PRId64"; ", ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts); @@ -950,40 +1000,15 @@ static void output_packet(OutputFile *of, AVPacket *pkt, { int ret = 0; - /* apply the output bitstream filters, if any */ - if (ost->nb_bitstream_filters) { - int idx; - - ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt); - if (ret < 0) - goto finish; - - eof = 0; - idx = 1; - while (idx) { - /* get a packet from the previous filter up the chain */ - ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt); - if (ret == AVERROR(EAGAIN)) { - ret = 0; - idx--; - continue; - } else if (ret == AVERROR_EOF) { - eof = 1; - } else if (ret < 0) - goto finish; - - /* send it to the next filter down the chain or to the muxer */ - if (idx < ost->nb_bitstream_filters) { - ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt); + /* apply the output bitstream filters */ + if (ost->bsf_ctx) { + ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt); if (ret < 0) goto finish; - idx++; - eof = 0; - } else if (eof) - goto finish; - else + while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0) write_packet(of, pkt, ost, 0); - } + if (ret == AVERROR(EAGAIN)) + ret = 0; } else if (!eof) write_packet(of, pkt, ost, 0); @@ -1009,6 +1034,71 @@ static int check_recording_time(OutputStream *ost) return 1; } +static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame) +{ + double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision + AVCodecContext *enc = ost->enc_ctx; + if (!frame || frame->pts == AV_NOPTS_VALUE || + !enc || !ost->filter || !ost->filter->graph->graph) + goto early_exit; + + { + AVFilterContext *filter = ost->filter->filter; + + int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 
0 : of->start_time; + AVRational filter_tb = av_buffersink_get_time_base(filter); + AVRational tb = enc->time_base; + int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); + + tb.den <<= extra_bits; + float_pts = + av_rescale_q(frame->pts, filter_tb, tb) - + av_rescale_q(start_time, AV_TIME_BASE_Q, tb); + float_pts /= 1 << extra_bits; + // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers + float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); + + frame->pts = + av_rescale_q(frame->pts, filter_tb, enc->time_base) - + av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); + } + +early_exit: + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", + frame ? av_ts2str(frame->pts) : "NULL", + frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL", + float_pts, + enc ? enc->time_base.num : -1, + enc ? enc->time_base.den : -1); + } + + return float_pts; +} + +static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len); + +static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal) +{ + int ret = AVERROR_BUG; + char error[1024] = {0}; + + if (ost->initialized) + return 0; + + ret = init_output_stream(ost, frame, error, sizeof(error)); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", + ost->file_index, ost->index, error); + + if (fatal) + exit_program(1); + } + + return ret; +} + static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame) { @@ -1020,6 +1110,8 @@ static void do_audio_out(OutputFile *of, OutputStream *ost, pkt.data = NULL; pkt.size = 0; + adjust_frame_pts_to_encoder_tb(of, ost, frame); + if (!check_recording_time(ost)) return; @@ -1154,21 +1246,23 @@ static void do_subtitle_out(OutputFile *of, static void do_video_out(OutputFile *of, OutputStream *ost, - AVFrame *next_picture, - double sync_ipts) + AVFrame *next_picture) { int ret, format_video_sync; AVPacket pkt; AVCodecContext *enc = ost->enc_ctx; - AVCodecParameters *mux_par = ost->st->codecpar; AVRational frame_rate; int nb_frames, nb0_frames, i; double delta, delta0; double duration = 0; + double sync_ipts = AV_NOPTS_VALUE; int frame_size = 0; InputStream *ist = NULL; AVFilterContext *filter = ost->filter->filter; + init_output_stream_wrapper(ost, next_picture, 1); + sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture); + if (ost->source_index >= 0) ist = input_streams[ost->source_index]; @@ -1238,7 +1332,7 @@ static void do_video_out(OutputFile *of, av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0)); delta = duration; delta0 = 0; - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); } case VSYNC_CFR: // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c @@ -1249,18 +1343,18 @@ static void do_video_out(OutputFile *of, else if (delta > 1.1) { nb_frames = lrintf(delta); if (delta0 > 1.1) - nb0_frames = lrintf(delta0 - 0.6); + nb0_frames = llrintf(delta0 - 0.6); } break; case VSYNC_VFR: if (delta <= -0.6) nb_frames = 0; else if (delta > 0.6) - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); break; case VSYNC_DROP: case VSYNC_PASSTHROUGH: - ost->sync_opts = lrint(sync_ipts); + ost->sync_opts = llrint(sync_ipts); break; default: av_assert0(0); @@ -1318,115 +1412,104 @@ static void do_video_out(OutputFile *of, if (!check_recording_time(ost)) return; - if 
(enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && - ost->top_field_first >= 0) - in_picture->top_field_first = !!ost->top_field_first; + in_picture->quality = enc->global_quality; + in_picture->pict_type = 0; - if (in_picture->interlaced_frame) { - if (enc->codec->id == AV_CODEC_ID_MJPEG) - mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; - else - mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT; - } else - mux_par->field_order = AV_FIELD_PROGRESSIVE; + if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE && + in_picture->pts != AV_NOPTS_VALUE) + ost->forced_kf_ref_pts = in_picture->pts; - in_picture->quality = enc->global_quality; - in_picture->pict_type = 0; - - if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE && - in_picture->pts != AV_NOPTS_VALUE) - ost->forced_kf_ref_pts = in_picture->pts; - - pts_time = in_picture->pts != AV_NOPTS_VALUE ? - (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN; - if (ost->forced_kf_index < ost->forced_kf_count && - in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) { - ost->forced_kf_index++; - forced_keyframe = 1; - } else if (ost->forced_keyframes_pexpr) { - double res; - ost->forced_keyframes_expr_const_values[FKF_T] = pts_time; - res = av_expr_eval(ost->forced_keyframes_pexpr, - ost->forced_keyframes_expr_const_values, NULL); - ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n", - ost->forced_keyframes_expr_const_values[FKF_N], - ost->forced_keyframes_expr_const_values[FKF_N_FORCED], - ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N], - ost->forced_keyframes_expr_const_values[FKF_T], - ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T], - res); - if (res) { - forced_keyframe = 1; - ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = - ost->forced_keyframes_expr_const_values[FKF_N]; - ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = - ost->forced_keyframes_expr_const_values[FKF_T]; - ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1; - } - - ost->forced_keyframes_expr_const_values[FKF_N] += 1; - } else if ( ost->forced_keyframes - && !strncmp(ost->forced_keyframes, "source", 6) - && in_picture->key_frame==1) { + pts_time = in_picture->pts != AV_NOPTS_VALUE ? 
+ (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN; + if (ost->forced_kf_index < ost->forced_kf_count && + in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) { + ost->forced_kf_index++; + forced_keyframe = 1; + } else if (ost->forced_keyframes_pexpr) { + double res; + ost->forced_keyframes_expr_const_values[FKF_T] = pts_time; + res = av_expr_eval(ost->forced_keyframes_pexpr, + ost->forced_keyframes_expr_const_values, NULL); + ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n", + ost->forced_keyframes_expr_const_values[FKF_N], + ost->forced_keyframes_expr_const_values[FKF_N_FORCED], + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N], + ost->forced_keyframes_expr_const_values[FKF_T], + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T], + res); + if (res) { forced_keyframe = 1; + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = + ost->forced_keyframes_expr_const_values[FKF_N]; + ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = + ost->forced_keyframes_expr_const_values[FKF_T]; + ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1; } - if (forced_keyframe) { - in_picture->pict_type = AV_PICTURE_TYPE_I; - av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time); - } + ost->forced_keyframes_expr_const_values[FKF_N] += 1; + } else if ( ost->forced_keyframes + && !strncmp(ost->forced_keyframes, "source", 6) + && in_picture->key_frame==1 + && !i) { + forced_keyframe = 1; + } - update_benchmark(NULL); - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "encoder <- type:video " - "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", - av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base), - enc->time_base.num, enc->time_base.den); - } + if (forced_keyframe) { + in_picture->pict_type = AV_PICTURE_TYPE_I; + av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time); + } - ost->frames_encoded++; + update_benchmark(NULL); + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder <- type:video " + "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", + av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base), + enc->time_base.num, enc->time_base.den); + } - ret = avcodec_send_frame(enc, in_picture); + ost->frames_encoded++; + + ret = avcodec_send_frame(enc, in_picture); + if (ret < 0) + goto error; + // Make sure Closed Captions will not be duplicated + av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC); + + while (1) { + ret = avcodec_receive_packet(enc, &pkt); + update_benchmark("encode_video %d.%d", ost->file_index, ost->index); + if (ret == AVERROR(EAGAIN)) + break; if (ret < 0) goto error; - // Make sure Closed Captions will not be duplicated - av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC); - while (1) { - ret = avcodec_receive_packet(enc, &pkt); - update_benchmark("encode_video %d.%d", ost->file_index, ost->index); - if (ret == AVERROR(EAGAIN)) - break; - if (ret < 0) - goto error; - - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "encoder -> type:video " - "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base)); - } - - if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY)) - pkt.pts = ost->sync_opts; - - av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); - - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "encoder -> type:video " - 
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", - av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase), - av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase)); - } - - frame_size = pkt.size; - output_packet(of, &pkt, ost, 0); - - /* if two pass, output log */ - if (ost->logfile && enc->stats_out) { - fprintf(ost->logfile, "%s", enc->stats_out); - } + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder -> type:video " + "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", + av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base), + av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base)); } + + if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY)) + pkt.pts = ost->sync_opts; + + av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase); + + if (debug_ts) { + av_log(NULL, AV_LOG_INFO, "encoder -> type:video " + "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n", + av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase), + av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase)); + } + + frame_size = pkt.size; + output_packet(of, &pkt, ost, 0); + + /* if two pass, output log */ + if (ost->logfile && enc->stats_out) { + fprintf(ost->logfile, "%s", enc->stats_out); + } + } ost->sync_opts++; /* * For video, number of frames in == number of packets out. @@ -1501,8 +1584,6 @@ static void do_video_stats(OutputStream *ost, int frame_size) } } -static int init_output_stream(OutputStream *ost, char *error, int error_len); - static void finish_output_stream(OutputStream *ost) { OutputFile *of = output_files[ost->file_index]; @@ -1539,15 +1620,17 @@ static int reap_filters(int flush) continue; filter = ost->filter->filter; - if (!ost->initialized) { - char error[1024] = ""; - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); - } - } + /* + * Unlike video, with audio the audio frame size matters. + * Currently we are fully reliant on the lavfi filter chain to + * do the buffering deed for us, and thus the frame size parameter + * needs to be set accordingly. Where does one get the required + * frame size? From the initialized AVCodecContext of an audio + * encoder. Thus, if we have gotten to an audio stream, initialize + * the encoder earlier than receiving the first AVFrame. + */ + if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO) + init_output_stream_wrapper(ost, NULL, 1); if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) { return AVERROR(ENOMEM); @@ -1555,7 +1638,6 @@ static int reap_filters(int flush) filtered_frame = ost->filtered_frame; while (1) { - double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision ret = av_buffersink_get_frame_flags(filter, filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST); if (ret < 0) { @@ -1564,7 +1646,7 @@ static int reap_filters(int flush) "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret)); } else if (flush && ret == AVERROR_EOF) { if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO) - do_video_out(of, ost, NULL, AV_NOPTS_VALUE); + do_video_out(of, ost, NULL); } break; } @@ -1572,38 +1654,13 @@ static int reap_filters(int flush) av_frame_unref(filtered_frame); continue; } - if (filtered_frame->pts != AV_NOPTS_VALUE) { - int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 
0 : of->start_time; - AVRational filter_tb = av_buffersink_get_time_base(filter); - AVRational tb = enc->time_base; - int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); - - tb.den <<= extra_bits; - float_pts = - av_rescale_q(filtered_frame->pts, filter_tb, tb) - - av_rescale_q(start_time, AV_TIME_BASE_Q, tb); - float_pts /= 1 << extra_bits; - // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers - float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); - - filtered_frame->pts = - av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) - - av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); - } switch (av_buffersink_get_type(filter)) { case AVMEDIA_TYPE_VIDEO: if (!ost->frame_aspect_ratio.num) enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio; - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", - av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base), - float_pts, - enc->time_base.num, enc->time_base.den); - } - - do_video_out(of, ost, filtered_frame, float_pts); + do_video_out(of, ost, filtered_frame); break; case AVMEDIA_TYPE_AUDIO: if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) && @@ -1743,13 +1800,11 @@ static void print_final_stats(int64_t total_size) } } - static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time) { AVFormatContext *oc = NULL; AVCodecContext *enc = NULL; OutputStream *ost = NULL; - static int64_t last_time = -1; int64_t pts = INT64_MIN + 1; int vid, i; @@ -1761,22 +1816,11 @@ static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_ double bitrate = 0.0; double speed = 0.0; - // 1. calculate operation duration - if (!is_last_report) { - if (last_time == -1) { - last_time = cur_time; - return; - } - if ((cur_time - last_time) < 500000) { - return; - } - last_time = cur_time; - } float t = (cur_time-timer_start) / 1000000.0; oc = output_files[0]->ctx; - // 2. calculate size + // 1. calculate size total_size = avio_size(oc->pb); if (total_size <= 0) { total_size = avio_tell(oc->pb); @@ -1789,20 +1833,20 @@ static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_ if (!ost->stream_copy) { - // 3. extract quality + // 2. extract quality quality = ost->quality / (float) FF_QP2LAMBDA; } if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { - // 4. extract frame number + // 3. extract frame number frame_number = ost->frame_number; - // 5. calculate fps + // 4. calculate fps fps = t > 1 ? frame_number / t : 0; } - // 6. calculate time + // 5. calculate time if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st), ost->st->time_base, AV_TIME_BASE_Q)); @@ -1810,10 +1854,10 @@ static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_ vid = 1; } - // 7. calculate time, with microseconds to milliseconds conversion + // 6. calculate time, with microseconds to milliseconds conversion seconds = FFABS(pts) / 1000; - // 8. calculating kbit/s value + // 7. calculating kbit/s value bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1; // 9. 
calculate processing speed = processed stream duration/operation duration @@ -1836,8 +1880,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti double bitrate; double speed; int64_t pts = INT64_MIN + 1; - static int64_t last_time = -1; - static int qp_histogram[52]; int hours, mins, secs, us; const char *hours_sign; int ret; @@ -1846,9 +1888,9 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti if (!is_last_report) { if (last_time == -1) { last_time = cur_time; - return; } - if ((cur_time - last_time) < 500000) + if (((cur_time - last_time) < stats_period && !first_report) || + (first_report && nb_output_dumped < nb_output_files)) return; last_time = cur_time; } @@ -1936,9 +1978,17 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti vid = 1; } /* compute min output value */ - if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) + if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) { pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st), ost->st->time_base, AV_TIME_BASE_Q)); + if (copy_ts) { + if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) + copy_ts_first_pts = pts; + if (copy_ts_first_pts != AV_NOPTS_VALUE) + pts -= copy_ts_first_pts; + } + } + if (is_last_report) nb_frames_drop += ost->last_dropped; } @@ -2022,6 +2072,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti } } + first_report = 0; + if (is_last_report) print_final_stats(total_size); } @@ -2055,7 +2107,6 @@ static void flush_encoders(void) // Maybe we should just let encoding fail instead. if (!ost->initialized) { FilterGraph *fg = ost->filter->graph; - char error[1024] = ""; av_log(NULL, AV_LOG_WARNING, "Finishing stream %d:%d without any data written to it.\n", @@ -2081,16 +2132,8 @@ static void flush_encoders(void) finish_output_stream(ost); } - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); + init_output_stream_wrapper(ost, NULL, 1); } - } - - if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1) - continue; if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) continue; @@ -2232,20 +2275,20 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; - if (pkt->dts == AV_NOPTS_VALUE) + if (pkt->dts == AV_NOPTS_VALUE) { opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); - else - opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); - opkt.dts -= ost_tb_start_time; - - if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) { + } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size); if(!duration) duration = ist->dec_ctx->frame_size; - opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts, - (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last, - ost->mux_timebase) - ost_tb_start_time; - } + opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts, + (AVRational){1, ist->dec_ctx->sample_rate}, duration, + &ist->filter_in_rescale_delta_last, ost->mux_timebase); + /* dts will be set immediately afterwards to what pts is now */ + opkt.pts = 
opkt.dts - ost_tb_start_time; + } else + opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); + opkt.dts -= ost_tb_start_time; opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase); @@ -2573,7 +2616,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ av_log(ist->dec_ctx, AV_LOG_WARNING, "video_delay is larger in decoder than demuxer %d > %d.\n" "If you want to help, upload a sample " - "of this file to ftp://upload.ffmpeg.org/incoming/ " + "of this file to https://streams.videolan.org/upload/ " "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n", ist->dec_ctx->has_b_frames, ist->st->codecpar->video_delay); @@ -2695,7 +2738,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, return ret; if (ist->sub2video.frame) { - sub2video_update(ist, &subtitle); + sub2video_update(ist, INT64_MIN, &subtitle); } else if (ist->nb_filters) { if (!ist->sub2video.sub_queue) ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle)); @@ -2964,7 +3007,7 @@ static void print_sdp(void) if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename); } else { - avio_printf(sdp_pb, "SDP:\n%s", sdp); + avio_print(sdp_pb, sdp); avio_closep(&sdp_pb); av_freep(&sdp_filename); } @@ -3086,7 +3129,9 @@ static int init_input_stream(int ist_index, char *error, int error_len) ist->dec_ctx->opaque = ist; ist->dec_ctx->get_format = get_format; ist->dec_ctx->get_buffer2 = get_buffer; +#if LIBAVCODEC_VERSION_MAJOR < 60 ist->dec_ctx->thread_safe_callbacks = 1; +#endif av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0); if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE && @@ -3172,6 +3217,7 @@ static int check_init_output_file(OutputFile *of, int file_index) of->header_written = 1; av_dump_format(of->ctx, file_index, of->ctx->url, 1); + nb_output_dumped++; if (sdp_filename || want_sdp) print_sdp(); @@ -3187,6 +3233,7 @@ static int check_init_output_file(OutputFile *of, int file_index) while (av_fifo_size(ost->muxing_queue)) { AVPacket pkt; av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL); + ost->muxing_queue_data_size -= pkt.size; write_packet(of, &pkt, ost, 1); } } @@ -3196,31 +3243,25 @@ static int check_init_output_file(OutputFile *of, int file_index) static int init_output_bsfs(OutputStream *ost) { - AVBSFContext *ctx; - int i, ret; + AVBSFContext *ctx = ost->bsf_ctx; + int ret; - if (!ost->nb_bitstream_filters) + if (!ctx) return 0; - for (i = 0; i < ost->nb_bitstream_filters; i++) { - ctx = ost->bsf_ctx[i]; - - ret = avcodec_parameters_copy(ctx->par_in, - i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar); + ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar); if (ret < 0) return ret; - ctx->time_base_in = i ? 
ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base; + ctx->time_base_in = ost->st->time_base; ret = av_bsf_init(ctx); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n", - ost->bsf_ctx[i]->filter->name); + ctx->filter->name); return ret; } - } - ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1]; ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out); if (ret < 0) return ret; @@ -3472,7 +3513,7 @@ static void init_encoder_time_base(OutputStream *ost, AVRational default_time_ba enc_ctx->time_base = default_time_base; } -static int init_output_stream_encode(OutputStream *ost) +static int init_output_stream_encode(OutputStream *ost, AVFrame *frame) { InputStream *ist = get_input_stream(ost); AVCodecContext *enc_ctx = ost->enc_ctx; @@ -3556,10 +3597,6 @@ static int init_output_stream_encode(OutputStream *ost) av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n" "Please consider specifying a lower framerate, a different muxer or -vsync 2\n"); } - for (j = 0; j < ost->forced_kf_count; j++) - ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j], - AV_TIME_BASE_Q, - enc_ctx->time_base); enc_ctx->width = av_buffersink_get_w(ost->filter->filter); enc_ctx->height = av_buffersink_get_h(ost->filter->filter); @@ -3573,6 +3610,14 @@ static int init_output_stream_encode(OutputStream *ost) enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); + if (frame) { + enc_ctx->color_range = frame->color_range; + enc_ctx->color_primaries = frame->color_primaries; + enc_ctx->color_trc = frame->color_trc; + enc_ctx->colorspace = frame->colorspace; + enc_ctx->chroma_sample_location = frame->chroma_location; + } + enc_ctx->framerate = ost->frame_rate; ost->st->avg_frame_rate = ost->frame_rate; @@ -3590,6 +3635,20 @@ static int init_output_stream_encode(OutputStream *ost) enc_ctx->field_order = AV_FIELD_TT; } + if (frame) { + if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && + ost->top_field_first >= 0) + frame->top_field_first = !!ost->top_field_first; + + if (frame->interlaced_frame) { + if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG) + enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; + else + enc_ctx->field_order = frame->top_field_first ? 
AV_FIELD_TB:AV_FIELD_BT; + } else + enc_ctx->field_order = AV_FIELD_PROGRESSIVE; + } + if (ost->forced_keyframes) { if (!strncmp(ost->forced_keyframes, "expr:", 5)) { ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5, @@ -3630,7 +3689,7 @@ static int init_output_stream_encode(OutputStream *ost) return 0; } -static int init_output_stream(OutputStream *ost, char *error, int error_len) +static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len) { int ret = 0; @@ -3639,7 +3698,7 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) AVCodecContext *dec = NULL; InputStream *ist; - ret = init_output_stream_encode(ost); + ret = init_output_stream_encode(ost, frame); if (ret < 0) return ret; @@ -3661,21 +3720,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) !av_dict_get(ost->encoder_opts, "ab", NULL, 0)) av_dict_set(&ost->encoder_opts, "b", "128000", 0); - if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) && - ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format == - av_buffersink_get_format(ost->filter->filter)) { - ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter)); - if (!ost->enc_ctx->hw_frames_ctx) - return AVERROR(ENOMEM); - } else { - ret = hw_device_setup_for_encode(ost); - if (ret < 0) { - snprintf(error, error_len, "Device setup failed for " - "encoder on output stream #%d:%d : %s", - ost->file_index, ost->index, av_err2str(ret)); - return ret; - } + ret = hw_device_setup_for_encode(ost); + if (ret < 0) { + snprintf(error, error_len, "Device setup failed for " + "encoder on output stream #%d:%d : %s", + ost->file_index, ost->index, av_err2str(ret)); + return ret; } + if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) { int input_props = 0, output_props = 0; AVCodecDescriptor const *input_descriptor = @@ -3719,12 +3771,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) "Error initializing the output stream codec context.\n"); exit_program(1); } - /* - * FIXME: ost->st->codec should't be needed here anymore. 
- */ - ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx); - if (ret < 0) - return ret; if (ost->enc_ctx->nb_coded_side_data) { int i; @@ -3751,12 +3797,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) int i; for (i = 0; i < ist->st->nb_side_data; i++) { AVPacketSideData *sd = &ist->st->side_data[i]; - uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); - if (!dst) - return AVERROR(ENOMEM); - memcpy(dst, sd->data, sd->size); - if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) - av_display_rotation_set((uint32_t *)dst, 0); + if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { + uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); + if (!dst) + return AVERROR(ENOMEM); + memcpy(dst, sd->data, sd->size); + if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) + av_display_rotation_set((uint32_t *)dst, 0); + } } } @@ -3767,8 +3815,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) // copy estimated duration as a hint to the muxer if (ost->st->duration <= 0 && ist && ist->st->duration > 0) ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); - - ost->st->codec->codec= ost->enc_ctx->codec; } else if (ost->stream_copy) { ret = init_output_stream_streamcopy(ost); if (ret < 0) @@ -3778,7 +3824,7 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) // parse user provided disposition, and update stream values if (ost->disposition) { static const AVOption opts[] = { - { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" }, + { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, (double)INT64_MAX, .unit = "flags" }, { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" }, { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" }, { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" }, @@ -3881,13 +3927,22 @@ static int transcode_init(void) goto dump_format; } - /* open each encoder */ + /* + * initialize stream copy and subtitle/data streams. + * Encoded AVFrame based streams will get initialized as follows: + * - when the first AVFrame is received in do_video_out + * - just before the first AVFrame is received in either transcode_step + * or reap_filters due to us requiring the filter chain buffer sink + * to be configured with the correct audio frame size, which is only + * known after the encoder is initialized. 
+ */ for (i = 0; i < nb_output_streams; i++) { - // skip streams fed from filtergraphs until we have a frame for them - if (output_streams[i]->filter) + if (!output_streams[i]->stream_copy && + (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO || + output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) continue; - ret = init_output_stream(output_streams[i], error, sizeof(error)); + ret = init_output_stream_wrapper(output_streams[i], NULL, 0); if (ret < 0) goto dump_format; } @@ -4085,13 +4140,12 @@ static void set_tty_echo(int on) static int check_keyboard_interaction(int64_t cur_time) { int i, ret, key; - static int64_t last_time; if (received_nb_signals) return AVERROR_EXIT; /* read_key() returns 0 on EOF */ - if(cur_time - last_time >= 100000 && !run_as_daemon){ + if(cur_time - keyboard_last_time >= 100000 && !run_as_daemon){ key = read_key(); - last_time = cur_time; + keyboard_last_time = cur_time; }else key = -1; if (key == 'q') @@ -4151,13 +4205,9 @@ static int check_keyboard_interaction(int64_t cur_time) if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { - debug = input_streams[0]->st->codec->debug<<1; + debug = input_streams[0]->dec_ctx->debug << 1; if(!debug) debug = 1; - while(debug & (FF_DEBUG_DCT_COEFF #if FF_API_DEBUG_MV |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE #endif )) //unsupported, would just crash + while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash debug += debug; }else{ char buf[32]; @@ -4174,7 +4224,7 @@ static int check_keyboard_interaction(int64_t cur_time) fprintf(stderr,"error parsing debug value\n"); } for(i=0;i<nb_input_streams;i++) { - input_streams[i]->st->codec->debug = debug; + input_streams[i]->dec_ctx->debug = debug; } for(i=0;ithread_queue_size < 0) + f->thread_queue_size = (nb_input_files > 1 ? 8 : 0); + if (!f->thread_queue_size) return 0; if (f->ctx->pb ?
!f->ctx->pb->seekable : @@ -4324,7 +4376,7 @@ static int get_input_packet(InputFile *f, AVPacket *pkt) } #if HAVE_THREADS - if (nb_input_files > 1) + if (f->thread_queue_size) return get_input_packet_mt(f, pkt); #endif return av_read_frame(f->ctx, pkt); @@ -4415,6 +4467,7 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is) ifile->time_base = ist->st->time_base; /* the total duration of the stream, max_pts - min_pts is * the duration of the stream without the last frame */ + if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration) duration += ist->max_pts - ist->min_pts; ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base, ifile->time_base); @@ -4442,6 +4495,7 @@ static int process_input(int file_index) int ret, thread_ret, i, j; int64_t duration; int64_t pkt_dts; + int disable_discontinuity_correction = copy_ts; is = ifile->ctx; ret = get_input_packet(ifile, &pkt); @@ -4643,10 +4697,20 @@ static int process_input(int file_index) pkt.dts += duration; pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + + if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && + (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) { + int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<st->pts_wrap_bits), + ist->st->time_base, AV_TIME_BASE_Q, + AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10) + disable_discontinuity_correction = 0; + } + if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && - !copy_ts) { + !disable_discontinuity_correction) { int64_t delta = pkt_dts - ist->next_dts; if (is->iformat->flags & AVFMT_TS_DISCONT) { if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE || @@ -4784,15 +4848,30 @@ static int transcode_step(void) } if (ost->filter && ost->filter->graph->graph) { - if (!ost->initialized) { - char error[1024] = {0}; - ret = init_output_stream(ost, error, sizeof(error)); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n", - ost->file_index, ost->index, error); - exit_program(1); - } - } + /* + * Similar case to the early audio initialization in reap_filters. + * Audio is special in ffmpeg.c currently as we depend on lavfi's + * audio frame buffering/creation to get the output audio frame size + * in samples correct. The audio frame size for the filter chain is + * configured during the output stream initialization. + * + * Apparently avfilter_graph_request_oldest (called in + * transcode_from_filter just down the line) peeks. Peeking already + * puts one frame "ready to be given out", which means that any + * update in filter buffer sink configuration afterwards will not + * help us. And yes, even if it would be utilized, + * av_buffersink_get_samples is affected, as it internally utilizes + * the same early exit for peeked frames. + * + * In other words, if avfilter_graph_request_oldest would not make + * further filter chain configuration or usage of + * av_buffersink_get_samples useless (by just causing the return + * of the peeked AVFrame as-is), we could get rid of this additional + * early encoder initialization. 
+ */ + if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO) + init_output_stream_wrapper(ost, NULL, 1); + if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0) return ret; if (!ist) @@ -4920,6 +4999,10 @@ static int transcode(void) av_freep(&ost->enc_ctx->stats_in); } total_packets_written += ost->packets_written; + if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) { + av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i); + exit_program(1); + } } if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) { @@ -4937,7 +5020,6 @@ static int transcode(void) } } - av_buffer_unref(&hw_device_ctx); hw_device_free_all(); /* finished ! */ @@ -5026,11 +5108,13 @@ void ffmpeg_var_cleanup() { received_sigterm = 0; received_nb_signals = 0; ffmpeg_exited = 0; + copy_ts_first_pts = AV_NOPTS_VALUE; run_as_daemon = 0; nb_frames_dup = 0; dup_warning = 1000; nb_frames_drop = 0; + nb_output_dumped = 0; want_sdp = 1; @@ -5048,6 +5132,10 @@ void ffmpeg_var_cleanup() { filtergraphs = NULL; nb_filtergraphs = 0; + + last_time = -1; + keyboard_last_time = 0; + first_report = 1; } void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double)) @@ -5213,6 +5301,10 @@ int ffmpeg_execute(int argc, char **argv) "shift input timestamps to start at 0 when using copyts" }, { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb }, "copy input stream time base when stream copying", "mode" }, + { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero }, + "shift input timestamps to start at 0 when using copyts" }, + { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb }, + "copy input stream time base when stream copying", "mode" }, { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(shortest) }, "finish encoding within shortest input" }, @@ -5264,8 +5356,12 @@ int ffmpeg_execute(int argc, char **argv) "create a complex filtergraph", "graph_description" }, { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script }, "read complex filtergraph description from a file", "filename" }, + { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters }, + "enable automatic conversion filters globally" }, { "stats", OPT_BOOL, { &print_stats }, "print progress report during encoding", }, + { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period }, + "set the period at which ffmpeg updates stats and -progress output", "time" }, { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_attach }, "add an attachment to the output file", "filename" }, @@ -5387,6 +5483,9 @@ int ffmpeg_execute(int argc, char **argv) { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) }, "automatically insert correct rotate filters" }, + { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC | + OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) }, + "automatically insert a scale filter at the end of the filter graph" }, /* audio options */ { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames }, "set the number of audio frames to output", "number" }, @@ -5473,6 +5572,8 @@ int ffmpeg_execute(int argc, char **argv) { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) }, "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" }, + { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT
| OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) }, + "set the threshold after which max_muxing_queue_size is taken into account", "bytes" }, /* data codec support */ { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec }, diff --git a/apple/src/fftools_ffmpeg.h b/apple/src/fftools_ffmpeg.h index 0c82515..599d7c7 100644 --- a/apple/src/fftools_ffmpeg.h +++ b/apple/src/fftools_ffmpeg.h @@ -84,7 +84,6 @@ enum HWAccelID { HWACCEL_GENERIC, HWACCEL_VIDEOTOOLBOX, HWACCEL_QSV, - HWACCEL_CUVID, }; typedef struct HWAccel { @@ -239,6 +238,8 @@ typedef struct OptionsContext { int nb_passlogfiles; SpecifierOpt *max_muxing_queue_size; int nb_max_muxing_queue_size; + SpecifierOpt *muxing_queue_data_threshold; + int nb_muxing_queue_data_threshold; SpecifierOpt *guess_layout_max; int nb_guess_layout_max; SpecifierOpt *apad; @@ -253,6 +254,8 @@ typedef struct OptionsContext { int nb_time_bases; SpecifierOpt *enc_time_bases; int nb_enc_time_bases; + SpecifierOpt *autoscale; + int nb_autoscale; } OptionsContext; typedef struct InputFilter { @@ -372,6 +375,7 @@ typedef struct InputStream { AVFifoBuffer *sub_queue; ///< queue of AVSubtitle* before filter init AVFrame *frame; int w, h; + unsigned int initialize; ///< marks if sub2video_update should force an initialization } sub2video; int dr1; @@ -454,6 +458,7 @@ enum forced_keyframes_const { }; #define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0) +#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM (1 << 1) extern const char *const forced_keyframes_const_names[]; @@ -482,8 +487,7 @@ typedef struct OutputStream { AVRational mux_timebase; AVRational enc_timebase; - int nb_bitstream_filters; - AVBSFContext **bsf_ctx; + AVBSFContext *bsf_ctx; AVCodecContext *enc_ctx; AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */ @@ -502,6 +506,7 @@ typedef struct OutputStream { int force_fps; int top_field_first; int rotate_overridden; + int autoscale; double rotate_override_value; AVRational frame_aspect_ratio; @@ -567,6 +572,15 @@ typedef struct OutputStream { /* the packets are buffered here until the muxer is ready to be initialized */ AVFifoBuffer *muxing_queue; + /* + * The size of the AVPackets' buffers in queue. + * Updated when a packet is either pushed or pulled from the queue. 
+ */ + size_t muxing_queue_data_size; + + /* Threshold after which max_muxing_queue_size will be in effect */ + size_t muxing_queue_data_threshold; + /* packet picture type */ int pict_type; @@ -623,6 +637,7 @@ extern __thread int debug_ts; extern __thread int exit_on_error; extern __thread int abort_on_flags; extern __thread int print_stats; +extern __thread int64_t stats_period; extern __thread int qp_hist; extern __thread int stdin_interaction; extern __thread int frame_bits_per_raw_sample; @@ -633,11 +648,11 @@ extern __thread char *videotoolbox_pixfmt; extern __thread int filter_nbthreads; extern __thread int filter_complex_nbthreads; extern __thread int vstats_version; +extern __thread int auto_conversion_filters; extern __thread const AVIOInterruptCB int_cb; extern const HWAccel hwaccels[]; -extern __thread AVBufferRef *hw_device_ctx; #if CONFIG_QSV extern __thread char *qsv_device; #endif @@ -656,8 +671,8 @@ void assert_avoptions(AVDictionary *m); int guess_input_channel_layout(InputStream *ist); -enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *avctx, AVCodec *codec, enum AVPixelFormat target); -void choose_sample_fmt(AVStream *st, AVCodec *codec); +enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *avctx, const AVCodec *codec, enum AVPixelFormat target); +void choose_sample_fmt(AVStream *st, const AVCodec *codec); int configure_filtergraph(FilterGraph *fg); int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out); @@ -667,7 +682,7 @@ int filtergraph_is_simple(FilterGraph *fg); int init_simple_filtergraph(InputStream *ist, OutputStream *ost); int init_complex_filtergraph(FilterGraph *fg); -void sub2video_update(InputStream *ist, AVSubtitle *sub); +void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub); int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame); @@ -675,7 +690,6 @@ int ffmpeg_parse_options(int argc, char **argv); int videotoolbox_init(AVCodecContext *s); int qsv_init(AVCodecContext *s); -int cuvid_init(AVCodecContext *s); HWDevice *hw_device_get_by_name(const char *name); int hw_device_init_from_string(const char *arg, HWDevice **dev); @@ -683,6 +697,7 @@ void hw_device_free_all(void); int hw_device_setup_for_decode(InputStream *ist); int hw_device_setup_for_encode(OutputStream *ost); +int hw_device_setup_for_filter(FilterGraph *fg); int hwaccel_decode_init(AVCodecContext *avctx); @@ -698,6 +713,7 @@ int opt_progress(void *optctx, const char *opt, const char *arg); int opt_target(void *optctx, const char *opt, const char *arg); int opt_vsync(void *optctx, const char *opt, const char *arg); int opt_abort_on(void *optctx, const char *opt, const char *arg); +int opt_stats_period(void *optctx, const char *opt, const char *arg); int opt_qscale(void *optctx, const char *opt, const char *arg); int opt_profile(void *optctx, const char *opt, const char *arg); int opt_filter_complex(void *optctx, const char *opt, const char *arg); diff --git a/apple/src/fftools_ffmpeg_filter.c b/apple/src/fftools_ffmpeg_filter.c index 5877d76..70a4e98 100644 --- a/apple/src/fftools_ffmpeg_filter.c +++ b/apple/src/fftools_ffmpeg_filter.c @@ -68,7 +68,7 @@ static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodec } } -enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target) +enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const AVCodec *codec, enum AVPixelFormat target) { if 
(codec && codec->pix_fmts) { const enum AVPixelFormat *p = codec->pix_fmts; @@ -98,7 +98,7 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCod return target; } -void choose_sample_fmt(AVStream *st, AVCodec *codec) +void choose_sample_fmt(AVStream *st, const AVCodec *codec) { if (codec && codec->sample_fmts) { const enum AVSampleFormat *p = codec->sample_fmts; @@ -107,7 +107,8 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec) break; } if (*p == -1) { - if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0])) + const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id); + if(desc && (desc->props & AV_CODEC_PROP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0])) av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n"); if(av_get_sample_fmt_name(st->codecpar->format)) av_log(NULL, AV_LOG_WARNING, @@ -477,7 +478,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, if (ret < 0) return ret; - if (ofilter->width || ofilter->height) { + if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) { char args[255]; AVFilterContext *filter; AVDictionaryEntry *e = NULL; @@ -748,6 +749,12 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter) return AVERROR(ENOMEM); ist->sub2video.last_pts = INT64_MIN; ist->sub2video.end_pts = INT64_MIN; + + /* sub2video structure has been (re-)initialized. + Mark it as such so that the system will be + initialized with the first received heartbeat. */ + ist->sub2video.initialize = 1; + return 0; } @@ -794,10 +801,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC); av_bprintf(&args, "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:" - "pixel_aspect=%d/%d:sws_param=flags=%d", + "pixel_aspect=%d/%d", ifilter->width, ifilter->height, ifilter->format, - tb.num, tb.den, sar.num, sar.den, - SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0)); + tb.num, tb.den, sar.num, sar.den); if (fr.num && fr.den) av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den); snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index, @@ -1064,17 +1070,9 @@ int configure_filtergraph(FilterGraph *fg) if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0) goto fail; - if (filter_hw_device || hw_device_ctx) { - AVBufferRef *device = filter_hw_device ? 
filter_hw_device->device_ref - : hw_device_ctx; - for (i = 0; i < fg->graph->nb_filters; i++) { - fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device); - if (!fg->graph->filters[i]->hw_device_ctx) { - ret = AVERROR(ENOMEM); - goto fail; - } - } - } + ret = hw_device_setup_for_filter(fg); + if (ret < 0) + goto fail; if (simple && (!inputs || inputs->next || !outputs || outputs->next)) { const char *num_inputs; @@ -1114,6 +1112,8 @@ int configure_filtergraph(FilterGraph *fg) configure_output_filter(fg, fg->outputs[i], cur); avfilter_inout_free(&outputs); + if (!auto_conversion_filters) + avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE); if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0) goto fail; @@ -1177,7 +1177,7 @@ int configure_filtergraph(FilterGraph *fg) while (av_fifo_size(ist->sub2video.sub_queue)) { AVSubtitle tmp; av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL); - sub2video_update(ist, &tmp); + sub2video_update(ist, INT64_MIN, &tmp); avsubtitle_free(&tmp); } } diff --git a/apple/src/fftools_ffmpeg_hw.c b/apple/src/fftools_ffmpeg_hw.c index 019ee17..283474c 100644 --- a/apple/src/fftools_ffmpeg_hw.c +++ b/apple/src/fftools_ffmpeg_hw.c @@ -28,6 +28,8 @@ #include #include "libavutil/avstring.h" +#include "libavutil/pixdesc.h" +#include "libavfilter/buffersink.h" #include "fftools_ffmpeg.h" @@ -425,18 +427,57 @@ int hw_device_setup_for_decode(InputStream *ist) int hw_device_setup_for_encode(OutputStream *ost) { - HWDevice *dev; + const AVCodecHWConfig *config; + HWDevice *dev = NULL; + AVBufferRef *frames_ref = NULL; + int i; + + if (ost->filter) { + frames_ref = av_buffersink_get_hw_frames_ctx(ost->filter->filter); + if (frames_ref && + ((AVHWFramesContext*)frames_ref->data)->format == + ost->enc_ctx->pix_fmt) { + // Matching format, will try to use hw_frames_ctx. + } else { + frames_ref = NULL; + } + } + + for (i = 0;; i++) { + config = avcodec_get_hw_config(ost->enc, i); + if (!config) + break; + + if (frames_ref && + config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX && + (config->pix_fmt == AV_PIX_FMT_NONE || + config->pix_fmt == ost->enc_ctx->pix_fmt)) { + av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input " + "frames context (format %s) with %s encoder.\n", + av_get_pix_fmt_name(ost->enc_ctx->pix_fmt), + ost->enc->name); + ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref); + if (!ost->enc_ctx->hw_frames_ctx) + return AVERROR(ENOMEM); + return 0; + } + + if (!dev && + config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) + dev = hw_device_get_by_type(config->device_type); + } - dev = hw_device_match_by_codec(ost->enc); if (dev) { + av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s " + "(type %s) with %s encoder.\n", dev->name, + av_hwdevice_get_type_name(dev->type), ost->enc->name); ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref); if (!ost->enc_ctx->hw_device_ctx) return AVERROR(ENOMEM); - return 0; } else { // No device required, or no device available. - return 0; } + return 0; } static int hwaccel_retrieve_data(AVCodecContext *avctx, AVFrame *input) @@ -489,3 +530,31 @@ int hwaccel_decode_init(AVCodecContext *avctx) return 0; } + +int hw_device_setup_for_filter(FilterGraph *fg) +{ + HWDevice *dev; + int i; + + // If the user has supplied exactly one hardware device then just + // give it straight to every filter for convenience. If more than + // one device is available then the user needs to pick one explcitly + // with the filter_hw_device option. 
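Illustrative aside, not part of the patch: the selection rule described in the comment above can be shown in isolation. This minimal sketch uses hypothetical stand-in types and a made-up helper name (HWDeviceStub, pick_filter_device) rather than the real HWDevice API; only the decision logic mirrors the patch: an explicit -filter_hw_device choice wins, a single configured device is used implicitly, and anything else yields no device.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for a hardware device entry; only the name matters here. */
typedef struct HWDeviceStub {
    const char *name;
} HWDeviceStub;

/* Mirror of the rule documented above: explicit choice wins, a single
 * configured device is used for convenience, otherwise no device is picked. */
static HWDeviceStub *pick_filter_device(HWDeviceStub *explicit_choice,
                                        HWDeviceStub **devices, int nb_devices)
{
    if (explicit_choice)
        return explicit_choice;   /* user passed -filter_hw_device */
    if (nb_devices == 1)
        return devices[0];        /* exactly one device: use it implicitly */
    return NULL;                  /* zero or several devices: require an explicit pick */
}

int main(void)
{
    HWDeviceStub cuda = { "cuda" }, vaapi = { "vaapi" };
    HWDeviceStub *all[] = { &cuda, &vaapi };

    HWDeviceStub *sel = pick_filter_device(NULL, all, 2);
    printf("selected: %s\n", sel ? sel->name : "(none, pick one explicitly)");
    return 0;
}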
+ if (filter_hw_device) + dev = filter_hw_device; + else if (nb_hw_devices == 1) + dev = hw_devices[0]; + else + dev = NULL; + + if (dev) { + for (i = 0; i < fg->graph->nb_filters; i++) { + fg->graph->filters[i]->hw_device_ctx = + av_buffer_ref(dev->device_ref); + if (!fg->graph->filters[i]->hw_device_ctx) + return AVERROR(ENOMEM); + } + } + + return 0; +} diff --git a/apple/src/fftools_ffmpeg_opt.c b/apple/src/fftools_ffmpeg_opt.c index 7ffe979..de7c497 100644 --- a/apple/src/fftools_ffmpeg_opt.c +++ b/apple/src/fftools_ffmpeg_opt.c @@ -59,16 +59,82 @@ #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass" +#define SPECIFIER_OPT_FMT_str "%s" +#define SPECIFIER_OPT_FMT_i "%i" +#define SPECIFIER_OPT_FMT_i64 "%"PRId64 +#define SPECIFIER_OPT_FMT_ui64 "%"PRIu64 +#define SPECIFIER_OPT_FMT_f "%f" +#define SPECIFIER_OPT_FMT_dbl "%lf" + +static const char *const opt_name_codec_names[] = {"c", "codec", "acodec", "vcodec", "scodec", "dcodec", NULL}; +static const char *const opt_name_audio_channels[] = {"ac", NULL}; +static const char *const opt_name_audio_sample_rate[] = {"ar", NULL}; +static const char *const opt_name_frame_rates[] = {"r", NULL}; +static const char *const opt_name_frame_sizes[] = {"s", NULL}; +static const char *const opt_name_frame_pix_fmts[] = {"pix_fmt", NULL}; +static const char *const opt_name_ts_scale[] = {"itsscale", NULL}; +static const char *const opt_name_hwaccels[] = {"hwaccel", NULL}; +static const char *const opt_name_hwaccel_devices[] = {"hwaccel_device", NULL}; +static const char *const opt_name_hwaccel_output_formats[] = {"hwaccel_output_format", NULL}; +static const char *const opt_name_autorotate[] = {"autorotate", NULL}; +static const char *const opt_name_autoscale[] = {"autoscale", NULL}; +static const char *const opt_name_max_frames[] = {"frames", "aframes", "vframes", "dframes", NULL}; +static const char *const opt_name_bitstream_filters[] = {"bsf", "absf", "vbsf", NULL}; +static const char *const opt_name_codec_tags[] = {"tag", "atag", "vtag", "stag", NULL}; +static const char *const opt_name_sample_fmts[] = {"sample_fmt", NULL}; +static const char *const opt_name_qscale[] = {"q", "qscale", NULL}; +static const char *const opt_name_forced_key_frames[] = {"forced_key_frames", NULL}; +static const char *const opt_name_force_fps[] = {"force_fps", NULL}; +static const char *const opt_name_frame_aspect_ratios[] = {"aspect", NULL}; +static const char *const opt_name_rc_overrides[] = {"rc_override", NULL}; +static const char *const opt_name_intra_matrices[] = {"intra_matrix", NULL}; +static const char *const opt_name_inter_matrices[] = {"inter_matrix", NULL}; +static const char *const opt_name_chroma_intra_matrices[] = {"chroma_intra_matrix", NULL}; +static const char *const opt_name_top_field_first[] = {"top", NULL}; +static const char *const opt_name_presets[] = {"pre", "apre", "vpre", "spre", NULL}; +static const char *const opt_name_copy_initial_nonkeyframes[] = {"copyinkfr", NULL}; +static const char *const opt_name_copy_prior_start[] = {"copypriorss", NULL}; +static const char *const opt_name_filters[] = {"filter", "af", "vf", NULL}; +static const char *const opt_name_filter_scripts[] = {"filter_script", NULL}; +static const char *const opt_name_reinit_filters[] = {"reinit_filter", NULL}; +static const char *const opt_name_fix_sub_duration[] = {"fix_sub_duration", NULL}; +static const char *const opt_name_canvas_sizes[] = {"canvas_size", NULL}; +static const char *const opt_name_pass[] = {"pass", NULL}; +static const char *const opt_name_passlogfiles[] = 
{"passlogfile", NULL}; +static const char *const opt_name_max_muxing_queue_size[] = {"max_muxing_queue_size", NULL}; +static const char *const opt_name_muxing_queue_data_threshold[] = {"muxing_queue_data_threshold", NULL}; +static const char *const opt_name_guess_layout_max[] = {"guess_layout_max", NULL}; +static const char *const opt_name_apad[] = {"apad", NULL}; +static const char *const opt_name_discard[] = {"discard", NULL}; +static const char *const opt_name_disposition[] = {"disposition", NULL}; +static const char *const opt_name_time_bases[] = {"time_base", NULL}; +static const char *const opt_name_enc_time_bases[] = {"enc_time_base", NULL}; + +#define WARN_MULTIPLE_OPT_USAGE(name, type, so, st)\ +{\ + char namestr[128] = "";\ + const char *spec = so->specifier && so->specifier[0] ? so->specifier : "";\ + for (i = 0; opt_name_##name[i]; i++)\ + av_strlcatf(namestr, sizeof(namestr), "-%s%s", opt_name_##name[i], opt_name_##name[i+1] ? (opt_name_##name[i+2] ? ", " : " or ") : "");\ + av_log(NULL, AV_LOG_WARNING, "Multiple %s options specified for stream %d, only the last option '-%s%s%s "SPECIFIER_OPT_FMT_##type"' will be used.\n",\ + namestr, st->index, opt_name_##name[0], spec[0] ? ":" : "", spec, so->u.type);\ +} + #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\ {\ - int i, ret;\ + int i, ret, matches = 0;\ + SpecifierOpt *so;\ for (i = 0; i < o->nb_ ## name; i++) {\ char *spec = o->name[i].specifier;\ - if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\ + if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0) {\ outvar = o->name[i].u.type;\ - else if (ret < 0)\ + so = &o->name[i];\ + matches++;\ + } else if (ret < 0)\ exit_program(1);\ }\ + if (matches > 1)\ + WARN_MULTIPLE_OPT_USAGE(name, type, so, st);\ } #define MATCH_PER_TYPE_OPT(name, type, outvar, fmtctx, mediatype)\ @@ -87,13 +153,9 @@ const HWAccel hwaccels[] = { #endif #if CONFIG_LIBMFX { "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV }, -#endif -#if CONFIG_CUVID - { "cuvid", cuvid_init, HWACCEL_CUVID, AV_PIX_FMT_CUDA }, #endif { 0 }, }; -__thread AVBufferRef *hw_device_ctx; __thread HWDevice *filter_hw_device; __thread char *vstats_filename; @@ -126,6 +188,8 @@ __thread float max_error_rate = 2.0/3; __thread int filter_nbthreads = 0; __thread int filter_complex_nbthreads = 0; __thread int vstats_version = 2; +__thread int auto_conversion_filters = 1; +__thread int64_t stats_period = 500000; __thread int intra_only = 0; @@ -184,19 +248,17 @@ void init_options(OptionsContext *o) o->limit_filesize = UINT64_MAX; o->chapters_input_file = INT_MAX; o->accurate_seek = 1; + o->thread_queue_size = -1; } int show_hwaccels(void *optctx, const char *opt, const char *arg) { enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE; - int i; av_log(NULL, AV_LOG_STDERR, "Hardware acceleration methods:\n"); while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE) av_log(NULL, AV_LOG_STDERR, "%s\n", av_hwdevice_get_type_name(type)); - for (i = 0; hwaccels[i].name; i++) - av_log(NULL, AV_LOG_STDERR, "%s\n", hwaccels[i].name); av_log(NULL, AV_LOG_STDERR, "\n"); return 0; } @@ -222,8 +284,9 @@ AVDictionary *strip_specifiers(AVDictionary *dict) int opt_abort_on(void *optctx, const char *opt, const char *arg) { const AVOption opts[] = { - { "abort_on" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" }, + { "abort_on" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, (double)INT64_MAX, .unit = "flags" }, { "empty_output" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 
ABORT_ON_FLAG_EMPTY_OUTPUT }, .unit = "flags" }, + { "empty_output_stream", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM }, .unit = "flags" }, { NULL }, }; const AVClass class = { @@ -237,6 +300,21 @@ int opt_abort_on(void *optctx, const char *opt, const char *arg) return av_opt_eval_flags(&pclass, &opts[0], arg, &abort_on_flags); } +int opt_stats_period(void *optctx, const char *opt, const char *arg) +{ + int64_t user_stats_period = parse_time_or_die(opt, arg, 1); + + if (user_stats_period <= 0) { + av_log(NULL, AV_LOG_ERROR, "stats_period %s must be positive.\n", arg); + return AVERROR(EINVAL); + } + + stats_period = user_stats_period; + av_log(NULL, AV_LOG_INFO, "ffmpeg stats and -progress period set to %s.\n", arg); + + return 0; +} + int opt_sameq(void *optctx, const char *opt, const char *arg) { av_log(NULL, AV_LOG_ERROR, "Option '%s' was removed. " @@ -495,21 +573,15 @@ int opt_sdp_file(void *optctx, const char *opt, const char *arg) #if CONFIG_VAAPI int opt_vaapi_device(void *optctx, const char *opt, const char *arg) { - HWDevice *dev; const char *prefix = "vaapi:"; char *tmp; int err; tmp = av_asprintf("%s%s", prefix, arg); if (!tmp) return AVERROR(ENOMEM); - err = hw_device_init_from_string(tmp, &dev); + err = hw_device_init_from_string(tmp, NULL); av_free(tmp); - if (err < 0) return err; - hw_device_ctx = av_buffer_ref(dev->device_ref); - if (!hw_device_ctx) - return AVERROR(ENOMEM); - return 0; } #endif @@ -812,15 +884,6 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) case AVMEDIA_TYPE_VIDEO: if(!ist->dec) ist->dec = avcodec_find_decoder(par->codec_id); -#if FF_API_LOWRES - if (st->codec->lowres) { - ist->dec_ctx->lowres = st->codec->lowres; - ist->dec_ctx->width = st->codec->width; - ist->dec_ctx->height = st->codec->height; - ist->dec_ctx->coded_width = st->codec->coded_width; - ist->dec_ctx->coded_height = st->codec->coded_height; - } -#endif // avformat_find_stream_info() doesn't set this for us anymore. ist->dec_ctx->framerate = st->avg_frame_rate; @@ -837,9 +900,28 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st); MATCH_PER_STREAM_OPT(hwaccels, str, hwaccel, ic, st); + MATCH_PER_STREAM_OPT(hwaccel_output_formats, str, + hwaccel_output_format, ic, st); + + if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "cuvid")) { + av_log(NULL, AV_LOG_WARNING, + "WARNING: defaulting hwaccel_output_format to cuda for compatibility " + "with old commandlines. This behaviour is DEPRECATED and will be removed " + "in the future. Please explicitly set \"-hwaccel_output_format cuda\".\n"); + ist->hwaccel_output_format = AV_PIX_FMT_CUDA; + } else if (hwaccel_output_format) { + ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format); + if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) { + av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output " + "format: %s", hwaccel_output_format); + } + } else { + ist->hwaccel_output_format = AV_PIX_FMT_NONE; + } + if (hwaccel) { // The NVDEC hwaccels use a CUDA device, so remap the name here. 
- if (!strcmp(hwaccel, "nvdec")) + if (!strcmp(hwaccel, "nvdec") || !strcmp(hwaccel, "cuvid")) hwaccel = "cuda"; if (!strcmp(hwaccel, "none")) @@ -873,8 +955,6 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) AV_HWDEVICE_TYPE_NONE) av_log(NULL, AV_LOG_FATAL, "%s ", av_hwdevice_get_type_name(type)); - for (i = 0; hwaccels[i].name; i++) - av_log(NULL, AV_LOG_FATAL, "%s ", hwaccels[i].name); av_log(NULL, AV_LOG_FATAL, "\n"); exit_program(1); } @@ -888,18 +968,6 @@ void add_input_streams(OptionsContext *o, AVFormatContext *ic) exit_program(1); } - MATCH_PER_STREAM_OPT(hwaccel_output_formats, str, - hwaccel_output_format, ic, st); - if (hwaccel_output_format) { - ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format); - if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) { - av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output " - "format: %s", hwaccel_output_format); - } - } else { - ist->hwaccel_output_format = AV_PIX_FMT_NONE; - } - ist->hwaccel_pix_fmt = AV_PIX_FMT_NONE; break; @@ -1229,7 +1297,7 @@ int open_input_file(OptionsContext *o, const char *filename) f->duration = 0; f->time_base = (AVRational){ 1, 1 }; #if HAVE_THREADS - f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8; + f->thread_queue_size = o->thread_queue_size; #endif /* check if all codec options have been used */ @@ -1422,6 +1490,8 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM ost->encoder_opts = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc); MATCH_PER_STREAM_OPT(presets, str, preset, oc, st); + ost->autoscale = 1; + MATCH_PER_STREAM_OPT(autoscale, i, ost->autoscale, oc, st); if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) { do { buf = get_line(s); @@ -1489,55 +1559,13 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st); MATCH_PER_STREAM_OPT(bitstream_filters, str, bsfs, oc, st); - while (bsfs && *bsfs) { - const AVBitStreamFilter *filter; - char *bsf, *bsf_options_str, *bsf_name; - - bsf = av_get_token(&bsfs, ","); - if (!bsf) - exit_program(1); - bsf_name = av_strtok(bsf, "=", &bsf_options_str); - if (!bsf_name) - exit_program(1); - - filter = av_bsf_get_by_name(bsf_name); - if (!filter) { - av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf_name); - exit_program(1); - } - - ost->bsf_ctx = av_realloc_array(ost->bsf_ctx, - ost->nb_bitstream_filters + 1, - sizeof(*ost->bsf_ctx)); - if (!ost->bsf_ctx) - exit_program(1); - - ret = av_bsf_alloc(filter, &ost->bsf_ctx[ost->nb_bitstream_filters]); + if (bsfs && *bsfs) { + ret = av_bsf_list_parse_str(bsfs, &ost->bsf_ctx); if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n"); - exit_program(1); - } - - ost->nb_bitstream_filters++; - - if (bsf_options_str && filter->priv_class) { - const AVOption *opt = av_opt_next(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, NULL); - const char * shorthand[2] = {NULL}; - - if (opt) - shorthand[0] = opt->name; - - ret = av_opt_set_from_string(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, bsf_options_str, shorthand, "=", ":"); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error parsing options for bitstream filter %s\n", bsf_name); + av_log(NULL, AV_LOG_ERROR, "Error parsing bitstream filter sequence '%s': %s\n", bsfs, av_err2str(ret)); exit_program(1); } } - av_freep(&bsf); - - if (*bsfs) - bsfs++; - } MATCH_PER_STREAM_OPT(codec_tags, 
str, codec_tag, oc, st); if (codec_tag) { @@ -1561,6 +1589,11 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st); ost->max_muxing_queue_size *= sizeof(AVPacket); + ost->muxing_queue_data_size = 0; + + ost->muxing_queue_data_threshold = 50*1024*1024; + MATCH_PER_STREAM_OPT(muxing_queue_data_threshold, i, ost->muxing_queue_data_threshold, oc, st); + if (oc->oformat->flags & AVFMT_GLOBALHEADER) ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; @@ -1699,8 +1732,6 @@ OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, int sourc MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st); MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st); - if (o->nb_filters > 1) - av_log(NULL, AV_LOG_ERROR, "Only '-vf %s' read, ignoring remaining -vf options: Use ',' to separate filters\n", ost->filters); if (!ost->stream_copy) { const char *p = NULL; @@ -1882,8 +1913,6 @@ OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, int sourc MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st); MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st); - if (o->nb_filters > 1) - av_log(NULL, AV_LOG_ERROR, "Only '-af %s' read, ignoring remaining -af options: Use ',' to separate filters\n", ost->filters); if (!ost->stream_copy) { char *sample_fmt = NULL; @@ -2211,22 +2240,23 @@ int open_output_file(OptionsContext *o, const char *filename) /* video: highest resolution */ if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) { - int area = 0, idx = -1; + int best_score = 0, idx = -1; int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0); for (i = 0; i < nb_input_streams; i++) { - int new_area; + int score; ist = input_streams[i]; - new_area = ist->st->codecpar->width * ist->st->codecpar->height + 100000000*!!ist->st->codec_info_nb_frames + score = ist->st->codecpar->width * ist->st->codecpar->height + + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS) + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT); if (ist->user_set_discard == AVDISCARD_ALL) continue; if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) - new_area = 1; + score = 1; if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && - new_area > area) { + score > best_score) { if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) continue; - area = new_area; + best_score = score; idx = i; } } @@ -2390,12 +2420,14 @@ loop_end: o->attachments[i]); exit_program(1); } - if (!(attachment = av_malloc(len))) { - av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n", + if (len > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE || + !(attachment = av_malloc(len + AV_INPUT_BUFFER_PADDING_SIZE))) { + av_log(NULL, AV_LOG_FATAL, "Attachment %s too large.\n", o->attachments[i]); exit_program(1); } avio_read(pb, attachment, len); + memset(attachment + len, 0, AV_INPUT_BUFFER_PADDING_SIZE); ost = new_attachment_stream(o, oc, -1); ost->stream_copy = 0; @@ -3214,7 +3246,7 @@ void show_help_default_ffmpeg(const char *opt, const char *arg) " -h -- print basic options\n" " -h long -- print more options\n" " -h full -- print all options (including all format and codec specific options, very long)\n" - " -h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter/bsf\n" + " 
-h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter/bsf/protocol\n" " See man %s for detailed description of the options.\n" "\n", program_name); @@ -3222,7 +3254,7 @@ void show_help_default_ffmpeg(const char *opt, const char *arg) OPT_EXIT, 0, 0); show_help_options(options, "Global options (affect whole program " - "instead of just one file:", + "instead of just one file):", 0, per_file | OPT_EXIT | OPT_EXPERT, 0); if (show_advanced) show_help_options(options, "Advanced global options:", OPT_EXPERT, @@ -3298,6 +3330,7 @@ int open_files(OptionGroupList *l, const char *inout, if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error parsing options for %s file " "%s.\n", inout, g->arg); + uninit_options(&o); return ret; } diff --git a/apple/src/fftools_ffprobe.c b/apple/src/fftools_ffprobe.c index b1b4d10..d38ec22 100644 --- a/apple/src/fftools_ffprobe.c +++ b/apple/src/fftools_ffprobe.c @@ -42,7 +42,9 @@ #include "libavutil/bprint.h" #include "libavutil/display.h" #include "libavutil/hash.h" +#include "libavutil/hdr_dynamic_metadata.h" #include "libavutil/mastering_display_metadata.h" +#include "libavutil/dovi_meta.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/spherical.h" @@ -257,6 +259,7 @@ __thread OptionDef *ffprobe_options = NULL; /* FFprobe context */ __thread const char *input_filename; +__thread const char *print_input_filename; __thread AVInputFormat *iformat = NULL; __thread struct AVHashContext *hash; @@ -1089,12 +1092,12 @@ typedef struct CompactContext { #define OFFSET(x) offsetof(CompactContext, x) static const AVOption compact_options[]= { - {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, CHAR_MIN, CHAR_MAX }, - {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, CHAR_MIN, CHAR_MAX }, + {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, 0, 0 }, + {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str="|"}, 0, 0 }, {"nokey", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 }, {"nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1 }, - {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, CHAR_MIN, CHAR_MAX }, - {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, CHAR_MIN, CHAR_MAX }, + {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, 0, 0 }, + {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"}, 0, 0 }, {"print_section", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"p", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {NULL}, @@ -1205,12 +1208,12 @@ static const Writer compact_writer = { #define OFFSET(x) offsetof(CompactContext, x) static const AVOption csv_options[] = { - {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, CHAR_MIN, CHAR_MAX }, - {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, CHAR_MIN, CHAR_MAX }, + {"item_sep", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, 0, 0 }, + {"s", "set item separator", OFFSET(item_sep_str), AV_OPT_TYPE_STRING, {.str=","}, 0, 0 }, {"nokey", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, - 
{"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, CHAR_MIN, CHAR_MAX }, - {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, CHAR_MIN, CHAR_MAX }, + {"escape", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, 0, 0 }, + {"e", "set escape mode", OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="csv"}, 0, 0 }, {"print_section", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"p", "print section name", OFFSET(print_section), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {NULL}, @@ -1243,8 +1246,8 @@ typedef struct FlatContext { #define OFFSET(x) offsetof(FlatContext, x) static const AVOption flat_options[]= { - {"sep_char", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, CHAR_MIN, CHAR_MAX }, - {"s", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, CHAR_MIN, CHAR_MAX }, + {"sep_char", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, 0, 0 }, + {"s", "set separator", OFFSET(sep_str), AV_OPT_TYPE_STRING, {.str="."}, 0, 0 }, {"hierarchical", "specify if the section specification should be hierarchical", OFFSET(hierarchical), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {"h", "specify if the section specification should be hierarchical", OFFSET(hierarchical), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1 }, {NULL}, @@ -1859,6 +1862,105 @@ static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id return ret; } +static void print_dynamic_hdr10_plus(WriterContext *w, const AVDynamicHDRPlus *metadata) +{ + if (!metadata) + return; + print_int("application version", metadata->application_version); + print_int("num_windows", metadata->num_windows); + for (int n = 1; n < metadata->num_windows; n++) { + const AVHDRPlusColorTransformParams *params = &metadata->params[n]; + print_q("window_upper_left_corner_x", + params->window_upper_left_corner_x,'/'); + print_q("window_upper_left_corner_y", + params->window_upper_left_corner_y,'/'); + print_q("window_lower_right_corner_x", + params->window_lower_right_corner_x,'/'); + print_q("window_lower_right_corner_y", + params->window_lower_right_corner_y,'/'); + print_q("window_upper_left_corner_x", + params->window_upper_left_corner_x,'/'); + print_q("window_upper_left_corner_y", + params->window_upper_left_corner_y,'/'); + print_int("center_of_ellipse_x", + params->center_of_ellipse_x ) ; + print_int("center_of_ellipse_y", + params->center_of_ellipse_y ); + print_int("rotation_angle", + params->rotation_angle); + print_int("semimajor_axis_internal_ellipse", + params->semimajor_axis_internal_ellipse); + print_int("semimajor_axis_external_ellipse", + params->semimajor_axis_external_ellipse); + print_int("semiminor_axis_external_ellipse", + params->semiminor_axis_external_ellipse); + print_int("overlap_process_option", + params->overlap_process_option); + } + print_q("targeted_system_display_maximum_luminance", + metadata->targeted_system_display_maximum_luminance,'/'); + if (metadata->targeted_system_display_actual_peak_luminance_flag) { + print_int("num_rows_targeted_system_display_actual_peak_luminance", + metadata->num_rows_targeted_system_display_actual_peak_luminance); + print_int("num_cols_targeted_system_display_actual_peak_luminance", + metadata->num_cols_targeted_system_display_actual_peak_luminance); + for (int i = 0; i < metadata->num_rows_targeted_system_display_actual_peak_luminance; i++) { + for (int j = 0; j < metadata->num_cols_targeted_system_display_actual_peak_luminance; 
j++) { + print_q("targeted_system_display_actual_peak_luminance", + metadata->targeted_system_display_actual_peak_luminance[i][j],'/'); + } + } + } + for (int n = 0; n < metadata->num_windows; n++) { + const AVHDRPlusColorTransformParams *params = &metadata->params[n]; + for (int i = 0; i < 3; i++) { + print_q("maxscl",params->maxscl[i],'/'); + } + print_q("average_maxrgb", + params->average_maxrgb,'/'); + print_int("num_distribution_maxrgb_percentiles", + params->num_distribution_maxrgb_percentiles); + for (int i = 0; i < params->num_distribution_maxrgb_percentiles; i++) { + print_int("distribution_maxrgb_percentage", + params->distribution_maxrgb[i].percentage); + print_q("distribution_maxrgb_percentile", + params->distribution_maxrgb[i].percentile,'/'); + } + print_q("fraction_bright_pixels", + params->fraction_bright_pixels,'/'); + } + if (metadata->mastering_display_actual_peak_luminance_flag) { + print_int("num_rows_mastering_display_actual_peak_luminance", + metadata->num_rows_mastering_display_actual_peak_luminance); + print_int("num_cols_mastering_display_actual_peak_luminance", + metadata->num_cols_mastering_display_actual_peak_luminance); + for (int i = 0; i < metadata->num_rows_mastering_display_actual_peak_luminance; i++) { + for (int j = 0; j < metadata->num_cols_mastering_display_actual_peak_luminance; j++) { + print_q("mastering_display_actual_peak_luminance", + metadata->mastering_display_actual_peak_luminance[i][j],'/'); + } + } + } + + for (int n = 0; n < metadata->num_windows; n++) { + const AVHDRPlusColorTransformParams *params = &metadata->params[n]; + if (params->tone_mapping_flag) { + print_q("knee_point_x", params->knee_point_x,'/'); + print_q("knee_point_y", params->knee_point_y,'/'); + print_int("num_bezier_curve_anchors", + params->num_bezier_curve_anchors ); + for (int i = 0; i < params->num_bezier_curve_anchors; i++) { + print_q("bezier_curve_anchors", + params->bezier_curve_anchors[i],'/'); + } + } + if (params->color_saturation_mapping_flag) { + print_q("color_saturation_weight", + params->color_saturation_weight,'/'); + } + } +} + static void print_pkt_side_data(WriterContext *w, AVCodecParameters *par, const AVPacketSideData *side_data, @@ -1928,6 +2030,16 @@ static void print_pkt_side_data(WriterContext *w, AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data; print_int("max_content", metadata->MaxCLL); print_int("max_average", metadata->MaxFALL); + } else if (sd->type == AV_PKT_DATA_DOVI_CONF) { + AVDOVIDecoderConfigurationRecord *dovi = (AVDOVIDecoderConfigurationRecord *)sd->data; + print_int("dv_version_major", dovi->dv_version_major); + print_int("dv_version_minor", dovi->dv_version_minor); + print_int("dv_profile", dovi->dv_profile); + print_int("dv_level", dovi->dv_level); + print_int("rpu_present_flag", dovi->rpu_present_flag); + print_int("el_present_flag", dovi->el_present_flag); + print_int("bl_present_flag", dovi->bl_present_flag); + print_int("dv_bl_signal_compatibility_id", dovi->dv_bl_signal_compatibility_id); } writer_print_section_footer(w); } @@ -2214,7 +2326,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream, writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST); for (int j = 1; j <= m ; j++) { char tcbuf[AV_TIMECODE_STR_SIZE]; - av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0); + av_timecode_make_smpte_tc_string2(tcbuf, stream->avg_frame_rate, tc[j], 0, 0); writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE); print_str("value", tcbuf); 
writer_print_section_footer(w); @@ -2239,6 +2351,9 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream, print_q("min_luminance", metadata->min_luminance, '/'); print_q("max_luminance", metadata->max_luminance, '/'); } + } else if (sd->type == AV_FRAME_DATA_DYNAMIC_HDR_PLUS) { + AVDynamicHDRPlus *metadata = (AVDynamicHDRPlus *)sd->data; + print_dynamic_hdr10_plus(w, metadata); } else if (sd->type == AV_FRAME_DATA_CONTENT_LIGHT_LEVEL) { AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data; print_int("max_content", metadata->MaxCLL); @@ -2539,6 +2654,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id if (dec_ctx) { print_int("coded_width", dec_ctx->coded_width); print_int("coded_height", dec_ctx->coded_height); + print_int("closed_captions", !!(dec_ctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)); } #endif print_int("has_b_frames", par->video_delay); @@ -2840,11 +2956,11 @@ static void show_error(WriterContext *w, int err) writer_print_section_footer(w); } -static int open_input_file(InputFile *ifile, const char *filename) +static int open_input_file(InputFile *ifile, const char *filename, const char *print_filename) { int err, i; AVFormatContext *fmt_ctx = NULL; - AVDictionaryEntry *t; + AVDictionaryEntry *t = NULL; int scan_all_pmts_set = 0; fmt_ctx = avformat_alloc_context(); @@ -2862,13 +2978,15 @@ static int open_input_file(InputFile *ifile, const char *filename) print_error(filename, err); return err; } + if (print_filename) { + av_freep(&fmt_ctx->url); + fmt_ctx->url = av_strdup(print_filename); + } ifile->fmt_ctx = fmt_ctx; if (scan_all_pmts_set) av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE); - if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { - av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); - return AVERROR_OPTION_NOT_FOUND; - } + while ((t = av_dict_get(format_opts, "", t, AV_DICT_IGNORE_SUFFIX))) + av_log(NULL, AV_LOG_WARNING, "Option %s skipped - not known to demuxer.\n", t->key); if (find_stream_info) { AVDictionary **opts = setup_find_stream_info_opts(fmt_ctx, codec_opts); @@ -2938,6 +3056,7 @@ static int open_input_file(InputFile *ifile, const char *filename) ist->dec_ctx->pkt_timebase = stream->time_base; ist->dec_ctx->framerate = stream->avg_frame_rate; #if FF_API_LAVF_AVCTX + ist->dec_ctx->properties = stream->codec->properties; ist->dec_ctx->coded_width = stream->codec->coded_width; ist->dec_ctx->coded_height = stream->codec->coded_height; #endif @@ -2975,7 +3094,8 @@ static void close_input_file(InputFile *ifile) avformat_close_input(&ifile->fmt_ctx); } -static int probe_file(WriterContext *wctx, const char *filename) +static int probe_file(WriterContext *wctx, const char *filename, + const char *print_filename) { InputFile ifile = { 0 }; int ret, i; @@ -2984,7 +3104,7 @@ static int probe_file(WriterContext *wctx, const char *filename) do_read_frames = do_show_frames || do_count_frames; do_read_packets = do_show_packets || do_count_packets; - ret = open_input_file(&ifile, filename); + ret = open_input_file(&ifile, filename, print_filename); if (ret < 0) goto end; @@ -3289,6 +3409,12 @@ static int opt_input_file_i(void *optctx, const char *opt, const char *arg) return 0; } +static int opt_print_filename(void *optctx, const char *opt, const char *arg) +{ + print_input_filename = arg; + return 0; +} + void show_help_default_ffprobe(const char *opt, const char *arg) { show_usage(); @@ -3563,10 +3689,12 @@ void ffprobe_var_cleanup() { 
read_intervals = NULL; read_intervals_nb = 0; + find_stream_info = 1; ffprobe_options = NULL; input_filename = NULL; + print_input_filename = NULL; iformat = NULL; hash = NULL; @@ -3633,13 +3761,13 @@ int ffprobe_execute(int argc, char **argv) "use sexagesimal format HOURS:MM:SS.MICROSECONDS for time units" }, { "pretty", 0, {.func_arg = opt_pretty}, "prettify the format of displayed values, make it more human readable" }, - { "print_format", OPT_STRING | HAS_ARG, {(void*)&print_format}, + { "print_format", OPT_STRING | HAS_ARG, { &print_format }, "set the output printing format (available formats are: default, compact, csv, flat, ini, json, xml)", "format" }, - { "of", OPT_STRING | HAS_ARG, {(void*)&print_format}, "alias for -print_format", "format" }, - { "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" }, + { "of", OPT_STRING | HAS_ARG, { &print_format }, "alias for -print_format", "format" }, + { "select_streams", OPT_STRING | HAS_ARG, { &stream_specifier }, "select the specified streams", "stream_specifier" }, { "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" }, - { "show_data", OPT_BOOL, {(void*)&do_show_data}, "show packets data" }, - { "show_data_hash", OPT_STRING | HAS_ARG, {(void*)&show_data_hash}, "show packets data hash" }, + { "show_data", OPT_BOOL, { &do_show_data }, "show packets data" }, + { "show_data_hash", OPT_STRING | HAS_ARG, { &show_data_hash }, "show packets data hash" }, { "show_error", 0, { .func_arg = &opt_show_error }, "show probing error" }, { "show_format", 0, { .func_arg = &opt_show_format }, "show format/container info" }, { "show_frames", 0, { .func_arg = &opt_show_frames }, "show frames info" }, @@ -3648,24 +3776,25 @@ int ffprobe_execute(int argc, char **argv) { "show_entries", HAS_ARG, {.func_arg = opt_show_entries}, "show a set of specified entries", "entry_list" }, #if HAVE_THREADS - { "show_log", OPT_INT|HAS_ARG, {(void*)&do_show_log}, "show log" }, + { "show_log", OPT_INT|HAS_ARG, { &do_show_log }, "show log" }, #endif { "show_packets", 0, { .func_arg = &opt_show_packets }, "show packets info" }, { "show_programs", 0, { .func_arg = &opt_show_programs }, "show programs info" }, { "show_streams", 0, { .func_arg = &opt_show_streams }, "show streams info" }, { "show_chapters", 0, { .func_arg = &opt_show_chapters }, "show chapters info" }, - { "count_frames", OPT_BOOL, {(void*)&do_count_frames}, "count the number of frames per stream" }, - { "count_packets", OPT_BOOL, {(void*)&do_count_packets}, "count the number of packets per stream" }, + { "count_frames", OPT_BOOL, { &do_count_frames }, "count the number of frames per stream" }, + { "count_packets", OPT_BOOL, { &do_count_packets }, "count the number of packets per stream" }, { "show_program_version", 0, { .func_arg = &opt_show_program_version }, "show ffprobe version" }, { "show_library_versions", 0, { .func_arg = &opt_show_library_versions }, "show library versions" }, { "show_versions", 0, { .func_arg = &opt_show_versions }, "show program and library versions" }, { "show_pixel_formats", 0, { .func_arg = &opt_show_pixel_formats }, "show pixel format descriptions" }, - { "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" }, - { "private", OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" }, + { "show_private_data", OPT_BOOL, { &show_private_data }, "show private data" }, + { "private", OPT_BOOL, { &show_private_data }, "same as 
show_private_data" }, { "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" }, { "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" }, { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" }, { "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"}, + { "print_filename", HAS_ARG, {.func_arg = opt_print_filename}, "override the printed input filename", "print_file"}, { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info }, "read and decode the streams to fill missing information with heuristics" }, { NULL, }, @@ -3800,7 +3929,7 @@ int ffprobe_execute(int argc, char **argv) av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name); ret = AVERROR(EINVAL); } else if (input_filename) { - ret = probe_file(wctx, input_filename); + ret = probe_file(wctx, input_filename, print_input_filename); if (ret < 0 && do_show_error) show_error(wctx, ret); } diff --git a/scripts/android/ffmpeg.sh b/scripts/android/ffmpeg.sh index 155534e..55699fc 100755 --- a/scripts/android/ffmpeg.sh +++ b/scripts/android/ffmpeg.sh @@ -354,6 +354,10 @@ ulimit -n 2048 1>>"${BASEDIR}"/build.log 2>&1 # 1. Use thread local log levels ${SED_INLINE} 's/static int av_log_level/__thread int av_log_level/g' "${BASEDIR}"/src/"${LIB_NAME}"/libavutil/log.c 1>>"${BASEDIR}"/build.log 2>&1 || exit 1 +# 2. Set friendly ffmpeg version +FFMPEG_VERSION="v$(get_user_friendly_ffmpeg_version)" +${SED_INLINE} "s/\$version/$FFMPEG_VERSION/g" "${BASEDIR}"/src/"${LIB_NAME}"/ffbuild/version.sh 1>>"${BASEDIR}"/build.log 2>&1 || exit 1 + ################################################################### ./configure \ diff --git a/scripts/android/fribidi.sh b/scripts/android/fribidi.sh index eac38af..9d5f9bb 100755 --- a/scripts/android/fribidi.sh +++ b/scripts/android/fribidi.sh @@ -20,7 +20,7 @@ fi --host="${HOST}" || return 1 # WORKAROUND TO DISABLE BUILDING OF doc FOLDER (doc depends on c2man which is not available on all platforms) -$SED_INLINE 's/ doc / /g' "${BASEDIR}"/src/"${LIB_NAME}"/Makefile || return 1 +${SED_INLINE} 's/ doc / /g' "${BASEDIR}"/src/"${LIB_NAME}"/Makefile || return 1 make -j$(get_cpu_count) || return 1 diff --git a/scripts/android/libtheora.sh b/scripts/android/libtheora.sh index 60002c3..c84f049 100755 --- a/scripts/android/libtheora.sh +++ b/scripts/android/libtheora.sh @@ -14,7 +14,7 @@ make distclean 2>/dev/null 1>/dev/null if [[ ! 
-f "${BASEDIR}"/src/"${LIB_NAME}"/configure ]] || [[ ${RECONF_libtheora} -eq 1 ]]; then # WORKAROUND NOT TO RUN CONFIGURE AT THE END OF autogen.sh - $SED_INLINE 's/$srcdir\/configure/#$srcdir\/configure/g' "${BASEDIR}"/src/"${LIB_NAME}"/autogen.sh || return 1 + ${SED_INLINE} 's/$srcdir\/configure/#$srcdir\/configure/g' "${BASEDIR}"/src/"${LIB_NAME}"/autogen.sh || return 1 ./autogen.sh || return 1 fi diff --git a/scripts/android/twolame.sh b/scripts/android/twolame.sh index 7d44d8d..8b6a340 100755 --- a/scripts/android/twolame.sh +++ b/scripts/android/twolame.sh @@ -22,7 +22,7 @@ fi --host="${HOST}" || return 1 # WORKAROUND TO DISABLE BUILDING OF DOCBOOK - BUILD SCRIPTS DO NOT GENERATE A TARGET FOR IT -$SED_INLINE 's/dist_man_MANS = .*/dist_man_MANS =/g' "${BASEDIR}"/src/"${LIB_NAME}"/doc/Makefile || return 1 +${SED_INLINE} 's/dist_man_MANS = .*/dist_man_MANS =/g' "${BASEDIR}"/src/"${LIB_NAME}"/doc/Makefile || return 1 make -j$(get_cpu_count) || return 1 diff --git a/scripts/apple/ffmpeg.sh b/scripts/apple/ffmpeg.sh index 7e5969a..a01d8cb 100755 --- a/scripts/apple/ffmpeg.sh +++ b/scripts/apple/ffmpeg.sh @@ -444,6 +444,10 @@ fi # 3. Use thread local log levels ${SED_INLINE} 's/static int av_log_level/__thread int av_log_level/g' "${BASEDIR}"/src/${LIB_NAME}/libavutil/log.c 1>>"${BASEDIR}"/build.log 2>&1 || exit 1 +# 4. Set friendly ffmpeg version +FFMPEG_VERSION="v$(get_user_friendly_ffmpeg_version)" +${SED_INLINE} "s/\$version/$FFMPEG_VERSION/g" "${BASEDIR}"/src/"${LIB_NAME}"/ffbuild/version.sh 1>>"${BASEDIR}"/build.log 2>&1 || exit 1 + ################################################################### ./configure \ diff --git a/scripts/apple/fribidi.sh b/scripts/apple/fribidi.sh index bf30d90..32840c8 100755 --- a/scripts/apple/fribidi.sh +++ b/scripts/apple/fribidi.sh @@ -20,7 +20,7 @@ fi --host="${HOST}" || return 1 # WORKAROUND TO DISABLE BUILDING OF doc FOLDER (doc depends on c2man which is not available on all platforms) -$SED_INLINE 's/ doc / /g' "${BASEDIR}"/src/"${LIB_NAME}"/Makefile || return 1 +${SED_INLINE} 's/ doc / /g' "${BASEDIR}"/src/"${LIB_NAME}"/Makefile || return 1 make -j$(get_cpu_count) || return 1 diff --git a/scripts/apple/libtheora.sh b/scripts/apple/libtheora.sh index ce8eb3c..84696bb 100755 --- a/scripts/apple/libtheora.sh +++ b/scripts/apple/libtheora.sh @@ -7,7 +7,7 @@ make distclean 2>/dev/null 1>/dev/null if [[ ! 
-f "${BASEDIR}"/src/"${LIB_NAME}"/configure ]] || [[ ${RECONF_libtheora} -eq 1 ]]; then # WORKAROUND NOT TO RUN CONFIGURE AT THE END OF autogen.sh - $SED_INLINE 's/$srcdir\/configure/#$srcdir\/configure/g' "${BASEDIR}"/src/"${LIB_NAME}"/autogen.sh || return 1 + ${SED_INLINE} 's/$srcdir\/configure/#$srcdir\/configure/g' "${BASEDIR}"/src/"${LIB_NAME}"/autogen.sh || return 1 ./autogen.sh || return 1 fi diff --git a/scripts/apple/twolame.sh b/scripts/apple/twolame.sh index 056f508..ae27b2f 100755 --- a/scripts/apple/twolame.sh +++ b/scripts/apple/twolame.sh @@ -22,7 +22,7 @@ fi --host="${HOST}" || return 1 # WORKAROUND TO DISABLE BUILDING OF DOCBOOK - BUILD SCRIPTS DO NOT GENERATE A TARGET FOR IT -$SED_INLINE 's/dist_man_MANS = .*/dist_man_MANS =/g' "${BASEDIR}"/src/"${LIB_NAME}"/doc/Makefile || return 1 +${SED_INLINE} 's/dist_man_MANS = .*/dist_man_MANS =/g' "${BASEDIR}"/src/"${LIB_NAME}"/doc/Makefile || return 1 make -j$(get_cpu_count) || return 1 diff --git a/scripts/function-apple.sh b/scripts/function-apple.sh old mode 100644 new mode 100755 diff --git a/scripts/function.sh b/scripts/function.sh index 4af7f54..1317f9e 100755 --- a/scripts/function.sh +++ b/scripts/function.sh @@ -246,7 +246,7 @@ from_library_name() { is_library_supported_on_platform() { local library_index=$(from_library_name "$1") case ${library_index} in - 0 | 1 | 2 | 3 | 4 | 5 | 6 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20) + 0 | 1 | 2 | 3 | 4 | 5 | 6 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 17 | 18 | 19 | 20) echo "0" ;; 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 39 | 40) @@ -1616,6 +1616,11 @@ is_gnu_config_files_up_to_date() { echo $(grep aarch64-apple-darwin config.guess | wc -l 2>>"${BASEDIR}"/build.log) } +get_user_friendly_ffmpeg_version() { + local USER_FRIENDLY_NAME=$(get_library_source "ffmpeg" 4) + echo ${USER_FRIENDLY_NAME:1} +} + get_cpu_count() { if [ "$(uname)" == "Darwin" ]; then echo $(sysctl -n hw.logicalcpu) diff --git a/scripts/source.sh b/scripts/source.sh index 52f6b08..3c0ebbb 100755 --- a/scripts/source.sh +++ b/scripts/source.sh @@ -28,8 +28,9 @@ get_library_source() { ;; ffmpeg) SOURCE_REPO_URL="https://github.com/tanersener/FFmpeg" - SOURCE_ID="d222da435e63a2665b85c0305ad2cf8a07b1af6d" # COMMIT -> v4.4-dev-416 + SOURCE_ID="9f38fac053010205806ece11e6aea9b7d3bde041" SOURCE_TYPE="COMMIT" + SOURCE_GIT_DESCRIBE="n4.4-dev-2765-g9f38fac053" # git describe --tags ;; fontconfig) SOURCE_REPO_URL="https://github.com/tanersener/fontconfig" @@ -263,5 +264,8 @@ get_library_source() { 3) echo "${SOURCE_TYPE}" ;; + 4) + echo "${SOURCE_GIT_DESCRIBE}" + ;; esac }