update external libraries

Taner Sener 2021-05-14 23:55:25 +01:00
parent c356b059c7
commit 22d20d7de7
23 changed files with 759 additions and 822 deletions

View File

@ -251,11 +251,9 @@ void show_help_children(const AVClass *class, int flags)
static const OptionDef *find_option(const OptionDef *po, const char *name)
{
const char *p = strchr(name, ':');
int len = p ? p - name : strlen(name);
while (po->name) {
if (!strncmp(name, po->name, len) && strlen(po->name) == len)
const char *end;
if (av_strstart(name, po->name, &end) && (!*end || *end == ':'))
break;
po++;
}
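The rewritten matcher above leans on av_strstart() from libavutil/avstring.h, which both tests the prefix and hands back a pointer just past it, so an option name matches either exactly or up to a ':' stream specifier. A minimal sketch of the same rule, against a hypothetical reduced option table:

    #include <libavutil/avstring.h>

    /* Hypothetical minimal option entry, for illustration only. */
    struct opt { const char *name; };

    static const struct opt *match_opt(const struct opt *po, const char *name)
    {
        while (po->name) {
            const char *end;
            /* av_strstart() returns nonzero if `name` begins with po->name and
             * sets `end` to the first character after that prefix; accepting
             * only '\0' or ':' there lets "v" match "v" and "v:0" but not "vf". */
            if (av_strstart(name, po->name, &end) && (!*end || *end == ':'))
                return po;
            po++;
        }
        return NULL;
    }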
@ -587,9 +585,6 @@ int opt_default(void *optctx, const char *opt, const char *arg)
char opt_stripped[128];
const char *p;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class();
#if CONFIG_AVRESAMPLE
const AVClass *rc = avresample_get_class();
#endif
#if CONFIG_SWSCALE
const AVClass *sc = sws_get_class();
#endif
@ -659,13 +654,6 @@ int opt_default(void *optctx, const char *opt, const char *arg)
consumed = 1;
}
#endif
#if CONFIG_AVRESAMPLE
if ((o=opt_find(&rc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
av_dict_set(&resample_opts, opt, arg, FLAGS);
consumed = 1;
}
#endif
if (consumed)
return 0;
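The deleted CONFIG_AVRESAMPLE branches go away because the bundled FFmpeg no longer ships libavresample; resampling options are handled through libswresample instead. The surviving branches all use the same probe, av_opt_find() with AV_OPT_SEARCH_FAKE_OBJ, which searches a class definition without needing a live context. A minimal sketch of that probe against the swscale class used above (the option name is only an example):

    #include <libavutil/opt.h>
    #include <libswscale/swscale.h>

    /* Returns 1 if `name` is a swscale option. AV_OPT_SEARCH_FAKE_OBJ makes
     * av_opt_find() inspect the class itself, so no SwsContext is required. */
    static int is_sws_option(const char *name)
    {
        const AVClass *sc = sws_get_class();
        return av_opt_find(&sc, name, NULL, 0,
                           AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ) != NULL;
    }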
@ -1208,13 +1196,13 @@ static void print_buildconf(int flags, int level)
// Change all the ' --' strings to '~--' so that
// they can be identified as tokens.
while ((conflist = strstr(str, " --")) != NULL) {
strncpy(conflist, "~--", 3);
conflist[0] = '~';
}
// Compensate for the weirdness this would cause
// when passing 'pkg-config --static'.
while ((remove_tilde = strstr(str, "pkg-config~")) != NULL) {
strncpy(remove_tilde, "pkg-config ", 11);
remove_tilde[sizeof("pkg-config~") - 2] = ' ';
}
splitconf = strtok(str, "~");
@ -1458,7 +1446,7 @@ static void print_codec(const AVCodec *c)
av_log(NULL, AV_LOG_STDERR, "variable ");
if (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS))
AV_CODEC_CAP_OTHER_THREADS))
av_log(NULL, AV_LOG_STDERR, "threads ");
if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING)
av_log(NULL, AV_LOG_STDERR, "avoidprobe ");
@ -1475,12 +1463,12 @@ static void print_codec(const AVCodec *c)
av_log(NULL, AV_LOG_STDERR, " Threading capabilities: ");
switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS)) {
AV_CODEC_CAP_OTHER_THREADS)) {
case AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS: av_log(NULL, AV_LOG_STDERR, "frame and slice"); break;
case AV_CODEC_CAP_FRAME_THREADS: av_log(NULL, AV_LOG_STDERR, "frame"); break;
case AV_CODEC_CAP_SLICE_THREADS: av_log(NULL, AV_LOG_STDERR, "slice"); break;
case AV_CODEC_CAP_AUTO_THREADS : av_log(NULL, AV_LOG_STDERR, "auto"); break;
case AV_CODEC_CAP_OTHER_THREADS : av_log(NULL, AV_LOG_STDERR, "other"); break;
default: av_log(NULL, AV_LOG_STDERR, "none"); break;
}
av_log(NULL, AV_LOG_STDERR, "\n");
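These two hunks track the rename of AV_CODEC_CAP_AUTO_THREADS to AV_CODEC_CAP_OTHER_THREADS in newer libavcodec; the flag still marks codecs that manage their own threading. A minimal sketch of the same capability check, run against any codec handle:

    #include <libavcodec/avcodec.h>
    #include <stdio.h>

    static void report_threading(const AVCodec *c)
    {
        int caps = c->capabilities;
        if (caps & AV_CODEC_CAP_FRAME_THREADS)
            printf("%s: frame threading\n", c->name);
        if (caps & AV_CODEC_CAP_SLICE_THREADS)
            printf("%s: slice threading\n", c->name);
        /* OTHER_THREADS is the new name for what used to be AUTO_THREADS. */
        if (caps & AV_CODEC_CAP_OTHER_THREADS)
            printf("%s: codec-managed threading\n", c->name);
    }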
@ -2151,7 +2139,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
}
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec)
AVFormatContext *s, AVStream *st, const AVCodec *codec)
{
AVDictionary *ret = NULL;
AVDictionaryEntry *t = NULL;
@ -2180,6 +2168,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
}
while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX))) {
const AVClass *priv_class;
char *p = strchr(t->key, ':');
/* check stream specification in opt name */
@ -2192,8 +2181,8 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
!codec ||
(codec->priv_class &&
av_opt_find(&codec->priv_class, t->key, NULL, flags,
((priv_class = codec->priv_class) &&
av_opt_find(&priv_class, t->key, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&ret, t->key, t->value, 0);
else if (t->key[0] == prefix &&
@ -2266,7 +2255,7 @@ double get_rotation(AVStream *st)
}
#if CONFIG_AVDEVICE
static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
static int print_device_sources(const AVInputFormat *fmt, AVDictionary *opts)
{
int ret, i;
AVDeviceInfoList *device_list = NULL;
@ -2296,7 +2285,7 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
return ret;
}
static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
static int print_device_sinks(const AVOutputFormat *fmt, AVDictionary *opts)
{
int ret, i;
AVDeviceInfoList *device_list = NULL;
@ -2350,7 +2339,7 @@ static int show_sinks_sources_parse_arg(const char *arg, char **dev, AVDictionar
int show_sources(void *optctx, const char *opt, const char *arg)
{
AVInputFormat *fmt = NULL;
const AVInputFormat *fmt = NULL;
char *dev = NULL;
AVDictionary *opts = NULL;
int ret = 0;
@ -2388,7 +2377,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)
int show_sinks(void *optctx, const char *opt, const char *arg)
{
AVOutputFormat *fmt = NULL;
const AVOutputFormat *fmt = NULL;
char *dev = NULL;
AVDictionary *opts = NULL;
int ret = 0;
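The device-listing helpers and show_sources/show_sinks now take const (de)muxer pointers, which lines up with the constified lookup and iteration APIs in current libavformat. A minimal sketch of why the const matters, using only the public iterator:

    #include <libavformat/avformat.h>
    #include <stdio.h>

    static void list_demuxers(void)
    {
        void *opaque = NULL;
        const AVInputFormat *fmt;
        /* av_demuxer_iterate() hands out const pointers, so anything that
         * stores or forwards them must be declared const AVInputFormat *. */
        while ((fmt = av_demuxer_iterate(&opaque)))
            printf("demuxer: %s\n", fmt->name);
    }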

View File

@ -235,7 +235,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
void show_help_children(const AVClass *class, int flags);
/**
* Per-fftool specific help handlers. Implemented in each
* Per-fftool specific help handler. Implemented in each
* fftool, called by show_help().
*/
void show_help_default_ffmpeg(const char *opt, const char *arg);
@ -402,7 +402,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
* @return a pointer to the created dictionary
*/
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec);
AVFormatContext *s, AVStream *st, const AVCodec *codec);
/**
* Setup AVCodecContext options for avformat_find_stream_info().

View File

@ -700,6 +700,7 @@ static void ffmpeg_cleanup(int ret)
av_frame_free(&ost->filtered_frame);
av_frame_free(&ost->last_frame);
av_packet_free(&ost->pkt);
av_dict_free(&ost->encoder_opts);
av_freep(&ost->forced_keyframes);
@ -718,9 +719,9 @@ static void ffmpeg_cleanup(int ret)
if (ost->muxing_queue) {
while (av_fifo_size(ost->muxing_queue)) {
AVPacket pkt;
AVPacket *pkt;
av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
av_packet_unref(&pkt);
av_packet_free(&pkt);
}
av_fifo_freep(&ost->muxing_queue);
}
@ -732,6 +733,7 @@ static void ffmpeg_cleanup(int ret)
#endif
for (i = 0; i < nb_input_files; i++) {
avformat_close_input(&input_files[i]->ctx);
av_packet_free(&input_files[i]->pkt);
av_freep(&input_files[i]);
}
for (i = 0; i < nb_input_streams; i++) {
@ -739,6 +741,7 @@ static void ffmpeg_cleanup(int ret)
av_frame_free(&ist->decoded_frame);
av_frame_free(&ist->filter_frame);
av_packet_free(&ist->pkt);
av_dict_free(&ist->decoder_opts);
avsubtitle_free(&ist->prev_sub.subtitle);
av_frame_free(&ist->sub2video.frame);
@ -798,7 +801,7 @@ void assert_avoptions(AVDictionary *m)
}
}
static void abort_codec_experimental(AVCodec *c, int encoder)
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
exit_program(1);
}
@ -856,7 +859,7 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
}
if (!of->header_written) {
AVPacket tmp_pkt = {0};
AVPacket *tmp_pkt;
/* the muxer is not initialized yet, buffer the packet */
if (!av_fifo_space(ost->muxing_queue)) {
unsigned int are_we_over_size =
@ -879,8 +882,11 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
ret = av_packet_make_refcounted(pkt);
if (ret < 0)
exit_program(1);
av_packet_move_ref(&tmp_pkt, pkt);
ost->muxing_queue_data_size += tmp_pkt.size;
tmp_pkt = av_packet_alloc();
if (!tmp_pkt)
exit_program(1);
av_packet_move_ref(tmp_pkt, pkt);
ost->muxing_queue_data_size += tmp_pkt->size;
av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
return;
}
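write_packet now buffers heap-allocated packets in the muxing FIFO and stores only the pointers, rather than copying AVPacket structs by value; sizeof(AVPacket) is no longer something callers are meant to rely on. A minimal sketch of the enqueue/dequeue pattern, with a hypothetical `queue` FIFO:

    #include <libavcodec/avcodec.h>
    #include <libavutil/fifo.h>

    /* Enqueue: move the payload into a freshly allocated packet and store
     * the pointer itself in the FIFO. */
    static int queue_packet(AVFifoBuffer *queue, AVPacket *src)
    {
        AVPacket *tmp = av_packet_alloc();
        if (!tmp)
            return AVERROR(ENOMEM);
        av_packet_move_ref(tmp, src);
        return av_fifo_generic_write(queue, &tmp, sizeof(tmp), NULL);
    }

    /* Dequeue: read the pointer back and free the packet once it is written. */
    static void drain_one(AVFifoBuffer *queue)
    {
        AVPacket *pkt;
        av_fifo_generic_read(queue, &pkt, sizeof(pkt), NULL);
        /* ... hand pkt to the muxer ... */
        av_packet_free(&pkt);
    }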
@ -1106,13 +1112,9 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
AVFrame *frame)
{
AVCodecContext *enc = ost->enc_ctx;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
int ret;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
adjust_frame_pts_to_encoder_tb(of, ost, frame);
if (!check_recording_time(ost))
@ -1124,7 +1126,6 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
ost->samples_encoded += frame->nb_samples;
ost->frames_encoded++;
av_assert0(pkt.size || !pkt.data);
update_benchmark(NULL);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
@ -1138,7 +1139,8 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
goto error;
while (1) {
ret = avcodec_receive_packet(enc, &pkt);
av_packet_unref(pkt);
ret = avcodec_receive_packet(enc, pkt);
if (ret == AVERROR(EAGAIN))
break;
if (ret < 0)
@ -1146,16 +1148,16 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
}
output_packet(of, &pkt, ost, 0);
output_packet(of, pkt, ost, 0);
}
return;
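do_audio_out, and the video and flush paths below, now reuse one heap-allocated ost->pkt across calls: the packet is unreferenced before each avcodec_receive_packet() instead of being re-initialized on the stack. A minimal standalone sketch of that drain loop (the sink callback stands in for the rescale-and-mux step):

    #include <libavcodec/avcodec.h>

    /* Drain whatever the encoder has ready into `pkt`, which the caller
     * allocated once with av_packet_alloc() and reuses between calls. */
    static int drain_encoder(AVCodecContext *enc, AVPacket *pkt,
                             int (*sink)(AVPacket *pkt, void *opaque), void *opaque)
    {
        for (;;) {
            int ret;
            av_packet_unref(pkt);                 /* drop the previous payload */
            ret = avcodec_receive_packet(enc, pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                         /* nothing more for now */
            if (ret < 0)
                return ret;                       /* real encoding error */
            if ((ret = sink(pkt, opaque)) < 0)    /* e.g. rescale ts + mux */
                return ret;
        }
    }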
@ -1171,7 +1173,7 @@ static void do_subtitle_out(OutputFile *of,
int subtitle_out_max_size = 1024 * 1024;
int subtitle_out_size, nb, i;
AVCodecContext *enc;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
int64_t pts;
if (sub->pts == AV_NOPTS_VALUE) {
@ -1229,21 +1231,21 @@ static void do_subtitle_out(OutputFile *of,
exit_program(1);
}
av_init_packet(&pkt);
pkt.data = subtitle_out;
pkt.size = subtitle_out_size;
pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
av_packet_unref(pkt);
pkt->data = subtitle_out;
pkt->size = subtitle_out_size;
pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
/* XXX: the pts correction is handled here. Maybe handling
it in the codec would be better */
if (i == 0)
pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
else
pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
}
pkt.dts = pkt.pts;
output_packet(of, &pkt, ost, 0);
pkt->dts = pkt->pts;
output_packet(of, pkt, ost, 0);
}
}
@ -1252,7 +1254,7 @@ static void do_video_out(OutputFile *of,
AVFrame *next_picture)
{
int ret, format_video_sync;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
AVCodecContext *enc = ost->enc_ctx;
AVRational frame_rate;
int nb_frames, nb0_frames, i;
@ -1398,9 +1400,6 @@ static void do_video_out(OutputFile *of,
AVFrame *in_picture;
int forced_keyframe = 0;
double pts_time;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (i < nb0_frames && ost->last_frame) {
in_picture = ost->last_frame;
@ -1479,7 +1478,8 @@ static void do_video_out(OutputFile *of,
av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
while (1) {
ret = avcodec_receive_packet(enc, &pkt);
av_packet_unref(pkt);
ret = avcodec_receive_packet(enc, pkt);
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret == AVERROR(EAGAIN))
break;
@ -1489,24 +1489,24 @@ static void do_video_out(OutputFile *of,
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
}
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
pkt->pts = ost->sync_opts;
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
}
frame_size = pkt.size;
output_packet(of, &pkt, ost, 0);
frame_size = pkt->size;
output_packet(of, pkt, ost, 0);
/* if two pass, output log */
if (ost->logfile && enc->stats_out) {
@ -1635,6 +1635,9 @@ static int reap_filters(int flush)
if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
init_output_stream_wrapper(ost, NULL, 1);
if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
return AVERROR(ENOMEM);
}
if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
return AVERROR(ENOMEM);
}
@ -2143,7 +2146,7 @@ static void flush_encoders(void)
for (;;) {
const char *desc = NULL;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
int pkt_size;
switch (enc->codec_type) {
@ -2157,13 +2160,10 @@ static void flush_encoders(void)
av_assert0(0);
}
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
update_benchmark(NULL);
while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
av_packet_unref(pkt);
while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
ret = avcodec_send_frame(enc, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
@ -2184,16 +2184,16 @@ static void flush_encoders(void)
fprintf(ost->logfile, "%s", enc->stats_out);
}
if (ret == AVERROR_EOF) {
output_packet(of, &pkt, ost, 1);
output_packet(of, pkt, ost, 1);
break;
}
if (ost->finished & MUXER_FINISHED) {
av_packet_unref(&pkt);
av_packet_unref(pkt);
continue;
}
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
pkt_size = pkt.size;
output_packet(of, &pkt, ost, 0);
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
pkt_size = pkt->size;
output_packet(of, pkt, ost, 0);
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
do_video_stats(ost, pkt_size);
}
@ -2227,14 +2227,12 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
InputFile *f = input_files [ist->file_index];
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
AVPacket opkt;
AVPacket *opkt = ost->pkt;
av_packet_unref(opkt);
// EOF: flush output bitstream filters.
if (!pkt) {
av_init_packet(&opkt);
opkt.data = NULL;
opkt.size = 0;
output_packet(of, &opkt, ost, 1);
output_packet(of, opkt, ost, 1);
return;
}
@ -2272,30 +2270,30 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
ost->sync_opts++;
if (av_packet_ref(&opkt, pkt) < 0)
if (av_packet_ref(opkt, pkt) < 0)
exit_program(1);
if (pkt->pts != AV_NOPTS_VALUE)
opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
if (pkt->dts == AV_NOPTS_VALUE) {
opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
} else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
if(!duration)
duration = ist->dec_ctx->frame_size;
opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
(AVRational){1, ist->dec_ctx->sample_rate}, duration,
&ist->filter_in_rescale_delta_last, ost->mux_timebase);
/* dts will be set immediately afterwards to what pts is now */
opkt.pts = opkt.dts - ost_tb_start_time;
opkt->pts = opkt->dts - ost_tb_start_time;
} else
opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
opkt.dts -= ost_tb_start_time;
opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
opkt->dts -= ost_tb_start_time;
opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
output_packet(of, &opkt, ost, 0);
output_packet(of, opkt, ost, 0);
}
int guess_input_channel_layout(InputStream *ist)
@ -2574,7 +2572,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
int i, ret = 0, err = 0;
int64_t best_effort_timestamp;
int64_t dts = AV_NOPTS_VALUE;
AVPacket avpkt;
// With fate-indeo3-2, we're getting 0-sized packets before EOF for some
// reason. This seems like a semi-critical bug. Don't trigger EOF, and
@ -2590,8 +2587,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
if (ist->dts != AV_NOPTS_VALUE)
dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt) {
avpkt = *pkt;
avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
pkt->dts = dts; // ffmpeg.c probably shouldn't do this
}
// The old code used to set dts on the drain packet, which does not work
@ -2605,7 +2601,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
}
update_benchmark(NULL);
ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
if (ret < 0)
*decode_failed = 1;
@ -2764,6 +2760,8 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
exit_program(1);
if (!check_output_constraints(ist, ost) || !ost->encoding_needed
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
continue;
@ -2799,7 +2797,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
int repeating = 0;
int eof_reached = 0;
AVPacket avpkt;
AVPacket *avpkt;
if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
return AVERROR(ENOMEM);
avpkt = ist->pkt;
if (!ist->saw_first_ts) {
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
ist->pts = 0;
@ -2815,13 +2818,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
if (ist->next_pts == AV_NOPTS_VALUE)
ist->next_pts = ist->pts;
if (!pkt) {
/* EOF handling */
av_init_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
} else {
avpkt = *pkt;
if (pkt) {
av_packet_unref(avpkt);
ret = av_packet_ref(avpkt, pkt);
if (ret < 0)
return ret;
}
if (pkt && pkt->dts != AV_NOPTS_VALUE) {
@ -2842,11 +2843,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
switch (ist->dec_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
&decode_failed);
av_packet_unref(avpkt);
break;
case AVMEDIA_TYPE_VIDEO:
ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
&decode_failed);
if (!repeating || !pkt || got_output) {
if (pkt && pkt->duration) {
@ -2871,13 +2873,15 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
ist->next_pts += duration_dts;
}
}
av_packet_unref(avpkt);
break;
case AVMEDIA_TYPE_SUBTITLE:
if (repeating)
break;
ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
if (!pkt && ret >= 0)
ret = AVERROR_EOF;
av_packet_unref(avpkt);
break;
default:
return -1;
@ -2966,6 +2970,8 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
exit_program(1);
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
continue;
@ -3122,7 +3128,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
InputStream *ist = input_streams[ist_index];
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
const AVCodec *codec = ist->dec;
if (!codec) {
snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
@ -3136,7 +3142,6 @@ static int init_input_stream(int ist_index, char *error, int error_len)
ist->dec_ctx->thread_safe_callbacks = 1;
#endif
av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
(ist->decoding_needed & DECODING_FOR_OST)) {
av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
@ -3234,10 +3239,11 @@ static int check_init_output_file(OutputFile *of, int file_index)
ost->mux_timebase = ost->st->time_base;
while (av_fifo_size(ost->muxing_queue)) {
AVPacket pkt;
AVPacket *pkt;
av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
ost->muxing_queue_data_size -= pkt.size;
write_packet(of, &pkt, ost, 1);
ost->muxing_queue_data_size -= pkt->size;
write_packet(of, pkt, ost, 1);
av_packet_free(&pkt);
}
}
@ -3318,15 +3324,23 @@ static int init_output_stream_streamcopy(OutputStream *ost)
if (!ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if (ost->frame_rate.num)
ost->st->avg_frame_rate = ost->frame_rate;
else
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
if (ret < 0)
return ret;
// copy timebase while removing common factors
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
if (ost->frame_rate.num)
ost->st->time_base = av_inv_q(ost->frame_rate);
else
ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
}
// copy estimated duration as a hint to the muxer
if (ost->st->duration <= 0 && ist->st->duration > 0)
@ -3702,7 +3716,7 @@ static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, in
int ret = 0;
if (ost->encoding_needed) {
AVCodec *codec = ost->enc;
const AVCodec *codec = ost->enc;
AVCodecContext *dec = NULL;
InputStream *ist;
@ -4123,7 +4137,7 @@ static OutputStream *choose_output(void)
ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
if (!ost->initialized && !ost->inputs_done)
return ost;
return ost->unavailable ? NULL : ost;
if (!ost->finished && opts < opts_min) {
opts_min = opts;
@ -4261,12 +4275,12 @@ static int check_keyboard_interaction(int64_t cur_time)
static void *input_thread(void *arg)
{
InputFile *f = arg;
AVPacket *pkt = f->pkt, *queue_pkt;
unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
int ret = 0;
while (1) {
AVPacket pkt;
ret = av_read_frame(f->ctx, &pkt);
ret = av_read_frame(f->ctx, pkt);
if (ret == AVERROR(EAGAIN)) {
av_usleep(10000);
@ -4276,10 +4290,17 @@ static void *input_thread(void *arg)
av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
break;
}
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
queue_pkt = av_packet_alloc();
if (!queue_pkt) {
av_packet_unref(pkt);
av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
break;
}
av_packet_move_ref(queue_pkt, pkt);
ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
if (flags && ret == AVERROR(EAGAIN)) {
flags = 0;
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
av_log(f->ctx, AV_LOG_WARNING,
"Thread message queue blocking; consider raising the "
"thread_queue_size option (current value: %d)\n",
@ -4290,7 +4311,7 @@ static void *input_thread(void *arg)
av_log(f->ctx, AV_LOG_ERROR,
"Unable to send packet to main thread: %s\n",
av_err2str(ret));
av_packet_unref(&pkt);
av_packet_free(&queue_pkt);
av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
break;
}
@ -4302,13 +4323,13 @@ static void *input_thread(void *arg)
static void free_input_thread(int i)
{
InputFile *f = input_files[i];
AVPacket pkt;
AVPacket *pkt;
if (!f || !f->in_thread_queue)
return;
av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
av_packet_unref(&pkt);
av_packet_free(&pkt);
pthread_join(f->thread, NULL);
f->joined = 1;
@ -4337,7 +4358,7 @@ static int init_input_thread(int i)
strcmp(f->ctx->iformat->name, "lavfi"))
f->non_blocking = 1;
ret = av_thread_message_queue_alloc(&f->in_thread_queue,
f->thread_queue_size, sizeof(AVPacket));
f->thread_queue_size, sizeof(f->pkt));
if (ret < 0)
return ret;
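With these hunks the demuxer thread no longer pushes AVPacket structs by value through the AVThreadMessageQueue: each message is a pointer to a packet allocated per send, and the receiver frees it. A minimal sketch of both ends, assuming the queue was created with an element size of sizeof(AVPacket *):

    #include <libavcodec/avcodec.h>
    #include <libavutil/threadmessage.h>

    /* Producer: move the demuxed data into a heap packet, send the pointer. */
    static int send_pkt(AVThreadMessageQueue *q, AVPacket *src)
    {
        int ret;
        AVPacket *msg = av_packet_alloc();
        if (!msg)
            return AVERROR(ENOMEM);
        av_packet_move_ref(msg, src);
        ret = av_thread_message_queue_send(q, &msg, 0);
        if (ret < 0)
            av_packet_free(&msg);   /* never delivered, free it here */
        return ret;
    }

    /* Consumer: receive the pointer, release the packet after use. */
    static int recv_pkt(AVThreadMessageQueue *q)
    {
        AVPacket *msg;
        int ret = av_thread_message_queue_recv(q, &msg, 0);
        if (ret < 0)
            return ret;
        /* ... process msg ... */
        av_packet_free(&msg);
        return 0;
    }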
@ -4362,7 +4383,7 @@ static int init_input_threads(void)
return 0;
}
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
{
return av_thread_message_queue_recv(f->in_thread_queue, pkt,
f->non_blocking ?
@ -4370,7 +4391,7 @@ static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
}
#endif
static int get_input_packet(InputFile *f, AVPacket *pkt)
static int get_input_packet(InputFile *f, AVPacket **pkt)
{
if (f->rate_emu) {
int i;
@ -4387,7 +4408,8 @@ static int get_input_packet(InputFile *f, AVPacket *pkt)
if (f->thread_queue_size)
return get_input_packet_mt(f, pkt);
#endif
return av_read_frame(f->ctx, pkt);
*pkt = f->pkt;
return av_read_frame(f->ctx, *pkt);
}
static int got_eagain(void)
@ -4499,7 +4521,7 @@ static int process_input(int file_index)
InputFile *ifile = input_files[file_index];
AVFormatContext *is;
InputStream *ist;
AVPacket pkt;
AVPacket *pkt;
int ret, thread_ret, i, j;
int64_t duration;
int64_t pkt_dts;
@ -4574,27 +4596,27 @@ static int process_input(int file_index)
reset_eagain();
if (do_pkt_dump) {
av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
is->streams[pkt.stream_index]);
av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
is->streams[pkt->stream_index]);
}
/* the following test is needed in case new streams appear
dynamically in stream : we ignore them */
if (pkt.stream_index >= ifile->nb_streams) {
report_new_stream(file_index, &pkt);
if (pkt->stream_index >= ifile->nb_streams) {
report_new_stream(file_index, pkt);
goto discard_packet;
}
ist = input_streams[ifile->ist_index + pkt.stream_index];
ist = input_streams[ifile->ist_index + pkt->stream_index];
ist->data_size += pkt.size;
ist->data_size += pkt->size;
ist->nb_packets++;
if (ist->discard)
goto discard_packet;
if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
"%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
"%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
if (exit_on_error)
exit_program(1);
}
@ -4602,11 +4624,11 @@ static int process_input(int file_index)
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
"next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
av_ts2str(input_files[ist->file_index]->ts_offset),
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
}
@ -4636,12 +4658,12 @@ static int process_input(int file_index)
stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
ist->wrap_correction_done = 1;
if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
ist->wrap_correction_done = 0;
}
if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
ist->wrap_correction_done = 0;
}
}
@ -4655,10 +4677,10 @@ static int process_input(int file_index)
if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
continue;
if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
if (av_packet_get_side_data(pkt, src_sd->type, NULL))
continue;
dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
if (!dst_data)
exit_program(1);
@ -4666,17 +4688,17 @@ static int process_input(int file_index)
}
}
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts *= ist->ts_scale;
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts *= ist->ts_scale;
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts *= ist->ts_scale;
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts *= ist->ts_scale;
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
@ -4688,27 +4710,27 @@ static int process_input(int file_index)
av_log(NULL, AV_LOG_DEBUG,
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, ifile->ts_offset);
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
}
duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE) {
pkt.pts += duration;
ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
if (pkt->pts != AV_NOPTS_VALUE) {
pkt->pts += duration;
ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
}
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts += duration;
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts += duration;
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
(is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
ist->st->time_base, AV_TIME_BASE_Q,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
@ -4731,46 +4753,51 @@ static int process_input(int file_index)
ist->file_index, ist->st->index, ist->st->id,
av_get_media_type_string(ist->dec_ctx->codec_type),
delta, ifile->ts_offset);
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
} else {
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
pkt.dts = AV_NOPTS_VALUE;
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
pkt->dts = AV_NOPTS_VALUE;
}
if (pkt.pts != AV_NOPTS_VALUE){
int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
if (pkt->pts != AV_NOPTS_VALUE){
int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
delta = pkt_pts - ist->next_dts;
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
pkt.pts = AV_NOPTS_VALUE;
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
pkt->pts = AV_NOPTS_VALUE;
}
}
}
}
if (pkt.dts != AV_NOPTS_VALUE)
ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
if (pkt->dts != AV_NOPTS_VALUE)
ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
av_ts2str(input_files[ist->file_index]->ts_offset),
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
}
sub2video_heartbeat(ist, pkt.pts);
sub2video_heartbeat(ist, pkt->pts);
process_input_packet(ist, &pkt, 0);
process_input_packet(ist, pkt, 0);
discard_packet:
av_packet_unref(&pkt);
#if HAVE_THREADS
if (ifile->thread_queue_size)
av_packet_free(&pkt);
else
#endif
av_packet_unref(pkt);
return 0;
}

View File

@ -330,9 +330,10 @@ typedef struct InputStream {
#define DECODING_FOR_FILTER 2
AVCodecContext *dec_ctx;
AVCodec *dec;
const AVCodec *dec;
AVFrame *decoded_frame;
AVFrame *filter_frame; /* a ref of decoded_frame, to be sent to filters */
AVPacket *pkt;
int64_t start; /* time when read started */
/* predicted dts of the next packet read for this stream or (when there are
@ -441,6 +442,8 @@ typedef struct InputFile {
int rate_emu;
int accurate_seek;
AVPacket *pkt;
#if HAVE_THREADS
AVThreadMessageQueue *in_thread_queue;
pthread_t thread; /* thread reading from this file */
@ -493,10 +496,11 @@ typedef struct OutputStream {
AVCodecContext *enc_ctx;
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
AVCodec *enc;
const AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
AVFrame *last_frame;
AVPacket *pkt;
int last_dropped;
int last_nb0_frames[3];
@ -757,8 +761,8 @@ void init_options(OptionsContext *o);
AVDictionary *strip_specifiers(AVDictionary *dict);
void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec);
int fftools_copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o);
AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder);
AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st);
const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder);
const AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st);
int open_input_file(OptionsContext *o, const char *filename);
int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s);
int choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost);

View File

@ -47,28 +47,23 @@
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
// FIXME: YUV420P etc. are actually supported with full color range,
// yet the latter information isn't available here.
static const enum AVPixelFormat *get_compliance_normal_pix_fmts(const AVCodec *codec, const enum AVPixelFormat default_formats[])
{
static const enum AVPixelFormat mjpeg_formats[] =
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE };
static const enum AVPixelFormat ljpeg_formats[] =
{ AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
AV_PIX_FMT_NONE};
if (codec_id == AV_CODEC_ID_MJPEG) {
if (!strcmp(codec->name, "mjpeg")) {
return mjpeg_formats;
} else if (codec_id == AV_CODEC_ID_LJPEG) {
return ljpeg_formats;
} else {
return default_formats;
}
}
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const AVCodec *codec, enum AVPixelFormat target)
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
const AVCodec *codec, enum AVPixelFormat target)
{
if (codec && codec->pix_fmts) {
const enum AVPixelFormat *p = codec->pix_fmts;
@ -77,11 +72,11 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
if (enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_normal_pix_fmts(codec, p);
}
for (; *p != AV_PIX_FMT_NONE; p++) {
best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
best = av_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
if (*p == target)
break;
}
@ -98,29 +93,6 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const
return target;
}
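choose_pixel_fmt now calls the libavutil helper av_find_best_pix_fmt_of_2() instead of the deprecated avcodec_ variant, and the compliance list is selected by codec rather than by codec ID. A minimal sketch of the selection loop over a supported-format list (the list itself is hypothetical):

    #include <libavutil/pixdesc.h>

    static enum AVPixelFormat pick_format(const enum AVPixelFormat *supported,
                                          enum AVPixelFormat target, int has_alpha)
    {
        enum AVPixelFormat best = AV_PIX_FMT_NONE;
        for (const enum AVPixelFormat *p = supported; *p != AV_PIX_FMT_NONE; p++) {
            /* Keep whichever of `best` and `*p` loses less when converting
             * from `target` (chroma subsampling, bit depth, alpha). */
            best = av_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
            if (*p == target)
                return target;   /* exact match, stop early */
        }
        return best;
    }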
void choose_sample_fmt(AVStream *st, const AVCodec *codec)
{
if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
if (*p == st->codecpar->format)
break;
}
if (*p == -1) {
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id);
if(desc && (desc->props & AV_CODEC_PROP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
if(av_get_sample_fmt_name(st->codecpar->format))
av_log(NULL, AV_LOG_WARNING,
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_sample_fmt_name(st->codecpar->format),
codec->name,
av_get_sample_fmt_name(codec->sample_fmts[0]));
st->codecpar->format = codec->sample_fmts[0];
}
}
}
static char *choose_pix_fmts(OutputFilter *ofilter)
{
OutputStream *ost = ofilter->ost;
@ -148,8 +120,8 @@ static char *choose_pix_fmts(OutputFilter *ofilter)
exit_program(1);
p = ost->enc->pix_fmts;
if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
if (ost->enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_normal_pix_fmts(ost->enc, p);
}
for (; *p != AV_PIX_FMT_NONE; p++) {
@ -163,45 +135,39 @@ static char *choose_pix_fmts(OutputFilter *ofilter)
return NULL;
}
/* Define a function for building a string containing a list of
* allowed formats. */
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
static char *choose_ ## suffix (OutputFilter *ofilter) \
/* Define a function for appending a list of allowed formats
* to an AVBPrint. If nonempty, the list will have a header. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilter *ofilter, AVBPrint *bprint) \
{ \
if (ofilter->var == none && !ofilter->supported_list) \
return; \
av_bprintf(bprint, #name "="); \
if (ofilter->var != none) { \
get_name(ofilter->var); \
return av_strdup(name); \
} else if (ofilter->supported_list) { \
av_bprintf(bprint, printf_format, get_name(ofilter->var)); \
} else { \
const type *p; \
AVIOContext *s = NULL; \
uint8_t *ret; \
int len; \
\
if (avio_open_dyn_buf(&s) < 0) \
exit_program(1); \
\
for (p = ofilter->supported_list; *p != none; p++) { \
get_name(*p); \
avio_printf(s, "%s|", name); \
av_bprintf(bprint, printf_format "|", get_name(*p)); \
} \
len = avio_close_dyn_buf(s, &ret); \
ret[len - 1] = 0; \
return ret; \
} else \
return NULL; \
if (bprint->len > 0) \
bprint->str[--bprint->len] = '\0'; \
} \
av_bprint_chars(bprint, ':', 1); \
}
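The rewritten DEF_CHOOSE_FORMAT above no longer builds each list in a dynamic AVIOContext buffer that the caller must free; it appends "name=a|b|c:" straight into a caller-owned AVBPrint, and the instantiations below feed the single `args` buffer in configure_output_audio_filter(). A minimal sketch of the same append-and-terminate pattern for a zero-terminated integer list (function and parameter names are illustrative):

    #include <libavutil/bprint.h>

    static void append_sample_rates(AVBPrint *bp, const int *rates /* 0-terminated */)
    {
        if (!rates || !*rates)
            return;
        av_bprintf(bp, "sample_rates=");
        for (const int *p = rates; *p; p++)
            av_bprintf(bp, "%d|", *p);
        /* Swap the trailing '|' for the ':' that separates option groups
         * in the aformat filter argument string. */
        bp->str[bp->len - 1] = ':';
    }

The caller initializes the buffer once with av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED), checks av_bprint_is_complete() before use, and releases it with av_bprint_finalize(), which is what the reworked audio filter setup below does.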
//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
// GET_PIX_FMT_NAME)
DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)
DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
GET_SAMPLE_RATE_NAME)
"%d", )
DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
GET_CH_LAYOUT_NAME)
"0x%"PRIx64, )
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
@ -505,8 +471,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
if ((pix_fmts = choose_pix_fmts(ofilter))) {
AVFilterContext *filter;
snprintf(name, sizeof(name), "format_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&filter,
avfilter_get_by_name("format"),
"format", pix_fmts, NULL, fg->graph);
@ -561,7 +526,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
AVCodecContext *codec = ost->enc_ctx;
AVFilterContext *last_filter = out->filter_ctx;
int pad_idx = out->pad_idx;
char *sample_fmts, *sample_rates, *channel_layouts;
AVBPrint args;
char name[255];
int ret;
@ -584,72 +549,58 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
avfilter_get_by_name(filter_name), \
filter_name, arg, NULL, fg->graph); \
if (ret < 0) \
return ret; \
goto fail; \
\
ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
if (ret < 0) \
return ret; \
goto fail; \
\
last_filter = filt_ctx; \
pad_idx = 0; \
} while (0)
av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
if (ost->audio_channels_mapped) {
int i;
AVBPrint pan_buf;
av_bprint_init(&pan_buf, 256, 8192);
av_bprintf(&pan_buf, "0x%"PRIx64,
av_bprintf(&args, "0x%"PRIx64,
av_get_default_channel_layout(ost->audio_channels_mapped));
for (i = 0; i < ost->audio_channels_mapped; i++)
if (ost->audio_channels_map[i] != -1)
av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);
AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
av_bprint_finalize(&pan_buf, NULL);
AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
av_bprint_clear(&args);
}
if (codec->channels && !codec->channel_layout)
codec->channel_layout = av_get_default_channel_layout(codec->channels);
sample_fmts = choose_sample_fmts(ofilter);
sample_rates = choose_sample_rates(ofilter);
channel_layouts = choose_channel_layouts(ofilter);
if (sample_fmts || sample_rates || channel_layouts) {
choose_sample_fmts(ofilter, &args);
choose_sample_rates(ofilter, &args);
choose_channel_layouts(ofilter, &args);
if (!av_bprint_is_complete(&args)) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (args.len) {
AVFilterContext *format;
char args[256];
args[0] = 0;
if (sample_fmts)
av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
sample_fmts);
if (sample_rates)
av_strlcatf(args, sizeof(args), "sample_rates=%s:",
sample_rates);
if (channel_layouts)
av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
channel_layouts);
av_freep(&sample_fmts);
av_freep(&sample_rates);
av_freep(&channel_layouts);
snprintf(name, sizeof(name), "format_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&format,
avfilter_get_by_name("aformat"),
name, args, NULL, fg->graph);
name, args.str, NULL, fg->graph);
if (ret < 0)
return ret;
goto fail;
ret = avfilter_link(last_filter, pad_idx, format, 0);
if (ret < 0)
return ret;
goto fail;
last_filter = format;
pad_idx = 0;
}
if (ost->apad && of->shortest) {
char args[256];
int i;
for (i=0; i<of->ctx->nb_streams; i++)
@ -657,8 +608,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
break;
if (i<of->ctx->nb_streams) {
snprintf(args, sizeof(args), "%s", ost->apad);
AUTO_INSERT_FILTER("-apad", "apad", args);
AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
}
}
@ -667,15 +617,18 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
ret = insert_trim(of->start_time, of->recording_time,
&last_filter, &pad_idx, name);
if (ret < 0)
return ret;
goto fail;
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
return ret;
goto fail;
fail:
av_bprint_finalize(&args, NULL);
return 0;
return ret;
}
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
AVFilterInOut *out)
{
if (!ofilter->ost) {
av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);

View File

@ -757,11 +757,11 @@ int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
return 0;
}
AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
{
const AVCodecDescriptor *desc;
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
const AVCodec *codec;
codec = encoder ?
avcodec_find_encoder_by_name(name) :
@ -786,13 +786,13 @@ AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
return codec;
}
AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
const AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
{
char *codec_name = NULL;
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
if (codec_name) {
AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
const AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
st->codecpar->codec_id = codec->id;
return codec;
} else
@ -1088,7 +1088,7 @@ int open_input_file(OptionsContext *o, const char *filename)
{
InputFile *f;
AVFormatContext *ic;
AVInputFormat *file_iformat = NULL;
const AVInputFormat *file_iformat = NULL;
int err, i, ret;
int64_t timestamp;
AVDictionary *unused_opts = NULL;
@ -1137,20 +1137,22 @@ int open_input_file(OptionsContext *o, const char *filename)
av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
}
if (o->nb_audio_channels) {
const AVClass *priv_class;
/* because we set audio_channels based on both the "ac" and
* "channel_layout" options, we need to check that the specified
* demuxer actually has the "channels" option before setting it */
if (file_iformat && file_iformat->priv_class &&
av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
if (file_iformat && (priv_class = file_iformat->priv_class) &&
av_opt_find(&priv_class, "channels", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ)) {
av_dict_set_int(&o->g->format_opts, "channels", o->audio_channels[o->nb_audio_channels - 1].u.i, 0);
}
}
if (o->nb_frame_rates) {
const AVClass *priv_class;
/* set the format-level framerate option;
* this is important for video grabbers, e.g. x11 */
if (file_iformat && file_iformat->priv_class &&
av_opt_find(&file_iformat->priv_class, "framerate", NULL, 0,
if (file_iformat && (priv_class = file_iformat->priv_class) &&
av_opt_find(&priv_class, "framerate", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ)) {
av_dict_set(&o->g->format_opts, "framerate",
o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
@ -1300,6 +1302,9 @@ int open_input_file(OptionsContext *o, const char *filename)
f->loop = o->loop;
f->duration = 0;
f->time_base = (AVRational){ 1, 1 };
f->pkt = av_packet_alloc();
if (!f->pkt)
exit_program(1);
#if HAVE_THREADS
f->thread_queue_size = o->thread_queue_size;
#endif
@ -1591,7 +1596,7 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM
ost->max_muxing_queue_size = 128;
MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st);
ost->max_muxing_queue_size *= sizeof(AVPacket);
ost->max_muxing_queue_size *= sizeof(ost->pkt);
ost->muxing_queue_data_size = 0;
@ -2287,7 +2292,8 @@ int open_output_file(OptionsContext *o, const char *filename)
for (i = 0; i < nb_input_streams; i++) {
int score;
ist = input_streams[i];
score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames
score = ist->st->codecpar->channels
+ 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
+ 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
if (ist->user_set_discard == AVDISCARD_ALL)
continue;
@ -2457,19 +2463,6 @@ loop_end:
avio_closep(&pb);
}
#if FF_API_LAVF_AVCTX
for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
AVDictionaryEntry *e;
ost = output_streams[i];
if ((ost->stream_copy || ost->attachment_filename)
&& (e = av_dict_get(o->g->codec_opts, "flags", NULL, AV_DICT_IGNORE_SUFFIX))
&& (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6)))
if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
exit_program(1);
}
#endif
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, nb_output_files - 1, oc->url, 1);
av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", nb_output_files - 1);

View File

@ -122,6 +122,11 @@ __thread int use_byte_value_binary_prefix = 0;
__thread int use_value_sexagesimal_format = 0;
__thread int show_private_data = 1;
#define SHOW_OPTIONAL_FIELDS_AUTO -1
#define SHOW_OPTIONAL_FIELDS_NEVER 0
#define SHOW_OPTIONAL_FIELDS_ALWAYS 1
__thread int show_optional_fields = SHOW_OPTIONAL_FIELDS_AUTO;
__thread char *print_format;
__thread char *stream_specifier;
__thread char *show_data_hash;
@ -260,7 +265,7 @@ __thread OptionDef *ffprobe_options = NULL;
/* FFprobe context */
__thread const char *input_filename;
__thread const char *print_input_filename;
__thread AVInputFormat *iformat = NULL;
__thread const AVInputFormat *iformat = NULL;
__thread struct AVHashContext *hash;
@ -751,8 +756,10 @@ static inline int writer_print_string(WriterContext *wctx,
const struct section *section = wctx->section[wctx->level];
int ret = 0;
if ((flags & PRINT_STRING_OPT)
&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
if (show_optional_fields == SHOW_OPTIONAL_FIELDS_NEVER ||
(show_optional_fields == SHOW_OPTIONAL_FIELDS_AUTO
&& (flags & PRINT_STRING_OPT)
&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS)))
return 0;
if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
@ -1666,36 +1673,11 @@ static av_cold int xml_init(WriterContext *wctx)
CHECK_COMPLIANCE(show_private_data, "private");
CHECK_COMPLIANCE(show_value_unit, "unit");
CHECK_COMPLIANCE(use_value_prefix, "prefix");
if (do_show_frames && do_show_packets) {
av_log(wctx, AV_LOG_ERROR,
"Interleaved frames and packets are not allowed in XSD. "
"Select only one between the -show_frames and the -show_packets options.\n");
return AVERROR(EINVAL);
}
}
return 0;
}
static const char *xml_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
{
const char *p;
for (p = src; *p; p++) {
switch (*p) {
case '&' : av_bprintf(dst, "%s", "&amp;"); break;
case '<' : av_bprintf(dst, "%s", "&lt;"); break;
case '>' : av_bprintf(dst, "%s", "&gt;"); break;
case '"' : av_bprintf(dst, "%s", "&quot;"); break;
case '\'': av_bprintf(dst, "%s", "&apos;"); break;
default: av_bprint_chars(dst, *p, 1);
}
}
return dst->str;
}
#define XML_INDENT() av_log(NULL, AV_LOG_STDERR, "%*c", xml->indent_level * 4, ' ')
static void xml_print_section_header(WriterContext *wctx)
@ -1767,14 +1749,22 @@ static void xml_print_str(WriterContext *wctx, const char *key, const char *valu
if (section->flags & SECTION_FLAG_HAS_VARIABLE_FIELDS) {
XML_INDENT();
av_bprint_escape(&buf, key, NULL,
AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
av_log(NULL, AV_LOG_STDERR, "<%s key=\"%s\"",
section->element_name, xml_escape_str(&buf, key, wctx));
section->element_name, buf.str);
av_bprint_clear(&buf);
av_log(NULL, AV_LOG_STDERR, " value=\"%s\"/>\n", xml_escape_str(&buf, value, wctx));
av_bprint_escape(&buf, value, NULL,
AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
av_log(NULL, AV_LOG_STDERR, " value=\"%s\"/>\n", buf.str);
} else {
if (wctx->nb_item[wctx->level])
av_log(NULL, AV_LOG_STDERR, " ");
av_log(NULL, AV_LOG_STDERR, "%s=\"%s\"", key, xml_escape_str(&buf, value, wctx));
av_bprint_escape(&buf, value, NULL,
AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
av_log(NULL, AV_LOG_STDERR, "%s=\"%s\"", key, buf.str);
}
av_bprint_finalize(&buf, NULL);
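The hand-written xml_escape_str() helper removed above is replaced by av_bprint_escape() in XML mode, which handles &, <, > and the quote characters selected by the flags. A minimal sketch of escaping one attribute value (assuming an FFmpeg new enough to provide AV_ESCAPE_MODE_XML):

    #include <libavutil/avstring.h>
    #include <libavutil/bprint.h>
    #include <stdio.h>

    static void print_xml_attr(const char *key, const char *value)
    {
        AVBPrint buf;
        av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
        av_bprint_escape(&buf, value, NULL,
                         AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
        printf(" %s=\"%s\"", key, buf.str);
        av_bprint_finalize(&buf, NULL);
    }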
@ -2040,6 +2030,23 @@ static void print_pkt_side_data(WriterContext *w,
print_int("el_present_flag", dovi->el_present_flag);
print_int("bl_present_flag", dovi->bl_present_flag);
print_int("dv_bl_signal_compatibility_id", dovi->dv_bl_signal_compatibility_id);
} else if (sd->type == AV_PKT_DATA_AUDIO_SERVICE_TYPE) {
enum AVAudioServiceType *t = (enum AVAudioServiceType *)sd->data;
print_int("type", *t);
} else if (sd->type == AV_PKT_DATA_MPEGTS_STREAM_ID) {
print_int("id", *sd->data);
} else if (sd->type == AV_PKT_DATA_CPB_PROPERTIES) {
const AVCPBProperties *prop = (AVCPBProperties *)sd->data;
print_int("max_bitrate", prop->max_bitrate);
print_int("min_bitrate", prop->min_bitrate);
print_int("avg_bitrate", prop->avg_bitrate);
print_int("buffer_size", prop->buffer_size);
print_int("vbv_delay", prop->vbv_delay);
} else if (sd->type == AV_PKT_DATA_WEBVTT_IDENTIFIER ||
sd->type == AV_PKT_DATA_WEBVTT_SETTINGS) {
if (do_show_data)
writer_print_data(w, "data", sd->data, sd->size);
writer_print_data_hash(w, "data_hash", sd->data, sd->size);
}
writer_print_section_footer(w);
}
@ -2169,8 +2176,6 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
print_time("dts_time", pkt->dts, &st->time_base);
print_duration_ts("duration", pkt->duration);
print_duration_time("duration_time", pkt->duration, &st->time_base);
print_duration_ts("convergence_duration", pkt->convergence_duration);
print_duration_time("convergence_duration_time", pkt->convergence_duration, &st->time_base);
print_val("size", pkt->size, unit_byte_str);
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
else print_str_opt("pos", "N/A");
@ -2178,7 +2183,7 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');
if (pkt->side_data_elems) {
int size;
size_t size;
const uint8_t *side_metadata;
side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
@ -2469,14 +2474,12 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
const ReadInterval *interval, int64_t *cur_ts)
{
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
AVPacket pkt;
AVPacket *pkt = NULL;
AVFrame *frame = NULL;
int ret = 0, i = 0, frame_count = 0;
int64_t start = -INT64_MAX, end = interval->end;
int has_start = 0, has_end = interval->has_end && !interval->end_is_offset;
av_init_packet(&pkt);
av_log(NULL, AV_LOG_VERBOSE, "Processing read interval ");
log_read_interval(interval, NULL, AV_LOG_VERBOSE);
@ -2509,18 +2512,23 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
ret = AVERROR(ENOMEM);
goto end;
}
while (!av_read_frame(fmt_ctx, &pkt)) {
pkt = av_packet_alloc();
if (!pkt) {
ret = AVERROR(ENOMEM);
goto end;
}
while (!av_read_frame(fmt_ctx, pkt)) {
if (fmt_ctx->nb_streams > nb_streams) {
REALLOCZ_ARRAY_STREAM(nb_streams_frames, nb_streams, fmt_ctx->nb_streams);
REALLOCZ_ARRAY_STREAM(nb_streams_packets, nb_streams, fmt_ctx->nb_streams);
REALLOCZ_ARRAY_STREAM(selected_streams, nb_streams, fmt_ctx->nb_streams);
nb_streams = fmt_ctx->nb_streams;
}
if (selected_streams[pkt.stream_index]) {
AVRational tb = ifile->streams[pkt.stream_index].st->time_base;
if (selected_streams[pkt->stream_index]) {
AVRational tb = ifile->streams[pkt->stream_index].st->time_base;
if (pkt.pts != AV_NOPTS_VALUE)
*cur_ts = av_rescale_q(pkt.pts, tb, AV_TIME_BASE_Q);
if (pkt->pts != AV_NOPTS_VALUE)
*cur_ts = av_rescale_q(pkt->pts, tb, AV_TIME_BASE_Q);
if (!has_start && *cur_ts != AV_NOPTS_VALUE) {
start = *cur_ts;
@ -2542,26 +2550,27 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
frame_count++;
if (do_read_packets) {
if (do_show_packets)
show_packet(w, ifile, &pkt, i++);
nb_streams_packets[pkt.stream_index]++;
show_packet(w, ifile, pkt, i++);
nb_streams_packets[pkt->stream_index]++;
}
if (do_read_frames) {
int packet_new = 1;
while (process_frame(w, ifile, frame, &pkt, &packet_new) > 0);
while (process_frame(w, ifile, frame, pkt, &packet_new) > 0);
}
}
av_packet_unref(&pkt);
av_packet_unref(pkt);
}
av_packet_unref(&pkt);
av_packet_unref(pkt);
//Flush remaining frames that are cached in the decoder
for (i = 0; i < fmt_ctx->nb_streams; i++) {
pkt.stream_index = i;
pkt->stream_index = i;
if (do_read_frames)
while (process_frame(w, ifile, frame, &pkt, &(int){1}) > 0);
while (process_frame(w, ifile, frame, pkt, &(int){1}) > 0);
}
end:
av_frame_free(&frame);
av_packet_free(&pkt);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not read packets in interval ");
log_read_interval(interval, NULL, AV_LOG_ERROR);
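The hunk above moves read_interval_packets from a stack AVPacket plus av_init_packet() to a packet obtained from av_packet_alloc(), so the call site no longer depends on sizeof(AVPacket). A minimal sketch of that lifecycle in isolation (hypothetical helper, error handling trimmed):

#include <libavformat/avformat.h>

static int demux_all(AVFormatContext *fmt_ctx)
{
    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    while (av_read_frame(fmt_ctx, pkt) >= 0) {
        /* ... inspect pkt->stream_index, pkt->pts, pkt->size ... */
        av_packet_unref(pkt);   /* drop this payload, keep the struct */
    }

    av_packet_free(&pkt);       /* frees the struct and any leftover ref */
    return 0;
}

av_packet_unref() releases the payload after each iteration while keeping the struct reusable; av_packet_free() at the end releases both.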
@ -2637,10 +2646,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
s = av_get_media_type_string(par->codec_type);
if (s) print_str ("codec_type", s);
else print_str_opt("codec_type", "unknown");
#if FF_API_LAVF_AVCTX
if (dec_ctx)
print_q("codec_time_base", dec_ctx->time_base, '/');
#endif
/* print AVI/FourCC tag */
print_str("codec_tag_string", av_fourcc2str(par->codec_tag));
@ -2650,13 +2655,11 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
case AVMEDIA_TYPE_VIDEO:
print_int("width", par->width);
print_int("height", par->height);
#if FF_API_LAVF_AVCTX
if (dec_ctx) {
print_int("coded_width", dec_ctx->coded_width);
print_int("coded_height", dec_ctx->coded_height);
print_int("closed_captions", !!(dec_ctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS));
}
#endif
print_int("has_b_frames", par->video_delay);
sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL);
if (sar.num) {
@ -2694,15 +2697,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
else
print_str_opt("field_order", "unknown");
#if FF_API_PRIVATE_OPT
if (dec_ctx && dec_ctx->timecode_frame_start >= 0) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
print_str("timecode", tcbuf);
} else {
print_str_opt("timecode", "N/A");
}
#endif
if (dec_ctx)
print_int("refs", dec_ctx->refs);
break;
@ -2741,7 +2735,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
const AVOption *opt = NULL;
while ((opt = av_opt_next(dec_ctx->priv_data,opt))) {
uint8_t *str;
if (opt->flags) continue;
if (!(opt->flags & AV_OPT_FLAG_EXPORT)) continue;
if (av_opt_get(dec_ctx->priv_data, opt->name, 0, &str) >= 0) {
print_str(opt->name, str);
av_free(str);
@ -2760,10 +2754,10 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
print_time("duration", stream->duration, &stream->time_base);
if (par->bit_rate > 0) print_val ("bit_rate", par->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
#if FF_API_LAVF_AVCTX
if (stream->codec->rc_max_rate > 0) print_val ("max_bit_rate", stream->codec->rc_max_rate, unit_bit_per_second_str);
else print_str_opt("max_bit_rate", "N/A");
#endif
if (dec_ctx && dec_ctx->rc_max_rate > 0)
print_val ("max_bit_rate", dec_ctx->rc_max_rate, unit_bit_per_second_str);
else
print_str_opt("max_bit_rate", "N/A");
if (dec_ctx && dec_ctx->bits_per_raw_sample > 0) print_fmt("bits_per_raw_sample", "%d", dec_ctx->bits_per_raw_sample);
else print_str_opt("bits_per_raw_sample", "N/A");
if (stream->nb_frames) print_fmt ("nb_frames", "%"PRId64, stream->nb_frames);
@ -2775,8 +2769,11 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
if (do_show_data)
writer_print_data(w, "extradata", par->extradata,
par->extradata_size);
if (par->extradata_size > 0) {
writer_print_data_hash(w, "extradata_hash", par->extradata,
par->extradata_size);
}
/* Print disposition information */
#define PRINT_DISPOSITION(flagname, name) do { \
@ -2797,6 +2794,11 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
PRINT_DISPOSITION(CAPTIONS, "captions");
PRINT_DISPOSITION(DESCRIPTIONS, "descriptions");
PRINT_DISPOSITION(METADATA, "metadata");
PRINT_DISPOSITION(DEPENDENT, "dependent");
PRINT_DISPOSITION(STILL_IMAGE, "still_image");
writer_print_section_footer(w);
}
@ -3016,7 +3018,7 @@ static int open_input_file(InputFile *ifile, const char *filename, const char *p
for (i = 0; i < fmt_ctx->nb_streams; i++) {
InputStream *ist = &ifile->streams[i];
AVStream *stream = fmt_ctx->streams[i];
AVCodec *codec;
const AVCodec *codec;
ist->st = stream;
@ -3054,12 +3056,6 @@ static int open_input_file(InputFile *ifile, const char *filename, const char *p
}
ist->dec_ctx->pkt_timebase = stream->time_base;
ist->dec_ctx->framerate = stream->avg_frame_rate;
#if FF_API_LAVF_AVCTX
ist->dec_ctx->properties = stream->codec->properties;
ist->dec_ctx->coded_width = stream->codec->coded_width;
ist->dec_ctx->coded_height = stream->codec->coded_height;
#endif
if (avcodec_open2(ist->dec_ctx, codec, &opts) < 0) {
av_log(NULL, AV_LOG_WARNING, "Could not open codec for input stream %d\n",
@ -3259,9 +3255,6 @@ static void ffprobe_show_pixel_formats(WriterContext *w)
PRINT_PIX_FMT_FLAG(HWACCEL, "hwaccel");
PRINT_PIX_FMT_FLAG(PLANAR, "planar");
PRINT_PIX_FMT_FLAG(RGB, "rgb");
#if FF_API_PSEUDOPAL
PRINT_PIX_FMT_FLAG(PSEUDOPAL, "pseudopal");
#endif
PRINT_PIX_FMT_FLAG(ALPHA, "alpha");
writer_print_section_footer(w);
}
@ -3280,6 +3273,17 @@ static void ffprobe_show_pixel_formats(WriterContext *w)
writer_print_section_footer(w);
}
static int opt_show_optional_fields(void *optctx, const char *opt, const char *arg)
{
if (!av_strcasecmp(arg, "always")) show_optional_fields = SHOW_OPTIONAL_FIELDS_ALWAYS;
else if (!av_strcasecmp(arg, "never")) show_optional_fields = SHOW_OPTIONAL_FIELDS_NEVER;
else if (!av_strcasecmp(arg, "auto")) show_optional_fields = SHOW_OPTIONAL_FIELDS_AUTO;
if (show_optional_fields == SHOW_OPTIONAL_FIELDS_AUTO && av_strcasecmp(arg, "auto"))
show_optional_fields = parse_number_or_die("show_optional_fields", arg, OPT_INT, SHOW_OPTIONAL_FIELDS_AUTO, SHOW_OPTIONAL_FIELDS_ALWAYS);
return 0;
}
static int opt_format(void *optctx, const char *opt, const char *arg)
{
iformat = av_find_input_format(arg);
@ -3788,6 +3792,7 @@ int ffprobe_execute(int argc, char **argv)
{ "show_library_versions", 0, { .func_arg = &opt_show_library_versions }, "show library versions" },
{ "show_versions", 0, { .func_arg = &opt_show_versions }, "show program and library versions" },
{ "show_pixel_formats", 0, { .func_arg = &opt_show_pixel_formats }, "show pixel format descriptions" },
{ "show_optional_fields", HAS_ARG, { .func_arg = &opt_show_optional_fields }, "show optional fields" },
{ "show_private_data", OPT_BOOL, { &show_private_data }, "show private data" },
{ "private", OPT_BOOL, { &show_private_data }, "same as show_private_data" },
{ "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },

View File

@ -248,11 +248,9 @@ void show_help_children(const AVClass *class, int flags)
static const OptionDef *find_option(const OptionDef *po, const char *name)
{
const char *p = strchr(name, ':');
int len = p ? p - name : strlen(name);
while (po->name) {
if (!strncmp(name, po->name, len) && strlen(po->name) == len)
const char *end;
if (av_strstart(name, po->name, &end) && (!*end || *end == ':'))
break;
po++;
}
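For context, the rewritten option match above is built on av_strstart(), which tests whether a string begins with a prefix and reports where the prefix ends. A tiny standalone sketch of the resulting predicate (helper name illustrative):

#include <libavutil/avstring.h>

/* Matches "codec" and "codec:v", but rejects "codecs". */
static int option_name_matches(const char *name, const char *opt_name)
{
    const char *end;
    return av_strstart(name, opt_name, &end) && (!*end || *end == ':');
}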
@ -584,9 +582,6 @@ int opt_default(void *optctx, const char *opt, const char *arg)
char opt_stripped[128];
const char *p;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class();
#if CONFIG_AVRESAMPLE
const AVClass *rc = avresample_get_class();
#endif
#if CONFIG_SWSCALE
const AVClass *sc = sws_get_class();
#endif
@ -656,13 +651,6 @@ int opt_default(void *optctx, const char *opt, const char *arg)
consumed = 1;
}
#endif
#if CONFIG_AVRESAMPLE
if ((o=opt_find(&rc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
av_dict_set(&resample_opts, opt, arg, FLAGS);
consumed = 1;
}
#endif
if (consumed)
return 0;
@ -1205,13 +1193,13 @@ static void print_buildconf(int flags, int level)
// Change all the ' --' strings to '~--' so that
// they can be identified as tokens.
while ((conflist = strstr(str, " --")) != NULL) {
strncpy(conflist, "~--", 3);
conflist[0] = '~';
}
// Compensate for the weirdness this would cause
// when passing 'pkg-config --static'.
while ((remove_tilde = strstr(str, "pkg-config~")) != NULL) {
strncpy(remove_tilde, "pkg-config ", 11);
remove_tilde[sizeof("pkg-config~") - 2] = ' ';
}
splitconf = strtok(str, "~");
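The strncpy() calls replaced above copied a string that differs from the destination by exactly one byte, with a bound equal to the source length, a pattern compilers tend to flag because no terminator is written. Storing the single differing character is equivalent; the same tokenization in isolation, under that assumption:

#include <string.h>

static void tokenize_buildconf(char *str)
{
    char *p;

    while ((p = strstr(str, " --")) != NULL)
        p[0] = '~';                              /* " --" -> "~--" */

    /* undo it where it split "pkg-config --static" */
    while ((p = strstr(str, "pkg-config~")) != NULL)
        p[sizeof("pkg-config~") - 2] = ' ';
}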
@ -1455,7 +1443,7 @@ static void print_codec(const AVCodec *c)
av_log(NULL, AV_LOG_STDERR, "variable ");
if (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS))
AV_CODEC_CAP_OTHER_THREADS))
av_log(NULL, AV_LOG_STDERR, "threads ");
if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING)
av_log(NULL, AV_LOG_STDERR, "avoidprobe ");
@ -1472,12 +1460,12 @@ static void print_codec(const AVCodec *c)
av_log(NULL, AV_LOG_STDERR, " Threading capabilities: ");
switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS)) {
AV_CODEC_CAP_OTHER_THREADS)) {
case AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS: av_log(NULL, AV_LOG_STDERR, "frame and slice"); break;
case AV_CODEC_CAP_FRAME_THREADS: av_log(NULL, AV_LOG_STDERR, "frame"); break;
case AV_CODEC_CAP_SLICE_THREADS: av_log(NULL, AV_LOG_STDERR, "slice"); break;
case AV_CODEC_CAP_AUTO_THREADS : av_log(NULL, AV_LOG_STDERR, "auto"); break;
case AV_CODEC_CAP_OTHER_THREADS : av_log(NULL, AV_LOG_STDERR, "other"); break;
default: av_log(NULL, AV_LOG_STDERR, "none"); break;
}
av_log(NULL, AV_LOG_STDERR, "\n");
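The switch above from AV_CODEC_CAP_AUTO_THREADS to AV_CODEC_CAP_OTHER_THREADS assumes headers that already define the new name. If this tree also had to compile against an older libavcodec, a shim along these lines could bridge the rename; this is a suggestion, not something the patch contains:

#include <libavcodec/avcodec.h>

/* Assumed compatibility shim: map the new name onto the old flag when
 * building against headers that predate the rename. */
#ifndef AV_CODEC_CAP_OTHER_THREADS
#define AV_CODEC_CAP_OTHER_THREADS AV_CODEC_CAP_AUTO_THREADS
#endif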
@ -2148,7 +2136,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
}
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec)
AVFormatContext *s, AVStream *st, const AVCodec *codec)
{
AVDictionary *ret = NULL;
AVDictionaryEntry *t = NULL;
@ -2177,6 +2165,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
}
while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX))) {
const AVClass *priv_class;
char *p = strchr(t->key, ':');
/* check stream specification in opt name */
@ -2189,8 +2178,8 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
!codec ||
(codec->priv_class &&
av_opt_find(&codec->priv_class, t->key, NULL, flags,
((priv_class = codec->priv_class) &&
av_opt_find(&priv_class, t->key, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&ret, t->key, t->value, 0);
else if (t->key[0] == prefix &&
@ -2263,7 +2252,7 @@ double get_rotation(AVStream *st)
}
#if CONFIG_AVDEVICE
static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
static int print_device_sources(const AVInputFormat *fmt, AVDictionary *opts)
{
int ret, i;
AVDeviceInfoList *device_list = NULL;
@ -2293,7 +2282,7 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
return ret;
}
static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
static int print_device_sinks(const AVOutputFormat *fmt, AVDictionary *opts)
{
int ret, i;
AVDeviceInfoList *device_list = NULL;
@ -2347,7 +2336,7 @@ static int show_sinks_sources_parse_arg(const char *arg, char **dev, AVDictionar
int show_sources(void *optctx, const char *opt, const char *arg)
{
AVInputFormat *fmt = NULL;
const AVInputFormat *fmt = NULL;
char *dev = NULL;
AVDictionary *opts = NULL;
int ret = 0;
@ -2385,7 +2374,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)
int show_sinks(void *optctx, const char *opt, const char *arg)
{
AVOutputFormat *fmt = NULL;
const AVOutputFormat *fmt = NULL;
char *dev = NULL;
AVDictionary *opts = NULL;
int ret = 0;

View File

@ -235,7 +235,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
void show_help_children(const AVClass *class, int flags);
/**
* Per-fftool specific help handlers. Implemented in each
* Per-fftool specific help handler. Implemented in each
* fftool, called by show_help().
*/
void show_help_default_ffmpeg(const char *opt, const char *arg);
@ -402,7 +402,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
* @return a pointer to the created dictionary
*/
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec);
AVFormatContext *s, AVStream *st, const AVCodec *codec);
/**
* Setup AVCodecContext options for avformat_find_stream_info().

View File

@ -257,9 +257,9 @@ extern volatile int handleSIGTERM;
extern volatile int handleSIGXCPU;
extern volatile int handleSIGPIPE;
extern __thread volatile long _sessionId;
extern void cancelSession(long sessionId);
extern int cancelRequested(long sessionId);
extern __thread volatile long sessionId;
extern void cancelSession(long id);
extern int cancelRequested(long id);
/* sub2video hack:
Convert subtitles to video with alpha to insert them in filter graphs.
@ -697,6 +697,7 @@ static void ffmpeg_cleanup(int ret)
av_frame_free(&ost->filtered_frame);
av_frame_free(&ost->last_frame);
av_packet_free(&ost->pkt);
av_dict_free(&ost->encoder_opts);
av_freep(&ost->forced_keyframes);
@ -715,9 +716,9 @@ static void ffmpeg_cleanup(int ret)
if (ost->muxing_queue) {
while (av_fifo_size(ost->muxing_queue)) {
AVPacket pkt;
AVPacket *pkt;
av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
av_packet_unref(&pkt);
av_packet_free(&pkt);
}
av_fifo_freep(&ost->muxing_queue);
}
@ -729,6 +730,7 @@ static void ffmpeg_cleanup(int ret)
#endif
for (i = 0; i < nb_input_files; i++) {
avformat_close_input(&input_files[i]->ctx);
av_packet_free(&input_files[i]->pkt);
av_freep(&input_files[i]);
}
for (i = 0; i < nb_input_streams; i++) {
@ -736,6 +738,7 @@ static void ffmpeg_cleanup(int ret)
av_frame_free(&ist->decoded_frame);
av_frame_free(&ist->filter_frame);
av_packet_free(&ist->pkt);
av_dict_free(&ist->decoder_opts);
avsubtitle_free(&ist->prev_sub.subtitle);
av_frame_free(&ist->sub2video.frame);
@ -768,7 +771,7 @@ static void ffmpeg_cleanup(int ret)
if (received_sigterm) {
av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
(int) received_sigterm);
} else if (cancelRequested(_sessionId)) {
} else if (cancelRequested(sessionId)) {
av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
} else if (ret && atomic_load(&transcode_init_done)) {
av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
@ -795,7 +798,7 @@ void assert_avoptions(AVDictionary *m)
}
}
static void abort_codec_experimental(AVCodec *c, int encoder)
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
exit_program(1);
}
@ -853,7 +856,7 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
}
if (!of->header_written) {
AVPacket tmp_pkt = {0};
AVPacket *tmp_pkt;
/* the muxer is not initialized yet, buffer the packet */
if (!av_fifo_space(ost->muxing_queue)) {
unsigned int are_we_over_size =
@ -876,8 +879,11 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
ret = av_packet_make_refcounted(pkt);
if (ret < 0)
exit_program(1);
av_packet_move_ref(&tmp_pkt, pkt);
ost->muxing_queue_data_size += tmp_pkt.size;
tmp_pkt = av_packet_alloc();
if (!tmp_pkt)
exit_program(1);
av_packet_move_ref(tmp_pkt, pkt);
ost->muxing_queue_data_size += tmp_pkt->size;
av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
return;
}
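After the change above, the muxing queue holds AVPacket pointers instead of packet structs copied by value, so each queued element owns its packet until the drain side frees it. A minimal sketch of that write/drain pair (helper names hypothetical, sizing kept to the bare minimum):

#include <libavcodec/avcodec.h>
#include <libavutil/fifo.h>

static int queue_packet(AVFifoBuffer *q, AVPacket *src)
{
    AVPacket *tmp = av_packet_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);
    av_packet_move_ref(tmp, src);                 /* src stays valid, but blank */

    if (av_fifo_space(q) < (int)sizeof(tmp)) {    /* grow by one element */
        int ret = av_fifo_realloc2(q, av_fifo_size(q) + sizeof(tmp));
        if (ret < 0) {
            av_packet_free(&tmp);
            return ret;
        }
    }
    av_fifo_generic_write(q, &tmp, sizeof(tmp), NULL);
    return 0;
}

static void drain_queue(AVFifoBuffer *q)
{
    while (av_fifo_size(q) >= (int)sizeof(AVPacket *)) {
        AVPacket *tmp;
        av_fifo_generic_read(q, &tmp, sizeof(tmp), NULL);
        /* a real muxer would hand tmp to write_packet() before freeing it */
        av_packet_free(&tmp);
    }
}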
@ -1103,13 +1109,9 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
AVFrame *frame)
{
AVCodecContext *enc = ost->enc_ctx;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
int ret;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
adjust_frame_pts_to_encoder_tb(of, ost, frame);
if (!check_recording_time(ost))
@ -1121,7 +1123,6 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
ost->samples_encoded += frame->nb_samples;
ost->frames_encoded++;
av_assert0(pkt.size || !pkt.data);
update_benchmark(NULL);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
@ -1135,7 +1136,8 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
goto error;
while (1) {
ret = avcodec_receive_packet(enc, &pkt);
av_packet_unref(pkt);
ret = avcodec_receive_packet(enc, pkt);
if (ret == AVERROR(EAGAIN))
break;
if (ret < 0)
@ -1143,16 +1145,16 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
}
output_packet(of, &pkt, ost, 0);
output_packet(of, pkt, ost, 0);
}
return;
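do_audio_out() above, and the video and flush paths further down, now share the same encode shape: reuse the per-stream packet, unref it before each avcodec_receive_packet() call, rescale, and pass it on. A condensed sketch of that loop; the sink callback stands in for output_packet() and is not a real API:

#include <libavcodec/avcodec.h>

static int encode_and_drain(AVCodecContext *enc, const AVFrame *frame, AVPacket *pkt,
                            int (*sink)(AVPacket *pkt, void *opaque), void *opaque)
{
    int ret = avcodec_send_frame(enc, frame);     /* frame == NULL flushes */
    if (ret < 0)
        return ret;

    while (1) {
        av_packet_unref(pkt);                     /* reuse the same packet */
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                             /* need more input / fully drained */
        if (ret < 0)
            return ret;
        ret = sink(pkt, opaque);                  /* e.g. rescale + mux */
        if (ret < 0)
            return ret;
    }
}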
@ -1168,7 +1170,7 @@ static void do_subtitle_out(OutputFile *of,
int subtitle_out_max_size = 1024 * 1024;
int subtitle_out_size, nb, i;
AVCodecContext *enc;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
int64_t pts;
if (sub->pts == AV_NOPTS_VALUE) {
@ -1226,21 +1228,21 @@ static void do_subtitle_out(OutputFile *of,
exit_program(1);
}
av_init_packet(&pkt);
pkt.data = subtitle_out;
pkt.size = subtitle_out_size;
pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
av_packet_unref(pkt);
pkt->data = subtitle_out;
pkt->size = subtitle_out_size;
pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
/* XXX: the pts correction is handled here. Maybe handling
it in the codec would be better */
if (i == 0)
pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
else
pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
}
pkt.dts = pkt.pts;
output_packet(of, &pkt, ost, 0);
pkt->dts = pkt->pts;
output_packet(of, pkt, ost, 0);
}
}
@ -1249,7 +1251,7 @@ static void do_video_out(OutputFile *of,
AVFrame *next_picture)
{
int ret, format_video_sync;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
AVCodecContext *enc = ost->enc_ctx;
AVRational frame_rate;
int nb_frames, nb0_frames, i;
@ -1395,9 +1397,6 @@ static void do_video_out(OutputFile *of,
AVFrame *in_picture;
int forced_keyframe = 0;
double pts_time;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (i < nb0_frames && ost->last_frame) {
in_picture = ost->last_frame;
@ -1476,7 +1475,8 @@ static void do_video_out(OutputFile *of,
av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
while (1) {
ret = avcodec_receive_packet(enc, &pkt);
av_packet_unref(pkt);
ret = avcodec_receive_packet(enc, pkt);
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret == AVERROR(EAGAIN))
break;
@ -1486,24 +1486,24 @@ static void do_video_out(OutputFile *of,
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
}
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
pkt->pts = ost->sync_opts;
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
}
frame_size = pkt.size;
output_packet(of, &pkt, ost, 0);
frame_size = pkt->size;
output_packet(of, pkt, ost, 0);
/* if two pass, output log */
if (ost->logfile && enc->stats_out) {
@ -1632,6 +1632,9 @@ static int reap_filters(int flush)
if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
init_output_stream_wrapper(ost, NULL, 1);
if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
return AVERROR(ENOMEM);
}
if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
return AVERROR(ENOMEM);
}
@ -2140,7 +2143,7 @@ static void flush_encoders(void)
for (;;) {
const char *desc = NULL;
AVPacket pkt;
AVPacket *pkt = ost->pkt;
int pkt_size;
switch (enc->codec_type) {
@ -2154,13 +2157,10 @@ static void flush_encoders(void)
av_assert0(0);
}
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
update_benchmark(NULL);
while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
av_packet_unref(pkt);
while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
ret = avcodec_send_frame(enc, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
@ -2181,16 +2181,16 @@ static void flush_encoders(void)
fprintf(ost->logfile, "%s", enc->stats_out);
}
if (ret == AVERROR_EOF) {
output_packet(of, &pkt, ost, 1);
output_packet(of, pkt, ost, 1);
break;
}
if (ost->finished & MUXER_FINISHED) {
av_packet_unref(&pkt);
av_packet_unref(pkt);
continue;
}
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
pkt_size = pkt.size;
output_packet(of, &pkt, ost, 0);
av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
pkt_size = pkt->size;
output_packet(of, pkt, ost, 0);
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
do_video_stats(ost, pkt_size);
}
@ -2224,14 +2224,12 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
InputFile *f = input_files [ist->file_index];
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
AVPacket opkt;
AVPacket *opkt = ost->pkt;
av_packet_unref(opkt);
// EOF: flush output bitstream filters.
if (!pkt) {
av_init_packet(&opkt);
opkt.data = NULL;
opkt.size = 0;
output_packet(of, &opkt, ost, 1);
output_packet(of, opkt, ost, 1);
return;
}
@ -2269,30 +2267,30 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
ost->sync_opts++;
if (av_packet_ref(&opkt, pkt) < 0)
if (av_packet_ref(opkt, pkt) < 0)
exit_program(1);
if (pkt->pts != AV_NOPTS_VALUE)
opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
if (pkt->dts == AV_NOPTS_VALUE) {
opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
} else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
if(!duration)
duration = ist->dec_ctx->frame_size;
opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
(AVRational){1, ist->dec_ctx->sample_rate}, duration,
&ist->filter_in_rescale_delta_last, ost->mux_timebase);
/* dts will be set immediately afterwards to what pts is now */
opkt.pts = opkt.dts - ost_tb_start_time;
opkt->pts = opkt->dts - ost_tb_start_time;
} else
opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
opkt.dts -= ost_tb_start_time;
opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
opkt->dts -= ost_tb_start_time;
opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
output_packet(of, &opkt, ost, 0);
output_packet(of, opkt, ost, 0);
}
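The streamcopy path above re-references the input packet into the per-stream ost->pkt and converts its timestamps from the input stream time base to the muxer time base. A simplified equivalent using the packet-level helper; the real code above rescales pts and dts individually so it can apply start-time offsets and audio-duration fixups:

#include <libavcodec/avcodec.h>

static int copy_packet(const AVPacket *in, AVPacket *out,
                       AVRational in_tb, AVRational out_tb)
{
    int ret = av_packet_ref(out, in);          /* shares the payload buffer */
    if (ret < 0)
        return ret;
    av_packet_rescale_ts(out, in_tb, out_tb);  /* pts, dts and duration */
    return 0;
}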
int guess_input_channel_layout(InputStream *ist)
@ -2431,7 +2429,7 @@ static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
if (ifilter->filter) {
/* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
if (!received_sigterm && !cancelRequested(_sessionId)) {
if (!received_sigterm && !cancelRequested(sessionId)) {
ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
}
if (ret < 0)
@ -2571,7 +2569,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
int i, ret = 0, err = 0;
int64_t best_effort_timestamp;
int64_t dts = AV_NOPTS_VALUE;
AVPacket avpkt;
// With fate-indeo3-2, we're getting 0-sized packets before EOF for some
// reason. This seems like a semi-critical bug. Don't trigger EOF, and
@ -2587,8 +2584,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
if (ist->dts != AV_NOPTS_VALUE)
dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt) {
avpkt = *pkt;
avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
pkt->dts = dts; // ffmpeg.c probably shouldn't do this
}
// The old code used to set dts on the drain packet, which does not work
@ -2602,7 +2598,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
}
update_benchmark(NULL);
ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
if (ret < 0)
*decode_failed = 1;
@ -2761,6 +2757,8 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
exit_program(1);
if (!check_output_constraints(ist, ost) || !ost->encoding_needed
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
continue;
@ -2796,7 +2794,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
int repeating = 0;
int eof_reached = 0;
AVPacket avpkt;
AVPacket *avpkt;
if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
return AVERROR(ENOMEM);
avpkt = ist->pkt;
if (!ist->saw_first_ts) {
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
ist->pts = 0;
@ -2812,13 +2815,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
if (ist->next_pts == AV_NOPTS_VALUE)
ist->next_pts = ist->pts;
if (!pkt) {
/* EOF handling */
av_init_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
} else {
avpkt = *pkt;
if (pkt) {
av_packet_unref(avpkt);
ret = av_packet_ref(avpkt, pkt);
if (ret < 0)
return ret;
}
if (pkt && pkt->dts != AV_NOPTS_VALUE) {
@ -2839,11 +2840,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
switch (ist->dec_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
&decode_failed);
av_packet_unref(avpkt);
break;
case AVMEDIA_TYPE_VIDEO:
ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
&decode_failed);
if (!repeating || !pkt || got_output) {
if (pkt && pkt->duration) {
@ -2868,13 +2870,15 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
ist->next_pts += duration_dts;
}
}
av_packet_unref(avpkt);
break;
case AVMEDIA_TYPE_SUBTITLE:
if (repeating)
break;
ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
if (!pkt && ret >= 0)
ret = AVERROR_EOF;
av_packet_unref(avpkt);
break;
default:
return -1;
@ -2963,6 +2967,8 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
exit_program(1);
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
continue;
@ -3119,7 +3125,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
InputStream *ist = input_streams[ist_index];
if (ist->decoding_needed) {
AVCodec *codec = ist->dec;
const AVCodec *codec = ist->dec;
if (!codec) {
snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
@ -3133,7 +3139,6 @@ static int init_input_stream(int ist_index, char *error, int error_len)
ist->dec_ctx->thread_safe_callbacks = 1;
#endif
av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
(ist->decoding_needed & DECODING_FOR_OST)) {
av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
@ -3231,10 +3236,11 @@ static int check_init_output_file(OutputFile *of, int file_index)
ost->mux_timebase = ost->st->time_base;
while (av_fifo_size(ost->muxing_queue)) {
AVPacket pkt;
AVPacket *pkt;
av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
ost->muxing_queue_data_size -= pkt.size;
write_packet(of, &pkt, ost, 1);
ost->muxing_queue_data_size -= pkt->size;
write_packet(of, pkt, ost, 1);
av_packet_free(&pkt);
}
}
@ -3315,15 +3321,23 @@ static int init_output_stream_streamcopy(OutputStream *ost)
if (!ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if (ost->frame_rate.num)
ost->st->avg_frame_rate = ost->frame_rate;
else
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
if (ret < 0)
return ret;
// copy timebase while removing common factors
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
if (ost->frame_rate.num)
ost->st->time_base = av_inv_q(ost->frame_rate);
else
ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
}
// copy estimated duration as a hint to the muxer
if (ost->st->duration <= 0 && ist->st->duration > 0)
@ -3699,7 +3713,7 @@ static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, in
int ret = 0;
if (ost->encoding_needed) {
AVCodec *codec = ost->enc;
const AVCodec *codec = ost->enc;
AVCodecContext *dec = NULL;
InputStream *ist;
@ -4120,7 +4134,7 @@ static OutputStream *choose_output(void)
ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
if (!ost->initialized && !ost->inputs_done)
return ost;
return ost->unavailable ? NULL : ost;
if (!ost->finished && opts < opts_min) {
opts_min = opts;
@ -4258,12 +4272,12 @@ static int check_keyboard_interaction(int64_t cur_time)
static void *input_thread(void *arg)
{
InputFile *f = arg;
AVPacket *pkt = f->pkt, *queue_pkt;
unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
int ret = 0;
while (1) {
AVPacket pkt;
ret = av_read_frame(f->ctx, &pkt);
ret = av_read_frame(f->ctx, pkt);
if (ret == AVERROR(EAGAIN)) {
av_usleep(10000);
@ -4273,10 +4287,17 @@ static void *input_thread(void *arg)
av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
break;
}
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
queue_pkt = av_packet_alloc();
if (!queue_pkt) {
av_packet_unref(pkt);
av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
break;
}
av_packet_move_ref(queue_pkt, pkt);
ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
if (flags && ret == AVERROR(EAGAIN)) {
flags = 0;
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
av_log(f->ctx, AV_LOG_WARNING,
"Thread message queue blocking; consider raising the "
"thread_queue_size option (current value: %d)\n",
@ -4287,7 +4308,7 @@ static void *input_thread(void *arg)
av_log(f->ctx, AV_LOG_ERROR,
"Unable to send packet to main thread: %s\n",
av_err2str(ret));
av_packet_unref(&pkt);
av_packet_free(&queue_pkt);
av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
break;
}
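With the change above, the demuxer thread queue carries AVPacket* elements; ownership of each packet travels with the pointer, and the receiver is responsible for freeing it. A minimal sketch of that handoff (names hypothetical, EAGAIN and shutdown handling omitted; the queue must be allocated with an element size of sizeof(AVPacket *)):

#include <libavcodec/avcodec.h>
#include <libavutil/threadmessage.h>

static int send_pkt(AVThreadMessageQueue *q, AVPacket *src)
{
    int ret;
    AVPacket *msg = av_packet_alloc();

    if (!msg)
        return AVERROR(ENOMEM);
    av_packet_move_ref(msg, src);
    ret = av_thread_message_queue_send(q, &msg, 0);
    if (ret < 0)
        av_packet_free(&msg);   /* never reached the reader */
    return ret;
}

static int recv_pkt(AVThreadMessageQueue *q, AVPacket **msg)
{
    /* on success the caller owns *msg and must av_packet_free() it */
    return av_thread_message_queue_recv(q, msg, 0);
}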
@ -4299,13 +4320,13 @@ static void *input_thread(void *arg)
static void free_input_thread(int i)
{
InputFile *f = input_files[i];
AVPacket pkt;
AVPacket *pkt;
if (!f || !f->in_thread_queue)
return;
av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
av_packet_unref(&pkt);
av_packet_free(&pkt);
pthread_join(f->thread, NULL);
f->joined = 1;
@ -4334,7 +4355,7 @@ static int init_input_thread(int i)
strcmp(f->ctx->iformat->name, "lavfi"))
f->non_blocking = 1;
ret = av_thread_message_queue_alloc(&f->in_thread_queue,
f->thread_queue_size, sizeof(AVPacket));
f->thread_queue_size, sizeof(f->pkt));
if (ret < 0)
return ret;
@ -4359,7 +4380,7 @@ static int init_input_threads(void)
return 0;
}
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
{
return av_thread_message_queue_recv(f->in_thread_queue, pkt,
f->non_blocking ?
@ -4367,7 +4388,7 @@ static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
}
#endif
static int get_input_packet(InputFile *f, AVPacket *pkt)
static int get_input_packet(InputFile *f, AVPacket **pkt)
{
if (f->rate_emu) {
int i;
@ -4384,7 +4405,8 @@ static int get_input_packet(InputFile *f, AVPacket *pkt)
if (f->thread_queue_size)
return get_input_packet_mt(f, pkt);
#endif
return av_read_frame(f->ctx, pkt);
*pkt = f->pkt;
return av_read_frame(f->ctx, *pkt);
}
static int got_eagain(void)
@ -4496,7 +4518,7 @@ static int process_input(int file_index)
InputFile *ifile = input_files[file_index];
AVFormatContext *is;
InputStream *ist;
AVPacket pkt;
AVPacket *pkt;
int ret, thread_ret, i, j;
int64_t duration;
int64_t pkt_dts;
@ -4571,27 +4593,27 @@ static int process_input(int file_index)
reset_eagain();
if (do_pkt_dump) {
av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
is->streams[pkt.stream_index]);
av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
is->streams[pkt->stream_index]);
}
/* the following test is needed in case new streams appear
dynamically in stream : we ignore them */
if (pkt.stream_index >= ifile->nb_streams) {
report_new_stream(file_index, &pkt);
if (pkt->stream_index >= ifile->nb_streams) {
report_new_stream(file_index, pkt);
goto discard_packet;
}
ist = input_streams[ifile->ist_index + pkt.stream_index];
ist = input_streams[ifile->ist_index + pkt->stream_index];
ist->data_size += pkt.size;
ist->data_size += pkt->size;
ist->nb_packets++;
if (ist->discard)
goto discard_packet;
if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
"%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
"%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
if (exit_on_error)
exit_program(1);
}
@ -4599,11 +4621,11 @@ static int process_input(int file_index)
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
"next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
av_ts2str(input_files[ist->file_index]->ts_offset),
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
}
@ -4633,12 +4655,12 @@ static int process_input(int file_index)
stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
ist->wrap_correction_done = 1;
if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
ist->wrap_correction_done = 0;
}
if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
ist->wrap_correction_done = 0;
}
}
@ -4652,10 +4674,10 @@ static int process_input(int file_index)
if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
continue;
if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
if (av_packet_get_side_data(pkt, src_sd->type, NULL))
continue;
dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
if (!dst_data)
exit_program(1);
@ -4663,17 +4685,17 @@ static int process_input(int file_index)
}
}
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts *= ist->ts_scale;
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts *= ist->ts_scale;
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts *= ist->ts_scale;
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts *= ist->ts_scale;
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
@ -4685,27 +4707,27 @@ static int process_input(int file_index)
av_log(NULL, AV_LOG_DEBUG,
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, ifile->ts_offset);
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
}
duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE) {
pkt.pts += duration;
ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
if (pkt->pts != AV_NOPTS_VALUE) {
pkt->pts += duration;
ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
}
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts += duration;
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts += duration;
pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
(is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
ist->st->time_base, AV_TIME_BASE_Q,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
@ -4728,46 +4750,51 @@ static int process_input(int file_index)
ist->file_index, ist->st->index, ist->st->id,
av_get_media_type_string(ist->dec_ctx->codec_type),
delta, ifile->ts_offset);
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
} else {
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
pkt.dts = AV_NOPTS_VALUE;
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
pkt->dts = AV_NOPTS_VALUE;
}
if (pkt.pts != AV_NOPTS_VALUE){
int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
if (pkt->pts != AV_NOPTS_VALUE){
int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
delta = pkt_pts - ist->next_dts;
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
pkt.pts = AV_NOPTS_VALUE;
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
pkt->pts = AV_NOPTS_VALUE;
}
}
}
}
if (pkt.dts != AV_NOPTS_VALUE)
ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
if (pkt->dts != AV_NOPTS_VALUE)
ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
av_ts2str(input_files[ist->file_index]->ts_offset),
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
}
sub2video_heartbeat(ist, pkt.pts);
sub2video_heartbeat(ist, pkt->pts);
process_input_packet(ist, &pkt, 0);
process_input_packet(ist, pkt, 0);
discard_packet:
av_packet_unref(&pkt);
#if HAVE_THREADS
if (ifile->thread_queue_size)
av_packet_free(&pkt);
else
#endif
av_packet_unref(pkt);
return 0;
}
@ -4939,7 +4966,7 @@ static int transcode(void)
goto fail;
#endif
while (!received_sigterm && !cancelRequested(_sessionId)) {
while (!received_sigterm && !cancelRequested(sessionId)) {
int64_t cur_time= av_gettime_relative();
/* if 'q' pressed, exits */
@ -5677,10 +5704,10 @@ int ffmpeg_execute(int argc, char **argv)
if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
exit_program(69);
exit_program((received_nb_signals || cancelRequested(_sessionId))? 255 : main_ffmpeg_return_code);
exit_program((received_nb_signals || cancelRequested(sessionId))? 255 : main_ffmpeg_return_code);
} else {
main_ffmpeg_return_code = (received_nb_signals || cancelRequested(_sessionId)) ? 255 : longjmp_value;
main_ffmpeg_return_code = (received_nb_signals || cancelRequested(sessionId)) ? 255 : longjmp_value;
}
return main_ffmpeg_return_code;

View File

@ -330,9 +330,10 @@ typedef struct InputStream {
#define DECODING_FOR_FILTER 2
AVCodecContext *dec_ctx;
AVCodec *dec;
const AVCodec *dec;
AVFrame *decoded_frame;
AVFrame *filter_frame; /* a ref of decoded_frame, to be sent to filters */
AVPacket *pkt;
int64_t start; /* time when read started */
/* predicted dts of the next packet read for this stream or (when there are
@ -441,6 +442,8 @@ typedef struct InputFile {
int rate_emu;
int accurate_seek;
AVPacket *pkt;
#if HAVE_THREADS
AVThreadMessageQueue *in_thread_queue;
pthread_t thread; /* thread reading from this file */
@ -493,10 +496,11 @@ typedef struct OutputStream {
AVCodecContext *enc_ctx;
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
AVCodec *enc;
const AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
AVFrame *last_frame;
AVPacket *pkt;
int last_dropped;
int last_nb0_frames[3];
@ -757,8 +761,8 @@ void init_options(OptionsContext *o);
AVDictionary *strip_specifiers(AVDictionary *dict);
void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec);
int fftools_copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o);
AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder);
AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st);
const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder);
const AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st);
int open_input_file(OptionsContext *o, const char *filename);
int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s);
int choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost);

View File

@ -47,22 +47,16 @@
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
// FIXME: YUV420P etc. are actually supported with full color range,
// yet the latter information isn't available here.
static const enum AVPixelFormat *get_compliance_normal_pix_fmts(const AVCodec *codec, const enum AVPixelFormat default_formats[])
{
static const enum AVPixelFormat mjpeg_formats[] =
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE };
static const enum AVPixelFormat ljpeg_formats[] =
{ AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
AV_PIX_FMT_NONE};
if (codec_id == AV_CODEC_ID_MJPEG) {
if (!strcmp(codec->name, "mjpeg")) {
return mjpeg_formats;
} else if (codec_id == AV_CODEC_ID_LJPEG) {
return ljpeg_formats;
} else {
return default_formats;
}
@ -77,11 +71,11 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
if (enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_normal_pix_fmts(codec, p);
}
for (; *p != AV_PIX_FMT_NONE; p++) {
best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
best = av_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
if (*p == target)
break;
}
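The loop above now calls the libavutil helper av_find_best_pix_fmt_of_2() instead of the deprecated avcodec wrapper. A sketch of the call, assuming the declaration lives in libavutil/pixdesc.h:

#include <libavutil/pixdesc.h>

/* Picks whichever candidate loses less information when converting
 * from src; loss bits (chroma, depth, alpha, ...) land in `loss`. */
static enum AVPixelFormat pick_target(enum AVPixelFormat src)
{
    int loss = 0;
    return av_find_best_pix_fmt_of_2(AV_PIX_FMT_YUV420P, AV_PIX_FMT_RGB24,
                                     src, 0 /* has_alpha */, &loss);
}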
@ -98,29 +92,6 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, const
return target;
}
void choose_sample_fmt(AVStream *st, const AVCodec *codec)
{
if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
if (*p == st->codecpar->format)
break;
}
if (*p == -1) {
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec->id);
if(desc && (desc->props & AV_CODEC_PROP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
if(av_get_sample_fmt_name(st->codecpar->format))
av_log(NULL, AV_LOG_WARNING,
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_sample_fmt_name(st->codecpar->format),
codec->name,
av_get_sample_fmt_name(codec->sample_fmts[0]));
st->codecpar->format = codec->sample_fmts[0];
}
}
}
static char *choose_pix_fmts(OutputFilter *ofilter)
{
OutputStream *ost = ofilter->ost;
@ -148,8 +119,8 @@ static char *choose_pix_fmts(OutputFilter *ofilter)
exit_program(1);
p = ost->enc->pix_fmts;
if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
if (ost->enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_normal_pix_fmts(ost->enc, p);
}
for (; *p != AV_PIX_FMT_NONE; p++) {
@ -163,45 +134,39 @@ static char *choose_pix_fmts(OutputFilter *ofilter)
return NULL;
}
/* Define a function for building a string containing a list of
* allowed formats. */
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
static char *choose_ ## suffix (OutputFilter *ofilter) \
/* Define a function for appending a list of allowed formats
* to an AVBPrint. If nonempty, the list will have a header. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilter *ofilter, AVBPrint *bprint) \
{ \
if (ofilter->var == none && !ofilter->supported_list) \
return; \
av_bprintf(bprint, #name "="); \
if (ofilter->var != none) { \
get_name(ofilter->var); \
return av_strdup(name); \
} else if (ofilter->supported_list) { \
av_bprintf(bprint, printf_format, get_name(ofilter->var)); \
} else { \
const type *p; \
AVIOContext *s = NULL; \
uint8_t *ret; \
int len; \
\
if (avio_open_dyn_buf(&s) < 0) \
exit_program(1); \
\
for (p = ofilter->supported_list; *p != none; p++) { \
get_name(*p); \
avio_printf(s, "%s|", name); \
av_bprintf(bprint, printf_format "|", get_name(*p)); \
} \
len = avio_close_dyn_buf(s, &ret); \
ret[len - 1] = 0; \
return ret; \
} else \
return NULL; \
if (bprint->len > 0) \
bprint->str[--bprint->len] = '\0'; \
} \
av_bprint_chars(bprint, ':', 1); \
}
//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
// GET_PIX_FMT_NAME)
DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)
DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
GET_SAMPLE_RATE_NAME)
"%d", )
DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
GET_CH_LAYOUT_NAME)
"0x%"PRIx64, )
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
@ -505,8 +470,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
if ((pix_fmts = choose_pix_fmts(ofilter))) {
AVFilterContext *filter;
snprintf(name, sizeof(name), "format_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&filter,
avfilter_get_by_name("format"),
"format", pix_fmts, NULL, fg->graph);
@ -561,7 +525,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
AVCodecContext *codec = ost->enc_ctx;
AVFilterContext *last_filter = out->filter_ctx;
int pad_idx = out->pad_idx;
char *sample_fmts, *sample_rates, *channel_layouts;
AVBPrint args;
char name[255];
int ret;
@ -584,72 +548,58 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
avfilter_get_by_name(filter_name), \
filter_name, arg, NULL, fg->graph); \
if (ret < 0) \
return ret; \
goto fail; \
\
ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
if (ret < 0) \
return ret; \
goto fail; \
\
last_filter = filt_ctx; \
pad_idx = 0; \
} while (0)
av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
if (ost->audio_channels_mapped) {
int i;
AVBPrint pan_buf;
av_bprint_init(&pan_buf, 256, 8192);
av_bprintf(&pan_buf, "0x%"PRIx64,
av_bprintf(&args, "0x%"PRIx64,
av_get_default_channel_layout(ost->audio_channels_mapped));
for (i = 0; i < ost->audio_channels_mapped; i++)
if (ost->audio_channels_map[i] != -1)
av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);
AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
av_bprint_finalize(&pan_buf, NULL);
AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
av_bprint_clear(&args);
}
if (codec->channels && !codec->channel_layout)
codec->channel_layout = av_get_default_channel_layout(codec->channels);
sample_fmts = choose_sample_fmts(ofilter);
sample_rates = choose_sample_rates(ofilter);
channel_layouts = choose_channel_layouts(ofilter);
if (sample_fmts || sample_rates || channel_layouts) {
choose_sample_fmts(ofilter, &args);
choose_sample_rates(ofilter, &args);
choose_channel_layouts(ofilter, &args);
if (!av_bprint_is_complete(&args)) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (args.len) {
AVFilterContext *format;
char args[256];
args[0] = 0;
if (sample_fmts)
av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
sample_fmts);
if (sample_rates)
av_strlcatf(args, sizeof(args), "sample_rates=%s:",
sample_rates);
if (channel_layouts)
av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
channel_layouts);
av_freep(&sample_fmts);
av_freep(&sample_rates);
av_freep(&channel_layouts);
snprintf(name, sizeof(name), "format_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&format,
avfilter_get_by_name("aformat"),
name, args, NULL, fg->graph);
name, args.str, NULL, fg->graph);
if (ret < 0)
return ret;
goto fail;
ret = avfilter_link(last_filter, pad_idx, format, 0);
if (ret < 0)
return ret;
goto fail;
last_filter = format;
pad_idx = 0;
}
if (ost->apad && of->shortest) {
char args[256];
int i;
for (i=0; i<of->ctx->nb_streams; i++)
@ -657,8 +607,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
break;
if (i<of->ctx->nb_streams) {
snprintf(args, sizeof(args), "%s", ost->apad);
AUTO_INSERT_FILTER("-apad", "apad", args);
AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
}
}
@ -667,12 +616,14 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
ret = insert_trim(of->start_time, of->recording_time,
&last_filter, &pad_idx, name);
if (ret < 0)
return ret;
goto fail;
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
return ret;
goto fail;
fail:
av_bprint_finalize(&args, NULL);
return 0;
return ret;
}
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)

View File

@ -754,11 +754,11 @@ int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
return 0;
}
AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
{
const AVCodecDescriptor *desc;
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
const AVCodec *codec;
codec = encoder ?
avcodec_find_encoder_by_name(name) :
@ -783,13 +783,13 @@ AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
return codec;
}
AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
const AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
{
char *codec_name = NULL;
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
if (codec_name) {
AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
const AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
st->codecpar->codec_id = codec->id;
return codec;
} else
@ -1085,7 +1085,7 @@ int open_input_file(OptionsContext *o, const char *filename)
{
InputFile *f;
AVFormatContext *ic;
AVInputFormat *file_iformat = NULL;
const AVInputFormat *file_iformat = NULL;
int err, i, ret;
int64_t timestamp;
AVDictionary *unused_opts = NULL;
@ -1134,20 +1134,22 @@ int open_input_file(OptionsContext *o, const char *filename)
av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
}
if (o->nb_audio_channels) {
const AVClass *priv_class;
/* because we set audio_channels based on both the "ac" and
* "channel_layout" options, we need to check that the specified
* demuxer actually has the "channels" option before setting it */
if (file_iformat && file_iformat->priv_class &&
av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
if (file_iformat && (priv_class = file_iformat->priv_class) &&
av_opt_find(&priv_class, "channels", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ)) {
av_dict_set_int(&o->g->format_opts, "channels", o->audio_channels[o->nb_audio_channels - 1].u.i, 0);
}
}
if (o->nb_frame_rates) {
const AVClass *priv_class;
/* set the format-level framerate option;
* this is important for video grabbers, e.g. x11 */
if (file_iformat && file_iformat->priv_class &&
av_opt_find(&file_iformat->priv_class, "framerate", NULL, 0,
if (file_iformat && (priv_class = file_iformat->priv_class) &&
av_opt_find(&priv_class, "framerate", NULL, 0,
AV_OPT_SEARCH_FAKE_OBJ)) {
av_dict_set(&o->g->format_opts, "framerate",
o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
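
Both blocks above share one pattern: look up the option on the demuxer's private class with AV_OPT_SEARCH_FAKE_OBJ before setting it, so "channels" or "framerate" is never handed to a demuxer that does not declare it. A hedged sketch of that pattern (the helper name is hypothetical):

    #include <libavformat/avformat.h>
    #include <libavutil/opt.h>

    /* Hypothetical helper: forward a format-level integer option to the demuxer
     * only when its private class actually declares an option with that name. */
    static void set_demuxer_int_opt(AVDictionary **format_opts,
                                    const AVInputFormat *fmt,
                                    const char *name, int64_t value)
    {
        const AVClass *priv_class;

        if (fmt && (priv_class = fmt->priv_class) &&
            av_opt_find(&priv_class, name, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))
            av_dict_set_int(format_opts, name, value, 0);
    }
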
@ -1297,6 +1299,9 @@ int open_input_file(OptionsContext *o, const char *filename)
f->loop = o->loop;
f->duration = 0;
f->time_base = (AVRational){ 1, 1 };
f->pkt = av_packet_alloc();
if (!f->pkt)
exit_program(1);
#if HAVE_THREADS
f->thread_queue_size = o->thread_queue_size;
#endif
@ -1588,7 +1593,7 @@ OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVM
ost->max_muxing_queue_size = 128;
MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st);
ost->max_muxing_queue_size *= sizeof(AVPacket);
ost->max_muxing_queue_size *= sizeof(ost->pkt);
ost->muxing_queue_data_size = 0;
@ -2284,7 +2289,8 @@ int open_output_file(OptionsContext *o, const char *filename)
for (i = 0; i < nb_input_streams; i++) {
int score;
ist = input_streams[i];
score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames
score = ist->st->codecpar->channels
+ 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
+ 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
if (ist->user_set_discard == AVDISCARD_ALL)
continue;
@ -2454,19 +2460,6 @@ loop_end:
avio_closep(&pb);
}
#if FF_API_LAVF_AVCTX
for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
AVDictionaryEntry *e;
ost = output_streams[i];
if ((ost->stream_copy || ost->attachment_filename)
&& (e = av_dict_get(o->g->codec_opts, "flags", NULL, AV_DICT_IGNORE_SUFFIX))
&& (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6)))
if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
exit_program(1);
}
#endif
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, nb_output_files - 1, oc->url, 1);
av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", nb_output_files - 1);

View File

@ -67,7 +67,7 @@ static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
vt->tmp_frame->width = frame->width;
vt->tmp_frame->height = frame->height;
ret = av_frame_get_buffer(vt->tmp_frame, 32);
ret = av_frame_get_buffer(vt->tmp_frame, 0);
if (ret < 0)
return ret;

View File

@ -122,6 +122,11 @@ __thread int use_byte_value_binary_prefix = 0;
__thread int use_value_sexagesimal_format = 0;
__thread int show_private_data = 1;
#define SHOW_OPTIONAL_FIELDS_AUTO -1
#define SHOW_OPTIONAL_FIELDS_NEVER 0
#define SHOW_OPTIONAL_FIELDS_ALWAYS 1
__thread int show_optional_fields = SHOW_OPTIONAL_FIELDS_AUTO;
__thread char *print_format;
__thread char *stream_specifier;
__thread char *show_data_hash;
@ -260,7 +265,7 @@ __thread OptionDef *ffprobe_options = NULL;
/* FFprobe context */
__thread const char *input_filename;
__thread const char *print_input_filename;
__thread AVInputFormat *iformat = NULL;
__thread const AVInputFormat *iformat = NULL;
__thread struct AVHashContext *hash;
@ -751,8 +756,10 @@ static inline int writer_print_string(WriterContext *wctx,
const struct section *section = wctx->section[wctx->level];
int ret = 0;
if ((flags & PRINT_STRING_OPT)
&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
if (show_optional_fields == SHOW_OPTIONAL_FIELDS_NEVER ||
(show_optional_fields == SHOW_OPTIONAL_FIELDS_AUTO
&& (flags & PRINT_STRING_OPT)
&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS)))
return 0;
if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
@ -1666,36 +1673,11 @@ static av_cold int xml_init(WriterContext *wctx)
CHECK_COMPLIANCE(show_private_data, "private");
CHECK_COMPLIANCE(show_value_unit, "unit");
CHECK_COMPLIANCE(use_value_prefix, "prefix");
if (do_show_frames && do_show_packets) {
av_log(wctx, AV_LOG_ERROR,
"Interleaved frames and packets are not allowed in XSD. "
"Select only one between the -show_frames and the -show_packets options.\n");
return AVERROR(EINVAL);
}
}
return 0;
}
static const char *xml_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
{
const char *p;
for (p = src; *p; p++) {
switch (*p) {
case '&' : av_bprintf(dst, "%s", "&amp;"); break;
case '<' : av_bprintf(dst, "%s", "&lt;"); break;
case '>' : av_bprintf(dst, "%s", "&gt;"); break;
case '"' : av_bprintf(dst, "%s", "&quot;"); break;
case '\'': av_bprintf(dst, "%s", "&apos;"); break;
default: av_bprint_chars(dst, *p, 1);
}
}
return dst->str;
}
#define XML_INDENT() av_log(NULL, AV_LOG_STDERR, "%*c", xml->indent_level * 4, ' ')
static void xml_print_section_header(WriterContext *wctx)
@ -1767,14 +1749,22 @@ static void xml_print_str(WriterContext *wctx, const char *key, const char *valu
if (section->flags & SECTION_FLAG_HAS_VARIABLE_FIELDS) {
XML_INDENT();
av_bprint_escape(&buf, key, NULL,
AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
av_log(NULL, AV_LOG_STDERR, "<%s key=\"%s\"",
section->element_name, xml_escape_str(&buf, key, wctx));
section->element_name, buf.str);
av_bprint_clear(&buf);
av_log(NULL, AV_LOG_STDERR, " value=\"%s\"/>\n", xml_escape_str(&buf, value, wctx));
av_bprint_escape(&buf, value, NULL,
AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
av_log(NULL, AV_LOG_STDERR, " value=\"%s\"/>\n", buf.str);
} else {
if (wctx->nb_item[wctx->level])
av_log(NULL, AV_LOG_STDERR, " ");
av_log(NULL, AV_LOG_STDERR, "%s=\"%s\"", key, xml_escape_str(&buf, value, wctx));
av_bprint_escape(&buf, value, NULL,
AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
av_log(NULL, AV_LOG_STDERR, "%s=\"%s\"", key, buf.str);
}
av_bprint_finalize(&buf, NULL);
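
The hand-rolled xml_escape_str() is gone; escaping now goes through av_bprint_escape() with AV_ESCAPE_MODE_XML. A minimal sketch of that call, assuming a libavutil recent enough to provide the XML escape mode and quote flags (the function name is illustrative):

    #include <stdio.h>
    #include <libavutil/avstring.h>
    #include <libavutil/bprint.h>

    /* Illustrative: escape a value for a double-quoted XML attribute, which is
     * what the removed xml_escape_str() used to do character by character. */
    static void print_xml_attr(const char *key, const char *value)
    {
        AVBPrint buf;

        av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
        av_bprint_escape(&buf, value, NULL,
                         AV_ESCAPE_MODE_XML, AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
        printf(" %s=\"%s\"", key, buf.str);
        av_bprint_finalize(&buf, NULL);
    }
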
@ -2040,6 +2030,23 @@ static void print_pkt_side_data(WriterContext *w,
print_int("el_present_flag", dovi->el_present_flag);
print_int("bl_present_flag", dovi->bl_present_flag);
print_int("dv_bl_signal_compatibility_id", dovi->dv_bl_signal_compatibility_id);
} else if (sd->type == AV_PKT_DATA_AUDIO_SERVICE_TYPE) {
enum AVAudioServiceType *t = (enum AVAudioServiceType *)sd->data;
print_int("type", *t);
} else if (sd->type == AV_PKT_DATA_MPEGTS_STREAM_ID) {
print_int("id", *sd->data);
} else if (sd->type == AV_PKT_DATA_CPB_PROPERTIES) {
const AVCPBProperties *prop = (AVCPBProperties *)sd->data;
print_int("max_bitrate", prop->max_bitrate);
print_int("min_bitrate", prop->min_bitrate);
print_int("avg_bitrate", prop->avg_bitrate);
print_int("buffer_size", prop->buffer_size);
print_int("vbv_delay", prop->vbv_delay);
} else if (sd->type == AV_PKT_DATA_WEBVTT_IDENTIFIER ||
sd->type == AV_PKT_DATA_WEBVTT_SETTINGS) {
if (do_show_data)
writer_print_data(w, "data", sd->data, sd->size);
writer_print_data_hash(w, "data_hash", sd->data, sd->size);
}
writer_print_section_footer(w);
}
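
The new branches above read fixed-layout structures straight out of packet side data. A hedged sketch of the CPB case (the helper name is illustrative, and casts are added so the printf formats stay valid whether the AVCPBProperties bitrate fields are int or int64_t in the linked libavcodec):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    /* Illustrative: locate AV_PKT_DATA_CPB_PROPERTIES in a packet's side data
     * and print the same fields the new ffprobe branch exposes. */
    static void dump_cpb_props(const AVPacket *pkt)
    {
        int i;

        for (i = 0; i < pkt->side_data_elems; i++) {
            const AVPacketSideData *sd = &pkt->side_data[i];
            const AVCPBProperties *prop;

            if (sd->type != AV_PKT_DATA_CPB_PROPERTIES)
                continue;
            prop = (const AVCPBProperties *)sd->data;
            printf("max=%"PRId64" min=%"PRId64" avg=%"PRId64
                   " buffer=%"PRId64" vbv_delay=%"PRIu64"\n",
                   (int64_t)prop->max_bitrate, (int64_t)prop->min_bitrate,
                   (int64_t)prop->avg_bitrate, (int64_t)prop->buffer_size,
                   (uint64_t)prop->vbv_delay);
        }
    }
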
@ -2169,8 +2176,6 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
print_time("dts_time", pkt->dts, &st->time_base);
print_duration_ts("duration", pkt->duration);
print_duration_time("duration_time", pkt->duration, &st->time_base);
print_duration_ts("convergence_duration", pkt->convergence_duration);
print_duration_time("convergence_duration_time", pkt->convergence_duration, &st->time_base);
print_val("size", pkt->size, unit_byte_str);
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
else print_str_opt("pos", "N/A");
@ -2178,7 +2183,7 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');
if (pkt->side_data_elems) {
int size;
size_t size;
const uint8_t *side_metadata;
side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
@ -2469,14 +2474,12 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
const ReadInterval *interval, int64_t *cur_ts)
{
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
AVPacket pkt;
AVPacket *pkt = NULL;
AVFrame *frame = NULL;
int ret = 0, i = 0, frame_count = 0;
int64_t start = -INT64_MAX, end = interval->end;
int has_start = 0, has_end = interval->has_end && !interval->end_is_offset;
av_init_packet(&pkt);
av_log(NULL, AV_LOG_VERBOSE, "Processing read interval ");
log_read_interval(interval, NULL, AV_LOG_VERBOSE);
@ -2509,18 +2512,23 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
ret = AVERROR(ENOMEM);
goto end;
}
while (!av_read_frame(fmt_ctx, &pkt)) {
pkt = av_packet_alloc();
if (!pkt) {
ret = AVERROR(ENOMEM);
goto end;
}
while (!av_read_frame(fmt_ctx, pkt)) {
if (fmt_ctx->nb_streams > nb_streams) {
REALLOCZ_ARRAY_STREAM(nb_streams_frames, nb_streams, fmt_ctx->nb_streams);
REALLOCZ_ARRAY_STREAM(nb_streams_packets, nb_streams, fmt_ctx->nb_streams);
REALLOCZ_ARRAY_STREAM(selected_streams, nb_streams, fmt_ctx->nb_streams);
nb_streams = fmt_ctx->nb_streams;
}
if (selected_streams[pkt.stream_index]) {
AVRational tb = ifile->streams[pkt.stream_index].st->time_base;
if (selected_streams[pkt->stream_index]) {
AVRational tb = ifile->streams[pkt->stream_index].st->time_base;
if (pkt.pts != AV_NOPTS_VALUE)
*cur_ts = av_rescale_q(pkt.pts, tb, AV_TIME_BASE_Q);
if (pkt->pts != AV_NOPTS_VALUE)
*cur_ts = av_rescale_q(pkt->pts, tb, AV_TIME_BASE_Q);
if (!has_start && *cur_ts != AV_NOPTS_VALUE) {
start = *cur_ts;
@ -2542,26 +2550,27 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
frame_count++;
if (do_read_packets) {
if (do_show_packets)
show_packet(w, ifile, &pkt, i++);
nb_streams_packets[pkt.stream_index]++;
show_packet(w, ifile, pkt, i++);
nb_streams_packets[pkt->stream_index]++;
}
if (do_read_frames) {
int packet_new = 1;
while (process_frame(w, ifile, frame, &pkt, &packet_new) > 0);
while (process_frame(w, ifile, frame, pkt, &packet_new) > 0);
}
}
av_packet_unref(&pkt);
av_packet_unref(pkt);
}
av_packet_unref(&pkt);
av_packet_unref(pkt);
//Flush remaining frames that are cached in the decoder
for (i = 0; i < fmt_ctx->nb_streams; i++) {
pkt.stream_index = i;
pkt->stream_index = i;
if (do_read_frames)
while (process_frame(w, ifile, frame, &pkt, &(int){1}) > 0);
while (process_frame(w, ifile, frame, pkt, &(int){1}) > 0);
}
end:
av_frame_free(&frame);
av_packet_free(&pkt);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not read packets in interval ");
log_read_interval(interval, NULL, AV_LOG_ERROR);
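
read_interval_packets() now follows the heap-allocated packet pattern: av_packet_alloc() up front, av_packet_unref() after each iteration, av_packet_free() on the way out, and no av_init_packet() (deprecated upstream). A minimal, self-contained sketch of that life cycle (the helper name is hypothetical):

    #include <libavformat/avformat.h>

    /* Hypothetical helper: the same packet life cycle the loop above now uses,
     * with the packet on the heap instead of a stack AVPacket + av_init_packet(). */
    static int count_packets(AVFormatContext *fmt_ctx, int64_t *n)
    {
        AVPacket *pkt = av_packet_alloc();

        if (!pkt)
            return AVERROR(ENOMEM);

        *n = 0;
        while (av_read_frame(fmt_ctx, pkt) >= 0) {
            (*n)++;
            av_packet_unref(pkt);  /* drop this packet's payload, reuse the object */
        }

        av_packet_free(&pkt);      /* frees the object and sets pkt to NULL */
        return 0;
    }
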
@ -2637,10 +2646,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
s = av_get_media_type_string(par->codec_type);
if (s) print_str ("codec_type", s);
else print_str_opt("codec_type", "unknown");
#if FF_API_LAVF_AVCTX
if (dec_ctx)
print_q("codec_time_base", dec_ctx->time_base, '/');
#endif
/* print AVI/FourCC tag */
print_str("codec_tag_string", av_fourcc2str(par->codec_tag));
@ -2650,13 +2655,11 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
case AVMEDIA_TYPE_VIDEO:
print_int("width", par->width);
print_int("height", par->height);
#if FF_API_LAVF_AVCTX
if (dec_ctx) {
print_int("coded_width", dec_ctx->coded_width);
print_int("coded_height", dec_ctx->coded_height);
print_int("closed_captions", !!(dec_ctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS));
}
#endif
print_int("has_b_frames", par->video_delay);
sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL);
if (sar.num) {
@ -2694,15 +2697,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
else
print_str_opt("field_order", "unknown");
#if FF_API_PRIVATE_OPT
if (dec_ctx && dec_ctx->timecode_frame_start >= 0) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
print_str("timecode", tcbuf);
} else {
print_str_opt("timecode", "N/A");
}
#endif
if (dec_ctx)
print_int("refs", dec_ctx->refs);
break;
@ -2741,7 +2735,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
const AVOption *opt = NULL;
while ((opt = av_opt_next(dec_ctx->priv_data,opt))) {
uint8_t *str;
if (opt->flags) continue;
if (!(opt->flags & AV_OPT_FLAG_EXPORT)) continue;
if (av_opt_get(dec_ctx->priv_data, opt->name, 0, &str) >= 0) {
print_str(opt->name, str);
av_free(str);
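
The stricter filter above keeps only options the codec marks with AV_OPT_FLAG_EXPORT (the old check printed only options whose flags field happened to be zero). The iteration itself is the standard av_opt_next()/av_opt_get() walk; a small sketch (the helper name is illustrative):

    #include <stdio.h>
    #include <libavutil/mem.h>
    #include <libavutil/opt.h>

    /* Illustrative: walk an AVOptions-enabled private context and print only
     * the options flagged as exportable, mirroring the updated check above. */
    static void list_exported_options(void *priv_data)
    {
        const AVOption *opt = NULL;

        while ((opt = av_opt_next(priv_data, opt))) {
            uint8_t *str;

            if (!(opt->flags & AV_OPT_FLAG_EXPORT))
                continue;
            if (av_opt_get(priv_data, opt->name, 0, &str) >= 0) {
                printf("%s=%s\n", opt->name, (const char *)str);
                av_free(str);
            }
        }
    }
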
@ -2760,10 +2754,10 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
print_time("duration", stream->duration, &stream->time_base);
if (par->bit_rate > 0) print_val ("bit_rate", par->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
#if FF_API_LAVF_AVCTX
if (stream->codec->rc_max_rate > 0) print_val ("max_bit_rate", stream->codec->rc_max_rate, unit_bit_per_second_str);
else print_str_opt("max_bit_rate", "N/A");
#endif
if (dec_ctx && dec_ctx->rc_max_rate > 0)
print_val ("max_bit_rate", dec_ctx->rc_max_rate, unit_bit_per_second_str);
else
print_str_opt("max_bit_rate", "N/A");
if (dec_ctx && dec_ctx->bits_per_raw_sample > 0) print_fmt("bits_per_raw_sample", "%d", dec_ctx->bits_per_raw_sample);
else print_str_opt("bits_per_raw_sample", "N/A");
if (stream->nb_frames) print_fmt ("nb_frames", "%"PRId64, stream->nb_frames);
@ -2775,8 +2769,11 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
if (do_show_data)
writer_print_data(w, "extradata", par->extradata,
par->extradata_size);
if (par->extradata_size > 0) {
writer_print_data_hash(w, "extradata_hash", par->extradata,
par->extradata_size);
}
/* Print disposition information */
#define PRINT_DISPOSITION(flagname, name) do { \
@ -2797,6 +2794,11 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
PRINT_DISPOSITION(CAPTIONS, "captions");
PRINT_DISPOSITION(DESCRIPTIONS, "descriptions");
PRINT_DISPOSITION(METADATA, "metadata");
PRINT_DISPOSITION(DEPENDENT, "dependent");
PRINT_DISPOSITION(STILL_IMAGE, "still_image");
writer_print_section_footer(w);
}
@ -3016,7 +3018,7 @@ static int open_input_file(InputFile *ifile, const char *filename, const char *p
for (i = 0; i < fmt_ctx->nb_streams; i++) {
InputStream *ist = &ifile->streams[i];
AVStream *stream = fmt_ctx->streams[i];
AVCodec *codec;
const AVCodec *codec;
ist->st = stream;
@ -3054,12 +3056,6 @@ static int open_input_file(InputFile *ifile, const char *filename, const char *p
}
ist->dec_ctx->pkt_timebase = stream->time_base;
ist->dec_ctx->framerate = stream->avg_frame_rate;
#if FF_API_LAVF_AVCTX
ist->dec_ctx->properties = stream->codec->properties;
ist->dec_ctx->coded_width = stream->codec->coded_width;
ist->dec_ctx->coded_height = stream->codec->coded_height;
#endif
if (avcodec_open2(ist->dec_ctx, codec, &opts) < 0) {
av_log(NULL, AV_LOG_WARNING, "Could not open codec for input stream %d\n",
@ -3259,9 +3255,6 @@ static void ffprobe_show_pixel_formats(WriterContext *w)
PRINT_PIX_FMT_FLAG(HWACCEL, "hwaccel");
PRINT_PIX_FMT_FLAG(PLANAR, "planar");
PRINT_PIX_FMT_FLAG(RGB, "rgb");
#if FF_API_PSEUDOPAL
PRINT_PIX_FMT_FLAG(PSEUDOPAL, "pseudopal");
#endif
PRINT_PIX_FMT_FLAG(ALPHA, "alpha");
writer_print_section_footer(w);
}
@ -3280,6 +3273,17 @@ static void ffprobe_show_pixel_formats(WriterContext *w)
writer_print_section_footer(w);
}
static int opt_show_optional_fields(void *optctx, const char *opt, const char *arg)
{
if (!av_strcasecmp(arg, "always")) show_optional_fields = SHOW_OPTIONAL_FIELDS_ALWAYS;
else if (!av_strcasecmp(arg, "never")) show_optional_fields = SHOW_OPTIONAL_FIELDS_NEVER;
else if (!av_strcasecmp(arg, "auto")) show_optional_fields = SHOW_OPTIONAL_FIELDS_AUTO;
if (show_optional_fields == SHOW_OPTIONAL_FIELDS_AUTO && av_strcasecmp(arg, "auto"))
show_optional_fields = parse_number_or_die("show_optional_fields", arg, OPT_INT, SHOW_OPTIONAL_FIELDS_AUTO, SHOW_OPTIONAL_FIELDS_ALWAYS);
return 0;
}
static int opt_format(void *optctx, const char *opt, const char *arg)
{
iformat = av_find_input_format(arg);
@ -3788,6 +3792,7 @@ int ffprobe_execute(int argc, char **argv)
{ "show_library_versions", 0, { .func_arg = &opt_show_library_versions }, "show library versions" },
{ "show_versions", 0, { .func_arg = &opt_show_versions }, "show program and library versions" },
{ "show_pixel_formats", 0, { .func_arg = &opt_show_pixel_formats }, "show pixel format descriptions" },
{ "show_optional_fields", HAS_ARG, { .func_arg = &opt_show_optional_fields }, "show optional fields" },
{ "show_private_data", OPT_BOOL, { &show_private_data }, "show private data" },
{ "private", OPT_BOOL, { &show_private_data }, "same as show_private_data" },
{ "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },

View File

@ -13,7 +13,6 @@ fi
--with-pic \
--with-sysroot="${ANDROID_SYSROOT}" \
--with-glib=no \
--with-fontconfig=yes \
--with-freetype=yes \
--enable-static \
--disable-shared \

View File

@ -1,5 +1,8 @@
#!/bin/bash
# DISABLE ASM WORKAROUNDS BEFORE APPLYING THEM AGAIN
git checkout ${BASEDIR}/src/${LIB_NAME}/aom_ports 1>>"${BASEDIR}"/build.log 2>&1
# SET BUILD OPTIONS
ASM_OPTIONS=""
case ${ARCH} in
@ -53,4 +56,4 @@ make -j$(get_cpu_count) || return 1
make install || return 1
# CREATE PACKAGE CONFIG MANUALLY
create_libaom_package_config "2.0.1" || return 1
create_libaom_package_config "3.1.0" || return 1

View File

@ -34,4 +34,4 @@ make -j$(get_cpu_count) || return 1
make install || return 1
# CREATE PACKAGE CONFIG MANUALLY
create_libxml2_package_config "2.9.10" || return 1
create_libxml2_package_config "2.9.12" || return 1

View File

@ -13,7 +13,6 @@ fi
--with-pic \
--with-sysroot="${SDK_PATH}" \
--with-glib=no \
--with-fontconfig=yes \
--with-freetype=yes \
--enable-static \
--disable-shared \

View File

@ -1,9 +1,7 @@
#!/bin/bash
# DISABLE x86-64 ASM WORKAROUNDS BEFORE APPLYING THEM AGAIN
${SED_INLINE} 's/define aom_clear_system_state()/define aom_clear_system_state() aom_reset_mmx_state()/g' ${BASEDIR}/src/${LIB_NAME}/aom_ports/system_state.h
${SED_INLINE} 's/#add_asm_library("aom_ports/ add_asm_library("aom_ports/g' ${BASEDIR}/src/${LIB_NAME}/aom_ports/aom_ports.cmake
${SED_INLINE} 's/#target_sources(aom_ports/ target_sources(aom_ports/g' ${BASEDIR}/src/${LIB_NAME}/aom_ports/aom_ports.cmake
# DISABLE ASM WORKAROUNDS BEFORE APPLYING THEM AGAIN
git checkout ${BASEDIR}/src/${LIB_NAME}/aom_ports 1>>"${BASEDIR}"/build.log 2>&1
# SET BUILD OPTIONS
ASM_OPTIONS=""

View File

@ -28,4 +28,4 @@ make -j$(get_cpu_count) || return 1
make install || return 1
# CREATE PACKAGE CONFIG MANUALLY
create_libxml2_package_config "2.9.10" || return 1
create_libxml2_package_config "2.9.12" || return 1

View File

@ -825,7 +825,6 @@ set_library() {
;;
harfbuzz)
ENABLED_LIBRARIES[LIBRARY_HARFBUZZ]=$2
set_library "fontconfig" $2
set_library "freetype" $2
;;
kvazaar)
@ -1063,7 +1062,6 @@ check_if_dependency_rebuilt() {
;;
fontconfig)
set_dependency_rebuilt_flag "libass"
set_dependency_rebuilt_flag "harfbuzz"
;;
freetype)
set_dependency_rebuilt_flag "fontconfig"

View File

@ -28,14 +28,14 @@ get_library_source() {
;;
expat)
SOURCE_REPO_URL="https://github.com/tanersener/libexpat"
SOURCE_ID="R_2_2_10"
SOURCE_ID="R_2_3_0"
SOURCE_TYPE="TAG"
;;
ffmpeg)
SOURCE_REPO_URL="https://github.com/tanersener/FFmpeg"
SOURCE_ID="8f1580c31a3cfb9994bda7b3914a97e09b9f1d48"
SOURCE_ID="c9a79532e5ec4ea265d3a82f185fca6e196088c2"
SOURCE_TYPE="COMMIT"
SOURCE_GIT_DESCRIBE="n4.4-dev-3468-g8f1580c31a" # git describe --tags
SOURCE_GIT_DESCRIBE="n4.5-dev-899-gc9a79532e5" # git describe --tags
;;
fontconfig)
SOURCE_REPO_URL="https://github.com/tanersener/fontconfig"
@ -69,12 +69,12 @@ get_library_source() {
;;
harfbuzz)
SOURCE_REPO_URL="https://github.com/tanersener/harfbuzz"
SOURCE_ID="2.7.4"
SOURCE_ID="2.8.1"
SOURCE_TYPE="TAG"
;;
jpeg)
SOURCE_REPO_URL="https://github.com/tanersener/libjpeg-turbo"
SOURCE_ID="2.0.6"
SOURCE_ID="2.1.0"
SOURCE_TYPE="TAG"
;;
kvazaar)
@ -94,7 +94,7 @@ get_library_source() {
;;
libaom)
SOURCE_REPO_URL="https://github.com/tanersener/libaom"
SOURCE_ID="v2.0.1"
SOURCE_ID="v3.1.0"
SOURCE_TYPE="TAG"
;;
libass)
@ -129,7 +129,7 @@ get_library_source() {
;;
libsndfile)
SOURCE_REPO_URL="https://github.com/tanersener/libsndfile"
SOURCE_ID="v1.0.30"
SOURCE_ID="1.0.31"
SOURCE_TYPE="TAG"
;;
libtheora)
@ -154,22 +154,22 @@ get_library_source() {
;;
libvpx)
SOURCE_REPO_URL="https://github.com/tanersener/libvpx"
SOURCE_ID="v1.9.0"
SOURCE_ID="v1.10.0"
SOURCE_TYPE="TAG"
;;
libwebp)
SOURCE_REPO_URL="https://github.com/tanersener/libwebp"
SOURCE_ID="v1.1.0"
SOURCE_ID="v1.2.0"
SOURCE_TYPE="TAG"
;;
libxml2)
SOURCE_REPO_URL="https://github.com/tanersener/libxml2"
SOURCE_ID="v2.9.10"
SOURCE_ID="v2.9.12"
SOURCE_TYPE="TAG"
;;
nettle)
SOURCE_REPO_URL="https://github.com/tanersener/nettle"
SOURCE_ID="nettle_3.7_release_20210104"
SOURCE_ID="nettle_3.7.2_release_20210321"
SOURCE_TYPE="TAG"
;;
opencore-amr)
@ -239,7 +239,7 @@ get_library_source() {
;;
x264)
SOURCE_REPO_URL="https://github.com/tanersener/x264"
SOURCE_ID="d198931a63049db1f2c92d96c34904c69fde8117"
SOURCE_ID="55d517bc4569272a2c9a367a4106c234aba2ffbc"
SOURCE_TYPE="COMMIT" # COMMIT -> r3027
;;
x265)