As the names imply, InputFilterPriv and OutputFilterPriv are structs meant to be internal and private to the filter handling code. When a field is required by other modules, it can be moved to the public-facing structs, which is what this commit does.
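For context, the split relies on the usual embedding pattern: each private struct keeps its public counterpart as its first member, so a pointer to one can be recovered from a pointer to the other with a cast (that is what ifp_from_ifilter()/ofp_from_ofilter() below do). A minimal, self-contained sketch of that layout follows; only the index/input_name fields and the ifp_from_ifilter() helper mirror the patch, the remaining field names are purely illustrative:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* public view: only the fields other fftools modules need */
typedef struct InputFilter {
    int index;
    const char *input_name;
} InputFilter;

/* private view: embeds the public struct as its first member,
 * plus fields that stay internal to the filter code */
typedef struct InputFilterPriv {
    InputFilter ifilter;
    int format;
    int bound;
} InputFilterPriv;

static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter)
{
    /* valid because InputFilter is the first member of InputFilterPriv */
    return (InputFilterPriv *)ifilter;
}

int main(void)
{
    InputFilterPriv ifp = {
        .ifilter = { .index = 0, .input_name = "0:v" },
        .format  = -1,
    };
    InputFilter *pub = &ifp.ifilter;

    /* other modules only ever see the public view... */
    printf("input %d: %s\n", pub->index, pub->input_name);

    /* ...while the filter code recovers its private view from it */
    assert(ifp_from_ifilter(pub) == &ifp);
    assert(offsetof(InputFilterPriv, ifilter) == 0);
    return 0;
}

The same idea applies to OutputFilter/OutputFilterPriv.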
Signed-off-by: James Almer <jamr...@gmail.com> --- fftools/ffmpeg.h | 17 ++ fftools/ffmpeg_filter.c | 319 ++++++++++++++++++++++++++----------- fftools/ffmpeg_filter.h | 192 ---------------------- fftools/graph/graphprint.c | 14 +- fftools/resources/resman.c | 1 - 5 files changed, 254 insertions(+), 289 deletions(-) delete mode 100644 fftools/ffmpeg_filter.h diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h index 641582ae63..7868f3d85f 100644 --- a/fftools/ffmpeg.h +++ b/fftools/ffmpeg.h @@ -353,6 +353,18 @@ typedef struct OutputFilterOptions { typedef struct InputFilter { struct FilterGraph *graph; uint8_t *name; + int index; + + // filter data type + enum AVMediaType type; + + AVFilterContext *filter; + + char *input_name; + + /* for filters that are not yet bound to an input stream, + * this stores the input linklabel, if any */ + uint8_t *linklabel; } InputFilter; typedef struct OutputFilter { @@ -360,6 +372,11 @@ typedef struct OutputFilter { struct FilterGraph *graph; uint8_t *name; + int index; + + AVFilterContext *filter; + + char *output_name; /* for filters that are not yet bound to an output stream, * this stores the output linklabel, if any */ diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c index 464e17ca7c..e0c40ffe00 100644 --- a/fftools/ffmpeg_filter.c +++ b/fftools/ffmpeg_filter.c @@ -21,7 +21,6 @@ #include <stdint.h> #include "ffmpeg.h" -#include "ffmpeg_filter.h" #include "graph/graphprint.h" #include "libavfilter/avfilter.h" @@ -102,6 +101,141 @@ typedef struct FilterGraphThread { uint8_t *eof_out; } FilterGraphThread; +typedef struct InputFilterPriv { + InputFilter ifilter; + + InputFilterOptions opts; + + // used to hold submitted input + AVFrame *frame; + + // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video, + // same as type otherwise + enum AVMediaType type_src; + + int eof; + int bound; + int drop_warned; + uint64_t nb_dropped; + + // parameters configured for this input + int format; + + int width, height; + AVRational sample_aspect_ratio; + enum AVColorSpace color_space; + enum AVColorRange color_range; + + int sample_rate; + AVChannelLayout ch_layout; + + AVRational time_base; + + AVFrameSideData **side_data; + int nb_side_data; + + AVFifo *frame_queue; + + AVBufferRef *hw_frames_ctx; + + int displaymatrix_present; + int displaymatrix_applied; + int32_t displaymatrix[9]; + + int downmixinfo_present; + AVDownmixInfo downmixinfo; + + struct { + AVFrame *frame; + + int64_t last_pts; + int64_t end_pts; + + /// marks if sub2video_update should force an initialization + unsigned int initialize; + } sub2video; +} InputFilterPriv; + +static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter) +{ + return (InputFilterPriv*)ifilter; +} + +typedef struct FPSConvContext { + AVFrame *last_frame; + /* number of frames emitted by the video-encoding sync code */ + int64_t frame_number; + /* history of nb_frames_prev, i.e. 
the number of times the + * previous frame was duplicated by vsync code in recent + * do_video_out() calls */ + int64_t frames_prev_hist[3]; + + uint64_t dup_warning; + + int last_dropped; + int dropped_keyframe; + + enum VideoSyncMethod vsync_method; + + AVRational framerate; + AVRational framerate_max; + const AVRational *framerate_supported; + int framerate_clip; +} FPSConvContext; + +typedef struct OutputFilterPriv { + OutputFilter ofilter; + + void *log_parent; + char log_name[32]; + + /* desired output stream properties */ + int format; + int width, height; + int sample_rate; + AVChannelLayout ch_layout; + enum AVColorSpace color_space; + enum AVColorRange color_range; + + AVFrameSideData **side_data; + int nb_side_data; + + // time base in which the output is sent to our downstream + // does not need to match the filtersink's timebase + AVRational tb_out; + // at least one frame with the above timebase was sent + // to our downstream, so it cannot change anymore + int tb_out_locked; + + AVRational sample_aspect_ratio; + + AVDictionary *sws_opts; + AVDictionary *swr_opts; + + // those are only set if no format is specified and the encoder gives us multiple options + // They point directly to the relevant lists of the encoder. + const int *formats; + const AVChannelLayout *ch_layouts; + const int *sample_rates; + const enum AVColorSpace *color_spaces; + const enum AVColorRange *color_ranges; + + AVRational enc_timebase; + int64_t trim_start_us; + int64_t trim_duration_us; + // offset for output timestamps, in AV_TIME_BASE_Q + int64_t ts_offset; + int64_t next_pts; + FPSConvContext fps; + + unsigned flags; +} OutputFilterPriv; + +static OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter) +{ + return (OutputFilterPriv*)ofilter; +} + typedef struct FilterCommand { char *target; char *command; @@ -182,7 +316,7 @@ static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts) av_assert1(frame->data[0]); ifp->sub2video.last_pts = frame->pts = pts; - ret = av_buffersrc_add_frame_flags(ifp->filter, frame, + ret = av_buffersrc_add_frame_flags(ifp->ifilter.filter, frame, AV_BUFFERSRC_FLAG_KEEP_REF | AV_BUFFERSRC_FLAG_PUSH); if (ret != AVERROR_EOF && ret < 0) @@ -516,10 +650,10 @@ static OutputFilter *ofilter_alloc(FilterGraph *fg, enum AVMediaType type) ofp->format = -1; ofp->color_space = AVCOL_SPC_UNSPECIFIED; ofp->color_range = AVCOL_RANGE_UNSPECIFIED; - ofp->index = fg->nb_outputs - 1; + ofilter->index = fg->nb_outputs - 1; snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d", - av_get_media_type_string(type)[0], ofp->index); + av_get_media_type_string(type)[0], ofilter->index); return ofilter; } @@ -535,10 +669,10 @@ static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, av_assert0(!ifp->bound); ifp->bound = 1; - if (ifp->type != ist->par->codec_type && - !(ifp->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) { + if (ifilter->type != ist->par->codec_type && + !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) { av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n", - av_get_media_type_string(ist->par->codec_type), av_get_media_type_string(ifp->type)); + av_get_media_type_string(ist->par->codec_type), av_get_media_type_string(ifilter->type)); return AVERROR(EINVAL); } @@ -553,8 +687,12 @@ static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, if (ret < 0) return ret; + ifilter->input_name = av_strdup(ifp->opts.name); + if (!ifilter->input_name) + return 
AVERROR(EINVAL); + ret = sch_connect(fgp->sch, - src, SCH_FILTER_IN(fgp->sch_idx, ifp->index)); + src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index)); if (ret < 0) return ret; @@ -589,19 +727,23 @@ static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, av_assert0(!ifp->bound); ifp->bound = 1; - if (ifp->type != dec->type) { + if (ifp->ifilter.type != dec->type) { av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n", - av_get_media_type_string(dec->type), av_get_media_type_string(ifp->type)); + av_get_media_type_string(dec->type), av_get_media_type_string(ifp->ifilter.type)); return AVERROR(EINVAL); } - ifp->type_src = ifp->type; + ifp->type_src = ifp->ifilter.type; ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src); if (ret < 0) return ret; - ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index)); + ifp->ifilter.input_name = av_strdup(ifp->opts.name); + if (!ifp->ifilter.input_name) + return AVERROR(EINVAL); + + ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index)); if (ret < 0) return ret; @@ -670,8 +812,8 @@ int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, ofp->trim_start_us = opts->trim_start_us; ofp->trim_duration_us = opts->trim_duration_us; - ofp->name = av_strdup(opts->name); - if (!ofp->name) + ofilter->output_name = av_strdup(opts->name); + if (!ofilter->output_name) return AVERROR(EINVAL); ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0); @@ -691,7 +833,7 @@ int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, ofp->log_parent = NULL; av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name)); } else - av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name); + av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name); switch (ofilter->type) { case AVMEDIA_TYPE_VIDEO: @@ -750,7 +892,7 @@ int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, break; } - ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index), + ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index), SCH_ENC(sched_idx_enc)); if (ret < 0) return ret; @@ -764,16 +906,16 @@ static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, OutputFilterPriv *ofp = ofp_from_ofilter(ofilter); av_assert0(!ofilter->bound); - av_assert0(ofilter->type == ifp->type); + av_assert0(ofilter->type == ifp->ifilter.type); ofilter->bound = 1; av_freep(&ofilter->linklabel); - ofp->name = av_strdup(opts->name); - if (!ofp->name) + ofilter->output_name = av_strdup(opts->name); + if (!ofilter->output_name) return AVERROR(EINVAL); - av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name); + av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name); return 0; } @@ -789,18 +931,18 @@ static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_id av_assert0(!ifp->bound); ifp->bound = 1; - if (ifp->type != ofilter_src->type) { + if (ifp->ifilter.type != ofilter_src->type) { av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n", av_get_media_type_string(ofilter_src->type), - av_get_media_type_string(ifp->type)); + av_get_media_type_string(ifp->ifilter.type)); return AVERROR(EINVAL); } - ifp->type_src = ifp->type; + ifp->type_src = ifp->ifilter.type; memset(&opts, 0, sizeof(opts)); - snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index); + snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index); opts.name = name; ret = 
ofilter_bind_ifilter(ofilter_src, ifp, &opts); @@ -808,7 +950,7 @@ static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_id return ret; ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx), - SCH_FILTER_IN(fgp->sch_idx, ifp->index)); + SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index)); if (ret < 0) return ret; @@ -831,7 +973,7 @@ static InputFilter *ifilter_alloc(FilterGraph *fg) if (!ifp->frame) return NULL; - ifp->index = fg->nb_inputs - 1; + ifilter->index = fg->nb_inputs - 1; ifp->format = -1; ifp->color_space = AVCOL_SPC_UNSPECIFIED; ifp->color_range = AVCOL_RANGE_UNSPECIFIED; @@ -868,10 +1010,11 @@ void fg_free(FilterGraph **pfg) av_frame_free(&ifp->opts.fallback); av_buffer_unref(&ifp->hw_frames_ctx); - av_freep(&ifp->linklabel); + av_freep(&ifilter->linklabel); av_freep(&ifp->opts.name); av_frame_side_data_free(&ifp->side_data, &ifp->nb_side_data); av_freep(&ifilter->name); + av_freep(&ifilter->input_name); av_freep(&fg->inputs[j]); } av_freep(&fg->inputs); @@ -885,8 +1028,8 @@ void fg_free(FilterGraph **pfg) av_freep(&ofilter->linklabel); av_freep(&ofilter->name); + av_freep(&ofilter->output_name); av_freep(&ofilter->apad); - av_freep(&ofp->name); av_channel_layout_uninit(&ofp->ch_layout); av_frame_side_data_free(&ofp->side_data, &ofp->nb_side_data); av_freep(&fg->outputs[j]); @@ -981,21 +1124,19 @@ int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch) for (AVFilterInOut *cur = inputs; cur; cur = cur->next) { InputFilter *const ifilter = ifilter_alloc(fg); - InputFilterPriv *ifp; if (!ifilter) { ret = AVERROR(ENOMEM); goto fail; } - ifp = ifp_from_ifilter(ifilter); - ifp->linklabel = cur->name; + ifilter->linklabel = cur->name; cur->name = NULL; - ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads, + ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads, cur->pad_idx); - if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) { + if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) { av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported " "currently.\n"); ret = AVERROR(ENOSYS); @@ -1108,17 +1249,17 @@ static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter) { InputFilterPriv *ifp = ifp_from_ifilter(ifilter); InputStream *ist = NULL; - enum AVMediaType type = ifp->type; + enum AVMediaType type = ifilter->type; ViewSpecifier vs = { .type = VIEW_SPECIFIER_TYPE_NONE }; const char *spec; char *p; int i, ret; - if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) { + if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) { // bind to a standalone decoder int dec_idx; - dec_idx = strtol(ifp->linklabel + 4, &p, 0); + dec_idx = strtol(ifilter->linklabel + 4, &p, 0); if (dec_idx < 0 || dec_idx >= nb_decoders) { av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n", dec_idx, fg->graph_desc); @@ -1137,7 +1278,7 @@ static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter) av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n", ifilter->name); return ret; - } else if (ifp->linklabel) { + } else if (ifilter->linklabel) { StreamSpecifier ss; AVFormatContext *s; AVStream *st = NULL; @@ -1154,22 +1295,22 @@ static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter) OutputFilter *ofilter = fg_src->outputs[j]; if (!ofilter->bound && ofilter->linklabel && - !strcmp(ofilter->linklabel, ifp->linklabel)) { + !strcmp(ofilter->linklabel, ifilter->linklabel)) { av_log(fg, 
AV_LOG_VERBOSE, "Binding input with label '%s' to filtergraph output %d:%d\n", - ifp->linklabel, i, j); + ifilter->linklabel, i, j); ret = ifilter_bind_fg(ifp, fg_src, j); if (ret < 0) av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n", - ifp->linklabel); + ifilter->linklabel); return ret; } } } // bind to an explicitly specified demuxer stream - file_idx = strtol(ifp->linklabel, &p, 0); + file_idx = strtol(ifilter->linklabel, &p, 0); if (file_idx < 0 || file_idx >= nb_input_files) { av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n", file_idx, fg->graph_desc); @@ -1213,7 +1354,7 @@ static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter) av_log(fg, AV_LOG_VERBOSE, "Binding input with label '%s' to input stream %d:%d\n", - ifp->linklabel, ist->file->index, ist->index); + ifilter->linklabel, ist->file->index, ist->index); } else { ist = ist_find_unused(type); if (!ist) { @@ -1226,7 +1367,7 @@ static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter) av_log(fg, AV_LOG_VERBOSE, "Binding unlabeled input %d to input stream %d:%d\n", - ifp->index, ist->file->index, ist->index); + ifilter->index, ist->file->index, ist->index); } av_assert0(ist); @@ -1375,8 +1516,8 @@ static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *gr int ret; char name[255]; - snprintf(name, sizeof(name), "out_%s", ofp->name); - ret = avfilter_graph_create_filter(&ofp->filter, + snprintf(name, sizeof(name), "out_%s", ofilter->output_name); + ret = avfilter_graph_create_filter(&ofilter->filter, avfilter_get_by_name("buffersink"), name, NULL, NULL, graph); @@ -1395,7 +1536,7 @@ static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *gr av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value); } - snprintf(name, sizeof(name), "scaler_out_%s", ofp->name); + snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name); if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"), name, args, NULL, graph)) < 0) return ret; @@ -1431,14 +1572,14 @@ static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *gr pad_idx = 0; } - snprintf(name, sizeof(name), "trim_out_%s", ofp->name); + snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name); ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us, &last_filter, &pad_idx, name); if (ret < 0) return ret; - if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0) + if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0) return ret; return 0; @@ -1454,8 +1595,8 @@ static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *gr char name[255]; int ret; - snprintf(name, sizeof(name), "out_%s", ofp->name); - ret = avfilter_graph_create_filter(&ofp->filter, + snprintf(name, sizeof(name), "out_%s", ofilter->output_name); + ret = avfilter_graph_create_filter(&ofilter->filter, avfilter_get_by_name("abuffersink"), name, NULL, NULL, graph); if (ret < 0) @@ -1492,7 +1633,7 @@ static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *gr if (args.len) { AVFilterContext *format; - snprintf(name, sizeof(name), "format_out_%s", ofp->name); + snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name); ret = avfilter_graph_create_filter(&format, avfilter_get_by_name("aformat"), name, args.str, NULL, graph); @@ -1512,13 +1653,13 @@ static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *gr fgp->have_sources = 1; } - 
snprintf(name, sizeof(name), "trim for output %s", ofp->name); + snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name); ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us, &last_filter, &pad_idx, name); if (ret < 0) goto fail; - if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0) + if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0) goto fail; fail: av_bprint_finalize(&args, NULL); @@ -1567,8 +1708,8 @@ static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index, ifp->opts.name); - ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name); - if (!ifp->filter) { + ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name); + if (!ifilter->filter) { ret = AVERROR(ENOMEM); goto fail; } @@ -1586,16 +1727,16 @@ static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, par->side_data = ifp->side_data; par->nb_side_data = ifp->nb_side_data; - ret = av_buffersrc_parameters_set(ifp->filter, par); + ret = av_buffersrc_parameters_set(ifilter->filter, par); if (ret < 0) goto fail; av_freep(&par); - ret = avfilter_init_dict(ifp->filter, NULL); + ret = avfilter_init_dict(ifilter->filter, NULL); if (ret < 0) goto fail; - last_filter = ifp->filter; + last_filter = ifilter->filter; desc = av_pix_fmt_desc_get(ifp->format); av_assert0(desc); @@ -1689,7 +1830,7 @@ static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels); snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name); - if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt, + if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt, name, args.str, NULL, graph)) < 0) return ret; @@ -1698,11 +1839,11 @@ static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, return AVERROR(ENOMEM); par->side_data = ifp->side_data; par->nb_side_data = ifp->nb_side_data; - ret = av_buffersrc_parameters_set(ifp->filter, par); + ret = av_buffersrc_parameters_set(ifilter->filter, par); av_free(par); if (ret < 0) return ret; - last_filter = ifp->filter; + last_filter = ifilter->filter; snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name); ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us, @@ -1719,7 +1860,7 @@ static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in) { - switch (ifp_from_ifilter(ifilter)->type) { + switch (ifilter->type) { case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in); case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in); default: av_assert0(0); return 0; @@ -1729,9 +1870,9 @@ static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt) { for (int i = 0; i < fg->nb_outputs; i++) - ofp_from_ofilter(fg->outputs[i])->filter = NULL; + fg->outputs[i]->filter = NULL; for (int i = 0; i < fg->nb_inputs; i++) - ifp_from_ifilter(fg->inputs[i])->filter = NULL; + fg->inputs[i]->filter = NULL; avfilter_graph_free(&fgt->graph); } @@ -1845,7 +1986,7 @@ static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt) int nb_sd; OutputFilter *ofilter = fg->outputs[i]; OutputFilterPriv *ofp 
= ofp_from_ofilter(ofilter); - AVFilterContext *sink = ofp->filter; + AVFilterContext *sink = ofilter->filter; ofp->format = av_buffersink_get_format(sink); @@ -1885,6 +2026,7 @@ static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt) } for (int i = 0; i < fg->nb_inputs; i++) { + InputFilter *ifilter = fg->inputs[i]; InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]); AVFrame *tmp; while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) { @@ -1895,7 +2037,7 @@ static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt) if (ifp->displaymatrix_applied) av_frame_remove_side_data(tmp, AV_FRAME_DATA_DISPLAYMATRIX); } - ret = av_buffersrc_add_frame(ifp->filter, tmp); + ret = av_buffersrc_add_frame(ifilter->filter, tmp); } av_frame_free(&tmp); if (ret < 0) @@ -1905,9 +2047,9 @@ static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt) /* send the EOFs for the finished inputs */ for (int i = 0; i < fg->nb_inputs; i++) { - InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]); + InputFilter *ifilter = fg->inputs[i]; if (fgt->eof_in[i]) { - ret = av_buffersrc_add_frame(ifp->filter, NULL); + ret = av_buffersrc_add_frame(ifilter->filter, NULL); if (ret < 0) goto fail; have_input_eof = 1; @@ -1937,7 +2079,7 @@ static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *fr if (ret < 0) return ret; - ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } : + ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } : (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) : frame->time_base; @@ -2027,12 +2169,11 @@ static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt) for (int i = 0; i < fg->nb_inputs; i++) { InputFilter *ifilter = fg->inputs[i]; - InputFilterPriv *ifp = ifp_from_ifilter(ifilter); if (fgt->eof_in[i]) continue; - nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter); + nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter); if (nb_requests > nb_requests_max) { nb_requests_max = nb_requests; best_input = i; @@ -2076,7 +2217,7 @@ static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame) fr = fps->framerate; if (!fr.num) { - AVRational fr_sink = av_buffersink_get_frame_rate(ofp->filter); + AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter); if (fr_sink.num > 0 && fr_sink.den > 0) fr = fr_sink; } @@ -2329,16 +2470,16 @@ static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt) "No filtered frames for output stream, trying to " "initialize anyway.\n"); - ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame); + ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame); if (ret < 0) { av_frame_unref(frame); return ret; } } - fgt->eof_out[ofp->index] = 1; + fgt->eof_out[ofp->ofilter.index] = 1; - ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL); + ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL); return (ret == AVERROR_EOF) ? 
0 : ret; } @@ -2391,12 +2532,12 @@ static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, } // send the frame to consumers - ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out); + ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out); if (ret < 0) { av_frame_unref(frame_out); - if (!fgt->eof_out[ofp->index]) { - fgt->eof_out[ofp->index] = 1; + if (!fgt->eof_out[ofp->ofilter.index]) { + fgt->eof_out[ofp->ofilter.index] = 1; fgp->nb_outputs_done++; } @@ -2429,13 +2570,13 @@ static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame) { FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph); - AVFilterContext *filter = ofp->filter; + AVFilterContext *filter = ofp->ofilter.filter; FrameData *fd; int ret; ret = av_buffersink_get_frame_flags(filter, frame, AV_BUFFERSINK_FLAG_NO_REQUEST); - if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) { + if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) { ret = fg_output_frame(ofp, fgt, NULL); return (ret < 0) ? ret : 1; } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { @@ -2447,7 +2588,7 @@ static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, return ret; } - if (fgt->eof_out[ofp->index]) { + if (fgt->eof_out[ofp->ofilter.index]) { av_frame_unref(frame); return 0; } @@ -2622,7 +2763,7 @@ static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer) if (ifp->sub2video.end_pts < INT64_MAX) sub2video_update(ifp, INT64_MAX, NULL); - return av_buffersrc_add_frame(ifp->filter, NULL); + return av_buffersrc_add_frame(ifilter->filter, NULL); } ifp->width = frame->width ? frame->width : ifp->width; @@ -2639,16 +2780,16 @@ static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, InputFilterPriv *ifp = ifp_from_ifilter(ifilter); int ret; - if (fgt->eof_in[ifp->index]) + if (fgt->eof_in[ifilter->index]) return 0; - fgt->eof_in[ifp->index] = 1; + fgt->eof_in[ifilter->index] = 1; - if (ifp->filter) { + if (ifilter->filter) { pts = av_rescale_q_rnd(pts, tb, ifp->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); - ret = av_buffersrc_close(ifp->filter, pts, AV_BUFFERSRC_FLAG_PUSH); + ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH); if (ret < 0) return ret; } else { @@ -2717,7 +2858,7 @@ static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, int need_reinit = 0, ret; /* determine if the parameters for this input changed */ - switch (ifp->type) { + switch (ifilter->type) { case AVMEDIA_TYPE_AUDIO: if (ifp->format != frame->format || ifp->sample_rate != frame->sample_rate || @@ -2837,7 +2978,7 @@ static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, return AVERROR(ENOMEM); fd->wallclock[LATENCY_PROBE_FILTER_PRE] = av_gettime_relative(); - ret = av_buffersrc_add_frame_flags(ifp->filter, frame, + ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH); if (ret < 0) { av_frame_unref(frame); @@ -2856,7 +2997,7 @@ static void fg_thread_set_name(const FilterGraph *fg) OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]); snprintf(name, sizeof(name), "%cf%s", av_get_media_type_string(ofp->ofilter.type)[0], - ofp->name); + ofp->ofilter.output_name); } else { snprintf(name, sizeof(name), "fc%d", fg->index); } diff --git a/fftools/ffmpeg_filter.h b/fftools/ffmpeg_filter.h deleted file mode 100644 index bf690bdc91..0000000000 --- a/fftools/ffmpeg_filter.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef FFTOOLS_FFMPEG_FILTER_H -#define FFTOOLS_FFMPEG_FILTER_H - -#include "ffmpeg.h" - -#include <stdint.h> - -#include "ffmpeg_sched.h" -#include "sync_queue.h" - -#include "libavfilter/avfilter.h" - -#include "libavutil/avutil.h" -#include "libavutil/dict.h" -#include "libavutil/fifo.h" -#include "libavutil/pixfmt.h" -#include "libavutil/rational.h" -#include "libavutil/bprint.h" -#include "libavutil/channel_layout.h" -#include "libavutil/downmix_info.h" - -typedef struct InputFilterPriv { - InputFilter ifilter; - - InputFilterOptions opts; - - int index; - - AVFilterContext *filter; - - // used to hold submitted input - AVFrame *frame; - - /* for filters that are not yet bound to an input stream, - * this stores the input linklabel, if any */ - uint8_t *linklabel; - - // filter data type - enum AVMediaType type; - // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video, - // same as type otherwise - enum AVMediaType type_src; - - int eof; - int bound; - int drop_warned; - uint64_t nb_dropped; - - // parameters configured for this input - int format; - - int width, height; - AVRational sample_aspect_ratio; - enum AVColorSpace color_space; - enum AVColorRange color_range; - - int sample_rate; - AVChannelLayout ch_layout; - - AVRational time_base; - - AVFrameSideData **side_data; - int nb_side_data; - - AVFifo *frame_queue; - - AVBufferRef *hw_frames_ctx; - - int displaymatrix_present; - int displaymatrix_applied; - int32_t displaymatrix[9]; - - int downmixinfo_present; - AVDownmixInfo downmixinfo; - - struct { - AVFrame *frame; - - int64_t last_pts; - int64_t end_pts; - - /// marks if sub2video_update should force an initialization - unsigned int initialize; - } sub2video; -} InputFilterPriv; - -static inline InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter) -{ - return (InputFilterPriv*)ifilter; -} - -typedef struct FPSConvContext { - AVFrame *last_frame; - /* number of frames emitted by the video-encoding sync code */ - int64_t frame_number; - /* history of nb_frames_prev, i.e. 
the number of times the - * previous frame was duplicated by vsync code in recent - * do_video_out() calls */ - int64_t frames_prev_hist[3]; - - uint64_t dup_warning; - - int last_dropped; - int dropped_keyframe; - - enum VideoSyncMethod vsync_method; - - AVRational framerate; - AVRational framerate_max; - const AVRational *framerate_supported; - int framerate_clip; -} FPSConvContext; - - -typedef struct OutputFilterPriv { - OutputFilter ofilter; - - int index; - - void *log_parent; - char log_name[32]; - - char *name; - - AVFilterContext *filter; - - /* desired output stream properties */ - int format; - int width, height; - int sample_rate; - AVChannelLayout ch_layout; - enum AVColorSpace color_space; - enum AVColorRange color_range; - - AVFrameSideData **side_data; - int nb_side_data; - - // time base in which the output is sent to our downstream - // does not need to match the filtersink's timebase - AVRational tb_out; - // at least one frame with the above timebase was sent - // to our downstream, so it cannot change anymore - int tb_out_locked; - - AVRational sample_aspect_ratio; - - AVDictionary *sws_opts; - AVDictionary *swr_opts; - - // those are only set if no format is specified and the encoder gives us multiple options - // They point directly to the relevant lists of the encoder. - const int *formats; - const AVChannelLayout *ch_layouts; - const int *sample_rates; - const enum AVColorSpace *color_spaces; - const enum AVColorRange *color_ranges; - - AVRational enc_timebase; - int64_t trim_start_us; - int64_t trim_duration_us; - // offset for output timestamps, in AV_TIME_BASE_Q - int64_t ts_offset; - int64_t next_pts; - FPSConvContext fps; - - unsigned flags; -} OutputFilterPriv; - -static inline OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter) -{ - return (OutputFilterPriv*)ofilter; -} - -#endif /* FFTOOLS_FFMPEG_FILTER_H */ diff --git a/fftools/graph/graphprint.c b/fftools/graph/graphprint.c index e55c8d7507..1d6490a255 100644 --- a/fftools/graph/graphprint.c +++ b/fftools/graph/graphprint.c @@ -28,7 +28,7 @@ #include "graphprint.h" -#include "fftools/ffmpeg_filter.h" +#include "fftools/ffmpeg.h" #include "fftools/ffmpeg_mux.h" #include "libavutil/avassert.h" @@ -490,7 +490,7 @@ static void print_filtergraph_single(GraphPrintContext *gpc, FilterGraph *fg, AV print_section_header_id(gpc, SECTION_ID_GRAPH_INPUTS, "Input_File", 0); for (int i = 0; i < fg->nb_inputs; i++) { - InputFilterPriv *ifilter = ifp_from_ifilter(fg->inputs[i]); + InputFilter *ifilter = fg->inputs[i]; enum AVMediaType media_type = ifilter->type; avtext_print_section_header(tfc, NULL, SECTION_ID_GRAPH_INPUT); @@ -507,8 +507,8 @@ static void print_filtergraph_single(GraphPrintContext *gpc, FilterGraph *fg, AV if (ifilter->linklabel && ifilter->filter) av_dict_set(&input_map, ifilter->filter->name, (const char *)ifilter->linklabel, 0); - else if (ifilter->opts.name && ifilter->filter) - av_dict_set(&input_map, ifilter->filter->name, (const char *)ifilter->opts.name, 0); + else if (ifilter->input_name && ifilter->filter) + av_dict_set(&input_map, ifilter->filter->name, (const char *)ifilter->input_name, 0); print_str("media_type", av_get_media_type_string(media_type)); @@ -520,13 +520,13 @@ static void print_filtergraph_single(GraphPrintContext *gpc, FilterGraph *fg, AV print_section_header_id(gpc, SECTION_ID_GRAPH_OUTPUTS, "Output_File", 0); for (int i = 0; i < fg->nb_outputs; i++) { - OutputFilterPriv *ofilter = ofp_from_ofilter(fg->outputs[i]); + OutputFilter *ofilter = fg->outputs[i]; 
avtext_print_section_header(tfc, NULL, SECTION_ID_GRAPH_OUTPUT); print_int("output_index", ofilter->index); - print_str("name", ofilter->name); + print_str("name", ofilter->output_name); if (fg->outputs[i]->linklabel) print_str("link_label", (const char*)fg->outputs[i]->linklabel); @@ -540,7 +540,7 @@ static void print_filtergraph_single(GraphPrintContext *gpc, FilterGraph *fg, AV av_dict_set(&output_map, ofilter->filter->name, ofilter->name, 0); - print_str("media_type", av_get_media_type_string(fg->outputs[i]->type)); + print_str("media_type", av_get_media_type_string(ofilter->type)); avtext_print_section_footer(tfc); // SECTION_ID_GRAPH_OUTPUT } diff --git a/fftools/resources/resman.c b/fftools/resources/resman.c index a9e21626fa..bce3589169 100644 --- a/fftools/resources/resman.c +++ b/fftools/resources/resman.c @@ -32,7 +32,6 @@ #endif #include "resman.h" -#include "fftools/ffmpeg_filter.h" #include "libavutil/avassert.h" #include "libavutil/pixdesc.h" #include "libavutil/dict.h" -- 2.49.0