[FFmpeg-cvslog] lavfi/framesync2: add dualinput helper functions.
ffmpeg | branch: master | Nicolas George | Mon Jul 17 20:45:52 2017 +0200| [0ae8df4109da6bae77bef24fc889eceadd98c6b7] | committer: Nicolas George lavfi/framesync2: add dualinput helper functions. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=0ae8df4109da6bae77bef24fc889eceadd98c6b7 --- libavfilter/framesync2.c | 99 ++-- libavfilter/framesync2.h | 25 2 files changed, 103 insertions(+), 21 deletions(-) diff --git a/libavfilter/framesync2.c b/libavfilter/framesync2.c index 0e9f6f210c..0f78a1733b 100644 --- a/libavfilter/framesync2.c +++ b/libavfilter/framesync2.c @@ -46,6 +46,8 @@ enum { STATE_EOF, }; +static int consume_from_fifos(FFFrameSync *fs); + int ff_framesync2_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in) { /* For filters with several outputs, we will not be able to assume which @@ -127,30 +129,20 @@ int ff_framesync2_configure(FFFrameSync *fs) return 0; } -static void framesync_advance(FFFrameSync *fs) +static int framesync_advance(FFFrameSync *fs) { -int latest; unsigned i; int64_t pts; +int ret; -if (fs->eof) -return; -while (!fs->frame_ready) { -latest = -1; -for (i = 0; i < fs->nb_in; i++) { -if (!fs->in[i].have_next) { -if (latest < 0 || fs->in[i].pts < fs->in[latest].pts) -latest = i; -} -} -if (latest >= 0) { -fs->in_request = latest; -break; -} +while (!(fs->frame_ready || fs->eof)) { +ret = consume_from_fifos(fs); +if (ret <= 0) +return ret; -pts = fs->in[0].pts_next; -for (i = 1; i < fs->nb_in; i++) -if (fs->in[i].pts_next < pts) +pts = INT64_MAX; +for (i = 0; i < fs->nb_in; i++) +if (fs->in[i].have_next && fs->in[i].pts_next < pts) pts = fs->in[i].pts_next; if (pts == INT64_MAX) { framesync_eof(fs); @@ -181,6 +173,7 @@ static void framesync_advance(FFFrameSync *fs) fs->frame_ready = 0; fs->pts = pts; } +return 0; } static int64_t framesync_pts_extrapolate(FFFrameSync *fs, unsigned in, @@ -264,7 +257,7 @@ void ff_framesync2_uninit(FFFrameSync *fs) av_freep(&fs->in); } -int ff_framesync2_activate(FFFrameSync *fs) +static int consume_from_fifos(FFFrameSync *fs) { AVFilterContext *ctx = fs->parent; AVFrame *frame = NULL; @@ -300,8 +293,16 @@ int ff_framesync2_activate(FFFrameSync *fs) ff_inlink_request_frame(ctx->inputs[i]); return 0; } +return 1; +} -framesync_advance(fs); +int ff_framesync2_activate(FFFrameSync *fs) +{ +int ret; + +ret = framesync_advance(fs); +if (ret < 0) +return ret; if (fs->eof || !fs->frame_ready) return 0; ret = fs->on_event(fs); @@ -311,3 +312,59 @@ int ff_framesync2_activate(FFFrameSync *fs) return 0; } + +int ff_framesync2_init_dualinput(FFFrameSync *fs, AVFilterContext *parent) +{ +int ret; + +ret = ff_framesync2_init(fs, parent, 2); +if (ret < 0) +return ret; +fs->in[0].time_base = parent->inputs[0]->time_base; +fs->in[1].time_base = parent->inputs[1]->time_base; +fs->in[0].sync = 2; +fs->in[0].before = EXT_STOP; +fs->in[0].after = EXT_INFINITY; +fs->in[1].sync = 1; +fs->in[1].before = EXT_NULL; +fs->in[1].after = EXT_INFINITY; +return 0; +} + +int ff_framesync2_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1) +{ +AVFilterContext *ctx = fs->parent; +AVFrame *mainpic = NULL, *secondpic = NULL; +int ret = 0; + +if ((ret = ff_framesync2_get_frame(fs, 0, &mainpic, 1)) < 0 || +(ret = ff_framesync2_get_frame(fs, 1, &secondpic, 0)) < 0) { +av_frame_free(&mainpic); +return ret; +} +if (ret < 0) +return ret; +av_assert0(mainpic); +mainpic->pts = av_rescale_q(fs->pts, fs->time_base, ctx->outputs[0]->time_base); +if (ctx->is_disabled) +secondpic = NULL; +*f0 = mainpic; +*f1 = secondpic; +return 0; +} + +int 
ff_framesync2_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1) +{ +int ret; + +ret = ff_framesync2_dualinput_get(fs, f0, f1); +if (ret < 0) +return ret; +ret = ff_inlink_make_frame_writable(fs->parent->inputs[0], f0); +if (ret < 0) { +av_frame_free(f0); +av_frame_free(f1); +return ret; +} +return 0; +} diff --git a/libavfilter/framesync2.h b/libavfilter/framesync2.h index 2b37636ebb..9a54b2b701 100644 --- a/libavfilter/framesync2.h +++ b/libavfilter/framesync2.h @@ -245,4 +245,29 @@ int ff_framesync2_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe, */ int ff_framesync2_activate(FFFrameSync *fs); +/** + * Initialize a frame sync structure for dualinput. + * + * Compar
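
A minimal sketch of how a two-input filter is expected to use these new helpers, pieced together from the conversions later in this log. The ExampleContext type and the "process_pair"/"example" names are illustrative, not part of the commit; the usual libavfilter internal headers are assumed.

#include "avfilter.h"
#include "framesync2.h"
#include "internal.h"

typedef struct ExampleContext {
    const AVClass *class;
    FFFrameSync fs;
} ExampleContext;

/* Called by the framesync core once a pair of frames is ready. */
static int process_pair(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main_frame, *second_frame;
    int ret;

    ret = ff_framesync2_dualinput_get(fs, &main_frame, &second_frame);
    if (ret < 0)
        return ret;
    if (!second_frame)   /* filter disabled, or no secondary frame available yet */
        return ff_filter_frame(ctx->outputs[0], main_frame);

    /* ... process main_frame using second_frame here ... */

    return ff_filter_frame(ctx->outputs[0], main_frame);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ExampleContext *s = ctx->priv;
    int ret;

    if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0)
        return ret;
    s->fs.on_event     = process_pair;
    outlink->w         = ctx->inputs[0]->w;
    outlink->h         = ctx->inputs[0]->h;
    outlink->time_base = ctx->inputs[0]->time_base;
    return ff_framesync2_configure(&s->fs);
}

static int activate(AVFilterContext *ctx)
{
    ExampleContext *s = ctx->priv;
    return ff_framesync2_activate(&s->fs);
}

ff_framesync2_dualinput_get() hands ownership of the main frame to the callback and keeps the secondary frame internal, which is why only the main frame is pushed downstream; the secondary pointer comes back NULL when the filter is disabled, so the callback has to be ready to pass the main frame through unchanged.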
[FFmpeg-cvslog] lavfi/f_streamselect: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 13:47:34 2017 +0200| [6bde475cf2f930ff929517f89f493a2ac4a2c3df] | committer: Nicolas George lavfi/f_streamselect: convert to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=6bde475cf2f930ff929517f89f493a2ac4a2c3df --- libavfilter/Makefile | 4 ++-- libavfilter/f_streamselect.c | 33 + 2 files changed, 15 insertions(+), 22 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 1d92dc172c..55b6ce933a 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -67,7 +67,7 @@ OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o OBJS-$(CONFIG_ASIDEDATA_FILTER) += f_sidedata.o OBJS-$(CONFIG_ASPLIT_FILTER) += split.o OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o -OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o +OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o framesync2.o OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o OBJS-$(CONFIG_ATRIM_FILTER) += trim.o OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o @@ -303,7 +303,7 @@ OBJS-$(CONFIG_SPLIT_FILTER) += split.o OBJS-$(CONFIG_SPP_FILTER)+= vf_spp.o OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o dualinput.o framesync.o OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o -OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o +OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync2.o OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o OBJS-$(CONFIG_SWAPRECT_FILTER) += vf_swaprect.o diff --git a/libavfilter/f_streamselect.c b/libavfilter/f_streamselect.c index 1a517bfc95..10607de9b8 100644 --- a/libavfilter/f_streamselect.c +++ b/libavfilter/f_streamselect.c @@ -22,7 +22,7 @@ #include "avfilter.h" #include "audio.h" #include "formats.h" -#include "framesync.h" +#include "framesync2.h" #include "internal.h" #include "video.h" @@ -48,12 +48,6 @@ static const AVOption streamselect_options[] = { AVFILTER_DEFINE_CLASS(streamselect); -static int filter_frame(AVFilterLink *inlink, AVFrame *in) -{ -StreamSelectContext *s = inlink->dst->priv; -return ff_framesync_filter_frame(&s->fs, inlink, in); -} - static int process_frame(FFFrameSync *fs) { AVFilterContext *ctx = fs->parent; @@ -62,7 +56,7 @@ static int process_frame(FFFrameSync *fs) int i, j, ret = 0; for (i = 0; i < ctx->nb_inputs; i++) { -if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0) +if ((ret = ff_framesync2_get_frame(&s->fs, i, &in[i], 0)) < 0) return ret; } @@ -90,10 +84,10 @@ static int process_frame(FFFrameSync *fs) return ret; } -static int request_frame(AVFilterLink *outlink) +static int activate(AVFilterContext *ctx) { -StreamSelectContext *s = outlink->src->priv; -return ff_framesync_request_frame(&s->fs, outlink); +StreamSelectContext *s = ctx->priv; +return ff_framesync2_activate(&s->fs); } static int config_output(AVFilterLink *outlink) @@ -130,7 +124,7 @@ static int config_output(AVFilterLink *outlink) if (s->fs.opaque == s) return 0; -if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0) +if ((ret = ff_framesync2_init(&s->fs, ctx, ctx->nb_inputs)) < 0) return ret; in = s->fs.in; @@ -148,12 +142,11 @@ static int config_output(AVFilterLink *outlink) if (!s->frames) return AVERROR(ENOMEM); -return ff_framesync_configure(&s->fs); +return ff_framesync2_configure(&s->fs); } -static int parse_definition(AVFilterContext *ctx, int nb_pads, void *filter_frame, int is_audio) +static int parse_definition(AVFilterContext *ctx, int nb_pads, int is_input, int is_audio) { -const int is_input = !!filter_frame; const char *padtype = 
is_input ? "in" : "out"; int i = 0, ret = 0; @@ -169,11 +162,9 @@ static int parse_definition(AVFilterContext *ctx, int nb_pads, void *filter_fram av_log(ctx, AV_LOG_DEBUG, "Add %s pad %s\n", padtype, pad.name); if (is_input) { -pad.filter_frame = filter_frame; ret = ff_insert_inpad(ctx, i, &pad); } else { pad.config_props = config_output; -pad.request_frame = request_frame; ret = ff_insert_outpad(ctx, i, &pad); } @@ -281,8 +272,8 @@ static av_cold int init(AVFilterContext *ctx) if (!s->last_pts) return AVERROR(ENOMEM); -if ((ret = parse_definition(ctx, s->nb_inputs, filter_frame, s->is_audio)) < 0 || -(ret = parse_definition(ctx, nb_outputs, NULL, s->is_audio)) < 0) +if ((ret = parse_definition(ctx, s->nb_inputs, 1, s->is_audio)) < 0 || +(ret = parse_definition(ctx, nb_outputs, 0, s
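
The same conversion works for filters with an arbitrary number of inputs, without the dualinput helpers. A rough sketch of the pattern streamselect now follows; identifiers other than the framesync2 API are illustrative, and the sync/before/after values are just one plausible choice, not the filter's actual settings.

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *in;
    int i, ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        /* get = 0: the frame stays owned by the framesync core and must be
         * cloned before being forwarded downstream */
        if ((ret = ff_framesync2_get_frame(fs, i, &in, 0)) < 0)
            return ret;
        /* ... clone "in" and push it to whichever output maps to input i ... */
    }
    return 0;
}

static int setup_sync(AVFilterContext *ctx, FFFrameSync *fs)
{
    int i, ret;

    if ((ret = ff_framesync2_init(fs, ctx, ctx->nb_inputs)) < 0)
        return ret;
    fs->opaque   = ctx->priv;
    fs->on_event = process_frame;
    for (i = 0; i < ctx->nb_inputs; i++) {
        fs->in[i].time_base = ctx->inputs[i]->time_base;
        fs->in[i].sync      = 1;        /* every input drives the clock */
        fs->in[i].before    = EXT_STOP;
        fs->in[i].after     = EXT_STOP; /* terminate when any input ends */
    }
    return ff_framesync2_configure(fs);
}

The per-pad filter_frame/request_frame callbacks disappear entirely; the filter only registers an activate callback that forwards to ff_framesync2_activate(&s->fs), exactly as in the sketch above.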
[FFmpeg-cvslog] lavfi: add a preinit callback to filters.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 00:29:01 2017 +0200| [f8d7b5febba075035a94de5d7d1dc9083ad2f3ed] | committer: Nicolas George

lavfi: add a preinit callback to filters.

It is necessary for filters with child objects, to set the class and default options values.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=f8d7b5febba075035a94de5d7d1dc9083ad2f3ed
---
 libavfilter/avfilter.c |  8 ++++++++
 libavfilter/avfilter.h | 15 +++++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 185ba8df00..dcd975e104 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -692,6 +692,7 @@ static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, voi
 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
 {
     AVFilterContext *ret;
+    int preinited = 0;
 
     if (!filter)
         return NULL;
@@ -708,6 +709,11 @@ AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
         if (!ret->priv)
             goto err;
     }
+    if (filter->preinit) {
+        if (filter->preinit(ret) < 0)
+            goto err;
+        preinited = 1;
+    }
 
     av_opt_set_defaults(ret);
     if (filter->priv_class) {
@@ -745,6 +751,8 @@ AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
     return ret;
 
 err:
+    if (preinited)
+        filter->uninit(ret);
     av_freep(&ret->inputs);
     av_freep(&ret->input_pads);
     ret->nb_inputs = 0;
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index 60662c19ac..73a723d583 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -195,6 +195,21 @@ typedef struct AVFilter {
      */
 
     /**
+     * Filter pre-initialization function
+     *
+     * This callback will be called immediately after the filter context is
+     * allocated, to allow allocating and initing sub-objects.
+     *
+     * If this callback is not NULL, the uninit callback will be called on
+     * allocation failure.
+     *
+     * @return 0 on success,
+     *         AVERROR code on failure (but the code will be
+     *         dropped and treated as ENOMEM by the calling code)
+     */
+    int (*preinit)(AVFilterContext *ctx);
+
+    /**
      * Filter initialization function.
      *
      * This callback will be called only once during the filter lifetime, after
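
A sketch of what the hook is for, using the framesync2 child object added elsewhere in this series; the names are illustrative, and ff_framesync2_preinit() is the helper from the "framesync2: add common options" commit below.

#include "avfilter.h"
#include "framesync2.h"

typedef struct ExampleContext {
    const AVClass *class;
    FFFrameSync fs;   /* child object with its own AVClass and options */
} ExampleContext;

static av_cold int example_preinit(AVFilterContext *ctx)
{
    ExampleContext *s = ctx->priv;

    /* Runs right after ff_filter_alloc(), before any option is parsed:
     * give the child its class pointer and default option values. */
    ff_framesync2_preinit(&s->fs);
    return 0;
}

The callback is wired through the new AVFilter.preinit field. Note the contract spelled out in the diff: once preinit has run, uninit() is also invoked when a later allocation step fails, so uninit() has to tolerate a partially initialized context.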
[FFmpeg-cvslog] lavfi/vf_overlay: move to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 17 20:46:31 2017 +0200| [19804024d5b26e9568ce2f21f15c6664717006cd] | committer: Nicolas George lavfi/vf_overlay: move to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=19804024d5b26e9568ce2f21f15c6664717006cd --- libavfilter/Makefile | 2 +- libavfilter/vf_overlay.c | 70 ++-- 2 files changed, 39 insertions(+), 33 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 55b6ce933a..e5fe47f2cc 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -245,7 +245,7 @@ OBJS-$(CONFIG_OCR_FILTER)+= vf_ocr.o OBJS-$(CONFIG_OCV_FILTER)+= vf_libopencv.o OBJS-$(CONFIG_OPENCL)+= deshake_opencl.o unsharp_opencl.o OBJS-$(CONFIG_OSCILLOSCOPE_FILTER) += vf_datascope.o -OBJS-$(CONFIG_OVERLAY_FILTER)+= vf_overlay.o dualinput.o framesync.o +OBJS-$(CONFIG_OVERLAY_FILTER)+= vf_overlay.o framesync2.o OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o OBJS-$(CONFIG_PAD_FILTER)+= vf_pad.o OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 52f09fae1f..34e652bdd0 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -36,8 +36,8 @@ #include "libavutil/opt.h" #include "libavutil/timestamp.h" #include "internal.h" -#include "dualinput.h" #include "drawutils.h" +#include "framesync2.h" #include "video.h" static const char *const var_names[] = { @@ -121,7 +121,7 @@ typedef struct OverlayContext { int format; ///< OverlayFormat int eval_mode; ///< EvalMode -FFDualInputContext dinput; +FFFrameSync fs; int main_pix_step[4]; ///< steps per pixel for each plane of the main output int overlay_pix_step[4];///< steps per pixel for each plane of the overlay @@ -132,6 +132,8 @@ typedef struct OverlayContext { char *x_expr, *y_expr; int eof_action; ///< action to take on EOF from source +int opt_shortest; +int opt_repeatlast; AVExpr *x_pexpr, *y_pexpr; @@ -142,7 +144,7 @@ static av_cold void uninit(AVFilterContext *ctx) { OverlayContext *s = ctx->priv; -ff_dualinput_uninit(&s->dinput); +ff_framesync2_uninit(&s->fs); av_expr_free(s->x_pexpr); s->x_pexpr = NULL; av_expr_free(s->y_pexpr); s->y_pexpr = NULL; } @@ -390,14 +392,20 @@ static int config_output(AVFilterLink *outlink) OverlayContext *s = ctx->priv; int ret; -if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) +if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0) return ret; +if (s->opt_shortest) +s->fs.in[0].after = s->fs.in[1].after = EXT_STOP; +if (!s->opt_repeatlast) { +s->fs.in[1].after = EXT_NULL; +s->fs.in[1].sync = 0; +} outlink->w = ctx->inputs[MAIN]->w; outlink->h = ctx->inputs[MAIN]->h; outlink->time_base = ctx->inputs[MAIN]->time_base; -return 0; +return ff_framesync2_configure(&s->fs); } // divide by 255 and round to nearest @@ -770,11 +778,19 @@ static int config_input_main(AVFilterLink *inlink) return 0; } -static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic, - const AVFrame *second) +static int do_blend(FFFrameSync *fs) { +AVFilterContext *ctx = fs->parent; +AVFrame *mainpic, *second; OverlayContext *s = ctx->priv; AVFilterLink *inlink = ctx->inputs[0]; +int ret; + +ret = ff_framesync2_dualinput_get_writable(fs, &mainpic, &second); +if (ret < 0) +return ret; +if (!second) +return ff_filter_frame(ctx->outputs[0], mainpic); if (s->eval_mode == EVAL_MODE_FRAME) { int64_t pos = mainpic->pkt_pos; @@ -799,39 +815,32 @@ static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic, if (s->x < mainpic->width && s->x + second->width >= 0 || s->y < mainpic->height 
&& s->y + second->height >= 0) s->blend_image(ctx, mainpic, second, s->x, s->y); -return mainpic; -} - -static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) -{ -OverlayContext *s = inlink->dst->priv; -av_log(inlink->dst, AV_LOG_DEBUG, "Incoming frame (time:%s) from link #%d\n", av_ts2timestr(inpicref->pts, &inlink->time_base), FF_INLINK_IDX(inlink)); -return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref); -} - -static int request_frame(AVFilterLink *outlink) -{ -OverlayContext *s = outlink->src->priv; -return ff_dualinput_request_frame(&s->dinput, outlink); +return ff_filter_frame(ctx->outputs[0], mainpic); } static av_cold int init(AVFilterContext *ctx) { OverlayContext *s = ctx->priv; -if (!s->dinput.repeatlast || s->eof_action == EOF_ACTION_PASS) { -s->dinput.repeatl
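
Because overlay draws the second frame into the main one, its event callback uses the writable variant of the getter. Stripped down to the pattern (illustrative names):

static int draw_event(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main_frame, *second_frame;
    int ret;

    /* same as ff_framesync2_dualinput_get(), plus a call to
     * ff_inlink_make_frame_writable() on the main frame */
    ret = ff_framesync2_dualinput_get_writable(fs, &main_frame, &second_frame);
    if (ret < 0)
        return ret;
    if (!second_frame)
        return ff_filter_frame(ctx->outputs[0], main_frame);

    /* ... composite second_frame onto main_frame in place ... */

    return ff_filter_frame(ctx->outputs[0], main_frame);
}

The hand-rolled opt_shortest/opt_repeatlast handling visible in this conversion's config_output() is replaced by the shared framesync2 options later in this log.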
[FFmpeg-cvslog] lavfi: search options on child objects.
ffmpeg | branch: master | Nicolas George | Sun Jul 30 16:57:12 2017 +0200| [dfa3aaa22a2bac6c98ed0eb4a42cd93347d9a954] | committer: Nicolas George

lavfi: search options on child objects.

The child objects must be allocated and inited in the preinit() callback.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=dfa3aaa22a2bac6c98ed0eb4a42cd93347d9a954
---
 libavfilter/avfilter.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index dcd975e104..6a97456054 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -897,7 +897,7 @@ static int process_options(AVFilterContext *ctx, AVDictionary **options,
             }
         } else {
             av_dict_set(options, key, value, 0);
-            if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
+            if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
                 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
                     if (ret == AVERROR_OPTION_NOT_FOUND)
                         av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
@@ -948,7 +948,7 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
     }
 
     if (ctx->filter->priv_class) {
-        ret = av_opt_set_dict(ctx->priv, options);
+        ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
         if (ret < 0) {
             av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
             return ret;
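
For the child search to find anything, the filter's private class has to expose its children through the usual AVClass callbacks. A hand-written sketch of that glue for a filter embedding an FFFrameSync, reusing the ExampleContext from the preinit sketch above; the FRAMESYNC_DEFINE_CLASS macro used by the conversions below presumably bundles equivalent code, so all names here are illustrative.

#include "libavutil/opt.h"

static const AVOption example_options[] = {
    { NULL }
};

/* Let av_opt_set(..., AV_OPT_SEARCH_CHILDREN) descend into the embedded
 * FFFrameSync object. */
static void *example_child_next(void *obj, void *prev)
{
    ExampleContext *s = obj;
    return prev ? NULL : &s->fs;
}

static const AVClass *example_child_class_next(const AVClass *prev)
{
    return prev ? NULL : framesync2_get_class();
}

static const AVClass example_class = {
    .class_name       = "example",
    .item_name        = av_default_item_name,
    .option           = example_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_next       = example_child_next,
    .child_class_next = example_child_class_next,
};

This only works because preinit has already set the child's class pointer and defaults before options are applied, which is exactly the requirement stated in the commit message.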
[FFmpeg-cvslog] lavfi/framesync2: add common options.
ffmpeg | branch: master | Nicolas George | Sun Jul 30 16:58:49 2017 +0200| [05a23b2565849c9ad96526c9e2ccdb9272add565] | committer: Nicolas George lavfi/framesync2: add common options. Also add functions and macros to help filters chaining these options to their own. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=05a23b2565849c9ad96526c9e2ccdb9272add565 --- libavfilter/framesync2.c | 55 ++-- libavfilter/framesync2.h | 44 +- 2 files changed, 96 insertions(+), 3 deletions(-) diff --git a/libavfilter/framesync2.c b/libavfilter/framesync2.c index 0f78a1733b..fae06aa1f5 100644 --- a/libavfilter/framesync2.c +++ b/libavfilter/framesync2.c @@ -19,24 +19,43 @@ */ #include "libavutil/avassert.h" +#include "libavutil/opt.h" #include "avfilter.h" #include "filters.h" #include "framesync2.h" #include "internal.h" #define OFFSET(member) offsetof(FFFrameSync, member) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM + +enum EOFAction { +EOF_ACTION_REPEAT, +EOF_ACTION_ENDALL, +EOF_ACTION_PASS +}; static const char *framesync_name(void *ptr) { return "framesync"; } +static const AVOption framesync_options[] = { +{ "eof_action", "Action to take when encountering EOF from secondary input ", +OFFSET(opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT }, +EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" }, +{ "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" }, +{ "endall", "End both streams.",0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" }, +{ "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" }, +{ "shortest", "force termination when the shortest input terminates", OFFSET(opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS }, +{ "repeatlast", "repeat overlay of the last overlay frame", OFFSET(opt_repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS }, +{ NULL } +}; static const AVClass framesync_class = { .version = LIBAVUTIL_VERSION_INT, .class_name= "framesync", .item_name = framesync_name, .category = AV_CLASS_CATEGORY_FILTER, -.option= NULL, +.option= framesync_options, .parent_log_context_offset = OFFSET(parent), }; @@ -48,6 +67,19 @@ enum { static int consume_from_fifos(FFFrameSync *fs); +const AVClass *framesync2_get_class(void) +{ +return &framesync_class; +} + +void ff_framesync2_preinit(FFFrameSync *fs) +{ +if (fs->class) +return; +fs->class = &framesync_class; +av_opt_set_defaults(fs); +} + int ff_framesync2_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in) { /* For filters with several outputs, we will not be able to assume which @@ -55,7 +87,7 @@ int ff_framesync2_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in) ff_outlink_set_status(). To be designed when needed. 
*/ av_assert0(parent->nb_outputs == 1); -fs->class = &framesync_class; +ff_framesync2_preinit(fs); fs->parent = parent; fs->nb_in = nb_in; @@ -93,6 +125,25 @@ int ff_framesync2_configure(FFFrameSync *fs) unsigned i; int64_t gcd, lcm; +if (!fs->opt_repeatlast || fs->opt_eof_action == EOF_ACTION_PASS) { +fs->opt_repeatlast = 0; +fs->opt_eof_action = EOF_ACTION_PASS; +} +if (fs->opt_shortest || fs->opt_eof_action == EOF_ACTION_ENDALL) { +fs->opt_shortest = 1; +fs->opt_eof_action = EOF_ACTION_ENDALL; +} +if (fs->opt_shortest) { +for (i = 0; i < fs->nb_in; i++) +fs->in[i].after = EXT_STOP; +} +if (!fs->opt_repeatlast) { +for (i = 1; i < fs->nb_in; i++) { +fs->in[i].after = EXT_NULL; +fs->in[i].sync = 0; +} +} + if (!fs->time_base.num) { for (i = 0; i < fs->nb_in; i++) { if (fs->in[i].sync) { diff --git a/libavfilter/framesync2.h b/libavfilter/framesync2.h index 9a54b2b701..745e896bc8 100644 --- a/libavfilter/framesync2.h +++ b/libavfilter/framesync2.h @@ -196,12 +196,30 @@ typedef struct FFFrameSync { */ FFFrameSyncIn *in; +int opt_repeatlast; +int opt_shortest; +int opt_eof_action; + } FFFrameSync; /** - * Initialize a frame sync structure. + * Get the class for the framesync2 object. + */ +const AVClass *framesync2_get_class(void); + +/** + * Pre-initialize a frame sync structure. * + * It sets the class pointer and inits the options to their default values. * The entire structure is expected to be already set to 0. + * This step is optional, but necessary to use the options. + *
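
From the filter author's side, chaining these common options mostly means swapping the class-definition macro, as the conversions below (blend, psnr, ssim, haldclut) do. A sketch with illustrative names; the filter keeps its own option table, while eof_action, shortest and repeatlast are picked up from the framesync child.

typedef struct ExampleContext {
    const AVClass *class;
    FFFrameSync fs;
} ExampleContext;

static const AVOption example_options[] = {
    /* only filter-specific options here; eof_action, shortest and
     * repeatlast now come from the framesync child object */
    { NULL }
};

/* Replaces AVFILTER_DEFINE_CLASS(example): defines example_class from
 * example_options and ties the option search to the fs member. */
FRAMESYNC_DEFINE_CLASS(example, ExampleContext, fs);

The exact glue the macro generates (child_next, preinit) is an assumption here; what the later conversions do confirm is the usage form FRAMESYNC_DEFINE_CLASS(name, ContextType, fs_field) in place of AVFILTER_DEFINE_CLASS(name).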
[FFmpeg-cvslog] lavfi/vf_blend: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 11:02:14 2017 +0200| [c1d8d33a51e3ef36e6ce2e8428c5c33cfae5b02d] | committer: Nicolas George lavfi/vf_blend: convert to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=c1d8d33a51e3ef36e6ce2e8428c5c33cfae5b02d --- libavfilter/Makefile | 4 ++-- libavfilter/vf_blend.c | 48 +++- 2 files changed, 29 insertions(+), 23 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index e5fe47f2cc..57dd395d6a 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -136,7 +136,7 @@ OBJS-$(CONFIG_BENCH_FILTER) += f_bench.o OBJS-$(CONFIG_BITPLANENOISE_FILTER) += vf_bitplanenoise.o OBJS-$(CONFIG_BLACKDETECT_FILTER)+= vf_blackdetect.o OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o -OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o +OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o framesync2.o OBJS-$(CONFIG_BOXBLUR_FILTER)+= vf_boxblur.o OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o @@ -308,7 +308,7 @@ OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o OBJS-$(CONFIG_SWAPRECT_FILTER) += vf_swaprect.o OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o -OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o dualinput.o framesync.o +OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o framesync2.o OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o OBJS-$(CONFIG_THRESHOLD_FILTER) += vf_threshold.o framesync2.o OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c index 109a51fa92..4939b10150 100644 --- a/libavfilter/vf_blend.c +++ b/libavfilter/vf_blend.c @@ -25,8 +25,8 @@ #include "avfilter.h" #include "bufferqueue.h" #include "formats.h" +#include "framesync2.h" #include "internal.h" -#include "dualinput.h" #include "video.h" #include "blend.h" @@ -35,7 +35,7 @@ typedef struct BlendContext { const AVClass *class; -FFDualInputContext dinput; +FFFrameSync fs; int hsub, vsub; ///< chroma subsampling values int nb_planes; char *all_expr; @@ -116,12 +116,10 @@ typedef struct ThreadData { static const AVOption blend_options[] = { COMMON_OPTIONS, -{ "shortest","force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS }, -{ "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS }, { NULL } }; -AVFILTER_DEFINE_CLASS(blend); +FRAMESYNC_DEFINE_CLASS(blend, BlendContext, fs); #define COPY(src)\ static void blend_copy ## src(const uint8_t *top, ptrdiff_t top_linesize,\ @@ -407,13 +405,28 @@ static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf, return dst_buf; } +static int blend_frame_for_dualinput(FFFrameSync *fs) +{ +AVFilterContext *ctx = fs->parent; +AVFrame *top_buf, *bottom_buf, *dst_buf; +int ret; + +ret = ff_framesync2_dualinput_get(fs, &top_buf, &bottom_buf); +if (ret < 0) +return ret; +if (!bottom_buf) +return ff_filter_frame(ctx->outputs[0], top_buf); +dst_buf = blend_frame(ctx, top_buf, bottom_buf); +return ff_filter_frame(ctx->outputs[0], dst_buf); +} + static av_cold int init(AVFilterContext *ctx) { BlendContext *s = ctx->priv; s->tblend = !strcmp(ctx->filter->name, "tblend"); -s->dinput.process = blend_frame; +s->fs.on_event = blend_frame_for_dualinput; return 0; } @@ -441,7 +454,7 @@ static av_cold void uninit(AVFilterContext *ctx) BlendContext *s = ctx->priv; int i; -ff_dualinput_uninit(&s->dinput); +ff_framesync2_uninit(&s->fs); 
av_frame_free(&s->prev_frame); for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++) @@ -541,7 +554,7 @@ static int config_output(AVFilterLink *outlink) s->nb_planes = av_pix_fmt_count_planes(toplink->format); if (!s->tblend) -if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) +if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0) return ret; for (plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) { @@ -568,32 +581,24 @@ static int config_output(AVFilterLink *outlink) } } -return 0; +return s->tblend ? 0 : ff_framesync2_configure(&s->fs); } #if CONFIG_BLEND_FILTER -static int request_frame(AVFilterLink *outlink) +static int activate(AVFilterContext *ctx) { -BlendContext *s = outlink->src->priv; -return ff_dualinput_request_frame(&s->dinput, outlink); -} - -static
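
One detail of the blend conversion worth isolating: when a single source file implements both a one-input variant (tblend) and a two-input variant (blend), the framesync machinery is only set up for the two-input case. Condensed from the hunk above, with the unrelated per-plane setup elided:

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    BlendContext *s = ctx->priv;
    int ret;

    if (!s->tblend)
        if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0)
            return ret;

    /* ... per-plane blend parameter setup ... */

    return s->tblend ? 0 : ff_framesync2_configure(&s->fs);
}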
[FFmpeg-cvslog] lavfi/vf_overlay: use framesync2 options.
ffmpeg | branch: master | Nicolas George | Sun Jul 30 17:00:00 2017 +0200| [878fd0545a930391baf1ded32550af1d1d2e8c88] | committer: Nicolas George lavfi/vf_overlay: use framesync2 options. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=878fd0545a930391baf1ded32550af1d1d2e8c88 --- libavfilter/vf_overlay.c | 45 - 1 file changed, 4 insertions(+), 41 deletions(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 34e652bdd0..4166e7c095 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -70,16 +70,6 @@ enum var_name { VAR_VARS_NB }; -enum EOFAction { -EOF_ACTION_REPEAT, -EOF_ACTION_ENDALL, -EOF_ACTION_PASS -}; - -static const char * const eof_action_str[] = { -"repeat", "endall", "pass" -}; - #define MAIN0 #define OVERLAY 1 @@ -131,10 +121,6 @@ typedef struct OverlayContext { double var_values[VAR_VARS_NB]; char *x_expr, *y_expr; -int eof_action; ///< action to take on EOF from source -int opt_shortest; -int opt_repeatlast; - AVExpr *x_pexpr, *y_pexpr; void (*blend_image)(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y); @@ -377,12 +363,11 @@ static int config_input_overlay(AVFilterLink *inlink) } av_log(ctx, AV_LOG_VERBOSE, - "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s eof_action:%s\n", + "main w:%d h:%d fmt:%s overlay w:%d h:%d fmt:%s\n", ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h, av_get_pix_fmt_name(ctx->inputs[MAIN]->format), ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h, - av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format), - eof_action_str[s->eof_action]); + av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format)); return 0; } @@ -394,12 +379,6 @@ static int config_output(AVFilterLink *outlink) if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0) return ret; -if (s->opt_shortest) -s->fs.in[0].after = s->fs.in[1].after = EXT_STOP; -if (!s->opt_repeatlast) { -s->fs.in[1].after = EXT_NULL; -s->fs.in[1].sync = 0; -} outlink->w = ctx->inputs[MAIN]->w; outlink->h = ctx->inputs[MAIN]->h; @@ -822,15 +801,6 @@ static av_cold int init(AVFilterContext *ctx) { OverlayContext *s = ctx->priv; -if (!s->opt_repeatlast || s->eof_action == EOF_ACTION_PASS) { -s->opt_repeatlast = 0; -s->eof_action = EOF_ACTION_PASS; -} -if (s->opt_shortest || s->eof_action == EOF_ACTION_ENDALL) { -s->opt_shortest = 1; -s->eof_action = EOF_ACTION_ENDALL; -} - s->fs.on_event = do_blend; return 0; } @@ -847,16 +817,9 @@ static int activate(AVFilterContext *ctx) static const AVOption overlay_options[] = { { "x", "set the x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS }, { "y", "set the y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS }, -{ "eof_action", "Action to take when encountering EOF from secondary input ", -OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT }, -EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" }, -{ "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" }, -{ "endall", "End both streams.",0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" }, -{ "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" }, { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_FRAME}, 0, EVAL_MODE_NB-1, FLAGS, "eval" }, { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = 
FLAGS, .unit = "eval" }, { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" }, -{ "shortest", "force termination when the shortest input terminates", OFFSET(opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS }, { "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" }, { "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" }, { "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" }, @@ -864,11 +827,10 @@ static const AVOption overlay_options[] = { { "rgb","", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" }, { "gbrp", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_GBRP}, .flags = FLAGS, .unit =
[FFmpeg-cvslog] lavfi/vf_libvmaf: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 11:40:25 2017 +0200| [a8ab52fae7286d4e7eec9256a08b6ad0b1e39d6c] | committer: Nicolas George lavfi/vf_libvmaf: convert to framesync2. After this commit, the code compiles, but on my setup it segfaults before and after. It also prints the very worrying warning: src/libavfilter/vf_libvmaf.c:161:66: warning: passing argument 4 of ‘compute_vmaf’ from incompatible pointer type [-Wincompatible-pointer-types] /tmp/i/include/libvmaf.h:26:8: note: expected ‘int (*)(float *, float *, float *, int, void *)’ but argument is of type ‘int (*)(float *, float *, float *, int, double *, void *)’ ==12116== Thread 6: ==12116== Conditional jump or move depends on uninitialised value(s) ==12116==at 0x526D432: cons_ (ocval.h:1188) ==12116==by 0x526D432: GenericIT (ocval.h:1119) ==12116==by 0x526D432: OC::TranslateForNumPyClassesToArray(OC::Val&) (pickleloader.h:92) ==12116==by 0x5211F5D: loads (pickleloader.h:566) ==12116==by 0x5211F5D: LoadValFromArray (chooseser.h:290) ==12116==by 0x5211F5D: LoadValFromFile (chooseser.h:405) ==12116==by 0x5211F5D: _read_and_assert_model(char const*, OC::Val&, OC::Val&, OC::Val&, OC::Val&, OC::Val&, OC::Val&) (vmaf.cpp:77) ==12116==by 0x5212B0F: VmafRunner::run(Asset, int (*)(float*, float*, float*, int, void*), void*, bool, bool, bool, bool, bool) (vmaf.cpp:149) ==12116==by 0x52165B6: RunVmaf(char const*, int, int, int (*)(float*, float*, float*, int, void*), void*, char const*, char const*, char const*, bool, bool, bool, bool, bool, char const*) (vmaf.cpp:645) ==12116==by 0x518AFFF: compute_vmaf_score (vf_libvmaf.c:161) ==12116==by 0x518AFFF: call_vmaf (vf_libvmaf.c:170) ==12116==by 0x7967493: start_thread (pthread_create.c:333) ==12116==by 0x7F69A8E: clone (clone.S:97) ==12116== ==12116== Conditional jump or move depends on uninitialised value(s) ==12116==at 0x526D432: cons_ (ocval.h:1188) ==12116==by 0x526D432: GenericIT (ocval.h:1119) ==12116==by 0x526D432: OC::TranslateForNumPyClassesToArray(OC::Val&) (pickleloader.h:92) ==12116==by 0x526D50D: OC::TranslateForNumPyClassesToArray(OC::Val&) (pickleloader.h:94) ==12116==by 0x5211F5D: loads (pickleloader.h:566) ==12116==by 0x5211F5D: LoadValFromArray (chooseser.h:290) ==12116==by 0x5211F5D: LoadValFromFile (chooseser.h:405) ==12116==by 0x5211F5D: _read_and_assert_model(char const*, OC::Val&, OC::Val&, OC::Val&, OC::Val&, OC::Val&, OC::Val&) (vmaf.cpp:77) ==12116==by 0x5212B0F: VmafRunner::run(Asset, int (*)(float*, float*, float*, int, void*), void*, bool, bool, bool, bool, bool) (vmaf.cpp:149) ==12116==by 0x52165B6: RunVmaf(char const*, int, int, int (*)(float*, float*, float*, int, void*), void*, char const*, char const*, char const*, bool, bool, bool, bool, bool, char const*) (vmaf.cpp:645) ==12116==by 0x518AFFF: compute_vmaf_score (vf_libvmaf.c:161) ==12116==by 0x518AFFF: call_vmaf (vf_libvmaf.c:170) ==12116==by 0x7967493: start_thread (pthread_create.c:333) ==12116==by 0x7F69A8E: clone (clone.S:97) ==12116== ==12116== Conditional jump or move depends on uninitialised value(s) ==12116==at 0x526D432: cons_ (ocval.h:1188) ==12116==by 0x526D432: GenericIT (ocval.h:1119) ==12116==by 0x526D432: OC::TranslateForNumPyClassesToArray(OC::Val&) (pickleloader.h:92) ==12116==by 0x526D50D: OC::TranslateForNumPyClassesToArray(OC::Val&) (pickleloader.h:94) ==12116==by 0x526D50D: OC::TranslateForNumPyClassesToArray(OC::Val&) (pickleloader.h:94) ==12116==by 0x5211F5D: loads (pickleloader.h:566) ==12116==by 0x5211F5D: LoadValFromArray (chooseser.h:290) ==12116==by 0x5211F5D: 
LoadValFromFile (chooseser.h:405) ==12116==by 0x5211F5D: _read_and_assert_model(char const*, OC::Val&, OC::Val&, OC::Val&, OC::Val&, OC::Val&, OC::Val&) (vmaf.cpp:77) ==12116==by 0x5212B0F: VmafRunner::run(Asset, int (*)(float*, float*, float*, int, void*), void*, bool, bool, bool, bool, bool) (vmaf.cpp:149) ==12116==by 0x52165B6: RunVmaf(char const*, int, int, int (*)(float*, float*, float*, int, void*), void*, char const*, char const*, char const*, bool, bool, bool, bool, bool, char const*) (vmaf.cpp:645) ==12116==by 0x518AFFF: compute_vmaf_score (vf_libvmaf.c:161) ==12116==by 0x518AFFF: call_vmaf (vf_libvmaf.c:170) ==12116==by 0x7967493: start_thread (pthread_create.c:333) ==12116==by 0x7F69A8E: clone (clone.S:97) ==12116== ==12116== Use of uninitialised value of size 8 ==12116==at 0x518AC79: read_frame_8bit (vf_libvmaf.c:147) ==12116==by 0x52AB5E8: combo (combo.c:149) ==12116==by 0x5212E95: VmafRunner::run(Asset, int (*)(float*, float*, float*, int, void*), void*, bool, bool, bool, bool, bool) (vmaf.cpp:278) ==12116==by 0x52165B6: RunVmaf(char const*, int, int, int (*)(float*, float*, float*, int, void*), void*, char const*, char const*, char const*, bool, bool, bool, bool, bool, char const*) (vmaf.cpp:645) ==12116=
[FFmpeg-cvslog] lavfi/vf_lut3d: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 12:03:03 2017 +0200| [eacb3ec961035b39e4bf69b589df9b110f591710] | committer: Nicolas George lavfi/vf_lut3d: convert to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=eacb3ec961035b39e4bf69b589df9b110f591710 --- libavfilter/Makefile | 2 +- libavfilter/vf_lut3d.c | 49 ++--- 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 06e3428e56..4b8c34d35b 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -197,7 +197,7 @@ OBJS-$(CONFIG_FSPP_FILTER) += vf_fspp.o OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o OBJS-$(CONFIG_GEQ_FILTER)+= vf_geq.o OBJS-$(CONFIG_GRADFUN_FILTER)+= vf_gradfun.o -OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o +OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o framesync2.o OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c index 7a294b0761..5ba91f7e47 100644 --- a/libavfilter/vf_lut3d.c +++ b/libavfilter/vf_lut3d.c @@ -31,8 +31,8 @@ #include "libavutil/avstring.h" #include "avfilter.h" #include "drawutils.h" -#include "dualinput.h" #include "formats.h" +#include "framesync2.h" #include "internal.h" #include "video.h" @@ -70,7 +70,7 @@ typedef struct LUT3DContext { int clut_step; int clut_is16bit; int clut_width; -FFDualInputContext dinput; +FFFrameSync fs; #endif } LUT3DContext; @@ -681,24 +681,21 @@ static int config_output(AVFilterLink *outlink) LUT3DContext *lut3d = ctx->priv; int ret; +ret = ff_framesync2_init_dualinput(&lut3d->fs, ctx); +if (ret < 0) +return ret; outlink->w = ctx->inputs[0]->w; outlink->h = ctx->inputs[0]->h; outlink->time_base = ctx->inputs[0]->time_base; -if ((ret = ff_dualinput_init(ctx, &lut3d->dinput)) < 0) +if ((ret = ff_framesync2_configure(&lut3d->fs)) < 0) return ret; return 0; } -static int filter_frame_hald(AVFilterLink *inlink, AVFrame *inpicref) -{ -LUT3DContext *s = inlink->dst->priv; -return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref); -} - -static int request_frame(AVFilterLink *outlink) +static int activate(AVFilterContext *ctx) { -LUT3DContext *s = outlink->src->priv; -return ff_dualinput_request_frame(&s->dinput, outlink); +LUT3DContext *s = ctx->priv; +return ff_framesync2_activate(&s->fs); } static int config_clut(AVFilterLink *inlink) @@ -751,45 +748,50 @@ static int config_clut(AVFilterLink *inlink) return 0; } -static AVFrame *update_apply_clut(AVFilterContext *ctx, AVFrame *main, - const AVFrame *second) +static int update_apply_clut(FFFrameSync *fs) { +AVFilterContext *ctx = fs->parent; AVFilterLink *inlink = ctx->inputs[0]; +AVFrame *main, *second, *out; +int ret; + +ret = ff_framesync2_dualinput_get(fs, &main, &second); +if (ret < 0) +return ret; +if (!second) +return ff_filter_frame(ctx->outputs[0], main); update_clut(ctx->priv, second); -return apply_lut(inlink, main); +out = apply_lut(inlink, main); +return ff_filter_frame(ctx->outputs[0], out); } static av_cold int haldclut_init(AVFilterContext *ctx) { LUT3DContext *lut3d = ctx->priv; -lut3d->dinput.process = update_apply_clut; +lut3d->fs.on_event = update_apply_clut; return 0; } static av_cold void haldclut_uninit(AVFilterContext *ctx) { LUT3DContext *lut3d = ctx->priv; -ff_dualinput_uninit(&lut3d->dinput); +ff_framesync2_uninit(&lut3d->fs); } static const AVOption haldclut_options[] = { -{ "shortest", "force termination when the shortest input 
terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS }, -{ "repeatlast", "continue applying the last clut after eos", OFFSET(dinput.repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS }, COMMON_OPTIONS }; -AVFILTER_DEFINE_CLASS(haldclut); +FRAMESYNC_DEFINE_CLASS(haldclut, LUT3DContext, fs); static const AVFilterPad haldclut_inputs[] = { { .name = "main", .type = AVMEDIA_TYPE_VIDEO, -.filter_frame = filter_frame_hald, .config_props = config_input, },{ .name = "clut", .type = AVMEDIA_TYPE_VIDEO, -.filter_frame = filter_frame_hald, .config_props = config_clut, }, { NULL } @@ -799,7 +801,6 @@ static const AVFilterPad haldclut_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, -.request_frame = request_fra
[FFmpeg-cvslog] lavfi/vf_psnr: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 12:44:13 2017 +0200| [3bd11df459866d7303c1ad6779473528c2cfb76d] | committer: Nicolas George lavfi/vf_psnr: convert to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=3bd11df459866d7303c1ad6779473528c2cfb76d --- libavfilter/Makefile | 2 +- libavfilter/vf_psnr.c | 50 +++--- 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index b6229b0570..3da821ee3e 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -260,7 +260,7 @@ OBJS-$(CONFIG_PP7_FILTER)+= vf_pp7.o OBJS-$(CONFIG_PREMULTIPLY_FILTER)+= vf_premultiply.o framesync2.o OBJS-$(CONFIG_PREWITT_FILTER)+= vf_convolution.o OBJS-$(CONFIG_PSEUDOCOLOR_FILTER)+= vf_pseudocolor.o -OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o +OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o framesync2.o OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o OBJS-$(CONFIG_QP_FILTER) += vf_qp.o OBJS-$(CONFIG_RANDOM_FILTER) += vf_random.o diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c index 6ab21d8e22..a8eb315445 100644 --- a/libavfilter/vf_psnr.c +++ b/libavfilter/vf_psnr.c @@ -29,16 +29,16 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" -#include "dualinput.h" #include "drawutils.h" #include "formats.h" +#include "framesync2.h" #include "internal.h" #include "psnr.h" #include "video.h" typedef struct PSNRContext { const AVClass *class; -FFDualInputContext dinput; +FFFrameSync fs; double mse, min_mse, max_mse, mse_comp[4]; uint64_t nb_frames; FILE *stats_file; @@ -68,7 +68,7 @@ static const AVOption psnr_options[] = { { NULL } }; -AVFILTER_DEFINE_CLASS(psnr); +FRAMESYNC_DEFINE_CLASS(psnr, PSNRContext, fs); static inline unsigned pow_2(unsigned base) { @@ -142,13 +142,21 @@ static void set_meta(AVDictionary **metadata, const char *key, char comp, float } } -static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main, -const AVFrame *ref) +static int do_psnr(FFFrameSync *fs) { +AVFilterContext *ctx = fs->parent; PSNRContext *s = ctx->priv; +AVFrame *main, *ref; double comp_mse[4], mse = 0; -int j, c; -AVDictionary **metadata = &main->metadata; +int ret, j, c; +AVDictionary **metadata; + +ret = ff_framesync2_dualinput_get(fs, &main, &ref); +if (ret < 0) +return ret; +if (!ref) +return ff_filter_frame(ctx->outputs[0], main); +metadata = &main->metadata; compute_images_mse(s, (const uint8_t **)main->data, main->linesize, (const uint8_t **)ref->data, ref->linesize, @@ -214,7 +222,7 @@ static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main, fprintf(s->stats_file, "\n"); } -return main; +return ff_filter_frame(ctx->outputs[0], main); } static av_cold int init(AVFilterContext *ctx) @@ -245,7 +253,7 @@ static av_cold int init(AVFilterContext *ctx) } } -s->dinput.process = do_psnr; +s->fs.on_event = do_psnr; return 0; } @@ -331,27 +339,24 @@ static int config_output(AVFilterLink *outlink) AVFilterLink *mainlink = ctx->inputs[0]; int ret; +ret = ff_framesync2_init_dualinput(&s->fs, ctx); +if (ret < 0) +return ret; outlink->w = mainlink->w; outlink->h = mainlink->h; outlink->time_base = mainlink->time_base; outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio; outlink->frame_rate = mainlink->frame_rate; -if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) +if ((ret = ff_framesync2_configure(&s->fs)) < 0) return ret; return 0; } -static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) +static int activate(AVFilterContext *ctx) { -PSNRContext *s = inlink->dst->priv; 
-return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref); -} - -static int request_frame(AVFilterLink *outlink) -{ -PSNRContext *s = outlink->src->priv; -return ff_dualinput_request_frame(&s->dinput, outlink); +PSNRContext *s = ctx->priv; +return ff_framesync2_activate(&s->fs); } static av_cold void uninit(AVFilterContext *ctx) @@ -375,7 +380,7 @@ static av_cold void uninit(AVFilterContext *ctx) get_psnr(s->min_mse, 1, s->average_max)); } -ff_dualinput_uninit(&s->dinput); +ff_framesync2_uninit(&s->fs); if (s->stats_file && s->stats_file != stdout) fclose(s->stats_file); @@ -385,11 +390,9 @@ static const AVFilterPad psnr_inputs[] = { { .name = "main", .type = AVMEDIA_TYPE_VIDEO, -.filter_frame = filter_frame, },{ .name = "reference", .type
[FFmpeg-cvslog] lavfi/vf_paletteuse: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 12:27:58 2017 +0200| [23000c3de5940271f6daa58d629871c9355cd37c] | committer: Nicolas George lavfi/vf_paletteuse: convert to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=23000c3de5940271f6daa58d629871c9355cd37c --- libavfilter/Makefile| 2 +- libavfilter/vf_paletteuse.c | 60 - 2 files changed, 39 insertions(+), 23 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 4b8c34d35b..b6229b0570 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -249,7 +249,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER)+= vf_overlay.o framesync2.o OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o OBJS-$(CONFIG_PAD_FILTER)+= vf_pad.o OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o -OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o dualinput.o framesync.o +OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o framesync2.o OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o OBJS-$(CONFIG_PERSPECTIVE_FILTER)+= vf_perspective.o OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o diff --git a/libavfilter/vf_paletteuse.c b/libavfilter/vf_paletteuse.c index b25c6a9eac..3d16c2dd84 100644 --- a/libavfilter/vf_paletteuse.c +++ b/libavfilter/vf_paletteuse.c @@ -27,8 +27,10 @@ #include "libavutil/internal.h" #include "libavutil/opt.h" #include "libavutil/qsort.h" -#include "dualinput.h" #include "avfilter.h" +#include "filters.h" +#include "framesync2.h" +#include "internal.h" enum dithering_mode { DITHERING_NONE, @@ -80,7 +82,7 @@ typedef int (*set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame typedef struct PaletteUseContext { const AVClass *class; -FFDualInputContext dinput; +FFFrameSync fs; struct cache_node cache[CACHE_SIZE];/* lookup cache */ struct color_node map[AVPALETTE_COUNT]; /* 3D-Tree (KD-Tree with K=3) for reverse colormap */ uint32_t palette[AVPALETTE_COUNT]; @@ -129,6 +131,8 @@ static const AVOption paletteuse_options[] = { AVFILTER_DEFINE_CLASS(paletteuse); +static int load_apply_palette(FFFrameSync *fs); + static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat in_fmts[]= {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE}; @@ -900,11 +904,18 @@ static int config_output(AVFilterLink *outlink) AVFilterContext *ctx = outlink->src; PaletteUseContext *s = ctx->priv; +ret = ff_framesync2_init_dualinput(&s->fs, ctx); +if (ret < 0) +return ret; +s->fs.opt_repeatlast = 1; // only 1 frame in the palette +s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY; +s->fs.on_event = load_apply_palette; + outlink->w = ctx->inputs[0]->w; outlink->h = ctx->inputs[0]->h; outlink->time_base = ctx->inputs[0]->time_base; -if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) +if ((ret = ff_framesync2_configure(&s->fs)) < 0) return ret; return 0; } @@ -951,21 +962,32 @@ static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame) s->palette_loaded = 1; } -static AVFrame *load_apply_palette(AVFilterContext *ctx, AVFrame *main, - const AVFrame *second) +static int load_apply_palette(FFFrameSync *fs) { +AVFilterContext *ctx = fs->parent; AVFilterLink *inlink = ctx->inputs[0]; PaletteUseContext *s = ctx->priv; +AVFrame *main, *second, *out; +int ret; + +// writable for error diffusal dithering +ret = ff_framesync2_dualinput_get_writable(fs, &main, &second); +if (ret < 0) +return ret; +if (!main || !second) { +ret = AVERROR_BUG; +goto error; +} if (!s->palette_loaded) { load_palette(s, second); } -return apply_palette(inlink, main); -} +out = apply_palette(inlink, main); +return ff_filter_frame(ctx->outputs[0], 
out); -static int filter_frame(AVFilterLink *inlink, AVFrame *in) -{ -PaletteUseContext *s = inlink->dst->priv; -return ff_dualinput_filter_frame(&s->dinput, inlink, in); +error: +av_frame_free(&main); +av_frame_free(&second); +return ret; } #define DEFINE_SET_FRAME(color_search, name, value) \ @@ -1013,9 +1035,6 @@ static int dither_value(int p) static av_cold int init(AVFilterContext *ctx) { PaletteUseContext *s = ctx->priv; -s->dinput.repeatlast = 1; // only 1 frame in the palette -s->dinput.skip_initial_unpaired = 1; -s->dinput.process= load_apply_palette; s->set_frame = set_frame_lut[s->color_search_method][s->dither]; @@ -1030,10 +1049,10 @@ static av_cold int init(AVFilterContext *ctx) return 0; } -static int request_frame(AVFilterLink *outlink) +static int activate(AVFilterContext *ctx) { -PaletteUseContext *s = outlink->src->priv;
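
The interesting part of the paletteuse conversion is that a filter can still override the shared behaviour in code before configuring: the palette input carries exactly one frame, so it is repeated and treated as available forever in both directions. Re-stated readably, condensed from the hunk above:

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    PaletteUseContext *s = ctx->priv;
    int ret;

    if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0)
        return ret;
    s->fs.opt_repeatlast = 1;   /* only 1 frame ever arrives on the palette input */
    s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY;
    s->fs.on_event = load_apply_palette;

    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;
    outlink->time_base = ctx->inputs[0]->time_base;
    return ff_framesync2_configure(&s->fs);
}

The event callback uses the writable getter, since error-diffusion dithering writes into the main frame, and it frees both frames itself on its error path.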
[FFmpeg-cvslog] vf_ssim: convert to framesync2.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 13:37:06 2017 +0200| [ef2176473d3d5ac5bf36aa9bcd7bba760121ad84] | committer: Nicolas George vf_ssim: convert to framesync2. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=ef2176473d3d5ac5bf36aa9bcd7bba760121ad84 --- libavfilter/Makefile | 2 +- libavfilter/vf_ssim.c | 52 ++- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 3da821ee3e..ee840b02e5 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -301,7 +301,7 @@ OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o OBJS-$(CONFIG_SOBEL_FILTER) += vf_convolution.o OBJS-$(CONFIG_SPLIT_FILTER) += split.o OBJS-$(CONFIG_SPP_FILTER)+= vf_spp.o -OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o dualinput.o framesync.o +OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o framesync2.o OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync2.o OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o diff --git a/libavfilter/vf_ssim.c b/libavfilter/vf_ssim.c index 371f6dba70..5f5bed7a0e 100644 --- a/libavfilter/vf_ssim.c +++ b/libavfilter/vf_ssim.c @@ -38,16 +38,16 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" -#include "dualinput.h" #include "drawutils.h" #include "formats.h" +#include "framesync2.h" #include "internal.h" #include "ssim.h" #include "video.h" typedef struct SSIMContext { const AVClass *class; -FFDualInputContext dinput; +FFFrameSync fs; FILE *stats_file; char *stats_file_str; int nb_components; @@ -78,7 +78,7 @@ static const AVOption ssim_options[] = { { NULL } }; -AVFILTER_DEFINE_CLASS(ssim); +FRAMESYNC_DEFINE_CLASS(ssim, SSIMContext, fs); static void set_meta(AVDictionary **metadata, const char *key, char comp, float d) { @@ -282,13 +282,21 @@ static double ssim_db(double ssim, double weight) return 10 * log10(weight / (weight - ssim)); } -static AVFrame *do_ssim(AVFilterContext *ctx, AVFrame *main, -const AVFrame *ref) +static int do_ssim(FFFrameSync *fs) { -AVDictionary **metadata = &main->metadata; +AVFilterContext *ctx = fs->parent; SSIMContext *s = ctx->priv; +AVFrame *main, *ref; +AVDictionary **metadata; float c[4], ssimv = 0.0; -int i; +int ret, i; + +ret = ff_framesync2_dualinput_get(fs, &main, &ref); +if (ret < 0) +return ret; +if (!ref) +return ff_filter_frame(ctx->outputs[0], main); +metadata = &main->metadata; s->nb_frames++; @@ -320,7 +328,7 @@ static AVFrame *do_ssim(AVFilterContext *ctx, AVFrame *main, fprintf(s->stats_file, "All:%f (%f)\n", ssimv, ssim_db(ssimv, 1.0)); } -return main; +return ff_filter_frame(ctx->outputs[0], main); } static av_cold int init(AVFilterContext *ctx) @@ -343,9 +351,7 @@ static av_cold int init(AVFilterContext *ctx) } } -s->dinput.process = do_ssim; -s->dinput.shortest = 1; -s->dinput.repeatlast = 0; +s->fs.on_event = do_ssim; return 0; } @@ -425,28 +431,25 @@ static int config_output(AVFilterLink *outlink) AVFilterLink *mainlink = ctx->inputs[0]; int ret; +ret = ff_framesync2_init_dualinput(&s->fs, ctx); +if (ret < 0) +return ret; outlink->w = mainlink->w; outlink->h = mainlink->h; outlink->time_base = mainlink->time_base; outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio; outlink->frame_rate = mainlink->frame_rate; -if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) +if ((ret = ff_framesync2_configure(&s->fs)) < 0) return ret; return 0; } -static int filter_frame(AVFilterLink *inlink, AVFrame *buf) +static int activate(AVFilterContext *ctx) { -SSIMContext *s = 
inlink->dst->priv; -return ff_dualinput_filter_frame(&s->dinput, inlink, buf); -} - -static int request_frame(AVFilterLink *outlink) -{ -SSIMContext *s = outlink->src->priv; -return ff_dualinput_request_frame(&s->dinput, outlink); +SSIMContext *s = ctx->priv; +return ff_framesync2_activate(&s->fs); } static av_cold void uninit(AVFilterContext *ctx) @@ -466,7 +469,7 @@ static av_cold void uninit(AVFilterContext *ctx) s->ssim_total / s->nb_frames, ssim_db(s->ssim_total, s->nb_frames)); } -ff_dualinput_uninit(&s->dinput); +ff_framesync2_uninit(&s->fs); if (s->stats_file && s->stats_file != stdout) fclose(s->stats_file); @@ -478,11 +481,9 @@ static const AVFilterPad ssim_inputs[] = { { .name = "main", .type = AVMEDIA_TYPE_VIDEO, -.filter_frame = filter_frame, },{ .name = "reference",
[FFmpeg-cvslog] lavfi: remove dualinput.
ffmpeg | branch: master | Nicolas George | Mon Jul 31 13:38:22 2017 +0200| [607900c905c4112bb43f2710bd76d98491d584a9] | committer: Nicolas George lavfi: remove dualinput. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=607900c905c4112bb43f2710bd76d98491d584a9 --- libavfilter/dualinput.c | 90 - libavfilter/dualinput.h | 46 - 2 files changed, 136 deletions(-) diff --git a/libavfilter/dualinput.c b/libavfilter/dualinput.c deleted file mode 100644 index 44750973a6..00 --- a/libavfilter/dualinput.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "dualinput.h" -#include "libavutil/timestamp.h" - -static int process_frame(FFFrameSync *fs) -{ -AVFilterContext *ctx = fs->parent; -FFDualInputContext *s = fs->opaque; -AVFrame *mainpic = NULL, *secondpic = NULL; -int ret = 0; - -if ((ret = ff_framesync_get_frame(&s->fs, 0, &mainpic, 1)) < 0 || -(ret = ff_framesync_get_frame(&s->fs, 1, &secondpic, 0)) < 0) { -av_frame_free(&mainpic); -return ret; -} -av_assert0(mainpic); -mainpic->pts = av_rescale_q(s->fs.pts, s->fs.time_base, ctx->outputs[0]->time_base); -if (secondpic && !ctx->is_disabled) -mainpic = s->process(ctx, mainpic, secondpic); -ret = ff_filter_frame(ctx->outputs[0], mainpic); -av_assert1(ret != AVERROR(EAGAIN)); -return ret; -} - -int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s) -{ -FFFrameSyncIn *in; -int ret; - -if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0) -return ret; - -in = s->fs.in; -s->fs.opaque = s; -s->fs.on_event = process_frame; -in[0].time_base = ctx->inputs[0]->time_base; -in[1].time_base = ctx->inputs[1]->time_base; -in[0].sync = 2; -in[0].before = EXT_STOP; -in[0].after = EXT_INFINITY; -in[1].sync = 1; -in[1].before = EXT_NULL; -in[1].after = EXT_INFINITY; - -if (s->shortest) -in[0].after = in[1].after = EXT_STOP; -if (!s->repeatlast) { -in[1].after = EXT_NULL; -in[1].sync = 0; -} -if (s->skip_initial_unpaired) { -in[1].before = EXT_STOP; -} - -return ff_framesync_configure(&s->fs); -} - -int ff_dualinput_filter_frame(FFDualInputContext *s, - AVFilterLink *inlink, AVFrame *in) -{ -return ff_framesync_filter_frame(&s->fs, inlink, in); -} - -int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink) -{ -return ff_framesync_request_frame(&s->fs, outlink); -} - -void ff_dualinput_uninit(FFDualInputContext *s) -{ -ff_framesync_uninit(&s->fs); -} diff --git a/libavfilter/dualinput.h b/libavfilter/dualinput.h deleted file mode 100644 index fcde0d6aa1..00 --- a/libavfilter/dualinput.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Double input streams helper for filters - */ - -#ifndef AVFILTER_DUALINPUT_H -#define AVFILTER_DUALINPUT_H - -#include -#include "bufferqueue.h" -#include "framesync.h" -#include "internal.h" - -typedef struct FFDualInputContext { -FFFrameSync fs; - -AVFrame *(*process)(AVFilterContext *ctx, AVFrame *main, const AVFrame *second); -int shortest; ///< terminate stream when the second input terminates -int repeatlast; ///< repeat last second frame -int skip_initial_unpaired; ///< Skip initial frame
[FFmpeg-cvslog] doc/filters: document framesync options.
ffmpeg | branch: master | Nicolas George | Thu Aug 10 13:32:19 2017 +0200| [844bc0d89e1d59f1593448bfc0fc52fec34e0946] | committer: Nicolas George doc/filters: document framesync options. > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=844bc0d89e1d59f1593448bfc0fc52fec34e0946 --- Changelog| 3 +++ doc/filters.texi | 76 +--- 2 files changed, 48 insertions(+), 31 deletions(-) diff --git a/Changelog b/Changelog index 1dfb8b5714..b064328c3f 100644 --- a/Changelog +++ b/Changelog @@ -35,6 +35,9 @@ version : - pseudocolor video filter - raw G.726 muxer and demuxer, left- and right-justified - NewTek NDI input/output device +- Some video filters with several inputs now use a common set of options: + blend, libvmaf, lut3d, overlay, psnr, ssim. + They must always be used by name. version 3.3: - CrystalHD decoder moved to new decode API diff --git a/doc/filters.texi b/doc/filters.texi index 779fd53317..19e13a1346 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -311,6 +311,39 @@ See @code{ffmpeg -filters} to view which filters have timeline support. @c man end FILTERGRAPH DESCRIPTION +@anchor{framesync} +@chapter Options for filters with several inputs (framesync) +@c man begin OPTIONS FOR FILTERS WITH SEVERAL INPUTS + +Some filters with several inputs support a common set of options. +These options can only be set by name, not with the short notation. + +@table @option +@item eof_action +The action to take when EOF is encountered on the secondary input; it accepts +one of the following values: + +@table @option +@item repeat +Repeat the last frame (the default). +@item endall +End both streams. +@item pass +Pass the main input through. +@end table + +@item shortest +If set to 1, force the output to terminate when the shortest input +terminates. Default value is 0. + +@item repeatlast +If set to 1, force the filter to draw the last overlay frame over the +main input until the end of the stream. A value of 0 disables this +behavior. Default value is 1. +@end table + +@c man end OPTIONS FOR FILTERS WITH SEVERAL INPUTS + @chapter Audio Filters @c man begin AUDIO FILTERS @@ -4949,17 +4982,10 @@ Value of pixel component at current location for first video frame (top layer). @item BOTTOM, B Value of pixel component at current location for second video frame (bottom layer). @end table - -@item shortest -Force termination when the shortest input terminates. Default is -@code{0}. This option is only defined for the @code{blend} filter. - -@item repeatlast -Continue applying the last bottom frame after the end of the stream. A value of -@code{0} disable the filter after the last frame of the bottom layer is reached. -Default is @code{1}. This option is only defined for the @code{blend} filter. @end table +The @code{blend} filter also supports the @ref{framesync} options. + @subsection Examples @itemize @@ -9757,6 +9783,8 @@ Enables computing ms_ssim along with vmaf. Set the pool method to be used for computing vmaf. @end table +This filter also supports the @ref{framesync} options. + For example: @example ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf -f null - @@ -9838,6 +9866,8 @@ Interpolate values using a tetrahedron. @end table @end table +This filter also supports the @ref{framesync} options. + @section lumakey Turn certain luma values into transparency. @@ -10866,19 +10896,6 @@ on the main video. Default value is "0" for both expressions. In case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed within the output visible area). 
-@item eof_action -The action to take when EOF is encountered on the secondary input; it accepts -one of the following values: - -@table @option -@item repeat -Repeat the last frame (the default). -@item endall -End both streams. -@item pass -Pass the main input through. -@end table - @item eval Set when the expressions for @option{x}, and @option{y} are evaluated. @@ -10894,10 +10911,6 @@ evaluate expressions for each incoming frame Default value is @samp{frame}. -@item shortest -If set to 1, force the output to terminate when the shortest input -terminates. Default value is 0. - @item format Set the format for the output video. @@ -10923,11 +10936,6 @@ automatically pick format @end table Default value is @samp{yuv420}. - -@item repeatlast -If set to 1, force the filter to draw the last overlay frame over the -main input until the end of the stream. A value of 0 disables this -behavior. Default value is 1. @end table The @option{x}, and @option{y} expressions can contain the following @@ -10964,6 +10972,8 @@ The timestamp, expressed in seconds. It's NAN if the input timestamp is unknown. @end table +This filter also supports the @ref{framesync} options. + Note that the @var{n}, @var{pos}, @var{t} variables are available only when evaluation is done @emph{per frame}, and will eval
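As a usage illustration of the options documented above: they must be given by name in the filtergraph. The file names below are placeholders, and overlay is one of the filters the changelog lists as supporting the common framesync options (x and y are overlay's own options; eof_action and repeatlast are the shared ones).

    ffmpeg -i main.mp4 -i logo.png \
           -filter_complex "overlay=x=10:y=10:eof_action=pass:repeatlast=0" out.mp4

Here eof_action=pass lets the main input continue unmodified once the logo stream ends, and repeatlast=0 disables drawing the last overlay frame until end of stream.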
[FFmpeg-cvslog] lavfi: bump minor version after change in options.
ffmpeg | branch: master | Nicolas George | Tue Aug 29 15:46:36 2017 +0200| [7302d5e325977d4a8ff61422fdabdae0dc504ec0] | committer: Nicolas George

lavfi: bump minor version after change in options.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=7302d5e325977d4a8ff61422fdabdae0dc504ec0
---
 libavfilter/version.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavfilter/version.h b/libavfilter/version.h
index 22207127d0..60f18f7c51 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
 #include "libavutil/version.h"

 #define LIBAVFILTER_VERSION_MAJOR 6
-#define LIBAVFILTER_VERSION_MINOR 100
+#define LIBAVFILTER_VERSION_MINOR 101
 #define LIBAVFILTER_VERSION_MICRO 100

 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
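Applications that depend on the reworked option handling can gate on this bump at build time. A small sketch; the threshold 6.101.100 is taken from this commit and is an assumption about when the new by-name framesync options become available:

    #include <libavfilter/version.h>

    /* libavfilter/version.h pulls in libavutil/version.h, which defines
     * AV_VERSION_INT, so this check works at preprocessing time */
    #if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(6, 101, 100)
    /* common framesync options (eof_action, shortest, repeatlast)
     * can be set by name on the filters listed in the changelog */
    #endif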
[FFmpeg-cvslog] avformat/mxfenc: Check that the video codec in D-10 is MPEG-2
ffmpeg | branch: master | Michael Niedermayer | Tue Aug 29 02:13:20 2017 +0200| [429f3266c14ac4851da007fcb3461f9acc99cbad] | committer: Michael Niedermayer

avformat/mxfenc: Check that the video codec in D-10 is MPEG-2

Others do not work, but nothing rejects them prior to this patch
if the parameters otherwise match

Reviewed-by: Matthieu Bouron
Signed-off-by: Michael Niedermayer

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=429f3266c14ac4851da007fcb3461f9acc99cbad
---
 libavformat/mxfenc.c | 4
 1 file changed, 4 insertions(+)

diff --git a/libavformat/mxfenc.c b/libavformat/mxfenc.c
index 12fc9abbc6..a07da69b11 100644
--- a/libavformat/mxfenc.c
+++ b/libavformat/mxfenc.c
@@ -2101,6 +2101,10 @@ static int mxf_write_header(AVFormatContext *s)
             sc->video_bit_rate = st->codecpar->bit_rate;

         if (s->oformat == &ff_mxf_d10_muxer) {
+            if (st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
+                av_log(s, AV_LOG_ERROR, "error MXF D-10 only support MPEG-2 Video\n");
+                return AVERROR(EINVAL);
+            }
             if ((sc->video_bit_rate == 5000) && (mxf->time_base.den == 25)) {
                 sc->index = 3;
             } else if ((sc->video_bit_rate == 4840 || sc->video_bit_rate == 5000) && (mxf->time_base.den != 25)) {
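The general pattern here is to validate stream parameters in the muxer's header-writing step and fail with AVERROR(EINVAL) before any bytes are emitted, instead of producing a broken file. A hedged standalone sketch of that pattern; check_d10_stream is a made-up helper name, not the muxer's actual code:

    #include <libavformat/avformat.h>

    /* reject a video stream the D-10 container cannot represent */
    static int check_d10_stream(void *log_ctx, const AVStream *st)
    {
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
            st->codecpar->codec_id   != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "MXF D-10 only supports MPEG-2 video\n");
            return AVERROR(EINVAL);   /* fail before writing the header */
        }
        return 0;
    }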
[FFmpeg-cvslog] avcodec/h264idct_template: Fix integer overflow in ff_h264_idct8_add()
ffmpeg | branch: master | Michael Niedermayer | Thu Aug 17 01:06:14 2017 +0200| [6def8b8d924af985514ebb1b1a51871c94d26d33] | committer: Michael Niedermayer

avcodec/h264idct_template: Fix integer overflow in ff_h264_idct8_add()

Fixes: 2891/clusterfuzz-testcase-minimized-5881795457318912
Fixes: runtime error: signed integer overflow: 1551827968 - -775913984 cannot be represented in type 'int'

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=6def8b8d924af985514ebb1b1a51871c94d26d33
---
 libavcodec/h264idct_template.c | 28 ++--
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/libavcodec/h264idct_template.c b/libavcodec/h264idct_template.c
index 288107d5a4..3ad58c4a11 100644
--- a/libavcodec/h264idct_template.c
+++ b/libavcodec/h264idct_template.c
@@ -76,20 +76,20 @@ void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, int16_t *_block, int stride){

     for( i = 0; i < 8; i++ )
     {
-        const int a0 = block[i+0*8] + block[i+4*8];
-        const int a2 = block[i+0*8] - block[i+4*8];
-        const int a4 = (block[i+2*8]>>1) - block[i+6*8];
-        const int a6 = (block[i+6*8]>>1) + block[i+2*8];
-
-        const int b0 = a0 + a6;
-        const int b2 = a2 + a4;
-        const int b4 = a2 - a4;
-        const int b6 = a0 - a6;
-
-        const int a1 = -block[i+3*8] + block[i+5*8] - block[i+7*8] - (block[i+7*8]>>1);
-        const int a3 = block[i+1*8] + block[i+7*8] - block[i+3*8] - (block[i+3*8]>>1);
-        const int a5 = -block[i+1*8] + block[i+7*8] + block[i+5*8] + (block[i+5*8]>>1);
-        const int a7 = block[i+3*8] + block[i+5*8] + block[i+1*8] + (block[i+1*8]>>1);
+        const unsigned int a0 = block[i+0*8] + block[i+4*8];
+        const unsigned int a2 = block[i+0*8] - block[i+4*8];
+        const unsigned int a4 = (block[i+2*8]>>1) - block[i+6*8];
+        const unsigned int a6 = (block[i+6*8]>>1) + block[i+2*8];
+
+        const unsigned int b0 = a0 + a6;
+        const unsigned int b2 = a2 + a4;
+        const unsigned int b4 = a2 - a4;
+        const unsigned int b6 = a0 - a6;
+
+        const int a1 = -block[i+3*8] + (unsigned)block[i+5*8] - block[i+7*8] - (block[i+7*8]>>1);
+        const int a3 = block[i+1*8] + (unsigned)block[i+7*8] - block[i+3*8] - (block[i+3*8]>>1);
+        const int a5 = -block[i+1*8] + (unsigned)block[i+7*8] + block[i+5*8] + (block[i+5*8]>>1);
+        const int a7 = block[i+3*8] + (unsigned)block[i+5*8] + block[i+1*8] + (block[i+1*8]>>1);

         const int b1 = (a7>>2) + a1;
         const int b3 = a3 + (a5>>2);
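For readers unfamiliar with the idiom used in this patch: in C, signed integer overflow is undefined behaviour, while unsigned arithmetic wraps modulo 2^N. The standalone sketch below (not FFmpeg code; names are made up) shows the pattern of doing the intermediate arithmetic in unsigned and only converting the result back to int; that final conversion is implementation-defined when out of range, but yields the expected two's-complement wrap on the platforms FFmpeg targets.

    #include <stdio.h>

    /* well-defined even if a + b exceeds INT_MAX: the addition happens in
     * unsigned, which wraps instead of being undefined behaviour */
    static int add_wrapping(int a, int b)
    {
        return (int)((unsigned)a + (unsigned)b);
    }

    int main(void)
    {
        /* the values from the fuzzer report: 1551827968 - (-775913984)
         * cannot be represented in a 32-bit int */
        printf("%d\n", add_wrapping(1551827968, 775913984));
        return 0;
    }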
[FFmpeg-cvslog] avcodec/snowdec: Fix integer overflow in decode_subband_slice_buffered()
ffmpeg | branch: master | Michael Niedermayer | Mon Aug 28 00:30:33 2017 +0200| [732f9764561558a388c05483ed6a722a5c67b05c] | committer: Michael Niedermayer

avcodec/snowdec: Fix integer overflow in decode_subband_slice_buffered()

Fixes: runtime error: signed integer overflow: 267 * 8388608 cannot be represented in type 'int'
Fixes: 2743/clusterfuzz-testcase-minimized-5820652076400640

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=732f9764561558a388c05483ed6a722a5c67b05c
---
 libavcodec/snowdec.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/snowdec.c b/libavcodec/snowdec.c
index 734f43e7d1..b74c468ce3 100644
--- a/libavcodec/snowdec.c
+++ b/libavcodec/snowdec.c
@@ -140,7 +140,7 @@ static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, sli
                 v = b->x_coeff[new_index].coeff;
                 x = b->x_coeff[new_index++].x;
                 while(x < w){
-                    register int t= ( (v>>1)*qmul + qadd)>>QEXPSHIFT;
+                    register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
                     register int u= -(v&1);
                     line[x] = (t^u) - u;
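The same wrap-instead-of-undefined idiom applies to the multiplication here: 267 * 8388608 overflows a 32-bit int, so one operand is cast to unsigned and the whole product wraps. A minimal sketch mirroring the patched expression (identifier names are illustrative, not the decoder's; like the committed code, the final right shift of a possibly negative int is implementation-defined but well-behaved on FFmpeg's targets):

    /* dequantize one coefficient without signed-overflow UB */
    static int dequant(int v, int qmul, int qadd, int shift)
    {
        /* (v >> 1) is promoted to unsigned for the multiply, so an
         * overflowing product wraps instead of being undefined */
        return (int)((v >> 1) * (unsigned)qmul + qadd) >> shift;
    }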
[FFmpeg-cvslog] avcodec/hevc_ps: Fix undefined shift in pcm code
ffmpeg | branch: master | Michael Niedermayer | Sun Aug 27 23:59:09 2017 +0200| [2a83866c9f9531eb096c9b9fe0550e742b931ad1] | committer: Michael Niedermayer

avcodec/hevc_ps: Fix undefined shift in pcm code

Fixes: runtime error: shift exponent -1 is negative
Fixes: 3091/clusterfuzz-testcase-minimized-6229767969832960

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=2a83866c9f9531eb096c9b9fe0550e742b931ad1
---
 libavcodec/hevc_ps.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libavcodec/hevc_ps.c b/libavcodec/hevc_ps.c
index 37eae226e2..ee31cc093c 100644
--- a/libavcodec/hevc_ps.c
+++ b/libavcodec/hevc_ps.c
@@ -1028,10 +1028,10 @@ int ff_hevc_parse_sps(HEVCSPS *sps, GetBitContext *gb, unsigned int *sps_id,
         sps->pcm.log2_min_pcm_cb_size = get_ue_golomb_long(gb) + 3;
         sps->pcm.log2_max_pcm_cb_size = sps->pcm.log2_min_pcm_cb_size +
                                         get_ue_golomb_long(gb);
-        if (sps->pcm.bit_depth > sps->bit_depth) {
+        if (FFMAX(sps->pcm.bit_depth, sps->pcm.bit_depth_chroma) > sps->bit_depth) {
             av_log(avctx, AV_LOG_ERROR,
-                   "PCM bit depth (%d) is greater than normal bit depth (%d)\n",
-                   sps->pcm.bit_depth, sps->bit_depth);
+                   "PCM bit depth (%d, %d) is greater than normal bit depth (%d)\n",
+                   sps->pcm.bit_depth, sps->pcm.bit_depth_chroma, sps->bit_depth);
             return AVERROR_INVALIDDATA;
         }
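The sanitizer report ("shift exponent -1 is negative") points at a shift amount that is presumably derived from the difference between the stream bit depth and the PCM bit depth; once the chroma PCM depth is included in the check, that difference can no longer go negative. A minimal sketch of the invariant the new FFMAX() check enforces (the helper name and the exact shift derivation are assumptions, not the parser's code):

    #include "libavutil/error.h"

    /* returns a non-negative shift amount, or an error the caller must
     * propagate, exactly like the SPS check above does */
    static int pcm_shift_amount(int bit_depth, int pcm_bit_depth)
    {
        if (pcm_bit_depth > bit_depth)
            return AVERROR_INVALIDDATA;   /* reject: shifting by a negative amount is UB */
        return bit_depth - pcm_bit_depth; /* never negative, so the later shift is defined */
    }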
[FFmpeg-cvslog] avformat/mxfenc: Replace literal numbers by named enum values.
ffmpeg | branch: master | Michael Niedermayer | Tue Aug 29 02:13:21 2017 +0200| [f762555a90790dbfca016ff81b3a4972c39babb4] | committer: Michael Niedermayer avformat/mxfenc: Replace literal numbers by named enum values. Signed-off-by: Michael Niedermayer > http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=f762555a90790dbfca016ff81b3a4972c39babb4 --- libavformat/mxfenc.c | 83 ++-- 1 file changed, 61 insertions(+), 22 deletions(-) diff --git a/libavformat/mxfenc.c b/libavformat/mxfenc.c index a07da69b11..cc9ec8c93a 100644 --- a/libavformat/mxfenc.c +++ b/libavformat/mxfenc.c @@ -120,6 +120,45 @@ static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st); static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st); static void mxf_write_generic_sound_desc(AVFormatContext *s, AVStream *st); +enum { +INDEX_MPEG2 = 0, +INDEX_AES3, +INDEX_WAV, +INDEX_D10_625_50_50_VIDEO, +INDEX_D10_625_50_50_AUDIO, +INDEX_D10_525_60_50_VIDEO, +INDEX_D10_525_60_50_AUDIO, +INDEX_D10_625_50_40_VIDEO, +INDEX_D10_625_50_40_AUDIO, +INDEX_D10_525_60_40_VIDEO, +INDEX_D10_525_60_40_AUDIO, +INDEX_D10_625_50_30_VIDEO, +INDEX_D10_625_50_30_AUDIO, +INDEX_D10_525_60_30_VIDEO, +INDEX_D10_525_60_30_AUDIO, +INDEX_DV, +INDEX_DV25_525_60, +INDEX_DV25_625_50, +INDEX_DV50_525_60, +INDEX_DV50_625_50, +INDEX_DV100_1080_60, +INDEX_DV100_1080_50, +INDEX_DV100_720_60, +INDEX_DV100_720_50, +INDEX_DNXHD_1080p_10bit_HIGH, +INDEX_DNXHD_1080p_8bit_MEDIUM, +INDEX_DNXHD_1080p_8bit_HIGH, +INDEX_DNXHD_1080i_10bit_HIGH, +INDEX_DNXHD_1080i_8bit_MEDIUM, +INDEX_DNXHD_1080i_8bit_HIGH, +INDEX_DNXHD_720p_10bit, +INDEX_DNXHD_720p_8bit_HIGH, +INDEX_DNXHD_720p_8bit_MEDIUM, +INDEX_DNXHD_720p_8bit_LOW, +INDEX_JPEG2000, +INDEX_H264, +}; + static const MXFContainerEssenceEntry mxf_essence_container_uls[] = { { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x60,0x01 }, { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, @@ -1680,37 +1719,37 @@ AVPacket *pkt) switch (cid) { case 1235: -sc->index = 24; +sc->index = INDEX_DNXHD_1080p_10bit_HIGH; sc->component_depth = 10; break; case 1237: -sc->index = 25; +sc->index = INDEX_DNXHD_1080p_8bit_MEDIUM; break; case 1238: -sc->index = 26; +sc->index = INDEX_DNXHD_1080p_8bit_HIGH; break; case 1241: -sc->index = 27; +sc->index = INDEX_DNXHD_1080i_10bit_HIGH; sc->component_depth = 10; break; case 1242: -sc->index = 28; +sc->index = INDEX_DNXHD_1080i_8bit_MEDIUM; break; case 1243: -sc->index = 29; +sc->index = INDEX_DNXHD_1080i_8bit_HIGH; break; case 1250: -sc->index = 30; +sc->index = INDEX_DNXHD_720p_10bit; sc->component_depth = 10; break; case 1251: -sc->index = 31; +sc->index = INDEX_DNXHD_720p_8bit_HIGH; break; case 1252: -sc->index = 32; +sc->index = INDEX_DNXHD_720p_8bit_MEDIUM; break; case 1253: -sc->index = 33; +sc->index = INDEX_DNXHD_720p_8bit_LOW; break; default: return -1; @@ -1771,7 +1810,7 @@ static int mxf_parse_dv_frame(AVFormatContext *s, AVStream *st, AVPacket *pkt) switch (stype) { case 0x18: // DV100 720p -ul_index = 6 + pal; +ul_index = INDEX_DV100_720_50 + pal; frame_size = pal ? 288000 : 24; if (sc->interlaced) { av_log(s, AV_LOG_ERROR, "source marked as interlaced but codec profile is progressive\n"); @@ -1779,19 +1818,19 @@ static int mxf_parse_dv_frame(AVFormatContext *s, AVStream *st, AVPacket *pkt) } break; case 0x14: // DV100 1080i -ul_index = 4 + pal; +ul_index = INDEX_DV100_1080_50 + pal; frame_size = pal ? 
576000 : 48; break; case 0x04: // DV50 -ul_index = 2 + pal; +ul_index = INDEX_DV50_525_60 + pal; frame_size = pal ? 288000 : 24; break; default: // DV25 -ul_index = 0 + pal; +ul_index = INDEX_DV25_525_60 + pal; frame_size = pal ? 144000 : 12; } -sc->index = ul_index + 16; +sc->index = ul_index; sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul; if(s->oformat == &ff_mxf_opatom_muxer) { @@ -2106,15 +2145,15 @@ static int mxf_write_header(AVFormatContext *s) return AVERROR(EINVAL); } if ((sc->video_bit_rate == 5000) && (mxf->time_base.den == 25)) { -sc->index = 3; +sc->index = INDEX_D10_625_50_50_VIDEO; } else i