---
 Changelog                         |   1 +
 doc/filters.texi                  |   7 ++
 libavfilter/vf_framestep.c        | 242 ++++++++++++++++++++++++++++++++++----
 tests/fate/filter-video.mak       |   5 +
 tests/ref/fate/filter-framestep-1 |  17 +++
 tests/ref/fate/filter-framestep-2 |  17 +++
 tests/ref/fate/filter-framestep-3 |  17 +++
 7 files changed, 286 insertions(+), 20 deletions(-)
 create mode 100644 tests/ref/fate/filter-framestep-1
 create mode 100644 tests/ref/fate/filter-framestep-2
 create mode 100644 tests/ref/fate/filter-framestep-3
diff --git a/Changelog b/Changelog
index e76b324..880f10a 100644
--- a/Changelog
+++ b/Changelog
@@ -2,6 +2,7 @@ Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
 version <next>:
+- framestep filter: add blend parameter for motion blur effect
 
 version 3.3:
 - CrystalHD decoder moved to new decode API
diff --git a/doc/filters.texi b/doc/filters.texi
index bc37e66..3ccb727 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -8246,6 +8246,13 @@ This filter accepts the following option:
 @item step
 Select frame after every @code{step} frames.
 Allowed values are positive integers higher than 0. Default value is @code{1}.
+@item blend
+Blend @code{blend} consecutive frames on every step
+to produce a motion blur effect.
+Allowed values are positive integers between @code{1} and @code{step},
+where @code{1} corresponds to no motion blur, and @code{step}
+corresponds to maximal motion blur.
+Default value is @code{1}.
 @end table
 
 @anchor{frei0r}
diff --git a/libavfilter/vf_framestep.c b/libavfilter/vf_framestep.c
index 8102e7c..d68ed2d 100644
--- a/libavfilter/vf_framestep.c
+++ b/libavfilter/vf_framestep.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2012 Stefano Sabatini
+ * Copyright (c) 2017 Matthias C. M. Troffaes
  *
  * This file is part of FFmpeg.
  *
@@ -24,13 +25,25 @@
  */
 
 #include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "internal.h"
 #include "video.h"
 
 typedef struct NullContext {
     const AVClass *class;
-    int frame_step;
+    int frame_step;      ///< step size in frames
+    int frame_blend;     ///< how many frames to blend on each step
+    int nb_planes;       ///< number of planes in the pixel format
+    int planewidth[4];   ///< width of each plane (after subsampling)
+    int planeheight[4];  ///< height of each plane (after subsampling)
+    int linesize[4];     ///< linesize of buffer in bytes
+    uint32_t *data[4];   ///< buffer for blending input frames
+
+    void (*blend_set)(AVFilterContext *ctx, AVFrame *in, int plane);
+    void (*blend_add)(AVFilterContext *ctx, AVFrame *in, int plane);
+    void (*blend_div)(AVFilterContext *ctx, AVFrame *in, int plane);
+    int (*filter_frame)(AVFilterLink *inlink, AVFrame *in);
 } FrameStepContext;
 
 #define OFFSET(x) offsetof(FrameStepContext, x)
@@ -38,43 +51,229 @@ typedef struct NullContext {
 static const AVOption framestep_options[] = {
     { "step", "set frame step", OFFSET(frame_step), AV_OPT_TYPE_INT, {.i64=1}, 1, INT_MAX, FLAGS},
+    { "blend", "number of frames to blend per step", OFFSET(frame_blend), AV_OPT_TYPE_INT, {.i64=1}, 1, 65535, FLAGS},
     { NULL },
 };
 
 AVFILTER_DEFINE_CLASS(framestep);
 
+#define DEFINE_BLEND(NAME, TYPE, DECL, EXPR)                                    \
+static void blend_##NAME##_##TYPE(AVFilterContext *ctx, AVFrame *in, int plane)\
+{                                                                               \
+    FrameStepContext *s = ctx->priv;                                            \
+    DECL                                                                        \
+    const int height = s->planeheight[plane];                                   \
+    const int width = s->planewidth[plane];                                     \
+    const int stride = in->linesize[plane] / sizeof(TYPE);                      \
+    TYPE *src = (TYPE *)in->data[plane];                                        \
+    uint32_t *dst = s->data[plane];                                             \
+    int y, x;                                                                   \
+                                                                                \
+    for (y = 0; y < height; y++) {                                              \
+        for (x = 0; x < width; x++) {                                           \
+            EXPR;                                                               \
+        }                                                                       \
+        src += stride;                                                          \
+    }                                                                           \
+}
+
+#define SET_DECL
+#define SET_EXPR *dst++ = src[x]
+#define ADD_DECL
+#define ADD_EXPR *dst++ += src[x]
+#define DIV_DECL const int frame_blend = s->frame_blend;
+#define DIV_EXPR src[x] = *dst++ / frame_blend
+
+DEFINE_BLEND(set, uint8_t, SET_DECL, SET_EXPR)
+DEFINE_BLEND(set, uint16_t, SET_DECL, SET_EXPR)
+DEFINE_BLEND(add, uint8_t, ADD_DECL, ADD_EXPR)
+DEFINE_BLEND(add, uint16_t, ADD_DECL, ADD_EXPR)
+DEFINE_BLEND(div, uint8_t, DIV_DECL, DIV_EXPR)
+DEFINE_BLEND(div, uint16_t, DIV_DECL, DIV_EXPR)
+
+#undef SET_DECL
+#undef SET_EXPR
+#undef ADD_DECL
+#undef ADD_EXPR
+#undef DIV_DECL
+#undef DIV_EXPR
+#undef DEFINE_BLEND
+
+static int filter_frame_generic(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterContext *ctx = inlink->dst;
+    FrameStepContext *s = ctx->priv;
+    AVFilterLink *outlink = ctx->outputs[0];
+    AVFrame *out = NULL;
+    uint64_t frame_pos = inlink->frame_count_out % s->frame_step;
+    int direct = 0;
+
+    /* update destination frame buffer (s->data); we need to
+       do this even if filter is disabled because buffer might be used
+       for later frames when filter is re-enabled */
+    if (!frame_pos) {
+        /* copy first frame to destination frame buffer */
+        for (int plane = 0; plane < s->nb_planes; plane++)
+            s->blend_set(ctx, in, plane);
+    } else if (frame_pos < s->frame_blend) {
+        /* add current frame to destination frame buffer */
+        for (int plane = 0; plane < s->nb_planes; plane++)
+            s->blend_add(ctx, in, plane);
+    }
+
+    /* write frame */
+    if (ctx->is_disabled) {
+        /* filter is disabled, so pass input frame as is */
+        return ff_filter_frame(outlink, in);
+    } else if ((frame_pos + 1) == s->frame_blend) {
+        /* filter is enabled, so write when all frames are blended */
+        /* create a writable frame */
+        if (av_frame_is_writable(in)) {
+            direct = 1;
+            out = in;
+        } else {
+            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+            if (!out) {
+                av_frame_free(&in);
+                return AVERROR(ENOMEM);
+            }
+            av_frame_copy_props(out, in);
+        }
+        /* finalize destination frame */
+        for (int plane = 0; plane < s->nb_planes; plane++)
+            s->blend_div(ctx, out, plane);
+        /* free extra frame if created, and pass on output frame */
+        if (!direct)
+            av_frame_free(&in);
+        return ff_filter_frame(outlink, out);
+    } else {
+        av_frame_free(&in);
+        return 0;
+    }
+}
+
+/* special case of filter_frame when frame_blend is 1 */
+static int filter_frame_single(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterContext *ctx = inlink->dst;
+    FrameStepContext *s = ctx->priv;
+
+    if (!(inlink->frame_count_out % s->frame_step) || ctx->is_disabled) {
+        return ff_filter_frame(ctx->outputs[0], in);
+    } else {
+        av_frame_free(&in);
+        return 0;
+    }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+    static const enum AVPixelFormat pix_fmts[] = {
+        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
+        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
+        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
+        AV_PIX_FMT_YUVJ411P,
+        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8,
+        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+        AV_PIX_FMT_GBRP16, AV_PIX_FMT_GRAY16,
+        AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
+        AV_PIX_FMT_NONE
+    };
+    FrameStepContext *s = ctx->priv;
+    AVFilterFormats *fmts_list = NULL;
+
+    if (s->frame_blend == 1) {
+        fmts_list = ff_all_formats(AVMEDIA_TYPE_VIDEO);
+    } else {
+        fmts_list = ff_make_format_list(pix_fmts);
+    }
+    if (!fmts_list)
+        return AVERROR(ENOMEM);
+    return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input_props(AVFilterLink *inlink)
+{
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    const AVFilterContext *ctx = inlink->dst;
+    FrameStepContext *s = ctx->priv;
+
+    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
+    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+    s->planeheight[0] = s->planeheight[3] = inlink->h;
+    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
+    for (int plane = 0; plane < s->nb_planes; plane++) {
+        const int planesize = s->planewidth[plane] * s->planeheight[plane];
+        s->data[plane] = av_mallocz_array(planesize, sizeof(uint32_t));
+        if (!s->data[plane])
+            return AVERROR(ENOMEM);
+    }
+    if (s->frame_blend == 1) {
+        s->filter_frame = filter_frame_single;
+    } else {
+        s->filter_frame = filter_frame_generic;
+        if (desc->comp[0].depth == 8) {
+            s->blend_set = blend_set_uint8_t;
+            s->blend_add = blend_add_uint8_t;
+            s->blend_div = blend_div_uint8_t;
+        } else if (desc->comp[0].depth == 16) {
+            s->blend_set = blend_set_uint16_t;
+            s->blend_add = blend_add_uint16_t;
+            s->blend_div = blend_div_uint16_t;
+        } else {
+            return AVERROR_BUG;
+        }
+    }
+    return 0;
+}
+
 static int config_output_props(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
-    FrameStepContext *framestep = ctx->priv;
-    AVFilterLink *inlink = ctx->inputs[0];
+    const FrameStepContext *s = ctx->priv;
+    const AVFilterLink *inlink = ctx->inputs[0];
 
     outlink->frame_rate =
-        av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1});
+        av_div_q(inlink->frame_rate, (AVRational){s->frame_step, 1});
 
     av_log(ctx, AV_LOG_VERBOSE, "step:%d frame_rate:%d/%d(%f) -> frame_rate:%d/%d(%f)\n",
-           framestep->frame_step,
+           s->frame_step,
            inlink->frame_rate.num, inlink->frame_rate.den, av_q2d(inlink->frame_rate),
            outlink->frame_rate.num, outlink->frame_rate.den, av_q2d(outlink->frame_rate));
+
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
+static av_cold int init(AVFilterContext *ctx)
 {
-    FrameStepContext *framestep = inlink->dst->priv;
+    FrameStepContext *s = ctx->priv;
+    s->frame_blend = FFMIN(s->frame_blend, s->frame_step);
+    return 0;
+}
 
-    if (!(inlink->frame_count_out % framestep->frame_step)) {
-        return ff_filter_frame(inlink->dst->outputs[0], ref);
-    } else {
-        av_frame_free(&ref);
-        return 0;
-    }
+static av_cold void uninit(AVFilterContext *ctx)
+{
+    FrameStepContext *s = ctx->priv;
+    for (int plane = 0; plane < s->nb_planes; plane++)
+        av_freep(&s->data[plane]);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    FrameStepContext *s = inlink->dst->priv;
+    return s->filter_frame(inlink, in);
 }
 
 static const AVFilterPad framestep_inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_input_props,
         .filter_frame = filter_frame,
     },
     { NULL }
@@ -90,11 +289,14 @@ static const AVFilterPad framestep_outputs[] = {
 };
 
 AVFilter ff_vf_framestep = {
-    .name        = "framestep",
-    .description = NULL_IF_CONFIG_SMALL("Select one frame every N frames."),
-    .priv_size   = sizeof(FrameStepContext),
-    .priv_class  = &framestep_class,
-    .inputs      = framestep_inputs,
-    .outputs     = framestep_outputs,
-    .flags       = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+    .name           = "framestep",
+    .description    = NULL_IF_CONFIG_SMALL("Select one frame every N frames."),
+    .priv_size      = sizeof(FrameStepContext),
+    .priv_class     = &framestep_class,
+    .init           = init,
+    .uninit         = uninit,
+    .query_formats  = query_formats,
+    .inputs         = framestep_inputs,
+    .outputs        = framestep_outputs,
+    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
 };
diff --git a/tests/fate/filter-video.mak b/tests/fate/filter-video.mak
index b40422a..216ab27 100644
--- a/tests/fate/filter-video.mak
+++ b/tests/fate/filter-video.mak
@@ -381,6 +381,11 @@ fate-filter-fps-cfr: CMD = framecrc -i $(TARGET_SAMPLES)/qtrle/apple-animation-v
 fate-filter-fps-r: CMD = framecrc -i $(TARGET_SAMPLES)/qtrle/apple-animation-variable-fps-bug.mov -r 30 -vf fps -pix_fmt yuv420p
 fate-filter-fps: CMD = framecrc -i $(TARGET_SAMPLES)/qtrle/apple-animation-variable-fps-bug.mov -vf fps=30 -pix_fmt yuv420p
 
+FATE_FILTER_SAMPLES-$(call ALLYES, FRAMESTEP_FILTER) += fate-filter-framestep-1 fate-filter-framestep-2 fate-filter-framestep-3
+fate-filter-framestep-1: CMD = framecrc -i $(TARGET_SAMPLES)/filter/anim.mkv -vf framestep=step=6
+fate-filter-framestep-2: CMD = framecrc -i $(TARGET_SAMPLES)/filter/anim.mkv -pix_fmt yuv420p -vf framestep=step=6:blend=3
+fate-filter-framestep-3: CMD = framecrc -i $(TARGET_SAMPLES)/filter/anim.mkv -pix_fmt yuv420p16le -vf framestep=step=6:blend=3
+
 FATE_FILTER_VSYNTH-$(call ALLYES, FORMAT_FILTER SPLIT_FILTER ALPHAEXTRACT_FILTER ALPHAMERGE_FILTER) += fate-filter-alphaextract_alphamerge_rgb
 fate-filter-alphaextract_alphamerge_rgb: tests/data/filtergraphs/alphamerge_alphaextract_rgb
 fate-filter-alphaextract_alphamerge_rgb: CMD = framecrc -c:v pgmyuv -i $(SRC) -filter_complex_script $(TARGET_PATH)/tests/data/filtergraphs/alphamerge_alphaextract_rgb
diff --git a/tests/ref/fate/filter-framestep-1 b/tests/ref/fate/filter-framestep-1
new file mode 100644
index 0000000..0a6dd19
--- /dev/null
+++ b/tests/ref/fate/filter-framestep-1
@@ -0,0 +1,17 @@
+#tb 0: 1001/4000
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 320x180
+#sar 0: 1/1
+0,          0,          0,        1,   172800, 0x5adff92c
+0,          1,          1,        1,   172800, 0x37b7f659
+0,          2,          2,        1,   172800, 0xb4a6f1d1
+0,          3,          3,        1,   172800, 0xd596f9c6
+0,          4,          4,        1,   172800, 0xff5a015b
+0,          5,          5,        1,   172800, 0x65477f11
+0,          6,          6,        1,   172800, 0x41569400
+0,          7,          7,        1,   172800, 0xcff9ddf9
+0,          8,          8,        1,   172800, 0xd6daba1e
+0,          9,          9,        1,   172800, 0xad83bda1
+0,         10,         10,        1,   172800, 0x1518bdb3
+0,         11,         11,        1,   172800, 0xfdd1c7ca
diff --git a/tests/ref/fate/filter-framestep-2 b/tests/ref/fate/filter-framestep-2
new file mode 100644
index 0000000..d8dfb3c
--- /dev/null
+++ b/tests/ref/fate/filter-framestep-2
@@ -0,0 +1,17 @@
+#tb 0: 1001/4000
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 320x180
+#sar 0: 1/1
+0,          0,          0,        1,    86400, 0x5a5fa606
+0,          1,          1,        1,    86400, 0xaadd94b9
+0,          2,          2,        1,    86400, 0x91879f92
+0,          3,          3,        1,    86400, 0x62e3aa29
+0,          4,          4,        1,    86400, 0xb0a5b0b4
+0,          5,          5,        1,    86400, 0x49f4cb42
+0,          6,          6,        1,    86400, 0x396befa1
+0,          7,          7,        1,    86400, 0xc30e7e5d
+0,          8,          8,        1,    86400, 0x677b4d09
+0,          9,          9,        1,    86400, 0xe7384e86
+0,         10,         10,        1,    86400, 0xf48d4e8b
+0,         11,         11,        1,    86400, 0x43834cdd
diff --git a/tests/ref/fate/filter-framestep-3 b/tests/ref/fate/filter-framestep-3
new file mode 100644
index 0000000..11ce032
--- /dev/null
+++ b/tests/ref/fate/filter-framestep-3
@@ -0,0 +1,17 @@
+#tb 0: 1001/4000
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 320x180
+#sar 0: 1/1
+0,          0,          0,        1,   172800, 0x2c4a2095
+0,          1,          1,        1,   172800, 0x43bc1f93
+0,          2,          2,        1,   172800, 0x7e3eebc2
+0,          3,          3,        1,   172800, 0xa8e31a76
+0,          4,          4,        1,   172800, 0x60d6265f
+0,          5,          5,        1,   172800, 0x4eea61e0
+0,          6,          6,        1,   172800, 0x80c044a7
+0,          7,          7,        1,   172800, 0x730bc794
+0,          8,          8,        1,   172800, 0xbe9d4fea
+0,          9,          9,        1,   172800, 0x29006c79
+0,         10,         10,        1,   172800, 0x36135cbe
+0,         11,         11,        1,   172800, 0x40535425
-- 
2.7.4
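
For readers who want the blending idea at a glance, here is a minimal
standalone C sketch (illustration only, not part of the patch) of the
accumulate-and-average scheme that the set/add/div callbacks implement for
one plane. The function name, the single flat 8-bit plane and the
caller-provided frame array are assumptions made for this example; the patch
itself works on AVFrame planes through the DEFINE_BLEND macros and also
provides a 16-bit variant.

#include <stdint.h>
#include <stdlib.h>

/* Average nb_frames 8-bit frames of one plane into dst, using a 32-bit
 * accumulator in the same spirit as FrameStepContext.data above.
 * Assumes nb_frames >= 1 and identical geometry for all input frames. */
static void blend_plane(uint8_t *dst, uint8_t **frames, int nb_frames,
                        int width, int height, int linesize)
{
    uint32_t *acc = calloc((size_t)width * height, sizeof(*acc));
    uint32_t *a;

    if (!acc)
        return;

    /* "set"/"add" passes: sum every input frame into the accumulator */
    for (int f = 0; f < nb_frames; f++) {
        const uint8_t *src = frames[f];
        a = acc;
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++)
                *a++ += src[x];
            src += linesize;
        }
    }

    /* "div" pass: write the per-pixel average back as the blurred output */
    a = acc;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++)
            dst[x] = *a++ / nb_frames;
        dst += linesize;
    }

    free(acc);
}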