ffmpeg | branch: master | Mark Thompson <s...@jkqxz.net> | Sun Mar 12 16:55:32 2017 +0000| [b9acc7fbd9fe6aeb76c9c91a65f1f5dab141fce4] | committer: Mark Thompson
Merge commit 'ad71d3276fef0ee7e791e62bbfe9c4e540047417'

* commit 'ad71d3276fef0ee7e791e62bbfe9c4e540047417':
  lavfi: add a QSV deinterlacing filter

Minor fixup for lavfi differences.

Merged-by: Mark Thompson <s...@jkqxz.net>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=b9acc7fbd9fe6aeb76c9c91a65f1f5dab141fce4
---
 Changelog                        |   2 +-
 configure                        |   1 +
 libavfilter/Makefile             |   1 +
 libavfilter/allfilters.c         |   1 +
 libavfilter/version.h            |   2 +-
 libavfilter/vf_deinterlace_qsv.c | 575 +++++++++++++++++++++++++++++++++++++++
 6 files changed, 580 insertions(+), 2 deletions(-)

diff --git a/Changelog b/Changelog
index ac9998e..8cefcb5 100644
--- a/Changelog
+++ b/Changelog
@@ -26,7 +26,7 @@ version <next>:
 - native Opus encoder
 - ScreenPressor decoder
 - incomplete ClearVideo decoder
-- Intel QSV video scaling filter
+- Intel QSV video scaling and deinterlacing filters
 
 version 3.2:
 - libopenmpt demuxer
diff --git a/configure b/configure
index dc18bfa..1e2e774 100755
--- a/configure
+++ b/configure
@@ -3083,6 +3083,7 @@ bs2b_filter_deps="libbs2b"
 colormatrix_filter_deps="gpl"
 cover_rect_filter_deps="avcodec avformat gpl"
 cropdetect_filter_deps="gpl"
+deinterlace_qsv_filter_deps="libmfx"
 deinterlace_vaapi_filter_deps="vaapi"
 delogo_filter_deps="gpl"
 deshake_filter_select="pixelutils"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 4d1180b..428251f 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -156,6 +156,7 @@ OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
 OBJS-$(CONFIG_DEBAND_FILTER) += vf_deband.o
 OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
 OBJS-$(CONFIG_DEFLATE_FILTER) += vf_neighbor.o
+OBJS-$(CONFIG_DEINTERLACE_QSV_FILTER) += vf_deinterlace_qsv.o
 OBJS-$(CONFIG_DEINTERLACE_VAAPI_FILTER) += vf_deinterlace_vaapi.o
 OBJS-$(CONFIG_DEJUDDER_FILTER) += vf_dejudder.o
 OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 6aa482d..2bf34ef 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -167,6 +167,7 @@ static void register_all(void)
     REGISTER_FILTER(DEBAND, deband, vf);
     REGISTER_FILTER(DECIMATE, decimate, vf);
     REGISTER_FILTER(DEFLATE, deflate, vf);
+    REGISTER_FILTER(DEINTERLACE_QSV, deinterlace_qsv, vf);
     REGISTER_FILTER(DEINTERLACE_VAAPI, deinterlace_vaapi, vf);
     REGISTER_FILTER(DEJUDDER, dejudder, vf);
     REGISTER_FILTER(DELOGO, delogo, vf);
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 878711d..e67f34b 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
 #include "libavutil/version.h"
 
 #define LIBAVFILTER_VERSION_MAJOR 6
-#define LIBAVFILTER_VERSION_MINOR 75
+#define LIBAVFILTER_VERSION_MINOR 76
 #define LIBAVFILTER_VERSION_MICRO 100
 
 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
diff --git a/libavfilter/vf_deinterlace_qsv.c b/libavfilter/vf_deinterlace_qsv.c
new file mode 100644
index 0000000..e7491e1
--- /dev/null
+++ b/libavfilter/vf_deinterlace_qsv.c
@@ -0,0 +1,575 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * deinterlace video filter - QSV + */ + +#include <mfx/mfxvideo.h> + +#include <stdio.h> +#include <string.h> + +#include "libavutil/avstring.h" +#include "libavutil/common.h" +#include "libavutil/hwcontext.h" +#include "libavutil/hwcontext_qsv.h" +#include "libavutil/internal.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "libavutil/time.h" + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +enum { + QSVDEINT_MORE_OUTPUT = 1, + QSVDEINT_MORE_INPUT, +}; + +typedef struct QSVFrame { + AVFrame *frame; + mfxFrameSurface1 surface; + int used; + + struct QSVFrame *next; +} QSVFrame; + +typedef struct QSVDeintContext { + const AVClass *class; + + AVBufferRef *hw_frames_ctx; + /* a clone of the main session, used internally for deinterlacing */ + mfxSession session; + + mfxMemId *mem_ids; + int nb_mem_ids; + + mfxFrameSurface1 **surface_ptrs; + int nb_surface_ptrs; + + mfxExtOpaqueSurfaceAlloc opaque_alloc; + mfxExtBuffer *ext_buffers[1]; + + QSVFrame *work_frames; + + int64_t last_pts; + + int eof; +} QSVDeintContext; + +static void qsvdeint_uninit(AVFilterContext *ctx) +{ + QSVDeintContext *s = ctx->priv; + QSVFrame *cur; + + if (s->session) { + MFXClose(s->session); + s->session = NULL; + } + av_buffer_unref(&s->hw_frames_ctx); + + cur = s->work_frames; + while (cur) { + s->work_frames = cur->next; + av_frame_free(&cur->frame); + av_freep(&cur); + cur = s->work_frames; + } + + av_freep(&s->mem_ids); + s->nb_mem_ids = 0; + + av_freep(&s->surface_ptrs); + s->nb_surface_ptrs = 0; +} + +static int qsvdeint_query_formats(AVFilterContext *ctx) +{ + static const enum AVPixelFormat pixel_formats[] = { + AV_PIX_FMT_QSV, AV_PIX_FMT_NONE, + }; + AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats); + int ret; + + if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0) + return ret; + + return 0; +} + +static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req, + mfxFrameAllocResponse *resp) +{ + AVFilterContext *ctx = pthis; + QSVDeintContext *s = ctx->priv; + + if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) || + !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) || + !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME)) + return MFX_ERR_UNSUPPORTED; + + resp->mids = s->mem_ids; + resp->NumFrameActual = s->nb_mem_ids; + + return MFX_ERR_NONE; +} + +static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp) +{ + return MFX_ERR_NONE; +} + +static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr) +{ + return MFX_ERR_UNSUPPORTED; +} + +static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr) +{ + return MFX_ERR_UNSUPPORTED; +} + +static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl) +{ + *hdl = mid; + return MFX_ERR_NONE; +} + +static const mfxHandleType handle_types[] = { + MFX_HANDLE_VA_DISPLAY, + MFX_HANDLE_D3D9_DEVICE_MANAGER, + MFX_HANDLE_D3D11_DEVICE, +}; + +static int init_out_session(AVFilterContext *ctx) +{ + + QSVDeintContext *s = ctx->priv; + AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)s->hw_frames_ctx->data; + AVQSVFramesContext *hw_frames_hwctx = hw_frames_ctx->hwctx; + AVQSVDeviceContext *device_hwctx = hw_frames_ctx->device_ctx->hwctx; + + 
int opaque = !!(hw_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME); + + mfxHDL handle = NULL; + mfxHandleType handle_type; + mfxVersion ver; + mfxIMPL impl; + mfxVideoParam par; + mfxStatus err; + int i; + + /* extract the properties of the "master" session given to us */ + err = MFXQueryIMPL(device_hwctx->session, &impl); + if (err == MFX_ERR_NONE) + err = MFXQueryVersion(device_hwctx->session, &ver); + if (err != MFX_ERR_NONE) { + av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n"); + return AVERROR_UNKNOWN; + } + + for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) { + err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle); + if (err == MFX_ERR_NONE) { + handle_type = handle_types[i]; + break; + } + } + + /* create a "slave" session with those same properties, to be used for + * actual deinterlacing */ + err = MFXInit(impl, &ver, &s->session); + if (err != MFX_ERR_NONE) { + av_log(ctx, AV_LOG_ERROR, "Error initializing a session for deinterlacing\n"); + return AVERROR_UNKNOWN; + } + + if (handle) { + err = MFXVideoCORE_SetHandle(s->session, handle_type, handle); + if (err != MFX_ERR_NONE) + return AVERROR_UNKNOWN; + } + + memset(&par, 0, sizeof(par)); + + if (opaque) { + s->surface_ptrs = av_mallocz_array(hw_frames_hwctx->nb_surfaces, + sizeof(*s->surface_ptrs)); + if (!s->surface_ptrs) + return AVERROR(ENOMEM); + for (i = 0; i < hw_frames_hwctx->nb_surfaces; i++) + s->surface_ptrs[i] = hw_frames_hwctx->surfaces + i; + s->nb_surface_ptrs = hw_frames_hwctx->nb_surfaces; + + s->opaque_alloc.In.Surfaces = s->surface_ptrs; + s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs; + s->opaque_alloc.In.Type = hw_frames_hwctx->frame_type; + + s->opaque_alloc.Out = s->opaque_alloc.In; + + s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION; + s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc); + + s->ext_buffers[0] = (mfxExtBuffer*)&s->opaque_alloc; + + par.ExtParam = s->ext_buffers; + par.NumExtParam = FF_ARRAY_ELEMS(s->ext_buffers); + + par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY; + } else { + mfxFrameAllocator frame_allocator = { + .pthis = ctx, + .Alloc = frame_alloc, + .Lock = frame_lock, + .Unlock = frame_unlock, + .GetHDL = frame_get_hdl, + .Free = frame_free, + }; + + s->mem_ids = av_mallocz_array(hw_frames_hwctx->nb_surfaces, + sizeof(*s->mem_ids)); + if (!s->mem_ids) + return AVERROR(ENOMEM); + for (i = 0; i < hw_frames_hwctx->nb_surfaces; i++) + s->mem_ids[i] = hw_frames_hwctx->surfaces[i].Data.MemId; + s->nb_mem_ids = hw_frames_hwctx->nb_surfaces; + + err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator); + if (err != MFX_ERR_NONE) + return AVERROR_UNKNOWN; + + par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY; + } + + par.AsyncDepth = 1; // TODO async + + par.vpp.In = hw_frames_hwctx->surfaces[0].Info; + + par.vpp.In.CropW = ctx->inputs[0]->w; + par.vpp.In.CropH = ctx->inputs[0]->h; + + if (ctx->inputs[0]->frame_rate.num) { + par.vpp.In.FrameRateExtN = ctx->inputs[0]->frame_rate.num; + par.vpp.In.FrameRateExtD = ctx->inputs[0]->frame_rate.den; + } else { + par.vpp.In.FrameRateExtN = ctx->inputs[0]->time_base.num; + par.vpp.In.FrameRateExtD = ctx->inputs[0]->time_base.den; + } + + par.vpp.Out = par.vpp.In; + + if (ctx->outputs[0]->frame_rate.num) { + par.vpp.Out.FrameRateExtN = ctx->outputs[0]->frame_rate.num; + par.vpp.Out.FrameRateExtD = ctx->outputs[0]->frame_rate.den; + } else { + par.vpp.Out.FrameRateExtN = 
ctx->outputs[0]->time_base.num; + par.vpp.Out.FrameRateExtD = ctx->outputs[0]->time_base.den; + } + + err = MFXVideoVPP_Init(s->session, &par); + if (err != MFX_ERR_NONE) { + av_log(ctx, AV_LOG_ERROR, "Error opening the VPP for deinterlacing: %d\n", err); + return AVERROR_UNKNOWN; + } + + return 0; +} + +static int qsvdeint_config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + QSVDeintContext *s = ctx->priv; + int ret; + + qsvdeint_uninit(ctx); + + s->last_pts = AV_NOPTS_VALUE; + outlink->frame_rate = av_mul_q(inlink->frame_rate, + (AVRational){ 2, 1 }); + outlink->time_base = av_mul_q(inlink->time_base, + (AVRational){ 1, 2 }); + + /* check that we have a hw context */ + if (!inlink->hw_frames_ctx) { + av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n"); + return AVERROR(EINVAL); + } + + s->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx); + if (!s->hw_frames_ctx) + return AVERROR(ENOMEM); + + av_buffer_unref(&outlink->hw_frames_ctx); + outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx); + if (!outlink->hw_frames_ctx) { + qsvdeint_uninit(ctx); + return AVERROR(ENOMEM); + } + + ret = init_out_session(ctx); + if (ret < 0) + return ret; + + + return 0; +} + +static void clear_unused_frames(QSVDeintContext *s) +{ + QSVFrame *cur = s->work_frames; + while (cur) { + if (!cur->surface.Data.Locked) { + av_frame_free(&cur->frame); + cur->used = 0; + } + cur = cur->next; + } +} + +static int get_free_frame(QSVDeintContext *s, QSVFrame **f) +{ + QSVFrame *frame, **last; + + clear_unused_frames(s); + + frame = s->work_frames; + last = &s->work_frames; + while (frame) { + if (!frame->used) { + *f = frame; + return 0; + } + + last = &frame->next; + frame = frame->next; + } + + frame = av_mallocz(sizeof(*frame)); + if (!frame) + return AVERROR(ENOMEM); + *last = frame; + *f = frame; + + return 0; +} + +static int submit_frame(AVFilterContext *ctx, AVFrame *frame, + mfxFrameSurface1 **surface) +{ + QSVDeintContext *s = ctx->priv; + QSVFrame *qf; + int ret; + + ret = get_free_frame(s, &qf); + if (ret < 0) + return ret; + + qf->frame = frame; + + qf->surface = *(mfxFrameSurface1*)qf->frame->data[3]; + + qf->surface.Data.Locked = 0; + qf->surface.Info.CropW = qf->frame->width; + qf->surface.Info.CropH = qf->frame->height; + + qf->surface.Info.PicStruct = !qf->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE : + (qf->frame->top_field_first ? 
MFX_PICSTRUCT_FIELD_TFF : + MFX_PICSTRUCT_FIELD_BFF); + if (qf->frame->repeat_pict == 1) + qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED; + else if (qf->frame->repeat_pict == 2) + qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING; + else if (qf->frame->repeat_pict == 4) + qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING; + + if (ctx->inputs[0]->frame_rate.num) { + qf->surface.Info.FrameRateExtN = ctx->inputs[0]->frame_rate.num; + qf->surface.Info.FrameRateExtD = ctx->inputs[0]->frame_rate.den; + } else { + qf->surface.Info.FrameRateExtN = ctx->inputs[0]->time_base.num; + qf->surface.Info.FrameRateExtD = ctx->inputs[0]->time_base.den; + } + + qf->surface.Data.TimeStamp = av_rescale_q(qf->frame->pts, + ctx->inputs[0]->time_base, + (AVRational){1, 90000}); + + *surface = &qf->surface; + qf->used = 1; + + return 0; +} + +static int process_frame(AVFilterContext *ctx, const AVFrame *in, + mfxFrameSurface1 *surf_in) +{ + QSVDeintContext *s = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + + AVFrame *out; + mfxFrameSurface1 *surf_out; + mfxSyncPoint sync = NULL; + mfxStatus err; + int ret, again = 0; + + out = av_frame_alloc(); + if (!out) + return AVERROR(ENOMEM); + + ret = av_hwframe_get_buffer(s->hw_frames_ctx, out, 0); + if (ret < 0) + goto fail; + + surf_out = (mfxFrameSurface1*)out->data[3]; + surf_out->Info.CropW = outlink->w; + surf_out->Info.CropH = outlink->h; + surf_out->Info.PicStruct = MFX_PICSTRUCT_PROGRESSIVE; + + do { + err = MFXVideoVPP_RunFrameVPPAsync(s->session, surf_in, surf_out, + NULL, &sync); + if (err == MFX_WRN_DEVICE_BUSY) + av_usleep(1); + } while (err == MFX_WRN_DEVICE_BUSY); + + if (err == MFX_ERR_MORE_DATA) { + av_frame_free(&out); + return QSVDEINT_MORE_INPUT; + } + + if ((err < 0 && err != MFX_ERR_MORE_SURFACE) || !sync) { + av_log(ctx, AV_LOG_ERROR, "Error during deinterlacing: %d\n", err); + ret = AVERROR_UNKNOWN; + goto fail; + } + if (err == MFX_ERR_MORE_SURFACE) + again = 1; + + do { + err = MFXVideoCORE_SyncOperation(s->session, sync, 1000); + } while (err == MFX_WRN_IN_EXECUTION); + if (err < 0) { + av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err); + ret = AVERROR_UNKNOWN; + goto fail; + } + + ret = av_frame_copy_props(out, in); + if (ret < 0) + goto fail; + + out->width = outlink->w; + out->height = outlink->h; + out->interlaced_frame = 0; + + out->pts = av_rescale_q(out->pts, inlink->time_base, outlink->time_base); + if (out->pts == s->last_pts) + out->pts++; + s->last_pts = out->pts; + + ret = ff_filter_frame(outlink, out); + if (ret < 0) + return ret; + + return again ? 
QSVDEINT_MORE_OUTPUT : 0; +fail: + av_frame_free(&out); + return ret; +} + +static int qsvdeint_filter_frame(AVFilterLink *link, AVFrame *in) +{ + AVFilterContext *ctx = link->dst; + + mfxFrameSurface1 *surf_in; + int ret; + + ret = submit_frame(ctx, in, &surf_in); + if (ret < 0) { + av_frame_free(&in); + return ret; + } + + do { + ret = process_frame(ctx, in, surf_in); + if (ret < 0) + return ret; + } while (ret == QSVDEINT_MORE_OUTPUT); + + return 0; +} + +static int qsvdeint_request_frame(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + + return ff_request_frame(ctx->inputs[0]); +} + +#define OFFSET(x) offsetof(QSVDeintContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM +static const AVOption options[] = { + { NULL }, +}; + +static const AVClass qsvdeint_class = { + .class_name = "deinterlace_qsv", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +static const AVFilterPad qsvdeint_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = qsvdeint_filter_frame, + }, + { NULL } +}; + +static const AVFilterPad qsvdeint_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = qsvdeint_config_props, + .request_frame = qsvdeint_request_frame, + }, + { NULL } +}; + +AVFilter ff_vf_deinterlace_qsv = { + .name = "deinterlace_qsv", + .description = NULL_IF_CONFIG_SMALL("QuickSync video deinterlacing"), + + .uninit = qsvdeint_uninit, + .query_formats = qsvdeint_query_formats, + + .priv_size = sizeof(QSVDeintContext), + .priv_class = &qsvdeint_class, + + .inputs = qsvdeint_inputs, + .outputs = qsvdeint_outputs, +}; ====================================================================== diff --cc Changelog index ac9998e,d3201b3..8cefcb5 --- a/Changelog +++ b/Changelog @@@ -2,285 -2,17 +2,285 @@@ Entries are sorted chronologically fro releases are sorted from youngest to oldest. 
version <next>: -- aliases and defaults for Ogg subtypes (opus, spx) -- HEVC/H.265 RTP payload format (draft v6) packetizer and depacketizer -- avplay now exits by default at the end of playback -- XCB-based screen-grabber -- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer -- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer +- CrystalHD decoder moved to new decode API +- add internal ebur128 library, remove external libebur128 dependency +- Pro-MPEG CoP #3-R2 FEC protocol +- premultiply video filter +- Support for spherical videos +- configure now fails if autodetect-libraries are requested but not found +- PSD Decoder +- 16.8 floating point pcm decoder +- 24.0 floating point pcm decoder +- Apple Pixlet decoder +- QDMC audio decoder +- NewTek SpeedHQ decoder +- MIDI Sample Dump Standard demuxer +- readeia608 filter +- Sample Dump eXchange demuxer +- abitscope multimedia filter +- Scenarist Closed Captions demuxer and muxer +- threshold filter +- midequalizer filter +- Optimal Huffman tables for (M)JPEG encoding +- FM Screen Capture Codec decoder +- native Opus encoder +- ScreenPressor decoder +- incomplete ClearVideo decoder - - Intel QSV video scaling filter ++- Intel QSV video scaling and deinterlacing filters + +version 3.2: +- libopenmpt demuxer +- tee protocol +- Changed metadata print option to accept general urls +- Alias muxer for Ogg Video (.ogv) +- VP8 in Ogg muxing +- curves filter doesn't automatically insert points at x=0 and x=1 anymore +- 16-bit support in curves filter and selectivecolor filter +- OpenH264 decoder wrapper +- MediaCodec H.264/HEVC/MPEG-4/VP8/VP9 hwaccel +- True Audio (TTA) muxer +- crystalizer audio filter +- acrusher audio filter +- bitplanenoise video filter +- floating point support in als decoder +- fifo muxer +- maskedclamp filter +- hysteresis filter +- lut2 filter +- yuvtestsrc filter +- CUDA CUVID H.263/VP8/VP9/10 bit HEVC (Dithered) Decoding +- vaguedenoiser filter +- added threads option per filter instance +- weave filter +- gblur filter +- avgblur filter +- sobel and prewitt filter +- MediaCodec HEVC/MPEG-4/VP8/VP9 decoding +- Meridian Lossless Packing (MLP) / TrueHD encoder +- Non-Local Means (nlmeans) denoising filter +- sdl2 output device and ffplay support +- sdl1 output device and sdl1 support removed +- extended mov edit list support +- libfaac encoder removed +- Matroska muxer now writes CRC32 elements by default in all Level 1 elements +- sidedata video and asidedata audio filter +- Changed mapping of rtp MIME type G726 to codec g726le. 
+- spec compliant VAAPI/DXVA2 VC-1 decoding of slices in frame-coded images + + +version 3.1: +- DXVA2-accelerated HEVC Main10 decoding +- fieldhint filter +- loop video filter and aloop audio filter +- Bob Weaver deinterlacing filter +- firequalizer filter +- datascope filter +- bench and abench filters +- ciescope filter +- protocol blacklisting API +- MediaCodec H264 decoding +- VC-2 HQ RTP payload format (draft v1) depacketizer and packetizer +- VP9 RTP payload format (draft v2) packetizer +- AudioToolbox audio decoders +- AudioToolbox audio encoders +- coreimage filter (GPU based image filtering on OSX) +- libdcadec removed +- bitstream filter for extracting DTS core +- ADPCM IMA DAT4 decoder +- musx demuxer +- aix demuxer +- remap filter +- hash and framehash muxers +- colorspace filter +- hdcd filter +- readvitc filter +- VAAPI-accelerated format conversion and scaling +- libnpp/CUDA-accelerated format conversion and scaling +- Duck TrueMotion 2.0 Real Time decoder +- Wideband Single-bit Data (WSD) demuxer +- VAAPI-accelerated H.264/HEVC/MJPEG encoding +- DTS Express (LBR) decoder +- Generic OpenMAX IL encoder with support for Raspberry Pi +- IFF ANIM demuxer & decoder +- Direct Stream Transfer (DST) decoder +- loudnorm filter +- MTAF demuxer and decoder +- MagicYUV decoder +- OpenExr improvements (tile data and B44/B44A support) +- BitJazz SheerVideo decoder +- CUDA CUVID H264/HEVC decoder +- 10-bit depth support in native utvideo decoder +- libutvideo wrapper removed +- YUY2 Lossless Codec decoder +- VideoToolbox H.264 encoder +- VAAPI-accelerated MPEG-2 and VP8 encoding + + +version 3.0: +- Common Encryption (CENC) MP4 encoding and decoding support +- DXV decoding +- extrastereo filter +- ocr filter +- alimiter filter +- stereowiden filter +- stereotools filter +- rubberband filter +- tremolo filter +- agate filter +- chromakey filter +- maskedmerge filter +- Screenpresso SPV1 decoding +- chromaprint fingerprinting muxer +- ffplay dynamic volume control +- displace filter +- selectivecolor filter +- extensive native AAC encoder improvements and removal of experimental flag +- ADPCM PSX decoder +- 3dostr, dcstr, fsb, genh, vag, xvag, ads, msf, svag & vpk demuxer +- zscale filter +- wve demuxer +- zero-copy Intel QSV transcoding in ffmpeg +- shuffleframes filter +- SDX2 DPCM decoder +- vibrato filter +- innoHeim/Rsupport Screen Capture Codec decoder +- ADPCM AICA decoder +- Interplay ACM demuxer and audio decoder +- XMA1 & XMA2 decoder +- realtime filter +- anoisesrc audio filter source +- IVR demuxer +- compensationdelay filter +- acompressor filter +- support encoding 16-bit RLE SGI images +- apulsator filter +- sidechaingate audio filter +- mipsdspr1 option has been renamed to mipsdsp +- aemphasis filter +- mips32r5 option has been removed +- mips64r6 option has been removed +- DXVA2-accelerated VP9 decoding +- SOFAlizer: virtual binaural acoustics filter +- VAAPI VP9 hwaccel +- audio high-order multiband parametric equalizer +- automatic bitstream filtering +- showspectrumpic filter +- libstagefright support removed +- spectrumsynth filter +- ahistogram filter +- only seek with the right mouse button in ffplay +- toggle full screen when double-clicking with the left mouse button in ffplay +- afftfilt filter +- convolution filter +- libquvi support removed +- support for dvaudio in wav and avi +- libaacplus and libvo-aacenc support removed +- Cineform HD decoder +- new DCA decoder with full support for DTS-HD extensions +- significant performance improvements in Windows 
Television (WTV) demuxer +- nnedi deinterlacer +- streamselect video and astreamselect audio filter +- swaprect filter +- metadata video and ametadata audio filter +- SMPTE VC-2 HQ profile support for the Dirac decoder +- SMPTE VC-2 native encoder supporting the HQ profile + + +version 2.8: +- colorkey video filter +- BFSTM/BCSTM demuxer +- little-endian ADPCM_THP decoder +- Hap decoder and encoder +- DirectDraw Surface image/texture decoder +- ssim filter +- optional new ASF demuxer +- showvolume filter +- Many improvements to the JPEG 2000 decoder +- Go2Meeting decoding support +- adrawgraph audio and drawgraph video filter +- removegrain video filter +- Intel QSV-accelerated MPEG-2 video and HEVC encoding +- Intel QSV-accelerated MPEG-2 video and HEVC decoding +- Intel QSV-accelerated VC-1 video decoding +- libkvazaar HEVC encoder +- erosion, dilation, deflate and inflate video filters +- Dynamic Audio Normalizer as dynaudnorm filter +- Reverse video and areverse audio filter +- Random filter +- deband filter +- AAC fixed-point decoding +- sidechaincompress audio filter +- bitstream filter for converting HEVC from MP4 to Annex B +- acrossfade audio filter +- allyuv and allrgb video sources +- atadenoise video filter +- OS X VideoToolbox support +- aphasemeter filter +- showfreqs filter +- vectorscope filter +- waveform filter +- hstack and vstack filter +- Support DNx100 (1440x1080@8) +- VAAPI hevc hwaccel +- VDPAU hevc hwaccel +- framerate filter +- Switched default encoders for webm to VP9 and Opus +- Removed experimental flag from the JPEG 2000 encoder + + +version 2.7: +- FFT video filter +- TDSC decoder +- DTS lossless extension (XLL) decoding (not lossless, disabled by default) +- showwavespic filter +- DTS decoding through libdcadec +- Drop support for nvenc API before 5.0 +- nvenc HEVC encoder +- Detelecine filter +- Intel QSV-accelerated H.264 encoding +- MMAL-accelerated H.264 decoding +- basic APNG encoder and muxer with default extension "apng" +- unpack DivX-style packed B-frames in MPEG-4 bitstream filter +- WebM Live Chunk Muxer +- nvenc level and tier options +- chorus filter +- Canopus HQ/HQA decoder +- Automatically rotate videos based on metadata in ffmpeg +- improved Quickdraw compatibility +- VP9 high bit-depth and extended colorspaces decoding support +- WebPAnimEncoder API when available for encoding and muxing WebP +- Direct3D11-accelerated decoding +- Support Secure Transport +- Multipart JPEG demuxer + + +version 2.6: +- nvenc encoder +- 10bit spp filter +- colorlevels filter +- RIFX format for *.wav files - RTP/mpegts muxer -- VP8 in Ogg demuxing +- non continuous cache protocol support +- tblend filter +- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold +- Camellia symmetric block cipher - OpenH264 encoder wrapper +- VOC seeking support +- Closed caption Decoder +- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters +- showpalette filter +- Twofish symmetric block cipher - Support DNx100 (960x720@8) -- Direct3D11-accelerated decoding +- eq2 filter ported from libmpcodecs as eq filter +- removed libmpcodecs +- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range +- ported softpulldown filter from libmpcodecs as repeatfields filter +- dcshift filter +- RTP depacketizer for loss tolerant payload format for MP3 audio (RFC 5219) +- RTP depacketizer for AC3 payload format (RFC 4184) +- palettegen and paletteuse filters +- VP9 RTP payload format (draft 0) experimental 
depacketizer +- RTP depacketizer for DV (RFC 6469) - DXVA2-accelerated HEVC decoding - AAC ELD 480 decoding - Intel QSV-accelerated H.264 decoding diff --cc configure index dc18bfa,f12fa6f..1e2e774 --- a/configure +++ b/configure @@@ -3080,84 -2401,20 +3080,85 @@@ azmq_filter_deps="libzmq blackframe_filter_deps="gpl" boxblur_filter_deps="gpl" bs2b_filter_deps="libbs2b" +colormatrix_filter_deps="gpl" +cover_rect_filter_deps="avcodec avformat gpl" cropdetect_filter_deps="gpl" + deinterlace_qsv_filter_deps="libmfx" +deinterlace_vaapi_filter_deps="vaapi" delogo_filter_deps="gpl" +deshake_filter_select="pixelutils" drawtext_filter_deps="libfreetype" +eq_filter_deps="gpl" +fftfilt_filter_deps="avcodec" +fftfilt_filter_select="rdft" +find_rect_filter_deps="avcodec avformat gpl" +firequalizer_filter_deps="avcodec" +firequalizer_filter_select="rdft" +flite_filter_deps="libflite" +framerate_filter_select="pixelutils" frei0r_filter_deps="frei0r dlopen" -frei0r_filter_extralibs='$ldl' frei0r_src_filter_deps="frei0r dlopen" -frei0r_src_filter_extralibs='$ldl' +fspp_filter_deps="gpl" +geq_filter_deps="gpl" +histeq_filter_deps="gpl" hqdn3d_filter_deps="gpl" interlace_filter_deps="gpl" +kerndeint_filter_deps="gpl" +ladspa_filter_deps="ladspa dlopen" +mcdeint_filter_deps="avcodec gpl" +movie_filter_deps="avcodec avformat" +mpdecimate_filter_deps="gpl" +mpdecimate_filter_select="pixelutils" +mptestsrc_filter_deps="gpl" +negate_filter_deps="lut_filter" +nnedi_filter_deps="gpl" +ocr_filter_deps="libtesseract" ocv_filter_deps="libopencv" +owdenoise_filter_deps="gpl" +pan_filter_deps="swresample" +perspective_filter_deps="gpl" +phase_filter_deps="gpl" +pp7_filter_deps="gpl" +pp_filter_deps="gpl postproc" +pullup_filter_deps="gpl" +removelogo_filter_deps="avcodec avformat swscale" +repeatfields_filter_deps="gpl" resample_filter_deps="avresample" +rubberband_filter_deps="librubberband" +sab_filter_deps="gpl swscale" +scale2ref_filter_deps="swscale" scale_filter_deps="swscale" scale_qsv_filter_deps="libmfx" +select_filter_select="pixelutils" +showcqt_filter_deps="avcodec avformat swscale" +showcqt_filter_select="fft" +showfreqs_filter_deps="avcodec" +showfreqs_filter_select="fft" +showspectrum_filter_deps="avcodec" +showspectrum_filter_select="fft" +showspectrumpic_filter_deps="avcodec" +showspectrumpic_filter_select="fft" +smartblur_filter_deps="gpl swscale" +sofalizer_filter_deps="netcdf avcodec" +sofalizer_filter_select="fft" +spectrumsynth_filter_deps="avcodec" +spectrumsynth_filter_select="fft" +spp_filter_deps="gpl avcodec" +spp_filter_select="fft idctdsp fdctdsp me_cmp pixblockdsp" +stereo3d_filter_deps="gpl" +subtitles_filter_deps="avformat avcodec libass" +super2xsai_filter_deps="gpl" +pixfmts_super2xsai_test_deps="super2xsai_filter" +tinterlace_filter_deps="gpl" +tinterlace_merge_test_deps="tinterlace_filter" +tinterlace_pad_test_deps="tinterlace_filter" +uspp_filter_deps="gpl avcodec" +vaguedenoiser_filter_deps="gpl" +vidstabdetect_filter_deps="libvidstab" +vidstabtransform_filter_deps="libvidstab" +zmq_filter_deps="libzmq" +zoompan_filter_deps="swscale" +zscale_filter_deps="libzimg" scale_vaapi_filter_deps="vaapi VAProcPipelineParameterBuffer" # examples diff --cc libavfilter/Makefile index 4d1180b,dea8ffa..428251f --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@@ -78,107 -33,26 +78,108 @@@ OBJS-$(CONFIG_BIQUAD_FILTER OBJS-$(CONFIG_BS2B_FILTER) += af_bs2b.o OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o +OBJS-$(CONFIG_CHORUS_FILTER) 
+= af_chorus.o generate_wave_table.o OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o +OBJS-$(CONFIG_COMPENSATIONDELAY_FILTER) += af_compensationdelay.o +OBJS-$(CONFIG_CRYSTALIZER_FILTER) += af_crystalizer.o +OBJS-$(CONFIG_DCSHIFT_FILTER) += af_dcshift.o +OBJS-$(CONFIG_DYNAUDNORM_FILTER) += af_dynaudnorm.o +OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o +OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o +OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o +OBJS-$(CONFIG_EXTRASTEREO_FILTER) += af_extrastereo.o +OBJS-$(CONFIG_FIREQUALIZER_FILTER) += af_firequalizer.o +OBJS-$(CONFIG_FLANGER_FILTER) += af_flanger.o generate_wave_table.o +OBJS-$(CONFIG_HDCD_FILTER) += af_hdcd.o +OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o OBJS-$(CONFIG_JOIN_FILTER) += af_join.o +OBJS-$(CONFIG_LADSPA_FILTER) += af_ladspa.o +OBJS-$(CONFIG_LOUDNORM_FILTER) += af_loudnorm.o ebur128.o +OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o +OBJS-$(CONFIG_PAN_FILTER) += af_pan.o +OBJS-$(CONFIG_REPLAYGAIN_FILTER) += af_replaygain.o OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o +OBJS-$(CONFIG_RUBBERBAND_FILTER) += af_rubberband.o +OBJS-$(CONFIG_SIDECHAINCOMPRESS_FILTER) += af_sidechaincompress.o +OBJS-$(CONFIG_SIDECHAINGATE_FILTER) += af_agate.o +OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o +OBJS-$(CONFIG_SILENCEREMOVE_FILTER) += af_silenceremove.o +OBJS-$(CONFIG_SOFALIZER_FILTER) += af_sofalizer.o +OBJS-$(CONFIG_STEREOTOOLS_FILTER) += af_stereotools.o +OBJS-$(CONFIG_STEREOWIDEN_FILTER) += af_stereowiden.o +OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o +OBJS-$(CONFIG_TREMOLO_FILTER) += af_tremolo.o +OBJS-$(CONFIG_VIBRATO_FILTER) += af_vibrato.o generate_wave_table.o OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o +OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o -OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o +OBJS-$(CONFIG_AEVALSRC_FILTER) += aeval.o +OBJS-$(CONFIG_ANOISESRC_FILTER) += asrc_anoisesrc.o OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o +OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o +OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o + +OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o # video filters +OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_extractplanes.o +OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o +OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o +OBJS-$(CONFIG_ATADENOISE_FILTER) += vf_atadenoise.o +OBJS-$(CONFIG_AVGBLUR_FILTER) += vf_avgblur.o +OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o +OBJS-$(CONFIG_BENCH_FILTER) += f_bench.o +OBJS-$(CONFIG_BITPLANENOISE_FILTER) += vf_bitplanenoise.o +OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o +OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o +OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o +OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o +OBJS-$(CONFIG_CIESCOPE_FILTER) += vf_ciescope.o +OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o +OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o +OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o +OBJS-$(CONFIG_COLORKEY_FILTER) += vf_colorkey.o +OBJS-$(CONFIG_COLORLEVELS_FILTER) += vf_colorlevels.o +OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o +OBJS-$(CONFIG_COLORSPACE_FILTER) += vf_colorspace.o colorspacedsp.o +OBJS-$(CONFIG_CONVOLUTION_FILTER) += vf_convolution.o OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o +OBJS-$(CONFIG_COREIMAGE_FILTER) += vf_coreimage.o +OBJS-$(CONFIG_COVER_RECT_FILTER) += vf_cover_rect.o lavfutils.o OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o 
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o +OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o +OBJS-$(CONFIG_DATASCOPE_FILTER) += vf_datascope.o +OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o +OBJS-$(CONFIG_DEBAND_FILTER) += vf_deband.o +OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o +OBJS-$(CONFIG_DEFLATE_FILTER) += vf_neighbor.o + OBJS-$(CONFIG_DEINTERLACE_QSV_FILTER) += vf_deinterlace_qsv.o +OBJS-$(CONFIG_DEINTERLACE_VAAPI_FILTER) += vf_deinterlace_vaapi.o +OBJS-$(CONFIG_DEJUDDER_FILTER) += vf_dejudder.o OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o +OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o +OBJS-$(CONFIG_DETELECINE_FILTER) += vf_detelecine.o +OBJS-$(CONFIG_DILATION_FILTER) += vf_neighbor.o +OBJS-$(CONFIG_DISPLACE_FILTER) += vf_displace.o framesync.o OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o +OBJS-$(CONFIG_DRAWGRAPH_FILTER) += f_drawgraph.o +OBJS-$(CONFIG_DRAWGRID_FILTER) += vf_drawbox.o OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o +OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o +OBJS-$(CONFIG_ELBG_FILTER) += vf_elbg.o +OBJS-$(CONFIG_EQ_FILTER) += vf_eq.o +OBJS-$(CONFIG_EROSION_FILTER) += vf_neighbor.o +OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o +OBJS-$(CONFIG_FFTFILT_FILTER) += vf_fftfilt.o +OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o +OBJS-$(CONFIG_FIELDHINT_FILTER) += vf_fieldhint.o +OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o +OBJS-$(CONFIG_FIND_RECT_FILTER) += vf_find_rect.o lavfutils.o OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o OBJS-$(CONFIG_FRAMEPACK_FILTER) += vf_framepack.o diff --cc libavfilter/allfilters.c index 6aa482d,de49d65..2bf34ef --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@@ -133,63 -65,17 +133,64 @@@ static void register_all(void REGISTER_FILTER(ANULLSINK, anullsink, asink); + REGISTER_FILTER(ALPHAEXTRACT, alphaextract, vf); + REGISTER_FILTER(ALPHAMERGE, alphamerge, vf); + REGISTER_FILTER(ASS, ass, vf); + REGISTER_FILTER(ATADENOISE, atadenoise, vf); + REGISTER_FILTER(AVGBLUR, avgblur, vf); + REGISTER_FILTER(BBOX, bbox, vf); + REGISTER_FILTER(BENCH, bench, vf); + REGISTER_FILTER(BITPLANENOISE, bitplanenoise, vf); + REGISTER_FILTER(BLACKDETECT, blackdetect, vf); REGISTER_FILTER(BLACKFRAME, blackframe, vf); + REGISTER_FILTER(BLEND, blend, vf); REGISTER_FILTER(BOXBLUR, boxblur, vf); + REGISTER_FILTER(BWDIF, bwdif, vf); + REGISTER_FILTER(CHROMAKEY, chromakey, vf); + REGISTER_FILTER(CIESCOPE, ciescope, vf); + REGISTER_FILTER(CODECVIEW, codecview, vf); + REGISTER_FILTER(COLORBALANCE, colorbalance, vf); + REGISTER_FILTER(COLORCHANNELMIXER, colorchannelmixer, vf); + REGISTER_FILTER(COLORKEY, colorkey, vf); + REGISTER_FILTER(COLORLEVELS, colorlevels, vf); + REGISTER_FILTER(COLORMATRIX, colormatrix, vf); + REGISTER_FILTER(COLORSPACE, colorspace, vf); + REGISTER_FILTER(CONVOLUTION, convolution, vf); REGISTER_FILTER(COPY, copy, vf); + REGISTER_FILTER(COREIMAGE, coreimage, vf); + REGISTER_FILTER(COVER_RECT, cover_rect, vf); REGISTER_FILTER(CROP, crop, vf); REGISTER_FILTER(CROPDETECT, cropdetect, vf); + REGISTER_FILTER(CURVES, curves, vf); + REGISTER_FILTER(DATASCOPE, datascope, vf); + REGISTER_FILTER(DCTDNOIZ, dctdnoiz, vf); + REGISTER_FILTER(DEBAND, deband, vf); + REGISTER_FILTER(DECIMATE, decimate, vf); + REGISTER_FILTER(DEFLATE, deflate, vf); + REGISTER_FILTER(DEINTERLACE_QSV,deinterlace_qsv,vf); + REGISTER_FILTER(DEINTERLACE_VAAPI, deinterlace_vaapi, vf); + REGISTER_FILTER(DEJUDDER, 
dejudder, vf); REGISTER_FILTER(DELOGO, delogo, vf); + REGISTER_FILTER(DESHAKE, deshake, vf); + REGISTER_FILTER(DETELECINE, detelecine, vf); + REGISTER_FILTER(DILATION, dilation, vf); + REGISTER_FILTER(DISPLACE, displace, vf); REGISTER_FILTER(DRAWBOX, drawbox, vf); + REGISTER_FILTER(DRAWGRAPH, drawgraph, vf); + REGISTER_FILTER(DRAWGRID, drawgrid, vf); REGISTER_FILTER(DRAWTEXT, drawtext, vf); + REGISTER_FILTER(EDGEDETECT, edgedetect, vf); + REGISTER_FILTER(ELBG, elbg, vf); + REGISTER_FILTER(EQ, eq, vf); + REGISTER_FILTER(EROSION, erosion, vf); + REGISTER_FILTER(EXTRACTPLANES, extractplanes, vf); REGISTER_FILTER(FADE, fade, vf); + REGISTER_FILTER(FFTFILT, fftfilt, vf); + REGISTER_FILTER(FIELD, field, vf); + REGISTER_FILTER(FIELDHINT, fieldhint, vf); + REGISTER_FILTER(FIELDMATCH, fieldmatch, vf); REGISTER_FILTER(FIELDORDER, fieldorder, vf); + REGISTER_FILTER(FIND_RECT, find_rect, vf); REGISTER_FILTER(FORMAT, format, vf); REGISTER_FILTER(FPS, fps, vf); REGISTER_FILTER(FRAMEPACK, framepack, vf); diff --cc libavfilter/version.h index 878711d,7f3ede2..e67f34b --- a/libavfilter/version.h +++ b/libavfilter/version.h @@@ -29,9 -29,9 +29,9 @@@ #include "libavutil/version.h" -#define LIBAVFILTER_VERSION_MAJOR 6 -#define LIBAVFILTER_VERSION_MINOR 6 -#define LIBAVFILTER_VERSION_MICRO 0 +#define LIBAVFILTER_VERSION_MAJOR 6 - #define LIBAVFILTER_VERSION_MINOR 75 ++#define LIBAVFILTER_VERSION_MINOR 76 +#define LIBAVFILTER_VERSION_MICRO 100 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ LIBAVFILTER_VERSION_MINOR, \ diff --cc libavfilter/vf_deinterlace_qsv.c index 0000000,b26a900..e7491e1 mode 000000,100644..100644 --- a/libavfilter/vf_deinterlace_qsv.c +++ b/libavfilter/vf_deinterlace_qsv.c @@@ -1,0 -1,580 +1,575 @@@ + /* - * This file is part of Libav. ++ * This file is part of FFmpeg. + * - * Libav is free software; you can redistribute it and/or ++ * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * - * Libav is distributed in the hope that it will be useful, ++ * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software ++ * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + /** + * @file + * deinterlace video filter - QSV + */ + + #include <mfx/mfxvideo.h> + + #include <stdio.h> + #include <string.h> + + #include "libavutil/avstring.h" + #include "libavutil/common.h" + #include "libavutil/hwcontext.h" + #include "libavutil/hwcontext_qsv.h" + #include "libavutil/internal.h" + #include "libavutil/mathematics.h" + #include "libavutil/opt.h" + #include "libavutil/pixdesc.h" + #include "libavutil/time.h" + + #include "avfilter.h" + #include "formats.h" + #include "internal.h" + #include "video.h" + + enum { + QSVDEINT_MORE_OUTPUT = 1, + QSVDEINT_MORE_INPUT, + }; + + typedef struct QSVFrame { + AVFrame *frame; + mfxFrameSurface1 surface; + int used; + + struct QSVFrame *next; + } QSVFrame; + + typedef struct QSVDeintContext { + const AVClass *class; + + AVBufferRef *hw_frames_ctx; + /* a clone of the main session, used internally for deinterlacing */ + mfxSession session; + + mfxMemId *mem_ids; + int nb_mem_ids; + + mfxFrameSurface1 **surface_ptrs; + int nb_surface_ptrs; + + mfxExtOpaqueSurfaceAlloc opaque_alloc; + mfxExtBuffer *ext_buffers[1]; + + QSVFrame *work_frames; + + int64_t last_pts; + - int got_output_frame; + int eof; + } QSVDeintContext; + + static void qsvdeint_uninit(AVFilterContext *ctx) + { + QSVDeintContext *s = ctx->priv; + QSVFrame *cur; + + if (s->session) { + MFXClose(s->session); + s->session = NULL; + } + av_buffer_unref(&s->hw_frames_ctx); + + cur = s->work_frames; + while (cur) { + s->work_frames = cur->next; + av_frame_free(&cur->frame); + av_freep(&cur); + cur = s->work_frames; + } + + av_freep(&s->mem_ids); + s->nb_mem_ids = 0; + + av_freep(&s->surface_ptrs); + s->nb_surface_ptrs = 0; + } + + static int qsvdeint_query_formats(AVFilterContext *ctx) + { + static const enum AVPixelFormat pixel_formats[] = { + AV_PIX_FMT_QSV, AV_PIX_FMT_NONE, + }; + AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats); ++ int ret; + - ff_set_common_formats(ctx, pix_fmts); ++ if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0) ++ return ret; + + return 0; + } + + static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req, + mfxFrameAllocResponse *resp) + { + AVFilterContext *ctx = pthis; + QSVDeintContext *s = ctx->priv; + + if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) || + !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) || + !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME)) + return MFX_ERR_UNSUPPORTED; + + resp->mids = s->mem_ids; + resp->NumFrameActual = s->nb_mem_ids; + + return MFX_ERR_NONE; + } + + static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp) + { + return MFX_ERR_NONE; + } + + static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr) + { + return MFX_ERR_UNSUPPORTED; + } + + static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr) + { + return MFX_ERR_UNSUPPORTED; + } + + static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl) + { + *hdl = mid; + return MFX_ERR_NONE; + } + + static const mfxHandleType handle_types[] = { + MFX_HANDLE_VA_DISPLAY, + MFX_HANDLE_D3D9_DEVICE_MANAGER, + MFX_HANDLE_D3D11_DEVICE, + }; + + static int init_out_session(AVFilterContext *ctx) + { + + QSVDeintContext *s = ctx->priv; + AVHWFramesContext *hw_frames_ctx = 
(AVHWFramesContext*)s->hw_frames_ctx->data; + AVQSVFramesContext *hw_frames_hwctx = hw_frames_ctx->hwctx; + AVQSVDeviceContext *device_hwctx = hw_frames_ctx->device_ctx->hwctx; + + int opaque = !!(hw_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME); + + mfxHDL handle = NULL; + mfxHandleType handle_type; + mfxVersion ver; + mfxIMPL impl; + mfxVideoParam par; + mfxStatus err; + int i; + + /* extract the properties of the "master" session given to us */ + err = MFXQueryIMPL(device_hwctx->session, &impl); + if (err == MFX_ERR_NONE) + err = MFXQueryVersion(device_hwctx->session, &ver); + if (err != MFX_ERR_NONE) { + av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n"); + return AVERROR_UNKNOWN; + } + + for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) { + err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle); + if (err == MFX_ERR_NONE) { + handle_type = handle_types[i]; + break; + } + } + + /* create a "slave" session with those same properties, to be used for + * actual deinterlacing */ + err = MFXInit(impl, &ver, &s->session); + if (err != MFX_ERR_NONE) { + av_log(ctx, AV_LOG_ERROR, "Error initializing a session for deinterlacing\n"); + return AVERROR_UNKNOWN; + } + + if (handle) { + err = MFXVideoCORE_SetHandle(s->session, handle_type, handle); + if (err != MFX_ERR_NONE) + return AVERROR_UNKNOWN; + } + + memset(&par, 0, sizeof(par)); + + if (opaque) { + s->surface_ptrs = av_mallocz_array(hw_frames_hwctx->nb_surfaces, + sizeof(*s->surface_ptrs)); + if (!s->surface_ptrs) + return AVERROR(ENOMEM); + for (i = 0; i < hw_frames_hwctx->nb_surfaces; i++) + s->surface_ptrs[i] = hw_frames_hwctx->surfaces + i; + s->nb_surface_ptrs = hw_frames_hwctx->nb_surfaces; + + s->opaque_alloc.In.Surfaces = s->surface_ptrs; + s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs; + s->opaque_alloc.In.Type = hw_frames_hwctx->frame_type; + + s->opaque_alloc.Out = s->opaque_alloc.In; + + s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION; + s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc); + + s->ext_buffers[0] = (mfxExtBuffer*)&s->opaque_alloc; + + par.ExtParam = s->ext_buffers; + par.NumExtParam = FF_ARRAY_ELEMS(s->ext_buffers); + + par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY; + } else { + mfxFrameAllocator frame_allocator = { + .pthis = ctx, + .Alloc = frame_alloc, + .Lock = frame_lock, + .Unlock = frame_unlock, + .GetHDL = frame_get_hdl, + .Free = frame_free, + }; + + s->mem_ids = av_mallocz_array(hw_frames_hwctx->nb_surfaces, + sizeof(*s->mem_ids)); + if (!s->mem_ids) + return AVERROR(ENOMEM); + for (i = 0; i < hw_frames_hwctx->nb_surfaces; i++) + s->mem_ids[i] = hw_frames_hwctx->surfaces[i].Data.MemId; + s->nb_mem_ids = hw_frames_hwctx->nb_surfaces; + + err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator); + if (err != MFX_ERR_NONE) + return AVERROR_UNKNOWN; + + par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY; + } + + par.AsyncDepth = 1; // TODO async + + par.vpp.In = hw_frames_hwctx->surfaces[0].Info; + + par.vpp.In.CropW = ctx->inputs[0]->w; + par.vpp.In.CropH = ctx->inputs[0]->h; + + if (ctx->inputs[0]->frame_rate.num) { + par.vpp.In.FrameRateExtN = ctx->inputs[0]->frame_rate.num; + par.vpp.In.FrameRateExtD = ctx->inputs[0]->frame_rate.den; + } else { + par.vpp.In.FrameRateExtN = ctx->inputs[0]->time_base.num; + par.vpp.In.FrameRateExtD = ctx->inputs[0]->time_base.den; + } + + par.vpp.Out = par.vpp.In; + + if (ctx->outputs[0]->frame_rate.num) { + 
par.vpp.Out.FrameRateExtN = ctx->outputs[0]->frame_rate.num; + par.vpp.Out.FrameRateExtD = ctx->outputs[0]->frame_rate.den; + } else { + par.vpp.Out.FrameRateExtN = ctx->outputs[0]->time_base.num; + par.vpp.Out.FrameRateExtD = ctx->outputs[0]->time_base.den; + } + + err = MFXVideoVPP_Init(s->session, &par); + if (err != MFX_ERR_NONE) { + av_log(ctx, AV_LOG_ERROR, "Error opening the VPP for deinterlacing: %d\n", err); + return AVERROR_UNKNOWN; + } + + return 0; + } + + static int qsvdeint_config_props(AVFilterLink *outlink) + { + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + QSVDeintContext *s = ctx->priv; + int ret; + + qsvdeint_uninit(ctx); + + s->last_pts = AV_NOPTS_VALUE; + outlink->frame_rate = av_mul_q(inlink->frame_rate, + (AVRational){ 2, 1 }); + outlink->time_base = av_mul_q(inlink->time_base, + (AVRational){ 1, 2 }); + + /* check that we have a hw context */ + if (!inlink->hw_frames_ctx) { + av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n"); + return AVERROR(EINVAL); + } + + s->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx); + if (!s->hw_frames_ctx) + return AVERROR(ENOMEM); + + av_buffer_unref(&outlink->hw_frames_ctx); + outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx); + if (!outlink->hw_frames_ctx) { + qsvdeint_uninit(ctx); + return AVERROR(ENOMEM); + } + + ret = init_out_session(ctx); + if (ret < 0) + return ret; + + + return 0; + } + + static void clear_unused_frames(QSVDeintContext *s) + { + QSVFrame *cur = s->work_frames; + while (cur) { + if (!cur->surface.Data.Locked) { + av_frame_free(&cur->frame); + cur->used = 0; + } + cur = cur->next; + } + } + + static int get_free_frame(QSVDeintContext *s, QSVFrame **f) + { + QSVFrame *frame, **last; + + clear_unused_frames(s); + + frame = s->work_frames; + last = &s->work_frames; + while (frame) { + if (!frame->used) { + *f = frame; + return 0; + } + + last = &frame->next; + frame = frame->next; + } + + frame = av_mallocz(sizeof(*frame)); + if (!frame) + return AVERROR(ENOMEM); + *last = frame; + *f = frame; + + return 0; + } + + static int submit_frame(AVFilterContext *ctx, AVFrame *frame, + mfxFrameSurface1 **surface) + { + QSVDeintContext *s = ctx->priv; + QSVFrame *qf; + int ret; + + ret = get_free_frame(s, &qf); + if (ret < 0) + return ret; + + qf->frame = frame; + + qf->surface = *(mfxFrameSurface1*)qf->frame->data[3]; + + qf->surface.Data.Locked = 0; + qf->surface.Info.CropW = qf->frame->width; + qf->surface.Info.CropH = qf->frame->height; + + qf->surface.Info.PicStruct = !qf->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE : + (qf->frame->top_field_first ? 
MFX_PICSTRUCT_FIELD_TFF : + MFX_PICSTRUCT_FIELD_BFF); + if (qf->frame->repeat_pict == 1) + qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED; + else if (qf->frame->repeat_pict == 2) + qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING; + else if (qf->frame->repeat_pict == 4) + qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING; + + if (ctx->inputs[0]->frame_rate.num) { + qf->surface.Info.FrameRateExtN = ctx->inputs[0]->frame_rate.num; + qf->surface.Info.FrameRateExtD = ctx->inputs[0]->frame_rate.den; + } else { + qf->surface.Info.FrameRateExtN = ctx->inputs[0]->time_base.num; + qf->surface.Info.FrameRateExtD = ctx->inputs[0]->time_base.den; + } + + qf->surface.Data.TimeStamp = av_rescale_q(qf->frame->pts, + ctx->inputs[0]->time_base, + (AVRational){1, 90000}); + + *surface = &qf->surface; + qf->used = 1; + + return 0; + } + + static int process_frame(AVFilterContext *ctx, const AVFrame *in, + mfxFrameSurface1 *surf_in) + { + QSVDeintContext *s = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + + AVFrame *out; + mfxFrameSurface1 *surf_out; + mfxSyncPoint sync = NULL; + mfxStatus err; + int ret, again = 0; + + out = av_frame_alloc(); + if (!out) + return AVERROR(ENOMEM); + + ret = av_hwframe_get_buffer(s->hw_frames_ctx, out, 0); + if (ret < 0) + goto fail; + + surf_out = (mfxFrameSurface1*)out->data[3]; + surf_out->Info.CropW = outlink->w; + surf_out->Info.CropH = outlink->h; + surf_out->Info.PicStruct = MFX_PICSTRUCT_PROGRESSIVE; + + do { + err = MFXVideoVPP_RunFrameVPPAsync(s->session, surf_in, surf_out, + NULL, &sync); + if (err == MFX_WRN_DEVICE_BUSY) + av_usleep(1); + } while (err == MFX_WRN_DEVICE_BUSY); + + if (err == MFX_ERR_MORE_DATA) { + av_frame_free(&out); + return QSVDEINT_MORE_INPUT; + } + + if ((err < 0 && err != MFX_ERR_MORE_SURFACE) || !sync) { + av_log(ctx, AV_LOG_ERROR, "Error during deinterlacing: %d\n", err); + ret = AVERROR_UNKNOWN; + goto fail; + } + if (err == MFX_ERR_MORE_SURFACE) + again = 1; + + do { + err = MFXVideoCORE_SyncOperation(s->session, sync, 1000); + } while (err == MFX_WRN_IN_EXECUTION); + if (err < 0) { + av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err); + ret = AVERROR_UNKNOWN; + goto fail; + } + + ret = av_frame_copy_props(out, in); + if (ret < 0) + goto fail; + + out->width = outlink->w; + out->height = outlink->h; + out->interlaced_frame = 0; + + out->pts = av_rescale_q(out->pts, inlink->time_base, outlink->time_base); + if (out->pts == s->last_pts) + out->pts++; + s->last_pts = out->pts; + + ret = ff_filter_frame(outlink, out); + if (ret < 0) + return ret; + + return again ? 
QSVDEINT_MORE_OUTPUT : 0; + fail: + av_frame_free(&out); + return ret; + } + + static int qsvdeint_filter_frame(AVFilterLink *link, AVFrame *in) + { + AVFilterContext *ctx = link->dst; + + mfxFrameSurface1 *surf_in; + int ret; + + ret = submit_frame(ctx, in, &surf_in); + if (ret < 0) { + av_frame_free(&in); + return ret; + } + + do { + ret = process_frame(ctx, in, surf_in); + if (ret < 0) + return ret; + } while (ret == QSVDEINT_MORE_OUTPUT); + + return 0; + } + + static int qsvdeint_request_frame(AVFilterLink *outlink) + { + AVFilterContext *ctx = outlink->src; - QSVDeintContext *s = ctx->priv; - int ret = 0; + - s->got_output_frame = 0; - while (ret >= 0 && !s->got_output_frame) - ret = ff_request_frame(ctx->inputs[0]); - - return ret; ++ return ff_request_frame(ctx->inputs[0]); + } + + #define OFFSET(x) offsetof(QSVDeintContext, x) + #define FLAGS AV_OPT_FLAG_VIDEO_PARAM + static const AVOption options[] = { + { NULL }, + }; + + static const AVClass qsvdeint_class = { + .class_name = "deinterlace_qsv", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, + }; + + static const AVFilterPad qsvdeint_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = qsvdeint_filter_frame, + }, + { NULL } + }; + + static const AVFilterPad qsvdeint_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = qsvdeint_config_props, + .request_frame = qsvdeint_request_frame, + }, + { NULL } + }; + + AVFilter ff_vf_deinterlace_qsv = { + .name = "deinterlace_qsv", + .description = NULL_IF_CONFIG_SMALL("QuickSync video deinterlacing"), + + .uninit = qsvdeint_uninit, + .query_formats = qsvdeint_query_formats, + + .priv_size = sizeof(QSVDeintContext), + .priv_class = &qsvdeint_class, + + .inputs = qsvdeint_inputs, + .outputs = qsvdeint_outputs, + }; _______________________________________________ ffmpeg-cvslog mailing list ffmpeg-cvslog@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog
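For anyone wanting to try the new filter, a rough usage sketch follows. It assumes an Intel system where ffmpeg was configured with --enable-libmfx and the input is decodable by h264_qsv; the file names and bitrate are placeholders, and the exact option set may differ depending on the build and driver:

  ffmpeg -hwaccel qsv -c:v h264_qsv -i interlaced.ts \
         -vf deinterlace_qsv \
         -c:v h264_qsv -b:v 5M progressive.mp4

Note that the filter only accepts AV_PIX_FMT_QSV hardware frames (see qsvdeint_query_formats above), so the decoder and encoder on either side of it need to keep the frames in GPU memory for the graph to link up.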