ffmpeg | branch: master | Zhong Li <zhong...@intel.com> | Mon May 21 17:58:52 2018 +0800| [a5e1cb9e96bca091ed7103d8be72a99e7dc31582] | committer: Zhong Li
lavu/hwcontext_qsv: Add support for AV_PIX_FMT_BGRA

RGB32 (AV_PIX_FMT_BGRA on Intel platforms) may be used as an overlay with
alpha blending, so add support for the AV_PIX_FMT_BGRA format.

One example of an alpha blending overlay:

ffmpeg -hwaccel qsv -c:v h264_qsv -i BA1_Sony_D.jsv -filter_complex
'movie=lena-rgba.png,hwupload=extra_hw_frames=16[a];[0:v][a]overlay_qsv=x=10:y=10'
-c:v h264_qsv -y out.mp4

Rename RGB32 to BGRA to make it clearer, as suggested by Mark Thompson.

V2: Add P010 format support, otherwise an HEVC 10bit encoding regression
would be introduced. Thanks to Linjie for the discovery.

Signed-off-by: Zhong Li <zhong...@intel.com>
Verified-by: Fu, Linjie <linjie...@intel.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=a5e1cb9e96bca091ed7103d8be72a99e7dc31582
---
 libavfilter/qsvvpp.c      |  2 +-
 libavutil/hwcontext_qsv.c | 44 ++++++++++++++++++++++++++++++++++----------
 2 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/libavfilter/qsvvpp.c b/libavfilter/qsvvpp.c
index 7ee1e565b6..06efdf5089 100644
--- a/libavfilter/qsvvpp.c
+++ b/libavfilter/qsvvpp.c
@@ -142,7 +142,7 @@ static int pix_fmt_to_mfx_fourcc(int format)
         return MFX_FOURCC_NV12;
     case AV_PIX_FMT_YUYV422:
         return MFX_FOURCC_YUY2;
-    case AV_PIX_FMT_RGB32:
+    case AV_PIX_FMT_BGRA:
         return MFX_FOURCC_RGB4;
     }

diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index a581d2a401..33e121b416 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -100,6 +100,7 @@ static const struct {
     uint32_t fourcc;
 } supported_pixel_formats[] = {
     { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+    { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 },
     { AV_PIX_FMT_P010, MFX_FOURCC_P010 },
     { AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
 };
@@ -751,6 +752,37 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
     return ret;
 }

+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+    switch (frame->format) {
+    case AV_PIX_FMT_NV12:
+    case AV_PIX_FMT_P010:
+        surface->Data.Y  = frame->data[0];
+        surface->Data.UV = frame->data[1];
+        break;
+
+    case AV_PIX_FMT_YUV420P:
+        surface->Data.Y = frame->data[0];
+        surface->Data.U = frame->data[1];
+        surface->Data.V = frame->data[2];
+        break;
+
+    case AV_PIX_FMT_BGRA:
+        surface->Data.B = frame->data[0];
+        surface->Data.G = frame->data[0] + 1;
+        surface->Data.R = frame->data[0] + 2;
+        surface->Data.A = frame->data[0] + 3;
+        break;
+
+    default:
+        return MFX_ERR_UNSUPPORTED;
+    }
+    surface->Data.Pitch     = frame->linesize[0];
+    surface->Data.TimeStamp = frame->pts;
+
+    return 0;
+}
+
 static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
                                   const AVFrame *src)
 {
@@ -796,11 +828,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
     }

     out.Info = in->Info;
-    out.Data.PitchLow = dst->linesize[0];
-    out.Data.Y        = dst->data[0];
-    out.Data.U        = dst->data[1];
-    out.Data.V        = dst->data[2];
-    out.Data.A        = dst->data[3];
+    map_frame_to_surface(dst, &out);

     do {
         err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
@@ -868,11 +896,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
     }

     in.Info = out->Info;
-    in.Data.PitchLow = src->linesize[0];
-    in.Data.Y        = src->data[0];
-    in.Data.U        = src->data[1];
-    in.Data.V        = src->data[2];
-    in.Data.A        = src->data[3];
+    map_frame_to_surface(src, &in);

     do {
         err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
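
For reference, the pointer setup done by the new BGRA branch of map_frame_to_surface() can be illustrated without the Media SDK headers. The following is a minimal sketch, not the actual hwcontext_qsv.c code path: the struct fake_mfx_data is a made-up stand-in that only mirrors the B/G/R/A/Pitch fields of mfxFrameData, used here to show why all four component pointers land in the same packed plane at byte offsets 0..3.

/* Minimal sketch (assumption: fake_mfx_data is a hypothetical stand-in for
 * the real mfxFrameData) showing the packed-BGRA pointer mapping that the
 * new code relies on. Link with -lavutil. */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Hypothetical subset of mfxFrameData, for illustration only. */
struct fake_mfx_data {
    uint8_t *B, *G, *R, *A;
    int pitch;
};

int main(void)
{
    AVFrame *frame = av_frame_alloc();
    struct fake_mfx_data data = { 0 };

    if (!frame)
        return 1;
    frame->format = AV_PIX_FMT_BGRA; /* packed B,G,R,A: 4 bytes per pixel */
    frame->width  = 64;
    frame->height = 64;
    if (av_frame_get_buffer(frame, 32) < 0) {
        av_frame_free(&frame);
        return 1;
    }

    /* Same idea as the BGRA case in map_frame_to_surface(): all four
     * component pointers live in data[0], each offset by one byte. */
    data.B     = frame->data[0];
    data.G     = frame->data[0] + 1;
    data.R     = frame->data[0] + 2;
    data.A     = frame->data[0] + 3;
    data.pitch = frame->linesize[0];

    printf("pitch=%d, G-B=%td, R-B=%td, A-B=%td\n",
           data.pitch, data.G - data.B, data.R - data.B, data.A - data.B);

    av_frame_free(&frame);
    return 0;
}

The printed offsets of 1, 2 and 3 bytes are simply the component positions within a packed BGRA pixel; MFX_FOURCC_RGB4 expects exactly this interleaved layout, which is why no per-plane pointers are needed as in the NV12/P010 cases.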