[FFmpeg-devel] [PATCH 2/2] libavcodec/amfenc: Update AMF encoder options
Encoder options have been updated to the current version of the AMF. Signed-off-by: Araz Iusubov --- libavcodec/amfenc_av1.c | 108 +++-- libavcodec/amfenc_h264.c | 146 +-- libavcodec/amfenc_hevc.c | 143 +- 3 files changed, 259 insertions(+), 138 deletions(-) diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index 9f18aac648..d40c71cb33 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -25,15 +25,16 @@ #define OFFSET(x) offsetof(AmfContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { -{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" }, -{ "transcoding","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" }, -{ "lowlatency", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, -{ "profile","Set the profile (default main)", OFFSET(profile),AV_OPT_TYPE_INT,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, .unit = "profile" }, +{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" }, +{ "transcoding","Generic Transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" }, +{ "lowlatency", "Low latency usecase", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, + +{ "profile","Set the profile", OFFSET(profile), AV_OPT_TYPE_INT,{.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, .unit = "profile" }, { "main", "", 0, AV_OPT_TYPE_CONST,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, 0, 0, VE, .unit = "profile" }, -{ "level", "Set the encoding level (default auto)", OFFSET(level), AV_OPT_TYPE_INT,{.i64 = 0 }, 0, AMF_VIDEO_ENCODER_AV1_LEVEL_7_3, VE, .unit = "level" }, -{ "auto", "", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, VE, .unit = "level" }, +{ "level", "Set the encoding level (default auto)", OFFSET(level), AV_OPT_TYPE_INT,{.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_LEVEL_7_3, VE, .unit = "level" }, +{ "auto", "", 0, AV_OPT_TYPE_CONST, {.i64 = -1 }, 0, 0, VE, .unit = "level" }, { "2.0","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_0 }, 0, 0, VE, .unit = "level" }, { "2.1","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_1 }, 0, 0, VE, .unit = "level" }, { "2.2","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_2 }, 0, 0, VE, .unit = "level" }, @@ -59,11 +60,12 @@ static const AVOption options[] = { { "7.2","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_2 }, 0, 0, VE, .unit = "level" }, { "7.3","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_3 }, 0, 0, VE, .unit = "level" }, -{ "quality","Set the encoding quality", OFFSET(quality),AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_HIGH_QUALITY, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED, VE, .unit = "quality" }, +{ "quality","Set the encoding quality preset", OFFSET(quality), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED, VE, .unit = "quality" }, +{ "
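For reference, with the -1 defaults introduced above, av1_amf now leaves usage, profile, level and quality to the AMF runtime unless the user sets them explicitly. A hypothetical invocation exercising the option values named in this patch, following the command style used elsewhere in this thread (file names are placeholders):

command - ffmpeg -i input.mkv -c:v av1_amf -usage lowlatency -quality high_quality output.mkv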
[FFmpeg-devel] [PATCH 1/2] libavcodec/amfenc: Update AMF release version
---
 configure | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configure b/configure
index 8101b4fce6..23b140dc78 100755
--- a/configure
+++ b/configure
@@ -7364,7 +7364,7 @@ fi

 enabled amf && check_cpp_condition amf "AMF/core/Version.h" \
-    "(AMF_VERSION_MAJOR << 48 | AMF_VERSION_MINOR << 32 | AMF_VERSION_RELEASE << 16 | AMF_VERSION_BUILD_NUM) >= 0x00010004001d"
+    "(AMF_VERSION_MAJOR << 48 | AMF_VERSION_MINOR << 32 | AMF_VERSION_RELEASE << 16 | AMF_VERSION_BUILD_NUM) >= 0x000100040021"

 # Funny iconv installations are not unusual, so check it after all flags have been set
 if enabled libc_iconv; then
--
2.43.0.windows.1
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH 1/4] avcodec/amfenc: Fixes the color information in the output.
From: Michael Fabian 'Xaymar' Dirks added 10 bit support for amf hevc. before: command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file.mkv -an -c:v h264_amf res.dx11_hw_h264.mkv output - Format of input frames context (p010le) is not supported by AMF. command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv output - Format of input frames context (p010le) is not supported by AMF. after: command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v h264_amf res.dx11_hw_h264.mkv output - 10-bit input video is not supported by AMF H264 encoder command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv output - 10bit file v2 - lost line returned in ff_amf_pix_fmts v3 - fixes after review v4 - extract duplicated code, fix incorrect processing of 10-bit input for h264 v5 - non-functional changes after review Co-authored-by: Evgeny Pavlov --- libavcodec/amfenc.c | 37 + libavcodec/amfenc.h | 3 +++ libavcodec/amfenc_h264.c | 24 libavcodec/amfenc_hevc.c | 26 +- 4 files changed, 85 insertions(+), 5 deletions(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 061859f85c..0bd15dd812 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -60,6 +60,7 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = { #if CONFIG_DXVA2 AV_PIX_FMT_DXVA2_VLD, #endif +AV_PIX_FMT_P010, AV_PIX_FMT_NONE }; @@ -72,6 +73,7 @@ static const FormatMap format_map[] = { { AV_PIX_FMT_NONE, AMF_SURFACE_UNKNOWN }, { AV_PIX_FMT_NV12, AMF_SURFACE_NV12 }, +{ AV_PIX_FMT_P010, AMF_SURFACE_P010 }, { AV_PIX_FMT_BGR0, AMF_SURFACE_BGRA }, { AV_PIX_FMT_RGB0, AMF_SURFACE_RGBA }, { AV_PIX_FMT_GRAY8, AMF_SURFACE_GRAY8 }, @@ -785,6 +787,41 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) return ret; } +int ff_amf_get_color_profile(AVCodecContext *avctx) +{ +amf_int64 color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN; +if (avctx->color_range == AVCOL_RANGE_JPEG) { +/// Color Space for Full (JPEG) Range +switch (avctx->colorspace) { +case AVCOL_SPC_SMPTE170M: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601; +break; +case AVCOL_SPC_BT709: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709; +break; +case AVCOL_SPC_BT2020_NCL: +case AVCOL_SPC_BT2020_CL: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020; +break; +} +} else { +/// Color Space for Limited (MPEG) range +switch (avctx->colorspace) { +case AVCOL_SPC_SMPTE170M: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601; +break; +case AVCOL_SPC_BT709: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709; +break; +case AVCOL_SPC_BT2020_NCL: +case AVCOL_SPC_BT2020_CL: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020; +break; +} +} +return color_profile; +} + const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = { #if CONFIG_D3D11VA HW_CONFIG_ENCODER_FRAMES(D3D11, D3D11VA), diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 2dbd378ef8..62736ef579 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -21,6 +21,7 @@ #include +#include #include #include #include @@ -170,6 +171,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); */ extern const enum AVPixelFormat ff_amf_pix_fmts[]; +int ff_amf_get_color_profile(AVCodecContext *avctx); + /** * Error handling helper */ diff --git a/libavcodec/amfenc_h264.c 
b/libavcodec/amfenc_h264.c index bd544d12df..f785e091c9 100644 --- a/libavcodec/amfenc_h264.c +++ b/libavcodec/amfenc_h264.c @@ -199,6 +199,8 @@ static av_cold int amf_encode_init_h264(AVCodecContext *avctx) AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); int deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0; +amf_int64color_profile; +enum AVPixelFormat pix_fmt; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den); @@ -262,10 +264,24 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio); } -/// Color Range (Partial/TV/MPEG or Full/PC/JPEG) -
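The amfenc_h264.c hunk is cut off here, but the direction is visible from the new helper: the per-codec colorspace handling is replaced by a single call to ff_amf_get_color_profile(). A rough sketch of the caller side; the exact AMF property names below are my assumption, not a quote from the truncated hunk:

    /* sketch only, inside amf_encode_init_h264() */
    color_profile = ff_amf_get_color_profile(avctx);
    if (color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN)
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
                                  AMF_VIDEO_ENCODER_OUTPUT_COLOR_PROFILE, color_profile);
    /* full/limited range is still signalled separately */
    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR,
                             avctx->color_range == AVCOL_RANGE_JPEG);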
[FFmpeg-devel] [PATCH 2/4] avcodec/amfenc: HDR metadata.
From: nyanmisaka v2: fixes for indentation --- libavcodec/amfenc.c | 83 + 1 file changed, 83 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 0bd15dd812..068bb53002 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -36,6 +36,57 @@ #include "amfenc.h" #include "encode.h" #include "internal.h" +#include "libavutil/mastering_display_metadata.h" + +static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta) +{ +AVFrameSideData*sd_display; +AVFrameSideData*sd_light; +AVMasteringDisplayMetadata *display_meta; +AVContentLightMetadata *light_meta; + +sd_display = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA); +if (sd_display) { +display_meta = (AVMasteringDisplayMetadata *)sd_display->data; +if (display_meta->has_luminance) { +const unsigned int luma_den = 1; +hdrmeta->maxMasteringLuminance = +(amf_uint32)(luma_den * av_q2d(display_meta->max_luminance)); +hdrmeta->minMasteringLuminance = +FFMIN((amf_uint32)(luma_den * av_q2d(display_meta->min_luminance)), hdrmeta->maxMasteringLuminance); +} +if (display_meta->has_primaries) { +const unsigned int chroma_den = 5; +hdrmeta->redPrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][0])), chroma_den); +hdrmeta->redPrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][1])), chroma_den); +hdrmeta->greenPrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][0])), chroma_den); +hdrmeta->greenPrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][1])), chroma_den); +hdrmeta->bluePrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][0])), chroma_den); +hdrmeta->bluePrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][1])), chroma_den); +hdrmeta->whitePoint[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[0])), chroma_den); +hdrmeta->whitePoint[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[1])), chroma_den); +} + +sd_light = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL); +if (sd_light) { +light_meta = (AVContentLightMetadata *)sd_light->data; +if (light_meta) { +hdrmeta->maxContentLightLevel = (amf_uint16)light_meta->MaxCLL; +hdrmeta->maxFrameAverageLightLevel = (amf_uint16)light_meta->MaxFALL; +} +} +return 0; +} +return 1; +} #if CONFIG_D3D11VA #include @@ -683,6 +734,26 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer); } +// HDR10 metadata +if (frame->color_trc == AVCOL_TRC_SMPTE2084) { +AMFBuffer * hdrmeta_buffer = NULL; +res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer); +if (res == AMF_OK) { +AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer); +if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) { +switch (avctx->codec->id) { +case AV_CODEC_ID_H264: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break; +case AV_CODEC_ID_HEVC: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break; +} +res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer); +AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error 
%d\n", res); +} +hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer); +} +} + surface->pVtbl->SetPts(surface, frame->pts); AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts); @@ -746,6 +817,18 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) } res_resubmit = AMF_OK; if (ctx->delayed_surface != NULL) { // try to resubmit frame +if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) { +AMFBuffer
[FFmpeg-devel] [PATCH 3/4] avcodec/amfenc: add 10 bit encoding in av1_amf
From: Evgeny Pavlov v2: refactored after review Signed-off-by: Evgeny Pavlov Co-authored-by: Dmitrii Ovchinnikov --- libavcodec/amfenc.c | 2 ++ libavcodec/amfenc_av1.c | 22 ++ 2 files changed, 24 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 068bb53002..f1b76bd6aa 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -826,6 +826,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break; case AV_CODEC_ID_HEVC: AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break; +case AV_CODEC_ID_AV1: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break; } hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer); } diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index 8f13aea29e..634eeea48f 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -165,6 +165,9 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx) AMFGuid guid; AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); +amf_int64 color_depth; +amf_int64 color_profile; +enumAVPixelFormat pix_fmt; @@ -203,6 +206,25 @@ FF_ENABLE_DEPRECATION_WARNINGS } AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PROFILE, profile); +/// Color profile +color_profile = ff_amf_get_color_profile(avctx); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); + +/// Color Depth +pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format +: avctx->pix_fmt; +color_depth = AMF_COLOR_BIT_DEPTH_8; +if (pix_fmt == AV_PIX_FMT_P010) { +color_depth = AMF_COLOR_BIT_DEPTH_10; +} + +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_COLOR_BIT_DEPTH, color_depth); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); +/// Color Transfer Characteristics (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc); +/// Color Primaries (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries); + profile_level = avctx->level; if (profile_level == AV_LEVEL_UNKNOWN) { profile_level = ctx->level; -- 2.43.0.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
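With this in place a P010 d3d11 input can be fed straight to av1_amf; mirroring the commands quoted in patch 1/4 (file names are placeholders):

command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file.mkv -an -c:v av1_amf res.dx11_hw_av1.mkv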
[FFmpeg-devel] [PATCH 4/4] avcodec/amfenc: GPU driver version check
Implemented a GPU driver version check. The 10-bit patch works incorrectly on
driver versions lower than 23.30.
---
 libavcodec/amfenc_av1.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index 634eeea48f..7463251529 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -215,6 +215,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
                                     : avctx->pix_fmt;
     color_depth = AMF_COLOR_BIT_DEPTH_8;
     if (pix_fmt == AV_PIX_FMT_P010) {
+        AMF_RETURN_IF_FALSE(ctx, ctx->version >= AMF_MAKE_FULL_VERSION(1, 4, 32, 0), AVERROR_UNKNOWN, "HEVC 10-bit encoder is not supported by AMD GPU drivers versions lower than 23.30.\n");
         color_depth = AMF_COLOR_BIT_DEPTH_10;
     }

--
2.43.0.windows.1
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
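For reference, the check compares the runtime (driver-provided) AMF version rather than the header version tested in configure. As far as I recall from AMF/core/Version.h, the components are packed as 16-bit fields, so the threshold expands roughly as sketched below (definition paraphrased, not quoted):

    /* assumed packing, paraphrased from the AMF headers */
    #define AMF_MAKE_FULL_VERSION(MAJOR, MINOR, RELEASE, BUILD) \
        (((amf_uint64)(MAJOR) << 48) | ((amf_uint64)(MINOR) << 32) | \
         ((amf_uint64)(RELEASE) << 16) | (amf_uint64)(BUILD))

    /* AMF_MAKE_FULL_VERSION(1, 4, 32, 0) == 0x0001000400200000,
     * i.e. AMF runtime 1.4.32, which per the commit message
     * corresponds to AMD driver 23.30. */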
[FFmpeg-devel] [PATCH, v2] libavcodec/amfenc: Update AMF encoder options
Encoder options have been updated to the current version of the AMF. Signed-off-by: Araz Iusubov --- libavcodec/amfenc_av1.c | 108 +++-- libavcodec/amfenc_h264.c | 146 +-- libavcodec/amfenc_hevc.c | 143 +- 3 files changed, 259 insertions(+), 138 deletions(-) diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index 9f18aac648..d40c71cb33 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -25,15 +25,16 @@ #define OFFSET(x) offsetof(AmfContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { -{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" }, -{ "transcoding","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" }, -{ "lowlatency", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, -{ "profile","Set the profile (default main)", OFFSET(profile),AV_OPT_TYPE_INT,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, .unit = "profile" }, +{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" }, +{ "transcoding","Generic Transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" }, +{ "lowlatency", "Low latency usecase", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, + +{ "profile","Set the profile", OFFSET(profile), AV_OPT_TYPE_INT,{.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, .unit = "profile" }, { "main", "", 0, AV_OPT_TYPE_CONST,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, 0, 0, VE, .unit = "profile" }, -{ "level", "Set the encoding level (default auto)", OFFSET(level), AV_OPT_TYPE_INT,{.i64 = 0 }, 0, AMF_VIDEO_ENCODER_AV1_LEVEL_7_3, VE, .unit = "level" }, -{ "auto", "", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, VE, .unit = "level" }, +{ "level", "Set the encoding level (default auto)", OFFSET(level), AV_OPT_TYPE_INT,{.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_LEVEL_7_3, VE, .unit = "level" }, +{ "auto", "", 0, AV_OPT_TYPE_CONST, {.i64 = -1 }, 0, 0, VE, .unit = "level" }, { "2.0","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_0 }, 0, 0, VE, .unit = "level" }, { "2.1","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_1 }, 0, 0, VE, .unit = "level" }, { "2.2","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_2_2 }, 0, 0, VE, .unit = "level" }, @@ -59,11 +60,12 @@ static const AVOption options[] = { { "7.2","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_2 }, 0, 0, VE, .unit = "level" }, { "7.3","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_LEVEL_7_3 }, 0, 0, VE, .unit = "level" }, -{ "quality","Set the encoding quality", OFFSET(quality),AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_HIGH_QUALITY, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED, VE, .unit = "quality" }, +{ "quality","Set the encoding quality preset", OFFSET(quality), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_SPEED, VE, .unit = "quality" }, +{ "
[FFmpeg-devel] [PATCH] avcodec/vaapi_encode: add customized surface alignment
This commit fixes issues with AMD HEVC encoding. By default AMD hevc encoder asks for the alignment 64x16, while FFMPEG VAAPI has 16x16. Adding support for customzied surface size from VASurfaceAttribAlignmentSize in VAAPI version 1.21.0. To: primeadv...@gmail.com Signed-off-by: Araz Iusubov --- libavcodec/vaapi_encode.c | 11 +++ libavutil/hwcontext.h | 7 +++ libavutil/hwcontext_vaapi.c | 5 + 3 files changed, 23 insertions(+) diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c index 808b79c0c7..dc1b7465b7 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c @@ -2700,6 +2700,17 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx) av_log(avctx, AV_LOG_DEBUG, "Using %s as format of " "reconstructed frames.\n", av_get_pix_fmt_name(recon_format)); +if (constraints->log2_alignment) { +ctx->surface_width = FFALIGN(avctx->width, + 1 << (constraints->log2_alignment & 0xf)); +ctx->surface_height = FFALIGN(avctx->height, + 1 << ((constraints->log2_alignment & 0xf0) >> 4)); +av_log(avctx, AV_LOG_VERBOSE, "Using customized alignment size " +"[%dx%d].\n", +(1 << (constraints->log2_alignment & 0xf)), +(1 << ((constraints->log2_alignment & 0xf0) >> 4))); +} + if (ctx->surface_width < constraints->min_width || ctx->surface_height < constraints->min_height || ctx->surface_width > constraints->max_width || diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h index df7733fe5e..5725245be4 100644 --- a/libavutil/hwcontext.h +++ b/libavutil/hwcontext.h @@ -481,6 +481,13 @@ typedef struct AVHWFramesConstraints { */ int max_width; int max_height; + +/** + * The frame width/height log2 alignment when available + * the lower 4 bits, width; another 4 bits, height + * (Zero is not applied, use the default value) + */ +int log2_alignment; } AVHWFramesConstraints; /** diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c index 2c75f5f5b1..161623bc27 100644 --- a/libavutil/hwcontext_vaapi.c +++ b/libavutil/hwcontext_vaapi.c @@ -294,6 +294,11 @@ static int vaapi_frames_get_constraints(AVHWDeviceContext *hwdev, case VASurfaceAttribMaxHeight: constraints->max_height = attr_list[i].value.value.i; break; +#if VA_CHECK_VERSION(1, 21, 0) +case VASurfaceAttribAlignmentSize: +constraints->log2_alignment = attr_list[i].value.value.i; +break; +#endif } } if (pix_fmt_count == 0) { -- 2.43.0.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
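The new log2_alignment field packs log2 of the width alignment into the low 4 bits and log2 of the height alignment into the next 4 bits, so the 64x16 requirement mentioned above would be reported as 0x46. A small standalone illustration of the decode logic used in vaapi_encode_create_recon_frames(); the 0x46 value and the 1920x1080 size are assumptions, not values read from a real driver:

    #include <stdio.h>

    int main(void)
    {
        int log2_alignment = 0x46;                          /* hypothetical: AMD 64x16 case */
        int align_w = 1 << (log2_alignment & 0xf);          /* 1 << 6 = 64 */
        int align_h = 1 << ((log2_alignment & 0xf0) >> 4);  /* 1 << 4 = 16 */
        /* FFALIGN(x, a): round x up to the next multiple of a */
        int surface_w = (1920 + align_w - 1) & ~(align_w - 1);
        int surface_h = (1080 + align_h - 1) & ~(align_h - 1);
        printf("alignment %dx%d -> surface %dx%d\n", align_w, align_h, surface_w, surface_h);
        return 0;
    }

which prints "alignment 64x16 -> surface 1920x1088".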
[FFmpeg-devel] [PATCH] libavcodec/amfenc: Update AMF encoder options
The encoder options have been updated to the current version of the AMF. Signed-off-by: Araz Iusubov --- libavcodec/amfenc.c | 1 + libavcodec/amfenc.h | 4 + libavcodec/amfenc_av1.c | 154 +- libavcodec/amfenc_h264.c | 155 +- libavcodec/amfenc_hevc.c | 158 ++- 5 files changed, 314 insertions(+), 158 deletions(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 061859f85c..6b1e635a03 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -468,6 +468,7 @@ static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buff if (var.int64Value == AMF_VIDEO_ENCODER_AV1_OUTPUT_FRAME_TYPE_KEY) { pkt->flags = AV_PKT_FLAG_KEY; } +break; default: break; } diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 2dbd378ef8..1b8cfdf4a5 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -59,6 +59,7 @@ typedef struct AmfContext { AMFContext *context; ///< AMF context //encoder AMFComponent *encoder; ///< AMF encoder object +AMFCaps*encoder_caps; amf_booleof; ///< flag indicating EOF happened AMF_SURFACE_FORMAT format; ///< AMF surface format @@ -85,6 +86,7 @@ typedef struct AmfContext { int usage; int profile; int level; +int latency; int preencode; int quality; int b_frame_delta_qp; @@ -112,6 +114,7 @@ typedef struct AmfContext { int max_b_frames; int qvbr_quality_level; int hw_high_motion_quality_boost; +int encoder_instance_id; // HEVC - specific options @@ -126,6 +129,7 @@ typedef struct AmfContext { // AV1 - specific options enum AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_ENUM align; +enum AMF_VIDEO_ENCODER_AV1_AQ_MODE_ENUMaq_mode; // Preanalysis - specific options diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index 3878f0d461..50f75b53e2 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -24,12 +24,17 @@ #define OFFSET(x) offsetof(AmfContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM -static const AVOption options[] = { -{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" }, -{ "transcoding","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" }, -{ "lowlatency", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, -{ "profile","Set the profile (default main)", OFFSET(profile),AV_OPT_TYPE_INT,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, .unit = "profile" }, +static const AVOption options[] = { +{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY_HIGH_QUALITY, VE, "usage" }, +{ "transcoding","Generic Transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, "usage" }, +{ "ultralowlatency","ultra low latency trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage" }, +{ "lowlatency", "low latency trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, "usage" }, +{ "webcam", "Webcam", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_WEBCAM }, 0, 0, VE, "usage" }, +{ "high_quality", "high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_HIGH_QUALITY }, 0, 0, VE, "usage" }, +{ "lowlatency_high_quality","low latency yet 
high quality
[FFmpeg-devel] [PATCH] [FFmpeg-devel, v2] avcodec/vaapi_encode: add customized surface alignment
This commit fixes issues with AMD HEVC encoding. By default AMD hevc encoder asks for the alignment 64x16, while FFMPEG VAAPI has 16x16. Adding support for customized surface size from VASurfaceAttribAlignmentSize in VAAPI version 1.21.0 Signed-off-by: Araz Iusubov --- libavcodec/vaapi_encode.c | 11 +++ libavutil/hwcontext.h | 7 +++ libavutil/hwcontext_vaapi.c | 5 + 3 files changed, 23 insertions(+) diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c index 940f0678a5..2a74db23b1 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c @@ -2711,6 +2711,17 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx) av_log(avctx, AV_LOG_DEBUG, "Using %s as format of " "reconstructed frames.\n", av_get_pix_fmt_name(recon_format)); +if (constraints->log2_alignment) { +ctx->surface_width = FFALIGN(avctx->width, + 1 << (constraints->log2_alignment & 0xf)); +ctx->surface_height = FFALIGN(avctx->height, + 1 << ((constraints->log2_alignment & 0xf0) >> 4)); +av_log(avctx, AV_LOG_VERBOSE, "Using customized alignment size " +"[%dx%d].\n", +(1 << (constraints->log2_alignment & 0xf)), +(1 << ((constraints->log2_alignment & 0xf0) >> 4))); +} + if (ctx->surface_width < constraints->min_width || ctx->surface_height < constraints->min_height || ctx->surface_width > constraints->max_width || diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h index bac30debae..1eb56aff78 100644 --- a/libavutil/hwcontext.h +++ b/libavutil/hwcontext.h @@ -465,6 +465,13 @@ typedef struct AVHWFramesConstraints { */ int max_width; int max_height; + +/** + * The frame width/height log2 alignment when available + * the lower 4 bits, width; another 4 bits, height + * (Zero is not applied, use the default value) + */ +int log2_alignment; } AVHWFramesConstraints; /** diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c index 56d03aa4cd..6cda0fd811 100644 --- a/libavutil/hwcontext_vaapi.c +++ b/libavutil/hwcontext_vaapi.c @@ -294,6 +294,11 @@ static int vaapi_frames_get_constraints(AVHWDeviceContext *hwdev, case VASurfaceAttribMaxHeight: constraints->max_height = attr_list[i].value.value.i; break; +#if VA_CHECK_VERSION(1, 21, 0) +case VASurfaceAttribAlignmentSize: +constraints->log2_alignment = attr_list[i].value.value.i; +break; +#endif } } if (pix_fmt_count == 0) { -- 2.43.0.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] [FFmpeg-devel, v3] avcodec/vaapi_encode: add customized surface alignment
This commit fixes issues with AMD HEVC encoding. By default AMD hevc encoder asks for the alignment 64x16, while FFMPEG VAAPI has 16x16. Adding support for customized surface size from VASurfaceAttribAlignmentSize in VAAPI version 1.21.0 Signed-off-by: Araz Iusubov --- libavcodec/vaapi_encode.c | 11 +++ libavutil/hwcontext.h | 7 +++ libavutil/hwcontext_vaapi.c | 5 + 3 files changed, 23 insertions(+) diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c index 940f0678a5..2a74db23b1 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c @@ -2711,6 +2711,17 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx) av_log(avctx, AV_LOG_DEBUG, "Using %s as format of " "reconstructed frames.\n", av_get_pix_fmt_name(recon_format)); +if (constraints->log2_alignment) { +ctx->surface_width = FFALIGN(avctx->width, + 1 << (constraints->log2_alignment & 0xf)); +ctx->surface_height = FFALIGN(avctx->height, + 1 << ((constraints->log2_alignment & 0xf0) >> 4)); +av_log(avctx, AV_LOG_VERBOSE, "Using customized alignment size " +"[%dx%d].\n", +(1 << (constraints->log2_alignment & 0xf)), +(1 << ((constraints->log2_alignment & 0xf0) >> 4))); +} + if (ctx->surface_width < constraints->min_width || ctx->surface_height < constraints->min_height || ctx->surface_width > constraints->max_width || diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h index bac30debae..1eb56aff78 100644 --- a/libavutil/hwcontext.h +++ b/libavutil/hwcontext.h @@ -465,6 +465,13 @@ typedef struct AVHWFramesConstraints { */ int max_width; int max_height; + +/** + * The frame width/height log2 alignment when available + * the lower 4 bits, width; another 4 bits, height + * (Zero is not applied, use the default value) + */ +int log2_alignment; } AVHWFramesConstraints; /** diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c index 56d03aa4cd..6cda0fd811 100644 --- a/libavutil/hwcontext_vaapi.c +++ b/libavutil/hwcontext_vaapi.c @@ -294,6 +294,11 @@ static int vaapi_frames_get_constraints(AVHWDeviceContext *hwdev, case VASurfaceAttribMaxHeight: constraints->max_height = attr_list[i].value.value.i; break; +#if VA_CHECK_VERSION(1, 21, 0) +case VASurfaceAttribAlignmentSize: +constraints->log2_alignment = attr_list[i].value.value.i; +break; +#endif } } if (pix_fmt_count == 0) { -- 2.43.0.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] [v4] avcodec/vaapi_encode: add customized surface alignment
This commit fixes issues with AMD HEVC encoding. By default AMD hevc encoder asks for the alignment 64x16, while FFMPEG VAAPI has 16x16. Adding support for customized surface size from VASurfaceAttribAlignmentSize in VAAPI version 1.21.0 Signed-off-by: Araz Iusubov --- libavcodec/vaapi_encode.c | 11 +++ libavutil/hwcontext.h | 7 +++ libavutil/hwcontext_vaapi.c | 5 + 3 files changed, 23 insertions(+) diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c index 940f0678a5..2a74db23b1 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c @@ -2711,6 +2711,17 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx) av_log(avctx, AV_LOG_DEBUG, "Using %s as format of " "reconstructed frames.\n", av_get_pix_fmt_name(recon_format)); +if (constraints->log2_alignment) { +ctx->surface_width = FFALIGN(avctx->width, + 1 << (constraints->log2_alignment & 0xf)); +ctx->surface_height = FFALIGN(avctx->height, + 1 << ((constraints->log2_alignment & 0xf0) >> 4)); +av_log(avctx, AV_LOG_VERBOSE, "Using customized alignment size " +"[%dx%d].\n", +(1 << (constraints->log2_alignment & 0xf)), +(1 << ((constraints->log2_alignment & 0xf0) >> 4))); +} + if (ctx->surface_width < constraints->min_width || ctx->surface_height < constraints->min_height || ctx->surface_width > constraints->max_width || diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h index bac30debae..1eb56aff78 100644 --- a/libavutil/hwcontext.h +++ b/libavutil/hwcontext.h @@ -465,6 +465,13 @@ typedef struct AVHWFramesConstraints { */ int max_width; int max_height; + +/** + * The frame width/height log2 alignment when available + * the lower 4 bits, width; another 4 bits, height + * (Zero is not applied, use the default value) + */ +int log2_alignment; } AVHWFramesConstraints; /** diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c index 56d03aa4cd..6cda0fd811 100644 --- a/libavutil/hwcontext_vaapi.c +++ b/libavutil/hwcontext_vaapi.c @@ -294,6 +294,11 @@ static int vaapi_frames_get_constraints(AVHWDeviceContext *hwdev, case VASurfaceAttribMaxHeight: constraints->max_height = attr_list[i].value.value.i; break; +#if VA_CHECK_VERSION(1, 21, 0) +case VASurfaceAttribAlignmentSize: +constraints->log2_alignment = attr_list[i].value.value.i; +break; +#endif } } if (pix_fmt_count == 0) { -- 2.43.0.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH 1/4] avcodec/amfenc: Fixes the color information in the output.
added 10 bit support for amf hevc. before: command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file.mkv -an -c:v h264_amf res.dx11_hw_h264.mkv output - Format of input frames context (p010le) is not supported by AMF. command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv output - Format of input frames context (p010le) is not supported by AMF. after: command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v h264_amf res.dx11_hw_h264.mkv output - 10-bit input video is not supported by AMF H264 encoder command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv output - 10bit file v2 - lost line returned in ff_amf_pix_fmts v3 - fixes after review v4 - extract duplicated code, fix incorrect processing of 10-bit input for h264 v5 - non-functional changes after review Co-authored-by: Evgeny Pavlov --- libavcodec/amfenc.c | 37 + libavcodec/amfenc.h | 3 +++ libavcodec/amfenc_h264.c | 24 libavcodec/amfenc_hevc.c | 31 ++- 4 files changed, 90 insertions(+), 5 deletions(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 061859f85c..0bd15dd812 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -60,6 +60,7 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = { #if CONFIG_DXVA2 AV_PIX_FMT_DXVA2_VLD, #endif +AV_PIX_FMT_P010, AV_PIX_FMT_NONE }; @@ -72,6 +73,7 @@ static const FormatMap format_map[] = { { AV_PIX_FMT_NONE, AMF_SURFACE_UNKNOWN }, { AV_PIX_FMT_NV12, AMF_SURFACE_NV12 }, +{ AV_PIX_FMT_P010, AMF_SURFACE_P010 }, { AV_PIX_FMT_BGR0, AMF_SURFACE_BGRA }, { AV_PIX_FMT_RGB0, AMF_SURFACE_RGBA }, { AV_PIX_FMT_GRAY8, AMF_SURFACE_GRAY8 }, @@ -785,6 +787,41 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) return ret; } +int ff_amf_get_color_profile(AVCodecContext *avctx) +{ +amf_int64 color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN; +if (avctx->color_range == AVCOL_RANGE_JPEG) { +/// Color Space for Full (JPEG) Range +switch (avctx->colorspace) { +case AVCOL_SPC_SMPTE170M: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601; +break; +case AVCOL_SPC_BT709: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709; +break; +case AVCOL_SPC_BT2020_NCL: +case AVCOL_SPC_BT2020_CL: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020; +break; +} +} else { +/// Color Space for Limited (MPEG) range +switch (avctx->colorspace) { +case AVCOL_SPC_SMPTE170M: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601; +break; +case AVCOL_SPC_BT709: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709; +break; +case AVCOL_SPC_BT2020_NCL: +case AVCOL_SPC_BT2020_CL: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020; +break; +} +} +return color_profile; +} + const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = { #if CONFIG_D3D11VA HW_CONFIG_ENCODER_FRAMES(D3D11, D3D11VA), diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 2dbd378ef8..62736ef579 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -21,6 +21,7 @@ #include +#include #include #include #include @@ -170,6 +171,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); */ extern const enum AVPixelFormat ff_amf_pix_fmts[]; +int ff_amf_get_color_profile(AVCodecContext *avctx); + /** * Error handling helper */ diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c index 
32baa8e91a..7caeca748d 100644 --- a/libavcodec/amfenc_h264.c +++ b/libavcodec/amfenc_h264.c @@ -200,6 +200,8 @@ static av_cold int amf_encode_init_h264(AVCodecContext *avctx) AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); int deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0; +amf_int64color_profile; +enum AVPixelFormat pix_fmt; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den); @@ -270,10 +272,24 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio); } -/// Color Range (Partial/TV/MPEG or Full/PC/JPEG) -if (avctx->color_range == AVCO
[FFmpeg-devel] [PATCH 2/4] avcodec/amfenc: HDR metadata.
v2: fixes for indentation --- libavcodec/amfenc.c | 83 + 1 file changed, 83 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 0bd15dd812..068bb53002 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -36,6 +36,57 @@ #include "amfenc.h" #include "encode.h" #include "internal.h" +#include "libavutil/mastering_display_metadata.h" + +static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta) +{ +AVFrameSideData*sd_display; +AVFrameSideData*sd_light; +AVMasteringDisplayMetadata *display_meta; +AVContentLightMetadata *light_meta; + +sd_display = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA); +if (sd_display) { +display_meta = (AVMasteringDisplayMetadata *)sd_display->data; +if (display_meta->has_luminance) { +const unsigned int luma_den = 1; +hdrmeta->maxMasteringLuminance = +(amf_uint32)(luma_den * av_q2d(display_meta->max_luminance)); +hdrmeta->minMasteringLuminance = +FFMIN((amf_uint32)(luma_den * av_q2d(display_meta->min_luminance)), hdrmeta->maxMasteringLuminance); +} +if (display_meta->has_primaries) { +const unsigned int chroma_den = 5; +hdrmeta->redPrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][0])), chroma_den); +hdrmeta->redPrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][1])), chroma_den); +hdrmeta->greenPrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][0])), chroma_den); +hdrmeta->greenPrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][1])), chroma_den); +hdrmeta->bluePrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][0])), chroma_den); +hdrmeta->bluePrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][1])), chroma_den); +hdrmeta->whitePoint[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[0])), chroma_den); +hdrmeta->whitePoint[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[1])), chroma_den); +} + +sd_light = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL); +if (sd_light) { +light_meta = (AVContentLightMetadata *)sd_light->data; +if (light_meta) { +hdrmeta->maxContentLightLevel = (amf_uint16)light_meta->MaxCLL; +hdrmeta->maxFrameAverageLightLevel = (amf_uint16)light_meta->MaxFALL; +} +} +return 0; +} +return 1; +} #if CONFIG_D3D11VA #include @@ -683,6 +734,26 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer); } +// HDR10 metadata +if (frame->color_trc == AVCOL_TRC_SMPTE2084) { +AMFBuffer * hdrmeta_buffer = NULL; +res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer); +if (res == AMF_OK) { +AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer); +if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) { +switch (avctx->codec->id) { +case AV_CODEC_ID_H264: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break; +case AV_CODEC_ID_HEVC: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break; +} +res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer); +AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res); +} 
+hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer); +} +} + surface->pVtbl->SetPts(surface, frame->pts); AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts); @@ -746,6 +817,18 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) } res_resubmit = AMF_OK; if (ctx->delayed_surface != NULL) { // try to resubmit frame +if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) { +AMFBuffer * hdrmeta_buffer =
[FFmpeg-devel] [PATCH 3/4] avcodec/amfenc: add 10 bit encoding in av1_amf
v2: refactored after review Signed-off-by: Evgeny Pavlov Co-authored-by: Dmitrii Ovchinnikov --- libavcodec/amfenc.c | 2 ++ libavcodec/amfenc_av1.c | 27 +++ 2 files changed, 29 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 068bb53002..f1b76bd6aa 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -826,6 +826,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break; case AV_CODEC_ID_HEVC: AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break; +case AV_CODEC_ID_AV1: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break; } hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer); } diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index d40c71cb33..a8629d74b0 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -166,6 +166,9 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx) AMFGuid guid; AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); +amf_int64 color_depth; +amf_int64 color_profile; +enumAVPixelFormat pix_fmt; @@ -211,6 +214,30 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PROFILE, profile); } +/// Color profile +color_profile = ff_amf_get_color_profile(avctx); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); + +/// Color Depth +pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format +: avctx->pix_fmt; +color_depth = AMF_COLOR_BIT_DEPTH_8; +if (pix_fmt == AV_PIX_FMT_P010) { +color_depth = AMF_COLOR_BIT_DEPTH_10; +} + +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_COLOR_BIT_DEPTH, color_depth); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); +if (color_depth == AMF_COLOR_BIT_DEPTH_8) { +/// Color Transfer Characteristics (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_BT709); +/// Color Primaries (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT709); +} else { +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE2084); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT2020); +} + profile_level = avctx->level; if (profile_level == AV_LEVEL_UNKNOWN) { profile_level = ctx->level; -- 2.45.2.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH 4/4] avcodec/amfenc: GPU driver version check
Implemented a GPU driver version check. The 10-bit patch works incorrectly on
driver versions lower than 23.30.
---
 libavcodec/amfenc_av1.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index a8629d74b0..cfa319f933 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -223,6 +223,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
                                     : avctx->pix_fmt;
     color_depth = AMF_COLOR_BIT_DEPTH_8;
     if (pix_fmt == AV_PIX_FMT_P010) {
+        AMF_RETURN_IF_FALSE(ctx, ctx->version >= AMF_MAKE_FULL_VERSION(1, 4, 32, 0), AVERROR_UNKNOWN, "HEVC 10-bit encoder is not supported by AMD GPU drivers versions lower than 23.30.\n");
         color_depth = AMF_COLOR_BIT_DEPTH_10;
     }

--
2.45.2.windows.1
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH] avcodec/amf_enc: new encoder features support
Implemented: New usage modes for AV1 encoder. Latency mode for H264, HEVC and AV1 encoders. Adaptive Quantization (AQ) mode in AV1 encoder. --- libavcodec/amfenc.h | 2 ++ libavcodec/amfenc_av1.c | 24 +++- libavcodec/amfenc_h264.c | 5 + libavcodec/amfenc_hevc.c | 6 ++ 4 files changed, 36 insertions(+), 1 deletion(-) diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index cee47888a4..e53b57d369 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -86,6 +86,7 @@ typedef struct AmfContext { int usage; int profile; int level; +int latency; int preencode; int quality; int b_frame_delta_qp; @@ -127,6 +128,7 @@ typedef struct AmfContext { // AV1 - specific options enum AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_ENUM align; +enum AMF_VIDEO_ENCODER_AV1_AQ_MODE_ENUMaq_mode; // Preanalysis - specific options diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index de76161524..f23d94b692 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -25,9 +25,13 @@ #define OFFSET(x) offsetof(AmfContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { -{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" }, +{ "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY_HIGH_QUALITY, VE, .unit = "usage" }, { "transcoding","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" }, +{ "ultralowlatency","ultra low latency trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, { "lowlatency", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" }, +{ "webcam", "Webcam", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_WEBCAM }, 0, 0, VE, .unit = "usage" }, +{ "high_quality", "high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_HIGH_QUALITY }, 0, 0, VE, .unit = "usage" }, +{ "lowlatency_high_quality","low latency yet high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY_HIGH_QUALITY }, 0, 0, VE, .unit = "usage" }, { "profile","Set the profile (default main)", OFFSET(profile),AV_OPT_TYPE_INT,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN, VE, .unit = "profile" }, { "main", "", 0, AV_OPT_TYPE_CONST,{.i64 = AMF_VIDEO_ENCODER_AV1_PROFILE_MAIN }, 0, 0, VE, .unit = "profile" }, @@ -65,6 +69,12 @@ static const AVOption options[] = { { "quality","", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_QUALITY }, 0, 0, VE, .unit = "quality" }, { "high_quality", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_QUALITY_PRESET_HIGH_QUALITY }, 0, 0, VE, .unit = "quality" }, +{ "latency","Set the encoding latency mode", OFFSET(latency),AV_OPT_TYPE_INT,{.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_LOWEST_LATENCY, VE, .unit = "latency_mode" }, +{ "none", "No encoding latency requirement.", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_NONE }, 0, 0, VE, .unit = "latency_mode" }, +{ "power_saving_real_time", "Try the best to finish encoding a frame within 1/framerate sec.", 0, AV_OPT_TYPE_CONST, {.i64 = 
AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_POWER_SAVING_REAL_TIME}, 0, 0, VE, .unit = "latency_mode" }, +{ "real_time", "Try the best to finish encoding a frame within 1/(2 x framerate) sec.", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_REAL_TIME }, 0, 0, VE, .unit = "latency_mode" }, +{ "lowest_latency", "Encoding as fast as possible. This mode causes highest power consumption", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_LOWEST_LATENCY
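A hypothetical command line combining the new AV1 usage preset with the new latency option; value spellings follow the option tables in this patch, and the file names are placeholders:

command - ffmpeg -i input.mp4 -c:v av1_amf -usage lowlatency_high_quality -latency real_time output.mkv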
[FFmpeg-devel] [PATCH] avcodec/amf_enc: av1 cropping support
--- libavcodec/amfenc_av1.c | 82 - 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index d40c71cb33..27599b9fbe 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -22,6 +22,9 @@ #include "amfenc.h" #include "codec_internal.h" +#define AMF_VIDEO_ENCODER_AV1_CAP_WIDTH_ALIGNMENT_FACTOR_LOCAL L"Av1WidthAlignmentFactor" // amf_int64; default = 1 +#define AMF_VIDEO_ENCODER_AV1_CAP_HEIGHT_ALIGNMENT_FACTOR_LOCAL L"Av1HeightAlignmentFactor" // amf_int64; default = 1 + #define OFFSET(x) offsetof(AmfContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { @@ -167,7 +170,13 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx) AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); - +//for av1 alignment and crop +AVPacketSideData* sd_crop = NULL; +uint32_t* crop= NULL; +uint32_tcrop_right = 0; +uint32_tcrop_bottom = 0; +int width_alignment_factor = -1; +int height_alignment_factor = -1; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den); @@ -482,6 +491,77 @@ FF_ENABLE_DEPRECATION_WARNINGS buffer->pVtbl->Release(buffer); var.pInterface->pVtbl->Release(var.pInterface); +//processing crop informaiton according to alignment +if (ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_AV1_CAP_WIDTH_ALIGNMENT_FACTOR_LOCAL, &var) != AMF_OK) +{ +// assume older driver and Navi3x +width_alignment_factor = 64; +} +else +{ +width_alignment_factor = (int)var.int64Value; +} + +if (ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_AV1_CAP_HEIGHT_ALIGNMENT_FACTOR_LOCAL, &var) != AMF_OK) +{ +// assume older driver and Navi3x +height_alignment_factor = 16; +} +else +{ +height_alignment_factor = (int)var.int64Value; +} + +if (width_alignment_factor != -1 && height_alignment_factor != -1) +{ +if (avctx->width % width_alignment_factor != 0) +{ +crop_right = width_alignment_factor - (avctx->width & (width_alignment_factor - 1)); +} + +if (avctx->height % height_alignment_factor != 0) +{ +crop_bottom = height_alignment_factor - (avctx->height & (height_alignment_factor - 1)); +} + +//There is specia processing for crop_bottom equal to 8 in hardware +if (crop_bottom == 8) +{ +crop_bottom = 2; +} +} + +if (crop_right != 0 || crop_bottom != 0) +{ +sd_crop = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*sd_crop)); +if (!sd_crop) +{ +av_log(ctx, AV_LOG_WARNING, "Can't allocate memory for amf av1 encoder crop information\n"); +return AVERROR(ENOMEM); +} + +sd_crop->data = (uint8_t*)av_mallocz(sizeof(uint32_t) * 4); +if (!sd_crop->data) +{ +av_log(ctx, AV_LOG_WARNING, "Can't allocate memory for amf av1 encoder crop information\n"); +return AVERROR(ENOMEM); +} + +crop = (uint32_t*)sd_crop->data; + +//top, bottom, left,right +*crop++ = 0; +*crop++ = crop_bottom; +*crop++ = 0; +*crop = crop_right; + +avctx->nb_coded_side_data++; + +avctx->coded_side_data = sd_crop; +avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_FRAME_CROPPING; +avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = sizeof(uint32_t) * 4; +} + return 0; } -- 2.45.2.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
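Worked example of the crop computation above: with a 1920x1080 input and the assumed Navi3x fallback alignment of 64x16, crop_right stays 0 (1920 is already a multiple of 64) and crop_bottom becomes 16 - (1080 & 15) = 8, which the special case then rewrites to 2. As a standalone check (sizes and alignment factors are illustrative; the bit-mask form assumes power-of-two factors, as the patch does):

    #include <stdio.h>

    int main(void)
    {
        int w = 1920, h = 1080;            /* illustrative input size */
        int align_w = 64, align_h = 16;    /* fallback factors assumed for older drivers / Navi3x */
        unsigned crop_right  = (w % align_w) ? align_w - (w & (align_w - 1)) : 0;
        unsigned crop_bottom = (h % align_h) ? align_h - (h & (align_h - 1)) : 0;
        if (crop_bottom == 8)              /* hardware special case noted in the patch */
            crop_bottom = 2;
        printf("crop_right=%u crop_bottom=%u\n", crop_right, crop_bottom);   /* 0 and 2 */
        return 0;
    }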
[FFmpeg-devel] [PATCH] avcodec/amf_enc: av1 cropping support
--- libavcodec/amfenc.c | 6 libavcodec/amfenc.h | 3 ++ libavcodec/amfenc_av1.c | 64 - 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 061859f85c..93925854e0 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -402,6 +402,12 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx) dlclose(ctx->library); ctx->library = NULL; } + +if (ctx->crop) +{ +av_freep(ctx->crop); +} + ctx->trace = NULL; ctx->debug = NULL; ctx->factory = NULL; diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 2dbd378ef8..abcf465063 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -81,6 +81,9 @@ typedef struct AmfContext { int log_to_dbg; +//handle crop +uint32_t*crop; + // Static options, have to be set before Init() call int usage; int profile; diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index d40c71cb33..e9bc26a770 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -19,9 +19,13 @@ #include "libavutil/internal.h" #include "libavutil/mem.h" #include "libavutil/opt.h" +#include "libavutil/intreadwrite.h" #include "amfenc.h" #include "codec_internal.h" +#define AMF_VIDEO_ENCODER_AV1_CAP_WIDTH_ALIGNMENT_FACTOR_LOCAL L"Av1WidthAlignmentFactor" // amf_int64; default = 1 +#define AMF_VIDEO_ENCODER_AV1_CAP_HEIGHT_ALIGNMENT_FACTOR_LOCAL L"Av1HeightAlignmentFactor" // amf_int64; default = 1 + #define OFFSET(x) offsetof(AmfContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { @@ -167,7 +171,12 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx) AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); - +//for av1 alignment and crop +AVPacketSideData* sd_crop = NULL; +uint32_tcrop_right = 0; +uint32_tcrop_bottom = 0; +int width_alignment_factor = -1; +int height_alignment_factor = -1; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den); @@ -482,6 +491,59 @@ FF_ENABLE_DEPRECATION_WARNINGS buffer->pVtbl->Release(buffer); var.pInterface->pVtbl->Release(var.pInterface); +//processing crop informaiton according to alignment +if (ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_AV1_CAP_WIDTH_ALIGNMENT_FACTOR_LOCAL, &var) != AMF_OK) +// assume older driver and Navi3x +width_alignment_factor = 64; +else +width_alignment_factor = (int)var.int64Value; + +if (ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_AV1_CAP_HEIGHT_ALIGNMENT_FACTOR_LOCAL, &var) != AMF_OK) +// assume older driver and Navi3x +height_alignment_factor = 16; +else +height_alignment_factor = (int)var.int64Value; + +if (width_alignment_factor != -1 && height_alignment_factor != -1) { +if (avctx->width % width_alignment_factor != 0) +crop_right = width_alignment_factor - (avctx->width & (width_alignment_factor - 1)); + +if (avctx->height % height_alignment_factor != 0) +crop_bottom = height_alignment_factor - (avctx->height & (height_alignment_factor - 1)); + +//There is specia processing for crop_bottom equal to 8 in hardware +if (crop_bottom == 8) +crop_bottom = 2; +} + +if (crop_right != 0 || crop_bottom != 0) { +ctx->crop = av_mallocz(sizeof(uint32_t) * 4); +if (!ctx->crop) { +av_log(ctx, AV_LOG_WARNING, "Can't allocate memory for amf av1 encoder crop information\n"); +return AVERROR(ENOMEM); +} + +sd_crop = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*sd_crop)); +if (!sd_crop) { +av_log(ctx, 
AV_LOG_WARNING, "Can't allocate memory for amf av1 encoder crop information\n"); +av_freep(ctx->crop); +return AVERROR(ENOMEM); +} + +avctx->coded_side_data = sd_crop; +avctx->nb_coded_side_data++; + +//top, bottom, left,right +AV_WL32(ctx->crop + 0, 0); +AV_WL32(ctx->crop + 1, crop_bottom); +AV_WL32(ctx->crop + 2, 0); +AV_WL32(ctx->crop + 3, crop_right); + +avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_FRAME_CROPPING; +avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)ctx->crop; +avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = sizeof(uint32_t) * 4; +} + return 0; } -- 2.45.2.windows.1
[FFmpeg-devel] [PATCH 1/4, v2] avcodec/amfenc: Fixes the color information in the output.
From: Michael Fabian 'Xaymar' Dirks added 10 bit support for amf hevc. before: command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file.mkv -an -c:v h264_amf res.dx11_hw_h264.mkv output - Format of input frames context (p010le) is not supported by AMF. command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv output - Format of input frames context (p010le) is not supported by AMF. after: command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v h264_amf res.dx11_hw_h264.mkv output - 10-bit input video is not supported by AMF H264 encoder command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv output - 10bit file v2 - lost line returned in ff_amf_pix_fmts v3 - fixes after review v4 - extract duplicated code, fix incorrect processing of 10-bit input for h264 v5 - non-functional changes after review Co-authored-by: Evgeny Pavlov Co-authored-by: Araz Iusubov --- libavcodec/amfenc.c | 37 + libavcodec/amfenc.h | 3 +++ libavcodec/amfenc_h264.c | 24 libavcodec/amfenc_hevc.c | 31 ++- 4 files changed, 90 insertions(+), 5 deletions(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 061859f85c..0bd15dd812 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -60,6 +60,7 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = { #if CONFIG_DXVA2 AV_PIX_FMT_DXVA2_VLD, #endif +AV_PIX_FMT_P010, AV_PIX_FMT_NONE }; @@ -72,6 +73,7 @@ static const FormatMap format_map[] = { { AV_PIX_FMT_NONE, AMF_SURFACE_UNKNOWN }, { AV_PIX_FMT_NV12, AMF_SURFACE_NV12 }, +{ AV_PIX_FMT_P010, AMF_SURFACE_P010 }, { AV_PIX_FMT_BGR0, AMF_SURFACE_BGRA }, { AV_PIX_FMT_RGB0, AMF_SURFACE_RGBA }, { AV_PIX_FMT_GRAY8, AMF_SURFACE_GRAY8 }, @@ -785,6 +787,41 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) return ret; } +int ff_amf_get_color_profile(AVCodecContext *avctx) +{ +amf_int64 color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN; +if (avctx->color_range == AVCOL_RANGE_JPEG) { +/// Color Space for Full (JPEG) Range +switch (avctx->colorspace) { +case AVCOL_SPC_SMPTE170M: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601; +break; +case AVCOL_SPC_BT709: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709; +break; +case AVCOL_SPC_BT2020_NCL: +case AVCOL_SPC_BT2020_CL: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020; +break; +} +} else { +/// Color Space for Limited (MPEG) range +switch (avctx->colorspace) { +case AVCOL_SPC_SMPTE170M: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601; +break; +case AVCOL_SPC_BT709: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709; +break; +case AVCOL_SPC_BT2020_NCL: +case AVCOL_SPC_BT2020_CL: +color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020; +break; +} +} +return color_profile; +} + const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = { #if CONFIG_D3D11VA HW_CONFIG_ENCODER_FRAMES(D3D11, D3D11VA), diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 2dbd378ef8..62736ef579 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -21,6 +21,7 @@ #include +#include #include #include #include @@ -170,6 +171,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); */ extern const enum AVPixelFormat ff_amf_pix_fmts[]; +int ff_amf_get_color_profile(AVCodecContext *avctx); + /** * Error handling helper */ diff --git 
a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c index 32baa8e91a..7caeca748d 100644 --- a/libavcodec/amfenc_h264.c +++ b/libavcodec/amfenc_h264.c @@ -200,6 +200,8 @@ static av_cold int amf_encode_init_h264(AVCodecContext *avctx) AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); int deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0; +amf_int64color_profile; +enum AVPixelFormat pix_fmt; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den); @@ -270,10 +272,24 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDE
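With P010 added to ff_amf_pix_fmts, the new format shows up in the encoder's advertised pixel format list. A minimal sketch of how to check that from an application, assuming a build where AVCodec.pix_fmts is still populated (newer FFmpeg releases expose the same information through avcodec_get_supported_config()):

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("hevc_amf");
    if (!codec || !codec->pix_fmts)
        return 1;
    /* expect nv12, p010 and the hardware formats to be listed */
    for (const enum AVPixelFormat *p = codec->pix_fmts; *p != AV_PIX_FMT_NONE; p++)
        printf("%s\n", av_get_pix_fmt_name(*p));
    return 0;
}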
[FFmpeg-devel] [PATCH 2/4,v2] avcodec/amfenc: HDR metadata.
From: nyanmisaka v2: fixes for indentation --- libavcodec/amfenc.c | 83 + 1 file changed, 83 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 0bd15dd812..068bb53002 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -36,6 +36,57 @@ #include "amfenc.h" #include "encode.h" #include "internal.h" +#include "libavutil/mastering_display_metadata.h" + +static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta) +{ +AVFrameSideData*sd_display; +AVFrameSideData*sd_light; +AVMasteringDisplayMetadata *display_meta; +AVContentLightMetadata *light_meta; + +sd_display = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA); +if (sd_display) { +display_meta = (AVMasteringDisplayMetadata *)sd_display->data; +if (display_meta->has_luminance) { +const unsigned int luma_den = 1; +hdrmeta->maxMasteringLuminance = +(amf_uint32)(luma_den * av_q2d(display_meta->max_luminance)); +hdrmeta->minMasteringLuminance = +FFMIN((amf_uint32)(luma_den * av_q2d(display_meta->min_luminance)), hdrmeta->maxMasteringLuminance); +} +if (display_meta->has_primaries) { +const unsigned int chroma_den = 5; +hdrmeta->redPrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][0])), chroma_den); +hdrmeta->redPrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][1])), chroma_den); +hdrmeta->greenPrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][0])), chroma_den); +hdrmeta->greenPrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][1])), chroma_den); +hdrmeta->bluePrimary[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][0])), chroma_den); +hdrmeta->bluePrimary[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][1])), chroma_den); +hdrmeta->whitePoint[0] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[0])), chroma_den); +hdrmeta->whitePoint[1] = +FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[1])), chroma_den); +} + +sd_light = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL); +if (sd_light) { +light_meta = (AVContentLightMetadata *)sd_light->data; +if (light_meta) { +hdrmeta->maxContentLightLevel = (amf_uint16)light_meta->MaxCLL; +hdrmeta->maxFrameAverageLightLevel = (amf_uint16)light_meta->MaxFALL; +} +} +return 0; +} +return 1; +} #if CONFIG_D3D11VA #include @@ -683,6 +734,26 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer); } +// HDR10 metadata +if (frame->color_trc == AVCOL_TRC_SMPTE2084) { +AMFBuffer * hdrmeta_buffer = NULL; +res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer); +if (res == AMF_OK) { +AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer); +if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) { +switch (avctx->codec->id) { +case AV_CODEC_ID_H264: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break; +case AV_CODEC_ID_HEVC: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break; +} +res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer); +AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error 
%d\n", res); +} +hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer); +} +} + surface->pVtbl->SetPts(surface, frame->pts); AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts); @@ -746,6 +817,18 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) } res_resubmit = AMF_OK; if (ctx->delayed_surface != NULL) { // try to resubmit frame +if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) { +AMFBuffer
[FFmpeg-devel] [PATCH 3/4, v2] avcodec/amfenc: add 10 bit encoding in av1_amf
From: Evgeny Pavlov v2: refactored after review Signed-off-by: Evgeny Pavlov Co-authored-by: Dmitrii Ovchinnikov Co-authored-by: Araz Iusubov --- libavcodec/amfenc.c | 2 ++ libavcodec/amfenc_av1.c | 28 +++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 068bb53002..49dd91c4e0 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -746,6 +746,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break; case AV_CODEC_ID_HEVC: AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break; +case AV_CODEC_ID_AV1: +AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break; } res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer); AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res); diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index 62b9af9da0..e960e5ec81 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -170,7 +170,9 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx) AMFGuid guid; AMFRate framerate; AMFSize framesize = AMFConstructSize(avctx->width, avctx->height); - +amf_int64 color_depth; +amf_int64 color_profile; +enumAVPixelFormat pix_fmt; //for av1 alignment and crop uint32_tcrop_right = 0; @@ -220,6 +222,30 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PROFILE, profile); } +/// Color profile +color_profile = ff_amf_get_color_profile(avctx); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); + +/// Color Depth +pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format +: avctx->pix_fmt; +color_depth = AMF_COLOR_BIT_DEPTH_8; +if (pix_fmt == AV_PIX_FMT_P010) { +color_depth = AMF_COLOR_BIT_DEPTH_10; +} + +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_COLOR_BIT_DEPTH, color_depth); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); +if (color_depth == AMF_COLOR_BIT_DEPTH_8) { +/// Color Transfer Characteristics (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_BT709); +/// Color Primaries (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT709); +} else { +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE2084); +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT2020); +} + profile_level = avctx->level; if (profile_level == AV_LEVEL_UNKNOWN) { profile_level = ctx->level; -- 2.45.2.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
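The key detail in the 10-bit path is that, for hardware frames, the real pixel layout lives in the frames context's sw_format rather than in avctx->pix_fmt (which is AV_PIX_FMT_D3D11 or similar). A minimal sketch of that selection, mirroring the logic in the diff:

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static enum AVPixelFormat amf_effective_pix_fmt(const AVCodecContext *avctx)
{
    if (avctx->hw_frames_ctx)
        return ((AVHWFramesContext *)avctx->hw_frames_ctx->data)->sw_format;
    return avctx->pix_fmt;
}

/* The encoder then picks AMF_COLOR_BIT_DEPTH_10 when this returns
 * AV_PIX_FMT_P010 and AMF_COLOR_BIT_DEPTH_8 otherwise. */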
[FFmpeg-devel] [PATCH 4/4, v2] avcodec/amfenc: GPU driver version check
Implemented a GPU driver version check. The 10-bit patch works incorrectly on driver versions lower than 23.30. --- libavcodec/amfenc.c | 4 1 file changed, 4 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 49dd91c4e0..41eaef9758 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -415,6 +415,10 @@ static int amf_init_encoder(AVCodecContext *avctx) else pix_fmt = avctx->pix_fmt; +if (pix_fmt == AV_PIX_FMT_P010) { +AMF_RETURN_IF_FALSE(ctx, ctx->version >= AMF_MAKE_FULL_VERSION(1, 4, 32, 0), AVERROR_UNKNOWN, "10-bit encoder is not supported by AMD GPU drivers versions lower than 23.30.\n"); +} + ctx->format = amf_av_to_amf_format(pix_fmt); AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL), "Format %s is not supported\n", av_get_pix_fmt_name(pix_fmt)); -- 2.45.2.windows.1
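The gate compares the packed AMF runtime version reported by the driver. The sketch below mimics how a four-component version macro of this kind is typically laid out (16 bits per field); treat the exact packing as an assumption and use the AMF SDK's AMF_MAKE_FULL_VERSION in real code:

#include <stdint.h>

/* hypothetical stand-in for the SDK's version-packing macro */
#define PACK_VERSION(maj, min, rel, build) \
    (((uint64_t)(maj) << 48) | ((uint64_t)(min) << 32) | \
     ((uint64_t)(rel) << 16) |  (uint64_t)(build))

/* AMF runtime 1.4.32 corresponds to driver 23.30, the first version where
 * 10-bit encoding behaves correctly according to the commit message. */
static int runtime_supports_10bit(uint64_t runtime_version)
{
    return runtime_version >= PACK_VERSION(1, 4, 32, 0);
}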
[FFmpeg-devel] [PATCH, v2] avcodec/amfenc: increase precision of Sleep() on Windows
From: Evgeny Pavlov

This commit increases the precision of the Sleep() function on Windows. The fix reduces the sleep time on Windows to improve AMF encoding performance on low-resolution input videos. Fix for issue #10622.

We evaluated CreateWaitableTimerExW with the CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag; in fact, that function has the same precision level as the Sleep() function. Usually, changing the timer resolution only affects the current process and does not impact other processes, so it does not have a global effect on the system. Here is the relevant information from the documentation on timeBeginPeriod, https://learn.microsoft.com/en-us/windows/win32/api/timeapi/nf-timeapi-timebeginperiod:

"Prior to Windows 10, version 2004, this function affects a global Windows setting. For all processes Windows uses the lowest value (that is, highest resolution) requested by any process. Starting with Windows 10, version 2004, this function no longer affects global timer resolution. For processes which call this function, Windows uses the lowest value (that is, highest resolution) requested by any process. For processes which have not called this function, Windows does not guarantee a higher resolution than the default system resolution."

We provide the following measurements to show the performance improvement with this patch.

1. Performance tests show that the high-precision sleep improves performance, especially for low-resolution sequences, where it gains about 20%. Frames per second (FPS) encoded by the hardware encoder (Navi 31 RX7900XT), source type H.264, output type H.264:

No. | Sequence resolution | No. of frames | FPS before patch | FPS after patch | Difference | Improvement %
----|---------------------|---------------|------------------|-----------------|------------|--------------
 1  | 480x360             | 8290          | 2030             | 2365            | 335        | 16.5%
 2  | 720x576             | 8290          | 1440             | 1790            | 350        | 24.3%
 3  | 1280x720            | 8290          | 1120             | 1190            | 70         | 6.3%
 4  | 1920x1080           | 8290          | 692              | 714             | 22         | 3.2%
 5  | 3840x2160           | 8290          | 200              | 200             | 0          | 0.0%

The sample ffmpeg command line:
$ ffmpeg.exe -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i input.mp4 -c:v h264_amf out.mp4
where input.mp4 should be replaced by an input H.264 bitstream of the corresponding resolution.

2. The power tests show that the increase in power stays within an acceptable scope. The purpose of the power test is to examine the increase in CPU power consumption caused by the improved timer resolution after applying this patch. We tested an AMD product called Phoenix, which we refer to as an APU: it combines a general-purpose AMD CPU and an integrated 3D graphics processing unit (IGPU) on a single die. Only the APU has a DAP connector to the board's power rails. The power test data is shown below:

                        | 480x360 | 720x576 | 1280x720 | 1920x1080 | 3840x2160 | Average
------------------------|---------|---------|----------|-----------|-----------|--------
 CPU power change       | 1.93%   | 2.43%   | -1.69%   | 3.49%     | 2.92%     | 1.82%
 APU total power change | 0.86%   | 1.34%   | -0.62%   | 1.54%     | -0.58%    | 0.51%

With the high-precision clock applied, the average CPU power consumption increases by 1.82% and the APU total by 0.51%, so the power increase is not significant.
Signed-off-by: Evgeny Pavlov --- libavcodec/amfenc.c | 31 +++ libavcodec/amfenc.h | 3 +++ 2 files changed, 34 insertions(+) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 061859f85c..55e24856e8 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -42,7 +42,12 @@ #endif #ifdef _WIN32 +#include #include "compat/w32dlfcn.h" + +typedef MMRESULT (*timeapi_fun)(UINT uPeriod); +#define WINMM_DLL "winmm.dll" + #else #include #endif @@ -113,6 +118,9 @@ static int amf_load_library(AVCodecContext *avctx) AMFInit_Fn init_fun; AMFQueryVersion_Fn version_fun; AMF_RESULT res; +#ifdef _WIN32 +timeapi_fun time_begin_fun; +#endif ctx->delayed_frame = av_frame_alloc(); if (!ctx->delayed_frame) { @@ -145,6 +153,16 @@ static int amf_load_library(AVCodecContext *avctx) AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d\n", res); res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug); AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d\n", res); + +#ifdef _WIN32 +// Increase precision of Sleep() function on Windows platform +ctx->winmm_
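The Windows-specific part of the patch comes down to requesting a finer scheduler granularity through winmm's timeBeginPeriod(), so that Sleep()/av_usleep() wake up after roughly 1 ms instead of the default timer tick. A minimal sketch of the pattern outside FFmpeg; the patch loads winmm.dll dynamically, while this sketch links it directly for brevity:

#ifdef _WIN32
#include <windows.h>
#include <timeapi.h>          /* timeBeginPeriod/timeEndPeriod, link with winmm.lib */

static void do_precise_wait(void)
{
    timeBeginPeriod(1);       /* request 1 ms timer resolution for this process */
    Sleep(1);                 /* now returns after ~1 ms rather than a full timer tick */
    timeEndPeriod(1);         /* always undo the request when done */
}
#endif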
[FFmpeg-devel] [PATCH, v3] avcodec/amfenc: increase precision of Sleep() on Windows
From: Cameron Gutman This commit increase precision of Sleep() function on Windows. This fix reduces the sleep time on Windows to improve AMF encoding performance on low resolution input videos. Fix for issue #10622 Co-authored-by: Araz Iusubov --- libavcodec/amfenc.c | 21 ++--- libavcodec/amfenc.h | 3 +++ libavcodec/amfenc_av1.c | 5 + libavcodec/amfenc_h264.c | 5 + libavcodec/amfenc_hevc.c | 5 + 5 files changed, 36 insertions(+), 3 deletions(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 225fb9df27..03d75031f5 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -426,6 +426,8 @@ static int amf_init_encoder(AVCodecContext *avctx) res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder); AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res); +ctx->submitted_frame = 0; + return 0; } @@ -541,7 +543,6 @@ static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buff if ((ctx->max_b_frames > 0 || ((ctx->pa_adaptive_mini_gop == 1) ? true : false)) && ctx->dts_delay == 0) { int64_t timestamp_last = AV_NOPTS_VALUE; size_t can_read = av_fifo_can_read(ctx->timestamp_list); - AMF_RETURN_IF_FALSE(ctx, can_read > 0, AVERROR_UNKNOWN, "timestamp_list is empty while max_b_frames = %d\n", avctx->max_b_frames); av_fifo_peek(ctx->timestamp_list, ×tamp_last, 1, can_read - 1); @@ -826,6 +827,13 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) av_frame_unref(frame); ret = av_fifo_write(ctx->timestamp_list, &pts, 1); + +if (ctx->submitted_frame == 0) +{ +ctx->use_b_frame = (ctx->max_b_frames > 0 || ((ctx->pa_adaptive_mini_gop == 1) ? true : false)); +} +ctx->submitted_frame++; + if (ret < 0) return ret; } @@ -835,7 +843,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) do { block_and_wait = 0; // poll data -if (!avpkt->data && !avpkt->buf) { +if (!avpkt->data && !avpkt->buf && (ctx->use_b_frame ? (ctx->submitted_frame >= 2) : true) ) { res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data); if (data) { // copy data to packet @@ -845,6 +853,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface ret = amf_copy_buffer(avctx, avpkt, buffer); +ctx->submitted_frame++; buffer->pVtbl->Release(buffer); if (data->pVtbl->HasProperty(data, L"av_frame_ref")) { @@ -884,6 +893,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) av_frame_unref(ctx->delayed_frame); AMF_RETURN_IF_FALSE(ctx, res_resubmit == AMF_OK, AVERROR_UNKNOWN, "Repeated SubmitInput() failed with error %d\n", res_resubmit); +ctx->submitted_frame++; ret = av_fifo_write(ctx->timestamp_list, &pts, 1); if (ret < 0) return ret; @@ -902,7 +912,12 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) if (query_output_data_flag == 0) { if (res_resubmit == AMF_INPUT_FULL || ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max)) { block_and_wait = 1; -av_usleep(1000); + +// Only sleep if the driver doesn't support waiting in QueryOutput() +// or if we already have output data so we will skip calling it. 
+if (!ctx->query_timeout_supported || avpkt->data || avpkt->buf) { +av_usleep(1000); +} } } } while (block_and_wait); diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 0f2abcbd82..d83ee5bf1c 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -68,6 +68,7 @@ typedef struct AmfContext { int hwsurfaces_in_queue; int hwsurfaces_in_queue_max; +int query_timeout_supported; // helpers to handle async calls int delayed_drain; @@ -77,6 +78,8 @@ typedef struct AmfContext { // shift dts back by max_b_frames in timing AVFifo *timestamp_list; int64_t dts_delay;
[FFmpeg-devel] [PATCH] avcodec/amfenc: Color information update
Processing of transfer characteristic SMPTE2084 and 8-bit depth added on AMF side and will appear in one of upcoming releases. --- libavcodec/amfenc_av1.c | 13 - libavcodec/amfenc_hevc.c | 13 - 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index b40d54f70c..fc28287a48 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -252,15 +252,10 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_COLOR_BIT_DEPTH, color_depth); AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile); -if (color_depth == AMF_COLOR_BIT_DEPTH_8) { -/// Color Transfer Characteristics (AMF matches ISO/IEC) -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_BT709); -/// Color Primaries (AMF matches ISO/IEC) -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT709); -} else { -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE2084); -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT2020); -} +/// Color Transfer Characteristics (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, avctx->color_trc); +/// Color Primaries (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, avctx->color_primaries); profile_level = avctx->level; if (profile_level == AV_LEVEL_UNKNOWN) { diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c index f9f6f8adb3..8ceaee0851 100644 --- a/libavcodec/amfenc_hevc.c +++ b/libavcodec/amfenc_hevc.c @@ -254,15 +254,10 @@ FF_ENABLE_DEPRECATION_WARNINGS color_depth = AMF_COLOR_BIT_DEPTH_10; } AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_COLOR_BIT_DEPTH, color_depth); -if (color_depth == AMF_COLOR_BIT_DEPTH_8) { -/// Color Transfer Characteristics (AMF matches ISO/IEC) -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_BT709); -/// Color Primaries (AMF matches ISO/IEC) -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT709); -} else { -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_TRANSFER_CHARACTERISTIC, AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE2084); -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PRIMARIES, AMF_COLOR_PRIMARIES_BT2020); -} +/// Color Transfer Characteristics (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_TRANSFER_CHARACTERISTIC, avctx->color_trc); +/// Color Primaries (AMF matches ISO/IEC) +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PRIMARIES, avctx->color_primaries); // Picture control properties AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr); -- 2.45.2.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
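The simplification relies on AMF's transfer-characteristic and primaries enums using the same ISO/IEC 23001-8 code points as FFmpeg's AVColorTransferCharacteristic and AVColorPrimaries, which is why avctx->color_trc and avctx->color_primaries can be forwarded unchanged. A small sanity check of the two values this patch cares about; that the AMF constants share these values is the stated assumption, not something verified here:

#include <assert.h>
#include <libavutil/pixfmt.h>

static void check_iso_codepoints(void)
{
    /* ISO/IEC 23001-8: SMPTE ST 2084 transfer is code point 16,
     * BT.2020 primaries are code point 9 */
    assert(AVCOL_TRC_SMPTE2084 == 16);
    assert(AVCOL_PRI_BT2020    == 9);
}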
[FFmpeg-devel] [PATCH] avcodec/amfenc: GOP size check
Fix for the error with an invalid GOP size parameter. --- libavcodec/amfenc_av1.c | 4 +++- libavcodec/amfenc_h264.c | 4 +++- libavcodec/amfenc_hevc.c | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index b40d54f70c..8920389180 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -281,7 +281,9 @@ FF_ENABLE_DEPRECATION_WARNINGS } // Picture control properties -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_GOP_SIZE, avctx->gop_size); +if (avctx->gop_size != -1) { +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_GOP_SIZE, avctx->gop_size); +} // Setup header insertion mode only if this option was defined explicitly if (ctx->header_insertion_mode != -1) { diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c index 959be9eab6..cc473c5056 100644 --- a/libavcodec/amfenc_h264.c +++ b/libavcodec/amfenc_h264.c @@ -512,7 +512,9 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER, !!deblocking_filter); // Keyframe Interval -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size); +if (avctx->gop_size != -1) { +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size); +} // Header Insertion Spacing if (ctx->header_spacing >= 0) diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c index f9f6f8adb3..c3b60ff6f4 100644 --- a/libavcodec/amfenc_hevc.c +++ b/libavcodec/amfenc_hevc.c @@ -266,7 +266,9 @@ FF_ENABLE_DEPRECATION_WARNINGS // Picture control properties AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr); -AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size); +if (avctx->gop_size != -1) { +AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size); +} if (avctx->slices > 1) { AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices); } -- 2.45.2.windows.1
[FFmpeg-devel] [PATCH] avcodec/amfenc: DX12 Reference-only feature support
The Reference-Only feature in DirectX 12 is a memory optimization technique designed for video decoding scenarios. This feature requires that reference resources must be allocated with the D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY resource flag. Reference textures must also be separated from output textures. This feature is not supported in the current version of ffmpeg. Since AMD GPU uses this feature in Direct 12 decoder, ffmpeg does not support AMD GPU Direct 12 decoding. To properly support the Reference-Only feature, two parallel resource pools must be configured and managed: General Resource Pool: Contains resources used for output decoded frames. Defined in AVHWFramesContext and manages the final decoded textures. Reference-Only Resource Pool: Intended for storing reference frame resources. Resources created with the D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY flag are allocated to AVBufferPool. --- libavcodec/d3d12va_decode.c | 58 --- libavutil/hwcontext_d3d12va.c | 65 --- 2 files changed, 115 insertions(+), 8 deletions(-) diff --git a/libavcodec/d3d12va_decode.c b/libavcodec/d3d12va_decode.c index 3b8978635e..8916f94d10 100644 --- a/libavcodec/d3d12va_decode.c +++ b/libavcodec/d3d12va_decode.c @@ -51,11 +51,19 @@ unsigned ff_d3d12va_get_surface_index(const AVCodecContext *avctx, D3D12VADecodeContext *ctx, const AVFrame *frame, int curr) { +AVHWFramesContext *frames_ctx = D3D12VA_FRAMES_CONTEXT(avctx); +AVD3D12VAFramesContext *frames_hwctx = frames_ctx->hwctx; + AVD3D12VAFrame *f; ID3D12Resource *res; unsigned i; -f = (AVD3D12VAFrame *)frame->data[0]; +if (frames_hwctx->flags & D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY) { +f = (AVD3D12VAFrame*)frame->data[1]; +} else { +f = (AVD3D12VAFrame*)frame->data[0]; +} + if (!f) goto fail; @@ -250,6 +258,11 @@ static int d3d12va_create_decoder(AVCodecContext *avctx) return AVERROR_PATCHWELCOME; } +if (feature.ConfigurationFlags & D3D12_VIDEO_DECODE_CONFIGURATION_FLAG_REFERENCE_ONLY_ALLOCATIONS_REQUIRED) { +frames_hwctx->flags |= (D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY | D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE); +av_log(avctx, AV_LOG_INFO, "Reference-Only Allocations are required for this configuration.\n"); +} + desc = (D3D12_VIDEO_DECODER_DESC) { .NodeMask = 0, .Configuration = ctx->cfg, @@ -440,8 +453,19 @@ int ff_d3d12va_common_end_frame(AVCodecContext *avctx, AVFrame *frame, D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); ID3D12Resource *buffer= NULL; ID3D12CommandAllocator *command_allocator = NULL; -AVD3D12VAFrame *f = (AVD3D12VAFrame *)frame->data[0]; -ID3D12Resource *resource = (ID3D12Resource *)f->texture; +AVHWFramesContext *frames_ctx= D3D12VA_FRAMES_CONTEXT(avctx); +AVD3D12VAFramesContext *frames_hwctx = frames_ctx->hwctx; +AVD3D12VAFrame *f = NULL; +AVD3D12VAFrame *output_data = NULL; + +if (frames_hwctx->flags & D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY) { +f = (AVD3D12VAFrame*)frame->data[1]; +output_data = (AVD3D12VAFrame*)frame->data[0]; +} else { +f = (AVD3D12VAFrame*)frame->data[0]; +} + +ID3D12Resource* resource = (ID3D12Resource*)f->texture; ID3D12VideoDecodeCommandList *cmd_list = ctx->command_list; D3D12_RESOURCE_BARRIER barriers[32] = { 0 }; @@ -469,6 +493,14 @@ int ff_d3d12va_common_end_frame(AVCodecContext *avctx, AVFrame *frame, .pOutputTexture2D= resource, }; +if (frames_hwctx->flags & D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY) { +output_args.pOutputTexture2D = output_data->texture; + +output_args.ConversionArguments.Enable = 1; 
+output_args.ConversionArguments.pReferenceTexture2D = resource; +output_args.ConversionArguments.ReferenceSubresource = 0; +} + UINT num_barrier = 1; barriers[0] = (D3D12_RESOURCE_BARRIER) { .Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, @@ -481,6 +513,20 @@ int ff_d3d12va_common_end_frame(AVCodecContext *avctx, AVFrame *frame, }, }; +if (frames_hwctx->flags & D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY) { +barriers[1] = (D3D12_RESOURCE_BARRIER) { +.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, +.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE, +.Transition = { +.pResource = output_data->texture, +.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, +.StateBefore = D3D12_RESOURCE_STATE_COMMON, +
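Whether reference-only allocations are mandatory comes from the decoder capability query; everything in the patch is keyed off that flag. A minimal sketch of the check, assuming a valid ID3D12VideoDevice, a decode configuration filled in elsewhere, and the COBJMACROS-style C bindings used by the existing code:

#include <d3d12video.h>

static int reference_only_required(ID3D12VideoDevice *vdev,
                                   D3D12_VIDEO_DECODE_CONFIGURATION cfg,
                                   UINT width, UINT height, DXGI_FORMAT fmt)
{
    D3D12_FEATURE_DATA_VIDEO_DECODE_SUPPORT support = {
        .Configuration = cfg,
        .Width         = width,
        .Height        = height,
        .DecodeFormat  = fmt,
        .FrameRate     = { 30, 1 },
    };

    if (FAILED(ID3D12VideoDevice_CheckFeatureSupport(vdev,
            D3D12_FEATURE_VIDEO_DECODE_SUPPORT, &support, sizeof(support))))
        return 0;
    /* when set, references must be created with
     * D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY and kept apart
     * from the output textures, as the patch does */
    return !!(support.ConfigurationFlags &
              D3D12_VIDEO_DECODE_CONFIGURATION_FLAG_REFERENCE_ONLY_ALLOCATIONS_REQUIRED);
}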
[FFmpeg-devel] [PATCH] avcodec/amfenc: Fix max rate control log message
--- libavcodec/amfenc_av1.c | 2 +- libavcodec/amfenc_h264.c | 2 +- libavcodec/amfenc_hevc.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index bc6ad3d7fb..b21011ef23 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -374,7 +374,7 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PEAK_BITRATE, avctx->rc_max_rate); } else if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR) { -av_log(ctx, AV_LOG_WARNING, "rate control mode is PEAK_CONSTRAINED_VBR but rc_max_rate is not set\n"); +av_log(ctx, AV_LOG_DEBUG, "rate control mode is vbr_peak but max_rate is not set, default max_rate will be applied.\n"); } if (avctx->bit_rate > 0) { ctx->rate_control_mode = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR; diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c index 78bd70eb7a..18b1ca48c9 100644 --- a/libavcodec/amfenc_h264.c +++ b/libavcodec/amfenc_h264.c @@ -395,7 +395,7 @@ FF_ENABLE_DEPRECATION_WARNINGS if (avctx->rc_max_rate) { AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->rc_max_rate); } else if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR) { -av_log(ctx, AV_LOG_WARNING, "rate control mode is PEAK_CONSTRAINED_VBR but rc_max_rate is not set\n"); +av_log(ctx, AV_LOG_DEBUG, "rate control mode is vbr_peak but max_rate is not set, default max_rate will be applied.\n"); } if (ctx->latency != -1) { diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c index 0907db945c..42f823e512 100644 --- a/libavcodec/amfenc_hevc.c +++ b/libavcodec/amfenc_hevc.c @@ -369,7 +369,7 @@ FF_ENABLE_DEPRECATION_WARNINGS if (avctx->rc_max_rate) { AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, avctx->rc_max_rate); } else if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR) { -av_log(ctx, AV_LOG_WARNING, "rate control mode is PEAK_CONSTRAINED_VBR but rc_max_rate is not set\n"); +av_log(ctx, AV_LOG_DEBUG, "rate control mode is vbr_peak but max_rate is not set, default max_rate will be applied.\n"); } if (ctx->latency != -1) { -- 2.45.2.windows.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
[FFmpeg-devel] [PATCH, v2] avcodec/d3d12va_decode: enable reference-only mode
The Reference-Only feature in DirectX 12 is a memory optimization technique designed for video decoding scenarios. This feature requires that reference resources must be allocated with the D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY resource flag. Reference textures must also be separated from output textures. This feature is not supported in the current version of ffmpeg. Since AMD GPU uses this feature in Direct 12 decoder, ffmpeg does not support AMD GPU Direct 12 decoding. --- libavcodec/d3d12va_decode.c | 165 +--- libavcodec/d3d12va_decode.h | 13 +++ 2 files changed, 166 insertions(+), 12 deletions(-) diff --git a/libavcodec/d3d12va_decode.c b/libavcodec/d3d12va_decode.c index 3b8978635e..b9df266e37 100644 --- a/libavcodec/d3d12va_decode.c +++ b/libavcodec/d3d12va_decode.c @@ -41,6 +41,102 @@ typedef struct HelperObjects { uint64_t fence_value; } HelperObjects; +typedef struct ReferenceFrame { +ID3D12Resource *resource; +intused; +ID3D12Resource *output_resource; +} ReferenceFrame; + +static ID3D12Resource *get_reference_only_resource(AVCodecContext *avctx, ID3D12Resource *output_resource) +{ +D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); +AVHWFramesContext *frames_ctx = D3D12VA_FRAMES_CONTEXT(avctx); +AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx; +AVD3D12VAFramesContext *frames_hwctx = frames_ctx->hwctx; +int i = 0; +ID3D12Resource *resource = NULL; +ReferenceFrame *reference_only_map = ctx->reference_only_map; +if (reference_only_map == NULL) { +av_log(avctx, AV_LOG_ERROR, "Reference frames are not allocated!\n"); +return NULL; +} + +// find unused resource +for (i = 0; i < ctx->max_num_ref; i++) { +if (!reference_only_map[i].used && reference_only_map[i].resource != NULL) { +reference_only_map[i].used = 1; +resource = reference_only_map[i].resource; +reference_only_map[i].output_resource = output_resource; +return resource; +} +} + +// find space to allocate +for (i = 0; i < ctx->max_num_ref; i++) { +if (reference_only_map[i].resource == NULL) +break; +} + +if (i == ctx->max_num_ref) { +av_log(avctx, AV_LOG_ERROR, "No space for new Reference frame!\n"); +return NULL; +} + +// allocate frame +D3D12_HEAP_PROPERTIES props = { .Type = D3D12_HEAP_TYPE_DEFAULT }; +D3D12_RESOURCE_DESC desc; +output_resource->lpVtbl->GetDesc(output_resource, &desc); +desc.Flags = D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY | D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE; + +if (FAILED(ID3D12Device_CreateCommittedResource(device_hwctx->device, &props, D3D12_HEAP_FLAG_NONE, &desc, +D3D12_RESOURCE_STATE_COMMON, NULL, &IID_ID3D12Resource, (void **)&reference_only_map[i].resource))) { +av_log(ctx, AV_LOG_ERROR, "Failed to create D3D12 Reference Resource!\n"); +return NULL; +} + +reference_only_map[i].used = 1; +resource = reference_only_map[i].resource; +reference_only_map[i].output_resource = output_resource; + +return resource; +} + +static void free_reference_only_resources(AVCodecContext *avctx) +{ +D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); +int i; +ReferenceFrame *reference_only_map = ctx->reference_only_map; +if (reference_only_map != NULL) { +for (i = 0; i < ctx->max_num_ref; i++) { +if (reference_only_map[i].resource != NULL) { +D3D12_OBJECT_RELEASE(reference_only_map[i].resource); +} +} +av_freep(&ctx->reference_only_map); +av_freep(&ctx->ref_only_resources); +} +} + +static void prepare_reference_only_resources(AVCodecContext *avctx) +{ +D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); +int i, j; +ReferenceFrame *reference_only_map = 
ctx->reference_only_map; +if (reference_only_map == NULL) +return; +memset(ctx->ref_only_resources, 0, ctx->max_num_ref * sizeof(*(ctx->ref_only_resources))); +for (j = 0; j < ctx->max_num_ref; j++) { +for (i = 0; i < ctx->max_num_ref; i++) { +if (reference_only_map[j].used && reference_only_map[j].output_resource == ctx->ref_resources[i]) { +ctx->ref_only_resources[i] = reference_only_map[j].resource; +break; +} +} +if (i == ctx->max_num_ref) +reference_only_map[j].used = 0; +} +} + int ff_d3d12va_get_suitable_max_bitstream_size(AVCodecContext *avctx) { AVHWFramesContext *frames_ctx = D3D12VA_FRAMES_CONTEXT(avctx); @@ -250,6 +346,18 @@ static int d3d12va_create_decoder(AVCodecContext *avctx) return AVERROR_PATCHWELCOME; } +ctx->reference_only_map = NULL; +ctx->ref_only_resources = NULL; +if (feature.ConfigurationFlags & D3D12_VIDEO_DECODE_CONF
[FFmpeg-devel] [PATCH] avcodec/d3d12va_encode: texture array support for HEVC
This patch adds support for the texture array feature used by AMD boards in the D3D12 HEVC encoder. In texture array mode, a single texture array is shared for all reference and reconstructed pictures using different subresources. The implementation ensures compatibility and has been successfully tested on AMD, Intel, and NVIDIA GPUs. --- libavcodec/d3d12va_encode.c | 241 +-- libavcodec/d3d12va_encode.h | 29 libavcodec/d3d12va_encode_hevc.c | 5 +- 3 files changed, 231 insertions(+), 44 deletions(-) diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c index 4d738200fe..580d2ea383 100644 --- a/libavcodec/d3d12va_encode.c +++ b/libavcodec/d3d12va_encode.c @@ -264,6 +264,11 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, av_log(avctx, AV_LOG_DEBUG, "Input surface is %p.\n", pic->input_surface->texture); +if (ctx->is_texture_array) { +base_pic->recon_image->data[0] = ctx->texture_array_frame; +pic->subresource_index = (ctx->subresource_used_index++) % ctx->max_subresource_array_size; +} + pic->recon_surface = (AVD3D12VAFrame *)base_pic->recon_image->data[0]; av_log(avctx, AV_LOG_DEBUG, "Recon surface is %p.\n", pic->recon_surface->texture); @@ -325,11 +330,28 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, goto fail; } +if (ctx->is_texture_array) { +d3d12_refs.pSubresources = av_calloc(d3d12_refs.NumTexture2Ds, + sizeof(*d3d12_refs.pSubresources)); +if (!d3d12_refs.pSubresources) { +err = AVERROR(ENOMEM); +goto fail; +} +} + i = 0; -for (j = 0; j < base_pic->nb_refs[0]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; -for (j = 0; j < base_pic->nb_refs[1]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +for (j = 0; j < base_pic->nb_refs[0]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->subresource_index; +i++; +} +for (j = 0; j < base_pic->nb_refs[1]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->subresource_index; +i++; +} } input_args.PictureControlDesc.IntraRefreshFrameIndex = 0; @@ -343,7 +365,11 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, output_args.Bitstream.pBuffer= pic->output_buffer; output_args.Bitstream.FrameStartOffset = pic->aligned_header_size; output_args.ReconstructedPicture.pReconstructedPicture = pic->recon_surface->texture; -output_args.ReconstructedPicture.ReconstructedPictureSubresource = 0; +if (ctx->is_texture_array) { +output_args.ReconstructedPicture.ReconstructedPictureSubresource = pic->subresource_index; +} else { +output_args.ReconstructedPicture.ReconstructedPictureSubresource = 0; +} output_args.EncoderOutputMetadata.pBuffer= pic->encoded_metadata; output_args.EncoderOutputMetadata.Offset = 0; @@ -381,35 +407,87 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, }, \ } +#define TRANSITION_BARRIER_SUBRESOURCE(res, subres,before, after) \ +(D3D12_RESOURCE_BARRIER) { \ +.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,\ +.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE, \ +.Transition = { \ +.pResource = res, \ +.Subresource = subres, \ +.StateBefore = before, \ +.StateAfter = after, \ +}, \ +} + barriers[0] = 
TRANSITION_BARRIER(pic->input_surface->texture, D3D12_RESOURCE_STATE_COMMON, D3D12_RESOURCE_STATE_VIDEO_ENCODE_READ); barriers[1] = TRANSITION_BARRIER(pic->output_buffer, D3D12_RESOURCE_STATE_COMMON,
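In texture-array mode all reconstructed pictures share a single ID3D12Resource and only the subresource index differs, handed out round-robin over the array size. A tiny sketch of that allocation policy (the struct and names are illustrative, not the patch's):

/* round-robin slot allocator for a shared reconstructed-picture array */
typedef struct SubresourcePool {
    unsigned next;        /* monotonically increasing use counter */
    unsigned array_size;  /* number of slices in the texture array */
} SubresourcePool;

static unsigned acquire_subresource(SubresourcePool *pool)
{
    return pool->next++ % pool->array_size;
}

/* The returned index is what ends up in
 * ReconstructedPicture.ReconstructedPictureSubresource instead of 0. */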
[FFmpeg-devel] [PATCH] avcodec/amfenc: add smart access video option
From: Evgeny Pavlov This commit adds option for enabling SmartAccess Video (SAV) in AMF encoders. SAV is an AMD hardware-specific feature which enables the parallelization of encode and decode streams across multiple Video Codec Engine (VCN) hardware instances. --- libavcodec/amfenc.h | 1 + libavcodec/amfenc_av1.c | 17 + libavcodec/amfenc_h264.c | 18 ++ libavcodec/amfenc_hevc.c | 18 ++ 4 files changed, 54 insertions(+) diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h index 3f42c4cd94..aec3a3f9ec 100644 --- a/libavcodec/amfenc.h +++ b/libavcodec/amfenc.h @@ -72,6 +72,7 @@ typedef struct AMFEncoderContext { int b_frame_delta_qp; int ref_b_frame_delta_qp; int bit_depth; +int smart_access_video; // Dynamic options, can be set after Init() call diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c index 05ad2b8897..8c5fa9dfbc 100644 --- a/libavcodec/amfenc_av1.c +++ b/libavcodec/amfenc_av1.c @@ -133,6 +133,8 @@ static const AVOption options[] = { { "1080p", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_64X16_1080P_CODED_1082 }, 0, 0, VE, .unit = "align" }, { "none", "", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_ALIGNMENT_MODE_NO_RESTRICTIONS }, 0, 0, VE, .unit = "align" }, +{ "smart_access_video", "Enable Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE}, + //Pre Analysis options { "preanalysis","Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE }, @@ -326,6 +328,21 @@ FF_ENABLE_DEPRECATION_WARNINGS av_log(ctx, AV_LOG_DEBUG, "Rate control turned to Peak VBR\n"); } } +if (ctx->smart_access_video != -1) { +AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0); +if (res != AMF_OK) { +av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n"); +if (ctx->smart_access_video != 0) +return AVERROR(ENOSYS); +} else { +av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video); +// Set low latency mode if Smart Access Video is enabled +if (ctx->smart_access_video != 0) { +AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_LOWEST_LATENCY); +av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n"); +} +} +} // Pre-Pass, Pre-Analysis, Two-Pass if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CONSTANT_QP) { diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c index 4291f0ea64..2f906f3273 100644 --- a/libavcodec/amfenc_h264.c +++ b/libavcodec/amfenc_h264.c @@ -139,6 +139,8 @@ static const AVOption options[] = { { "forced_idr", "Force I frames to be IDR frames", OFFSET(forced_idr) , AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, { "aud","Inserts AU Delimiter NAL unit",OFFSET(aud) , AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VE }, +{ "smart_access_video", "Enable Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE}, + //Pre Analysis options { "preanalysis","Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE }, @@ -399,6 +401,22 @@ FF_ENABLE_DEPRECATION_WARNINGS AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_LOWLATENCY_MODE, ((ctx->latency == 0) ? 
false : true)); } +if (ctx->smart_access_video != -1) { +AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0); +if (res != AMF_OK) { +av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n"); +if (ctx->smart_access_video != 0) +return AVERROR(ENOSYS); +} else { +av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video); +// Set low latency mode if Smart Access Video is enabled +if (ctx->smart_access_video != 0) { +AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_LOWLATENCY_MODE, true); +av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n"); +} +} +
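Since smart_access_video is exposed as a private encoder option, applications can switch it on the same way the CLI does. A minimal sketch, assuming an already-allocated h264_amf codec context that has not been opened yet:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int enable_smart_access_video(AVCodecContext *enc)
{
    /* private options live on priv_data; passing AV_OPT_SEARCH_CHILDREN and
     * the codec context itself would work as well */
    return av_opt_set_int(enc->priv_data, "smart_access_video", 1, 0);
}

/* CLI equivalent: ffmpeg -i in.mp4 -c:v h264_amf -smart_access_video 1 out.mp4 */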
[FFmpeg-devel] [PATCH] avcodec/d3d12va_decode: enable reference-only decoder mode
The Reference-Only feature in DirectX 12 is a memory optimization technique designed for video decoding scenarios. This feature requires that reference resources must be allocated with the D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY resource flag. Reference textures must also be separated from output textures. This feature is not supported in the current version of ffmpeg. Since AMD GPU uses this feature in Direct 12 decoder, ffmpeg does not support AMD GPU Direct 12 decoding. --- libavcodec/d3d12va_decode.c | 176 +--- libavcodec/d3d12va_decode.h | 13 +++ 2 files changed, 176 insertions(+), 13 deletions(-) diff --git a/libavcodec/d3d12va_decode.c b/libavcodec/d3d12va_decode.c index 3b8978635e..c51234c256 100644 --- a/libavcodec/d3d12va_decode.c +++ b/libavcodec/d3d12va_decode.c @@ -41,6 +41,111 @@ typedef struct HelperObjects { uint64_t fence_value; } HelperObjects; +typedef struct ReferenceFrame { +ID3D12Resource *resource; +intused; +ID3D12Resource *output_resource; +} ReferenceFrame; + +static ID3D12Resource *get_reference_only_resource(AVCodecContext *avctx, ID3D12Resource *output_resource) +{ +D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); +AVHWFramesContext *frames_ctx = D3D12VA_FRAMES_CONTEXT(avctx); +AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx; +AVD3D12VAFramesContext *frames_hwctx = frames_ctx->hwctx; +int i; +ID3D12Resource *resource = NULL; +ReferenceFrame *reference_only_map = (ReferenceFrame *)(ctx->reference_only_map); +if(reference_only_map == NULL){ +av_log(avctx, AV_LOG_ERROR, "Reference frames are not allocated!\n"); +return NULL; +} + +// find unused resource +for (i = 0; i < ctx->max_num_ref; i++) { +if(!reference_only_map[i].used && reference_only_map[i].resource != NULL) { +reference_only_map[i].used = 1; +resource = reference_only_map[i].resource; +reference_only_map[i].output_resource = output_resource; +break; +} +} +if(resource == NULL){ +// find space to allocate +for (i = 0; i < ctx->max_num_ref; i++) { +if(reference_only_map[i].resource == NULL) { +break; +} +} +} +if(i == ctx->max_num_ref){ +av_log(avctx, AV_LOG_ERROR, "No space for new Reference frame!\n"); +}else{ +// allocate frame +D3D12_HEAP_PROPERTIES props = { .Type = D3D12_HEAP_TYPE_DEFAULT }; +D3D12_RESOURCE_DESC desc = { +.Dimension= D3D12_RESOURCE_DIMENSION_TEXTURE2D, +.Alignment= 0, +.Width= avctx->coded_width, +.Height = avctx->coded_height, +.DepthOrArraySize = 1, +.MipLevels= 1, +.Format = frames_hwctx->format, +.SampleDesc = {.Count = 1, .Quality = 0 }, +.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, +.Flags= D3D12_RESOURCE_FLAG_VIDEO_DECODE_REFERENCE_ONLY | D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE, +}; + +if (FAILED(ID3D12Device_CreateCommittedResource(device_hwctx->device, &props, D3D12_HEAP_FLAG_NONE, &desc, +D3D12_RESOURCE_STATE_COMMON, NULL, &IID_ID3D12Resource, (void **)&reference_only_map[i].resource))) { +av_log(ctx, AV_LOG_ERROR, "Could not create the texture\n"); +} +resource = reference_only_map[i].resource; +reference_only_map[i].used = 1; +reference_only_map[i].output_resource = output_resource; +} +// return it +return resource; +} + +static void free_reference_only_resources(AVCodecContext *avctx) +{ +D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); +int i; +ReferenceFrame *reference_only_map = (ReferenceFrame *)(ctx->reference_only_map); +if(reference_only_map != NULL){ +for (i = 0; i < ctx->max_num_ref; i++) { +if(reference_only_map[i].resource != NULL) { +D3D12_OBJECT_RELEASE(reference_only_map[i].resource); +} +} 
+av_freep(&ctx->reference_only_map); +av_freep(&ctx->ref_only_resources); +} +} + +static void prepare_reference_only_resources(AVCodecContext *avctx) +{ +D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx); +int i, j; +ReferenceFrame *reference_only_map = (ReferenceFrame *)(ctx->reference_only_map); +if(reference_only_map == NULL){ +return; +} +memset(ctx->ref_only_resources, 0, ctx->max_num_ref * sizeof(*(ctx->ref_only_resources))); +for (j = 0; j < ctx->max_num_ref; j++) { +for (i = 0; i < ctx->max_num_ref; i++) { +if(reference_only_map[j].used && reference_only_map[j].output_resource == ctx->ref_resources[i]) { +ctx->ref_only_resources[i] = ref
[FFmpeg-devel] [PATCH, v2] avcodec/d3d12va_encode: texture array support for HEVC
This patch adds support for the texture array feature used by AMD boards in the D3D12 HEVC encoder. In texture array mode, a single texture array is shared for all reference and reconstructed pictures using different subresources. The implementation ensures compatibility and has been successfully tested on AMD, Intel, and NVIDIA GPUs. v2 updates: 1. The reference to MaxL1ReferencesForB for the H.264 codec was updated to use the corresponding H.264 field instead of the HEVC one. 2. Max_subresource_array_size calculation was adjusted by removing the D3D12VA_VIDEO_ENC_ASYNC_DEPTH offset. --- libavcodec/d3d12va_encode.c | 241 +-- libavcodec/d3d12va_encode.h | 29 libavcodec/d3d12va_encode_hevc.c | 5 +- 3 files changed, 231 insertions(+), 44 deletions(-) diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c index 4d738200fe..d428ad1fd8 100644 --- a/libavcodec/d3d12va_encode.c +++ b/libavcodec/d3d12va_encode.c @@ -264,6 +264,11 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, av_log(avctx, AV_LOG_DEBUG, "Input surface is %p.\n", pic->input_surface->texture); +if (ctx->is_texture_array) { +base_pic->recon_image->data[0] = ctx->texture_array_frame; +pic->subresource_index = (ctx->subresource_used_index++) % ctx->max_subresource_array_size; +} + pic->recon_surface = (AVD3D12VAFrame *)base_pic->recon_image->data[0]; av_log(avctx, AV_LOG_DEBUG, "Recon surface is %p.\n", pic->recon_surface->texture); @@ -325,11 +330,28 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, goto fail; } +if (ctx->is_texture_array) { +d3d12_refs.pSubresources = av_calloc(d3d12_refs.NumTexture2Ds, + sizeof(*d3d12_refs.pSubresources)); +if (!d3d12_refs.pSubresources) { +err = AVERROR(ENOMEM); +goto fail; +} +} + i = 0; -for (j = 0; j < base_pic->nb_refs[0]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; -for (j = 0; j < base_pic->nb_refs[1]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +for (j = 0; j < base_pic->nb_refs[0]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->subresource_index; +i++; +} +for (j = 0; j < base_pic->nb_refs[1]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->subresource_index; +i++; +} } input_args.PictureControlDesc.IntraRefreshFrameIndex = 0; @@ -343,7 +365,11 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, output_args.Bitstream.pBuffer= pic->output_buffer; output_args.Bitstream.FrameStartOffset = pic->aligned_header_size; output_args.ReconstructedPicture.pReconstructedPicture = pic->recon_surface->texture; -output_args.ReconstructedPicture.ReconstructedPictureSubresource = 0; +if (ctx->is_texture_array) { +output_args.ReconstructedPicture.ReconstructedPictureSubresource = pic->subresource_index; +} else { +output_args.ReconstructedPicture.ReconstructedPictureSubresource = 0; +} output_args.EncoderOutputMetadata.pBuffer= pic->encoded_metadata; output_args.EncoderOutputMetadata.Offset = 0; @@ -381,35 +407,87 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, }, \ } +#define TRANSITION_BARRIER_SUBRESOURCE(res, subres,before, after) \ +(D3D12_RESOURCE_BARRIER) { \ 
+.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,\ +.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE, \ +.Transition = { \ +.pResource = res, \ +.Subresource = subres, \ +.StateBefore = before, \ +.StateAfter = after, \ +}, \ +} + barriers[0] = TRANSITION_BARRIER(pic->input_surface->texture,
[FFmpeg-devel] [PATCH, v3] avcodec/d3d12va_encode: texture array support for HEVC
This patch adds support for the texture array feature used by AMD boards in the D3D12 HEVC encoder. In texture array mode, a single texture array is shared for all reference and reconstructed pictures using different subresources. The implementation ensures compatibility and has been successfully tested on AMD, Intel, and NVIDIA GPUs. v2 updates: 1. The reference to MaxL1ReferencesForB for the H.264 codec was updated to use the corresponding H.264 field instead of the HEVC one. 2. Max_subresource_array_size calculation was adjusted by removing the D3D12VA_VIDEO_ENC_ASYNC_DEPTH offset. v3 updates: 1. Fixed a type mismatch by explicitly casting AVD3D12VAFrame* to (uint8_t*) when assigning to data[0]. 2. Adjusted logging format specifier for HRESULT to use `%lx`. --- libavcodec/d3d12va_encode.c | 241 +-- libavcodec/d3d12va_encode.h | 29 libavcodec/d3d12va_encode_hevc.c | 5 +- 3 files changed, 231 insertions(+), 44 deletions(-) diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c index 4d738200fe..85e79b2e14 100644 --- a/libavcodec/d3d12va_encode.c +++ b/libavcodec/d3d12va_encode.c @@ -264,6 +264,11 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, av_log(avctx, AV_LOG_DEBUG, "Input surface is %p.\n", pic->input_surface->texture); +if (ctx->is_texture_array) { +base_pic->recon_image->data[0] = (uint8_t *)ctx->texture_array_frame; +pic->subresource_index = (ctx->subresource_used_index++) % ctx->max_subresource_array_size; +} + pic->recon_surface = (AVD3D12VAFrame *)base_pic->recon_image->data[0]; av_log(avctx, AV_LOG_DEBUG, "Recon surface is %p.\n", pic->recon_surface->texture); @@ -325,11 +330,28 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, goto fail; } +if (ctx->is_texture_array) { +d3d12_refs.pSubresources = av_calloc(d3d12_refs.NumTexture2Ds, + sizeof(*d3d12_refs.pSubresources)); +if (!d3d12_refs.pSubresources) { +err = AVERROR(ENOMEM); +goto fail; +} +} + i = 0; -for (j = 0; j < base_pic->nb_refs[0]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; -for (j = 0; j < base_pic->nb_refs[1]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +for (j = 0; j < base_pic->nb_refs[0]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->subresource_index; +i++; +} +for (j = 0; j < base_pic->nb_refs[1]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->subresource_index; +i++; +} } input_args.PictureControlDesc.IntraRefreshFrameIndex = 0; @@ -343,7 +365,11 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, output_args.Bitstream.pBuffer= pic->output_buffer; output_args.Bitstream.FrameStartOffset = pic->aligned_header_size; output_args.ReconstructedPicture.pReconstructedPicture = pic->recon_surface->texture; -output_args.ReconstructedPicture.ReconstructedPictureSubresource = 0; +if (ctx->is_texture_array) { +output_args.ReconstructedPicture.ReconstructedPictureSubresource = pic->subresource_index; +} else { +output_args.ReconstructedPicture.ReconstructedPictureSubresource = 0; +} output_args.EncoderOutputMetadata.pBuffer= pic->encoded_metadata; 
output_args.EncoderOutputMetadata.Offset = 0; @@ -381,35 +407,87 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, }, \ } +#define TRANSITION_BARRIER_SUBRESOURCE(res, subres,before, after) \ +(D3D12_RESOURCE_BARRIER) { \ +.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,\ +.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE, \ +.Transition = { \ +.pResource = res, \ +.Subresource = subres, \ +.StateBefore = before, \ +.StateAfter = after,
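In other words, the core of the v3 approach is a small piece of bookkeeping: the encoder context owns one shared texture array, reconstructed pictures are assigned array slices round-robin, and the same slice index is reported both in the reference descriptor (pSubresources) and as ReconstructedPictureSubresource. A hedged sketch of that bookkeeping, using the field names from the diff but not the literal patch code:

/* Illustrative sketch only (not the patch code): how the v3 changes above
 * fit together. Field names (is_texture_array, subresource_used_index,
 * max_subresource_array_size, subresource_index) are taken from the diff. */
static void pick_recon_slot(D3D12VAEncodeContext *ctx, D3D12VAEncodePicture *pic)
{
    if (ctx->is_texture_array) {
        /* One shared ID3D12Resource holds every reconstructed picture; each
         * picture only receives a distinct array slice, reused round-robin. */
        pic->subresource_index = ctx->subresource_used_index++ %
                                 ctx->max_subresource_array_size;
    } else {
        /* Separate texture per picture: always subresource 0. */
        pic->subresource_index = 0;
    }
}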
[FFmpeg-devel] [PATCH, v4] avcodec/d3d12va_encode: texture array support for HEVC
This patch adds support for the texture array feature used by AMD boards in the D3D12 HEVC encoder. In texture array mode, a single texture array is shared for all reference and reconstructed pictures using different subresources. The implementation ensures compatibility and has been successfully tested on AMD, Intel, and NVIDIA GPUs. v2 updates: 1. The reference to MaxL1ReferencesForB for the H.264 codec was updated to use the corresponding H.264 field instead of the HEVC one. 2. Max_subresource_array_size calculation was adjusted by removing the D3D12VA_VIDEO_ENC_ASYNC_DEPTH offset. v3 updates: 1. Fixed a type mismatch by explicitly casting AVD3D12VAFrame* to (uint8_t*) when assigning to data[0]. 2. Adjusted logging format specifier for HRESULT to use `%lx`. v4 updates: 1. Moved texture array management to hwcontext_d3d12va for proper abstraction. 2. Added `texture_array` and `texture_array_size` fields to AVD3D12VAFramesContext. 3. Implemented shared texture array allocation during `av_hwframe_ctx_init`. 4. Frames now receive unique subresource indices via `d3d12va_pool_alloc_texture_array`. 5. Removed `d3d12va_create_texture_array`, allocation is now handled entirely within hwcontext. 6. Encoder now uses subresource indices provided by hwcontext instead of managing them manually. --- libavcodec/d3d12va_encode.c | 191 +++ libavcodec/d3d12va_encode.h | 12 ++ libavcodec/d3d12va_encode_hevc.c | 5 +- libavutil/hwcontext_d3d12va.c| 66 ++- libavutil/hwcontext_d3d12va.h| 18 +++ 5 files changed, 242 insertions(+), 50 deletions(-) diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c index e24a5b8d24..f9f4ca8903 100644 --- a/libavcodec/d3d12va_encode.c +++ b/libavcodec/d3d12va_encode.c @@ -191,7 +191,8 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, FFHWBaseEncodeContext *base_ctx = avctx->priv_data; D3D12VAEncodeContext *ctx = avctx->priv_data; D3D12VAEncodePicture *pic = base_pic->priv; -AVD3D12VAFramesContext *frames_hwctx = base_ctx->input_frames->hwctx; +AVD3D12VAFramesContext *frames_hwctx_input = base_ctx->input_frames->hwctx; +AVD3D12VAFramesContext *frames_hwctx_recon = ((AVHWFramesContext*)base_pic->recon_image->hw_frames_ctx->data)->hwctx; int err, i, j; HRESULT hr; char data[MAX_PARAM_BUFFER_SIZE]; @@ -221,7 +222,7 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, D3D12_VIDEO_ENCODER_RESOLVE_METADATA_INPUT_ARGUMENTS input_metadata = { .EncoderCodec = ctx->codec->d3d12_codec, .EncoderProfile = ctx->profile->d3d12_profile, -.EncoderInputFormat = frames_hwctx->format, +.EncoderInputFormat = frames_hwctx_input->format, .EncodedPictureEffectiveResolution = ctx->resolution, }; @@ -264,6 +265,9 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, av_log(avctx, AV_LOG_DEBUG, "Input surface is %p.\n", pic->input_surface->texture); +if (ctx->is_texture_array) +pic->subresource_index = ((AVD3D12VAFrame*)base_pic->recon_image->data[0])->subresource_index; + pic->recon_surface = (AVD3D12VAFrame *)base_pic->recon_image->data[0]; av_log(avctx, AV_LOG_DEBUG, "Recon surface is %p.\n", pic->recon_surface->texture); @@ -325,11 +329,28 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, goto fail; } +if (ctx->is_texture_array) { +d3d12_refs.pSubresources = av_calloc(d3d12_refs.NumTexture2Ds, + sizeof(*d3d12_refs.pSubresources)); +if (!d3d12_refs.pSubresources) { +err = AVERROR(ENOMEM); +goto fail; +} +} + i = 0; -for (j = 0; j < base_pic->nb_refs[0]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture 
*)base_pic->refs[0][j]->priv)->recon_surface->texture; -for (j = 0; j < base_pic->nb_refs[1]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +for (j = 0; j < base_pic->nb_refs[0]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->subresource_index; +i++; +} +for (j = 0; j < base_pic->nb_refs[1]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->subresource_index; +i++; +} } input_args.PictureControlDesc.IntraRefreshFrameIndex = 0; @@ -343,7 +364,10 @@ static int d3
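The v4 restructuring moves that bookkeeping out of the encoder: the d3d12va hwcontext allocates the shared texture array once and stamps every pooled frame with its own slice, so the encoder only reads the index back. A minimal sketch under those assumptions (the helper below is hypothetical; only the subresource_index field is taken from the patch):

/* Hypothetical sketch of the v4 division of labour, not the patch code. */

/* hwcontext side: performed once per pooled frame during allocation. */
static void stamp_frame_slice(AVD3D12VAFrame *frame, int *next_slice, int array_size)
{
    /* Every frame from the pool gets a fixed, unique slice of the shared array. */
    frame->subresource_index = (*next_slice)++ % array_size;
}

/* encoder side: the local counter is gone, the index comes with the frame:
 *     pic->subresource_index =
 *         ((AVD3D12VAFrame *)base_pic->recon_image->data[0])->subresource_index;
 */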
[FFmpeg-devel] [PATCH] avcodec/d3d12va_encode: fix l0 reference count limit
Prevents the L0 reference count from being clamped to zero when the codec-specific support structures report MaxL1ReferencesForB == 0 (B-frames not supported) during GOP structure initialization. ---
 libavcodec/d3d12va_encode.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c
index 4d738200fe..e39d4d1c29 100644
--- a/libavcodec/d3d12va_encode.c
+++ b/libavcodec/d3d12va_encode.c
@@ -1088,13 +1088,15 @@ static int d3d12va_encode_init_gop_structure(AVCodecContext *avctx)
     switch (ctx->codec->d3d12_codec) {
     case D3D12_VIDEO_ENCODER_CODEC_H264:
         ref_l0 = FFMIN(support.PictureSupport.pH264Support->MaxL0ReferencesForP,
-                       support.PictureSupport.pH264Support->MaxL1ReferencesForB);
+                       support.PictureSupport.pH264Support->MaxL1ReferencesForB ?
+                       support.PictureSupport.pH264Support->MaxL1ReferencesForB : UINT_MAX);
         ref_l1 = support.PictureSupport.pH264Support->MaxL1ReferencesForB;
         break;

     case D3D12_VIDEO_ENCODER_CODEC_HEVC:
         ref_l0 = FFMIN(support.PictureSupport.pHEVCSupport->MaxL0ReferencesForP,
-                       support.PictureSupport.pHEVCSupport->MaxL1ReferencesForB);
+                       support.PictureSupport.pHEVCSupport->MaxL1ReferencesForB ?
+                       support.PictureSupport.pHEVCSupport->MaxL1ReferencesForB : UINT_MAX);
         ref_l1 = support.PictureSupport.pHEVCSupport->MaxL1ReferencesForB;
         break;
-- 
2.45.2.windows.1
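To make the failure mode concrete, a small worked example (the numbers are invented for illustration):

/* Suppose a driver reports, for HEVC:
 *     MaxL0ReferencesForP = 3
 *     MaxL1ReferencesForB = 0      (B-frames not supported)
 *
 * Before the fix:
 *     ref_l0 = FFMIN(3, 0) = 0;    P-frames would be left with no references.
 *
 * After the fix:
 *     ref_l0 = FFMIN(3, 0 ? 0 : UINT_MAX) = 3;   the real L0 limit survives,
 *     ref_l1 = 0;                                 B-frames simply stay off.
 */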
[FFmpeg-devel] [PATCH] avutil/hwcontext_amf: add device cache size
Set the AMF context property "DeviceSurfaceCacheSize" to 50 at device creation. This improves pipeline stability and reduces dynamic GPU surface allocations when using AMF with copy_frame = 1; no negative side effects have been observed. ---
 libavutil/hwcontext_amf.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/libavutil/hwcontext_amf.c b/libavutil/hwcontext_amf.c
index a020b0e2e1..20b9296e34 100644
--- a/libavutil/hwcontext_amf.c
+++ b/libavutil/hwcontext_amf.c
@@ -496,8 +496,10 @@ static int amf_device_create(AVHWDeviceContext *device_ctx,
         ret = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
-        if (ret == AMF_OK)
+        if (ret == AMF_OK) {
+            AMF_ASSIGN_PROPERTY_INT64(ret, ctx->context, L"DeviceSurfaceCacheSize", 50);
             return 0;
+        }
         av_log(device_ctx, AV_LOG_ERROR, "CreateContext() failed with error %d.\n", ret);
     }
     amf_device_uninit(device_ctx);
-- 
2.45.2.windows.1
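For context, a minimal sketch of the application side that ends up benefiting from this cache (standard hwcontext API; the 50-surface figure is the value hard-coded by the patch):

#include <libavutil/hwcontext.h>

/* Create an AMF hardware device context. With the change above, the AMF
 * context created underneath keeps up to 50 device surfaces cached
 * ("DeviceSurfaceCacheSize"), so frame copies into AMF (copy_frame = 1)
 * no longer trigger repeated GPU surface allocations. */
static int create_amf_device(AVBufferRef **device_ref)
{
    return av_hwdevice_ctx_create(device_ref, AV_HWDEVICE_TYPE_AMF,
                                  NULL, NULL, 0);
}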
[FFmpeg-devel] [PATCH, v6] avcodec/d3d12va_encode: texture array support for HEVC
This patch adds support for the texture array feature used by AMD boards in the D3D12 HEVC encoder. In texture array mode, a single texture array is shared for all reference and reconstructed pictures using different subresources. The implementation ensures compatibility and has been successfully tested on AMD, Intel, and NVIDIA GPUs. v2 updates: 1. The reference to MaxL1ReferencesForB for the H.264 codec was updated to use the corresponding H.264 field instead of the HEVC one. 2. Max_subresource_array_size calculation was adjusted by removing the D3D12VA_VIDEO_ENC_ASYNC_DEPTH offset. v3 updates: 1. Fixed a type mismatch by explicitly casting AVD3D12VAFrame* to (uint8_t*) when assigning to data[0]. 2. Adjusted logging format specifier for HRESULT to use `%lx`. v4 updates: 1. Moved texture array management to hwcontext_d3d12va for proper abstraction. 2. Added `texture_array` and `texture_array_size` fields to AVD3D12VAFramesContext. 3. Implemented shared texture array allocation during `av_hwframe_ctx_init`. 4. Frames now receive unique subresource indices via `d3d12va_pool_alloc_texture_array`. 5. Removed `d3d12va_create_texture_array`, allocation is now handled entirely within hwcontext. 6. Encoder now uses subresource indices provided by hwcontext instead of managing them manually. v5 updates: No changes, resubmitted as v4 was missed by patchwork. v6 updates: 1. Minor cosmetic fixes according to review of v5 2. Bumped lavu version to 60.5.100 and updated APIchanges --- doc/APIchanges | 4 + libavcodec/d3d12va_encode.c | 184 +++ libavcodec/d3d12va_encode.h | 12 ++ libavcodec/d3d12va_encode_hevc.c | 5 +- libavutil/hwcontext_d3d12va.c| 65 ++- libavutil/hwcontext_d3d12va.h| 18 +++ libavutil/version.h | 4 +- 7 files changed, 240 insertions(+), 52 deletions(-) diff --git a/doc/APIchanges b/doc/APIchanges index d6e38245f8..eab06cd251 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -2,6 +2,10 @@ The last version increases of all libraries were on 2025-03-28 API changes, most recent first: +2025-07-xx - xx - lavu 60.5.100 - hwcontext_d3d12va.h + Add support for texture array mode AVD3D12VAFrame.subresource_index, + AVD3D12VAFramesContext.texture_array and texture_array_size + 2025-07-xx - xd - lavfi 11.2.100 - avfilter.h Add AVFilterGraph->max_buffered_frames. 
diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c index e24a5b8d24..1f202f512c 100644 --- a/libavcodec/d3d12va_encode.c +++ b/libavcodec/d3d12va_encode.c @@ -191,7 +191,8 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, FFHWBaseEncodeContext *base_ctx = avctx->priv_data; D3D12VAEncodeContext *ctx = avctx->priv_data; D3D12VAEncodePicture *pic = base_pic->priv; -AVD3D12VAFramesContext *frames_hwctx = base_ctx->input_frames->hwctx; +AVD3D12VAFramesContext *frames_hwctx_input = base_ctx->input_frames->hwctx; +AVD3D12VAFramesContext *frames_hwctx_recon = base_ctx->recon_frames->hwctx; int err, i, j; HRESULT hr; char data[MAX_PARAM_BUFFER_SIZE]; @@ -221,7 +222,7 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, D3D12_VIDEO_ENCODER_RESOLVE_METADATA_INPUT_ARGUMENTS input_metadata = { .EncoderCodec = ctx->codec->d3d12_codec, .EncoderProfile = ctx->profile->d3d12_profile, -.EncoderInputFormat = frames_hwctx->format, +.EncoderInputFormat = frames_hwctx_input->format, .EncodedPictureEffectiveResolution = ctx->resolution, }; @@ -268,6 +269,8 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, av_log(avctx, AV_LOG_DEBUG, "Recon surface is %p.\n", pic->recon_surface->texture); +pic->subresource_index = ctx->is_texture_array ? pic->recon_surface->subresource_index : 0; + pic->output_buffer_ref = av_buffer_pool_get(ctx->output_buffer_pool); if (!pic->output_buffer_ref) { err = AVERROR(ENOMEM); @@ -325,11 +328,26 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, goto fail; } +if (ctx->is_texture_array) { +d3d12_refs.pSubresources = av_calloc(d3d12_refs.NumTexture2Ds, + sizeof(*d3d12_refs.pSubresources)); +if (!d3d12_refs.pSubresources) { +err = AVERROR(ENOMEM); +goto fail; +} +} + i = 0; -for (j = 0; j < base_pic->nb_refs[0]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; -for (j = 0; j < base_pic->nb_refs[1]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +for (j = 0; j < base_pic->nb_refs[0]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)ba
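A hedged sketch of how client code could tell the two modes apart using the new public fields from the APIchanges entry (texture_array, texture_array_size, subresource_index); the exact semantics are defined by the header documentation, this only illustrates the intent:

/* Sketch only; assumes the new AVD3D12VAFramesContext/AVD3D12VAFrame fields. */
static int frames_use_shared_texture_array(const AVHWFramesContext *frames_ctx)
{
    const AVD3D12VAFramesContext *hwctx = frames_ctx->hwctx;

    /* A non-NULL shared array (with a slice count) signals texture-array
     * mode; each frame's subresource_index then selects its slice in it. */
    return hwctx->texture_array != NULL && hwctx->texture_array_size > 0;
}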
[FFmpeg-devel] [PATCH, v5] avcodec/d3d12va_encode: texture array support for HEVC
This patch adds support for the texture array feature used by AMD boards in the D3D12 HEVC encoder. In texture array mode, a single texture array is shared for all reference and reconstructed pictures using different subresources. The implementation ensures compatibility and has been successfully tested on AMD, Intel, and NVIDIA GPUs. v2 updates: 1. The reference to MaxL1ReferencesForB for the H.264 codec was updated to use the corresponding H.264 field instead of the HEVC one. 2. Max_subresource_array_size calculation was adjusted by removing the D3D12VA_VIDEO_ENC_ASYNC_DEPTH offset. v3 updates: 1. Fixed a type mismatch by explicitly casting AVD3D12VAFrame* to (uint8_t*) when assigning to data[0]. 2. Adjusted logging format specifier for HRESULT to use `%lx`. v4 updates: 1. Moved texture array management to hwcontext_d3d12va for proper abstraction. 2. Added `texture_array` and `texture_array_size` fields to AVD3D12VAFramesContext. 3. Implemented shared texture array allocation during `av_hwframe_ctx_init`. 4. Frames now receive unique subresource indices via `d3d12va_pool_alloc_texture_array`. 5. Removed `d3d12va_create_texture_array`, allocation is now handled entirely within hwcontext. 6. Encoder now uses subresource indices provided by hwcontext instead of managing them manually. v5 updates: No changes, resubmitted as v4 was missed by patchwork. --- libavcodec/d3d12va_encode.c | 191 +++ libavcodec/d3d12va_encode.h | 12 ++ libavcodec/d3d12va_encode_hevc.c | 5 +- libavutil/hwcontext_d3d12va.c| 66 ++- libavutil/hwcontext_d3d12va.h| 18 +++ 5 files changed, 242 insertions(+), 50 deletions(-) diff --git a/libavcodec/d3d12va_encode.c b/libavcodec/d3d12va_encode.c index e24a5b8d24..f9f4ca8903 100644 --- a/libavcodec/d3d12va_encode.c +++ b/libavcodec/d3d12va_encode.c @@ -191,7 +191,8 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, FFHWBaseEncodeContext *base_ctx = avctx->priv_data; D3D12VAEncodeContext *ctx = avctx->priv_data; D3D12VAEncodePicture *pic = base_pic->priv; -AVD3D12VAFramesContext *frames_hwctx = base_ctx->input_frames->hwctx; +AVD3D12VAFramesContext *frames_hwctx_input = base_ctx->input_frames->hwctx; +AVD3D12VAFramesContext *frames_hwctx_recon = ((AVHWFramesContext*)base_pic->recon_image->hw_frames_ctx->data)->hwctx; int err, i, j; HRESULT hr; char data[MAX_PARAM_BUFFER_SIZE]; @@ -221,7 +222,7 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, D3D12_VIDEO_ENCODER_RESOLVE_METADATA_INPUT_ARGUMENTS input_metadata = { .EncoderCodec = ctx->codec->d3d12_codec, .EncoderProfile = ctx->profile->d3d12_profile, -.EncoderInputFormat = frames_hwctx->format, +.EncoderInputFormat = frames_hwctx_input->format, .EncodedPictureEffectiveResolution = ctx->resolution, }; @@ -264,6 +265,9 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, av_log(avctx, AV_LOG_DEBUG, "Input surface is %p.\n", pic->input_surface->texture); +if (ctx->is_texture_array) +pic->subresource_index = ((AVD3D12VAFrame*)base_pic->recon_image->data[0])->subresource_index; + pic->recon_surface = (AVD3D12VAFrame *)base_pic->recon_image->data[0]; av_log(avctx, AV_LOG_DEBUG, "Recon surface is %p.\n", pic->recon_surface->texture); @@ -325,11 +329,28 @@ static int d3d12va_encode_issue(AVCodecContext *avctx, goto fail; } +if (ctx->is_texture_array) { +d3d12_refs.pSubresources = av_calloc(d3d12_refs.NumTexture2Ds, + sizeof(*d3d12_refs.pSubresources)); +if (!d3d12_refs.pSubresources) { +err = AVERROR(ENOMEM); +goto fail; +} +} + i = 0; -for (j = 0; j < base_pic->nb_refs[0]; j++) -d3d12_refs.ppTexture2Ds[i++] = 
((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; -for (j = 0; j < base_pic->nb_refs[1]; j++) -d3d12_refs.ppTexture2Ds[i++] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +for (j = 0; j < base_pic->nb_refs[0]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[0][j]->priv)->subresource_index; +i++; +} +for (j = 0; j < base_pic->nb_refs[1]; j++) { +d3d12_refs.ppTexture2Ds[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->recon_surface->texture; +if (ctx->is_texture_array) +d3d12_refs.pSubresources[i] = ((D3D12VAEncodePicture *)base_pic->refs[1][j]->priv)->subresource_index; +i++; +} } input_args.PictureControl
[FFmpeg-devel] [PATCH] amfenc: Fix for full PA queue
Fixes AMF_INPUT_FULL errors with pre-analysis (PA) enabled. Added wait and poll encoder output to free up internal buffers before submitting new frames. Improves stability and performance by accounting for encoder and analysis stage queue limits (incl. lookahead buffering). Reproduce: ffmpeg.exe -y -r 60 -f lavfi -i testsrc=rate=60:size=3840x2160 -t 10 -pix_fmt yuv420p -an -c:v av1_amf -preset:v high_quality -profile:v main -quality:v high_quality -usage:v high_quality --- libavcodec/amfenc.c | 75 - libavcodec/amfenc.h | 1 + 2 files changed, 61 insertions(+), 15 deletions(-) diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c index 962bf6b006..9bf8be28dd 100644 --- a/libavcodec/amfenc.c +++ b/libavcodec/amfenc.c @@ -204,6 +204,16 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx) av_buffer_unref(&ctx->device_ctx_ref); av_fifo_freep2(&ctx->timestamp_list); +if (ctx->output_list) { +// release remaining AMF output buffers +while(av_fifo_can_read(ctx->output_list)) { +AMFBuffer* buffer = NULL; +av_fifo_read(ctx->output_list, &buffer, 1); +if(buffer != NULL) +buffer->pVtbl->Release(buffer); +} +av_fifo_freep2(&ctx->output_list); +} av_freep(&ctx->pts_property_name); av_freep(&ctx->av_frame_property_name); @@ -306,6 +316,10 @@ int ff_amf_encode_init(AVCodecContext *avctx) if (!ctx->timestamp_list) { return AVERROR(ENOMEM); } +ctx->output_list = av_fifo_alloc2(2, sizeof(AMFBuffer*), AV_FIFO_FLAG_AUTO_GROW); +if (!ctx->output_list) +return AVERROR(ENOMEM); + ctx->dts_delay = 0; ctx->hwsurfaces_in_queue = 0; @@ -639,6 +653,22 @@ static int amf_submit_frame_locked(AVCodecContext *avctx, AVFrame *frame, AMFSur amf_unlock_context(avctx); return ret; } +static AMF_RESULT amf_query_output(AVCodecContext *avctx, AMFBuffer **buffer) +{ +AMFEncoderContext *ctx = avctx->priv_data; +AMFData*data = NULL; +AMF_RESULT ret = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data); +*buffer = NULL; +if (data) { +AMFGuid guid = IID_AMFBuffer(); +data->pVtbl->QueryInterface(data, &guid, (void**)buffer); // query for buffer interface +data->pVtbl->Release(data); +if (amf_release_attached_frame_ref(ctx, *buffer) == AMF_OK) +ctx->hwsurfaces_in_queue--; +ctx->encoded_frame++; +} +return ret; +} int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) { @@ -649,7 +679,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) AMF_RESULT res; int ret; AMF_RESULT res_query; -AMFData*data = NULL; +AMFBuffer* buffer = NULL; AVFrame*frame = av_frame_alloc(); int block_and_wait; int64_t pts = 0; @@ -659,6 +689,14 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) av_frame_free(&frame); return AVERROR(EINVAL); } +// check if some outputs are available +av_fifo_read(ctx->output_list, &buffer, 1); +if (buffer != NULL) { // return already retrieved output +ret = amf_copy_buffer(avctx, avpkt, buffer); +buffer->pVtbl->Release(buffer); +return ret; +} + ret = ff_encode_get_frame(avctx, frame); if(ret < 0){ if(ret != AVERROR_EOF){ @@ -698,20 +736,10 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) do { block_and_wait = 0; // poll data - -res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data); -if (data) { -// copy data to packet -AMFBuffer *buffer; -AMFGuid guid = IID_AMFBuffer(); -data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface +res_query = amf_query_output(avctx, &buffer); +if (buffer) { ret = amf_copy_buffer(avctx, avpkt, buffer); -if (amf_release_attached_frame_ref(ctx, buffer) == AMF_OK) { 
-ctx->hwsurfaces_in_queue--; -} -ctx->encoded_frame++; buffer->pVtbl->Release(buffer); -data->pVtbl->Release(data); AMF_RETURN_IF_FALSE(ctx, ret >= 0, ret, "amf_copy_buffer() failed with error %d\n", ret); @@ -737,12 +765,29 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) if (res_query == AMF_EOF) { ret = AVERROR_EOF; -} else if (data == NULL) { +} else if (buffer == NULL) { ret = AVERROR(EAGAIN); } else { if(surface) { // resubmit surface -res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface); +do { +res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface); +if (res != AMF_INPUT_FULL) +
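The resubmission path is cut off above; the pattern the commit message describes (drain ready outputs into the FIFO to release internal buffers, then retry the submit) can be sketched as follows, reusing the names from the diff but not reproducing the literal patch code:

static AMF_RESULT submit_with_backpressure(AVCodecContext *avctx, AMFSurface *surface)
{
    AMFEncoderContext *ctx = avctx->priv_data;
    AMFBuffer         *buffer;
    AMF_RESULT         res;

    do {
        res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData *)surface);
        if (res != AMF_INPUT_FULL)
            break;

        /* Input queue (including the PA/lookahead stage) is full: pull any
         * finished output so the encoder can release internal buffers ... */
        buffer = NULL;
        amf_query_output(avctx, &buffer);
        if (buffer) {
            /* ... and keep it for the next ff_amf_receive_packet() call. */
            av_fifo_write(ctx->output_list, &buffer, 1);
        } else {
            av_usleep(1000); /* nothing ready yet, back off briefly and retry */
        }
    } while (res == AMF_INPUT_FULL);

    return res;
}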