On 26/12/2022 06:59, Jean Jogh wrote:
see https://github.com/intel/libva/pull/664

Signed-off-by: jianfeng.zheng <jianfeng.zh...@mthreads.com>
---
  configure                      | 13 +++++++++++++
  libavcodec/h264_slice.c        |  6 ++++++
  libavcodec/vaapi_decode.c      | 10 ++++++++++
  libavcodec/vaapi_encode_h264.c | 24 ++++++++++++++++++++++--
  libavcodec/vaapi_h264.c        |  5 +++--
  5 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/configure b/configure
index f08cdab3d1..ac199d97cb 100755
--- a/configure
+++ b/configure
@@ -2410,6 +2410,7 @@ HAVE_LIST="
      texi2html
      xmllint
      zlib_gzip
+    va_profile_h264_high10
  "

  # options emitted with CONFIG_ prefix but not available on the command line
@@ -6958,6 +6959,18 @@ if enabled vaapi; then
      check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
      check_type "va/va.h va/va_enc_vp8.h"  "VAEncPictureParameterBufferVP8"
      check_type "va/va.h va/va_enc_vp9.h"  "VAEncPictureParameterBufferVP9"
+
+    #
+    # Using VA_CHECK_VERSION in the source would be simpler, but we have to
+    # wait until the newly added VAProfile is shipped in a released VAAPI
+    # version.  Until then, auto-detection keeps things compatible with both
+    # old and new libva.
+    #
+    disable va_profile_h264_high10 && enabled h264_vaapi_hwaccel &&
+        test_code cc va/va.h "VAProfile p = VAProfileH264High10" &&
+        enable va_profile_h264_high10

To echo comments from others, I think it would be better to follow the same form 
as the other codecs and use VA_CHECK_VERSION once the next libva release is out, 
rather than adding a configure test.
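
That would mean dropping the configure hunk and guarding the new profile 
references directly, something like the following (the exact version number is a 
guess - whatever release ends up shipping VAProfileH264High10):

    #if VA_CHECK_VERSION(1, 18, 0)
        MAP(H264,        H264_HIGH_10,    H264High10  ),
    #endif

in vaapi_decode.c, and the same guard around the new encoder profile entry.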

+
  fi

  if enabled_all opencl libdrm ; then
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 420758ba0a..b9236281b1 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -806,6 +806,12 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
          } else if (CHROMA422(h))
              *fmt++ = AV_PIX_FMT_YUV422P10;
          else
+#if CONFIG_H264_VAAPI_HWACCEL
+            // Just add it as a candidate.  Whether VAProfileH264High10 is
+            // usable or not is decided later by vaapi_decode_make_config()
+            // in FFmpeg and vaQueryConfigProfiles() in libva.
+            *fmt++ = AV_PIX_FMT_VAAPI;
+#endif
              *fmt++ = AV_PIX_FMT_YUV420P10;

Braces definitely needed here!
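Without them the else only attaches to the AV_PIX_FMT_VAAPI line when the 
hwaccel is compiled in, so the YUV420P10 entry gets added unconditionally.  
Something like this sketch:

    } else if (CHROMA422(h))
        *fmt++ = AV_PIX_FMT_YUV422P10;
    else {
    #if CONFIG_H264_VAAPI_HWACCEL
        // Candidate only; whether VAProfileH264High10 is actually usable is
        // decided later by vaapi_decode_make_config().
        *fmt++ = AV_PIX_FMT_VAAPI;
    #endif
        *fmt++ = AV_PIX_FMT_YUV420P10;
    }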

          break;
      case 12:
diff --git a/libavcodec/vaapi_decode.c b/libavcodec/vaapi_decode.c
index 134f10eca5..03bbbf2c41 100644
--- a/libavcodec/vaapi_decode.c
+++ b/libavcodec/vaapi_decode.c
@@ -402,6 +402,9 @@ static const struct {
                             H264ConstrainedBaseline),
      MAP(H264,        H264_MAIN,       H264Main    ),
      MAP(H264,        H264_HIGH,       H264High    ),
+#if HAVE_VA_PROFILE_H264_HIGH10
+    MAP(H264,        H264_HIGH_10,    H264High10  ),
+#endif
  #if VA_CHECK_VERSION(0, 37, 0)
      MAP(HEVC,        HEVC_MAIN,       HEVCMain    ),
      MAP(HEVC,        HEVC_MAIN_10,    HEVCMain10  ),
@@ -510,6 +513,13 @@ static int vaapi_decode_make_config(AVCodecContext *avctx,
              if (exact_match)
                  break;
          }
+#if HAVE_VA_PROFILE_H264_HIGH10
+        // In case an 8-bit stream is being decoded under VAProfileH264High10.
+        if (avctx->codec_id == AV_CODEC_ID_H264 &&
+            (avctx->profile == FF_PROFILE_H264_EXTENDED ||
+             avctx->profile == FF_PROFILE_H264_BASELINE) &&
+            matched_va_profile == VAProfileH264High)
+            break;
+#endif

Can you explain what this is trying to do?  Why does it need a special case 
where H.265 doesn't?

      }
      av_freep(&profile_list);

diff --git a/libavcodec/vaapi_encode_h264.c b/libavcodec/vaapi_encode_h264.c
index dd17be2190..d26ac5a98f 100644
--- a/libavcodec/vaapi_encode_h264.c
+++ b/libavcodec/vaapi_encode_h264.c
@@ -23,6 +23,7 @@

  #include "libavutil/avassert.h"
  #include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
  #include "libavutil/internal.h"
  #include "libavutil/opt.h"

@@ -290,10 +291,21 @@ static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
      H264RawPPS                        *pps = &priv->raw_pps;
      VAEncSequenceParameterBufferH264 *vseq = ctx->codec_sequence_params;
      VAEncPictureParameterBufferH264  *vpic = ctx->codec_picture_params;
+    const AVPixFmtDescriptor *desc;
+    int bit_depth;

      memset(sps, 0, sizeof(*sps));
      memset(pps, 0, sizeof(*pps));

+    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    av_assert0(desc);
+    if (desc->nb_components == 1 || desc->log2_chroma_w != 1 ||
+        desc->log2_chroma_h != 1) {
+        av_log(avctx, AV_LOG_ERROR, "Chroma format of input pixel format "
+                "%s is not supported.\n", desc->name);
+        return AVERROR(EINVAL);
+    }
+    bit_depth = desc->comp[0].depth;
+
      sps->nal_unit_header.nal_ref_idc   = 3;
      sps->nal_unit_header.nal_unit_type = H264_NAL_SPS;

@@ -303,11 +315,11 @@ static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
          avctx->profile == FF_PROFILE_H264_MAIN)
          sps->constraint_set1_flag = 1;

-    if (avctx->profile == FF_PROFILE_H264_HIGH)
+    if (avctx->profile == FF_PROFILE_H264_HIGH ||
+        avctx->profile == FF_PROFILE_H264_HIGH_10)
          sps->constraint_set3_flag = ctx->gop_size == 1;

      if (avctx->profile == FF_PROFILE_H264_MAIN ||
-        avctx->profile == FF_PROFILE_H264_HIGH) {
+        avctx->profile == FF_PROFILE_H264_HIGH ||
+        avctx->profile == FF_PROFILE_H264_HIGH_10) {
          sps->constraint_set4_flag = 1;
          sps->constraint_set5_flag = ctx->b_per_p == 0;
      }
@@ -348,6 +360,8 @@ static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)

      sps->seq_parameter_set_id = 0;
      sps->chroma_format_idc    = 1;
+    sps->bit_depth_luma_minus8 = bit_depth - 8;
+    sps->bit_depth_chroma_minus8 = bit_depth - 8;

      sps->log2_max_frame_num_minus4 = 4;
      sps->pic_order_cnt_type        = 0;
@@ -1111,6 +1125,9 @@ static av_cold int vaapi_encode_h264_configure(AVCodecContext *avctx)
  }

  static const VAAPIEncodeProfile vaapi_encode_h264_profiles[] = {
+#if HAVE_VA_PROFILE_H264_HIGH10
+    { FF_PROFILE_H264_HIGH_10, 10, 3, 1, 1, VAProfileH264High10 },
+#endif
      { FF_PROFILE_H264_HIGH, 8, 3, 1, 1, VAProfileH264High },
      { FF_PROFILE_H264_MAIN, 8, 3, 1, 1, VAProfileH264Main },
      { FF_PROFILE_H264_CONSTRAINED_BASELINE,
@@ -1175,11 +1192,13 @@ static av_cold int vaapi_encode_h264_init(AVCodecContext *avctx)
          av_log(avctx, AV_LOG_ERROR, "H.264 extended profile "
                 "is not supported.\n");
          return AVERROR_PATCHWELCOME;
+#if !HAVE_VA_PROFILE_H264_HIGH10
      case FF_PROFILE_H264_HIGH_10:
      case FF_PROFILE_H264_HIGH_10_INTRA:
          av_log(avctx, AV_LOG_ERROR, "H.264 10-bit profiles "
                 "are not supported.\n");
          return AVERROR_PATCHWELCOME;
+#endif

This block is doing early rejection of explicitly-specified profiles which 
libavcodec definitely can't support - I think it should just be removed 
entirely, since an unsupported profile will be rejected later anyway.

(Presumably explicitly-specified High 10 Intra does work as expected?  It looks 
like it should, but it would be good to check.)
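
Concretely, the extended-profile case would then be followed directly by the 
4:2:2 cases, with no #if and no High 10 cases at all - roughly:

    case FF_PROFILE_H264_EXTENDED:
        av_log(avctx, AV_LOG_ERROR, "H.264 extended profile "
               "is not supported.\n");
        return AVERROR_PATCHWELCOME;
    case FF_PROFILE_H264_HIGH_422:
    case FF_PROFILE_H264_HIGH_422_INTRA:
    ...

Any requested profile without an entry in vaapi_encode_h264_profiles[] then 
gets rejected by the common code in vaapi_encode.c as usual.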

      case FF_PROFILE_H264_HIGH_422:
      case FF_PROFILE_H264_HIGH_422_INTRA:
      case FF_PROFILE_H264_HIGH_444:
@@ -1267,6 +1286,7 @@ static const AVOption vaapi_encode_h264_options[] = {
      { PROFILE("constrained_baseline", FF_PROFILE_H264_CONSTRAINED_BASELINE) },
      { PROFILE("main",                 FF_PROFILE_H264_MAIN) },
      { PROFILE("high",                 FF_PROFILE_H264_HIGH) },
+    { PROFILE("high10",               FF_PROFILE_H264_HIGH_10) },
  #undef PROFILE

      { "level", "Set level (level_idc)",

Hmm.  It looks like libva has the same deficiency here as in H.265, where it 
doesn't support the lowest QPs.  Is there any way of fixing that on the libva 
side?
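
(For reference: with 10-bit depth, QpBdOffset_Y = 6 * (10 - 8) = 12, so the 
valid luma QP range is -12 to 51 rather than 0 to 51; it's that negative part 
which currently can't be requested.)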

diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index 9332aa6f31..3e44c8caf6 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -234,6 +234,7 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
      VAPictureParameterBufferH264 pic_param;
      VAIQMatrixBufferH264 iq_matrix;
      int err;
+    int qp_bd_offset = 6 * (sps->bit_depth_luma - 8);

      pic->output_surface = ff_vaapi_get_surface_id(h->cur_pic_ptr->f);

@@ -256,8 +257,8 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
            .log2_max_pic_order_cnt_lsb_minus4      = sps->log2_max_poc_lsb - 4,
            .delta_pic_order_always_zero_flag       = sps->delta_pic_order_always_zero_flag,
          },
-        .pic_init_qp_minus26                        = pps->init_qp - 26,
-        .pic_init_qs_minus26                        = pps->init_qs - 26,
+        .pic_init_qp_minus26                        = pps->init_qp - 26 - qp_bd_offset,
+        .pic_init_qs_minus26                        = pps->init_qs - 26 - qp_bd_offset,

The field is (signed) pic_init_qp_minus26 as found in the PPS in order to allow 
header parsing in hardware if it wants.  You should not be applying QpBdOffset 
as if making QP'_y here, because that's only done at the MB level.

Given that you've sent this and therefore it presumably works, I guess you must 
have a driver applying the reverse offset to recover the actual QP?  Can you 
fix the driver not to do that?
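
For reference, the derivation in the specification is:

    SliceQP_Y = 26 + pic_init_qp_minus26 + slice_qp_delta
    QP'_Y     = QP_Y + QpBdOffset_Y,  with QpBdOffset_Y = 6 * bit_depth_luma_minus8

so QpBdOffset_Y (12 for 10-bit) only comes in at the macroblock level, and 
these fields should stay exactly as they were:

        .pic_init_qp_minus26                        = pps->init_qp - 26,
        .pic_init_qs_minus26                        = pps->init_qs - 26,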

        .chroma_qp_index_offset                     = pps->chroma_qp_index_offset[0],
        .second_chroma_qp_index_offset              = pps->chroma_qp_index_offset[1],
          .pic_fields.bits = {

Thanks,

- Mark