On 30.06.2016 at 20:30, Boyuan Zhang wrote:
Signed-off-by: Boyuan Zhang <boyuan.zh...@amd.com>
---
  src/gallium/state_trackers/va/buffer.c     |   6 +
  src/gallium/state_trackers/va/picture.c    | 170 ++++++++++++++++++++++++++++-
  src/gallium/state_trackers/va/va_private.h |   3 +
  3 files changed, 177 insertions(+), 2 deletions(-)

diff --git a/src/gallium/state_trackers/va/buffer.c b/src/gallium/state_trackers/va/buffer.c
index 7d3167b..dfcebbe 100644
--- a/src/gallium/state_trackers/va/buffer.c
+++ b/src/gallium/state_trackers/va/buffer.c
@@ -133,6 +133,12 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
        if (!buf->derived_surface.transfer || !*pbuff)
           return VA_STATUS_ERROR_INVALID_BUFFER;
+      if (buf->type == VAEncCodedBufferType) {
+         ((VACodedBufferSegment*)buf->data)->buf = *pbuff;
+         ((VACodedBufferSegment*)buf->data)->size = buf->coded_size;
+         ((VACodedBufferSegment*)buf->data)->next = NULL;
+         *pbuff = buf->data;
+      }
     } else {
        pipe_mutex_unlock(drv->mutex);
        *pbuff = buf->data;
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index 89ac024..26205b1 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -78,7 +78,8 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
        return VA_STATUS_SUCCESS;
     }
-   context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
+   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
+      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
return VA_STATUS_SUCCESS;
  }
@@ -278,6 +279,140 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
        num_buffers, (const void * const*)buffers, sizes);
  }
+static VAStatus
+handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
+{
+   VAEncMiscParameterRateControl *rc = (VAEncMiscParameterRateControl *)misc->data;
+   if (context->desc.h264enc.rate_ctrl.rate_ctrl_method ==
+       PIPE_H264_ENC_RATE_CONTROL_METHOD_CONSTANT)
+      context->desc.h264enc.rate_ctrl.target_bitrate = rc->bits_per_second;
+   else
+      context->desc.h264enc.rate_ctrl.target_bitrate = rc->bits_per_second * rc->target_percentage;
+   context->desc.h264enc.rate_ctrl.peak_bitrate = rc->bits_per_second;
+   if (context->desc.h264enc.rate_ctrl.target_bitrate < 2000000)
+      context->desc.h264enc.rate_ctrl.vbv_buffer_size = MIN2((context->desc.h264enc.rate_ctrl.target_bitrate * 2.75), 2000000);
+   else
+      context->desc.h264enc.rate_ctrl.vbv_buffer_size = context->desc.h264enc.rate_ctrl.target_bitrate;
+
+   return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
+{
+   VAEncSequenceParameterBufferH264 *h264 = (VAEncSequenceParameterBufferH264 *)buf->data;
+   if (!context->decoder) {
+      context->templat.max_references = h264->max_num_ref_frames;
+      context->templat.level = h264->level_idc;
+      context->decoder = drv->pipe->create_video_codec(drv->pipe, &context->templat);
+      if (!context->decoder)
+         return VA_STATUS_ERROR_ALLOCATION_FAILED;
+   }
+   context->desc.h264enc.gop_size = h264->intra_idr_period;
+   return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
+{
+   VAStatus vaStatus = VA_STATUS_SUCCESS;
+   VAEncMiscParameterBuffer *misc;
+   misc = buf->data;
+
+   switch (misc->type) {
+   case VAEncMiscParameterTypeRateControl:
+      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
+      break;
+
+   default:
+      break;
+   }
+
+   return vaStatus;
+}
+
+static VAStatus
+handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
+{
+   VAEncPictureParameterBufferH264 *h264;
+   vlVaBuffer *coded_buf;
+
+   h264 = buf->data;
+   context->desc.h264enc.frame_num = h264->frame_num;
+   context->desc.h264enc.not_referenced = false;
+   context->desc.h264enc.is_idr = (h264->pic_fields.bits.idr_pic_flag == 1);
+   context->desc.h264enc.pic_order_cnt = h264->CurrPic.TopFieldOrderCnt / 2;
+   if (context->desc.h264enc.is_idr)
+      context->desc.h264enc.i_remain = 1;
+   else
+      context->desc.h264enc.i_remain = 0;
+
+   context->desc.h264enc.p_remain = context->desc.h264enc.gop_size - context->desc.h264enc.gop_cnt - context->desc.h264enc.i_remain;
+
+   coded_buf = handle_table_get(drv->htab, h264->coded_buf);
+   coded_buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
+                                         PIPE_USAGE_STREAM, coded_buf->size);
+   context->coded_buf = coded_buf;
+
+   context->desc.h264enc.frame_idx[h264->CurrPic.picture_id] = h264->frame_num;
+   if (context->desc.h264enc.is_idr)
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_IDR;
+   else
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_P;
+
+   context->desc.h264enc.frame_num_cnt++;
+   context->desc.h264enc.gop_cnt++;
+   if (context->desc.h264enc.gop_cnt == context->desc.h264enc.gop_size)
+      context->desc.h264enc.gop_cnt = 0;
+
+   return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
+{
+   VAEncSliceParameterBufferH264 *h264;
+
+   h264 = buf->data;
+   context->desc.h264enc.ref_idx_l0 = VA_INVALID_ID;
+   context->desc.h264enc.ref_idx_l1 = VA_INVALID_ID;
+   context->desc.h264enc.has_ref_pic_list = false;
+
+   for (int i = 0; i < 32; i++) {
+      if (h264->RefPicList0[i].picture_id == VA_INVALID_ID)
+         context->desc.h264enc.ref_pic_list_0[i] = VA_INVALID_ID;
+      else {
+         context->desc.h264enc.ref_pic_list_0[i] = context->desc.h264enc.frame_idx[h264->RefPicList0[i].picture_id];
+         if (context->desc.h264enc.ref_idx_l0 == VA_INVALID_ID)
+            context->desc.h264enc.ref_idx_l0 = context->desc.h264enc.frame_idx[h264->RefPicList0[i].picture_id];
+         context->desc.h264enc.has_ref_pic_list = true;
+      }
+      if (h264->RefPicList1[i].picture_id == VA_INVALID_ID || h264->slice_type != 1)
+         context->desc.h264enc.ref_pic_list_1[i] = VA_INVALID_ID;
+      else {
+         context->desc.h264enc.ref_pic_list_1[i] = context->desc.h264enc.frame_idx[h264->RefPicList1[i].picture_id];
+         if (context->desc.h264enc.ref_idx_l1 == VA_INVALID_ID)
+            context->desc.h264enc.ref_idx_l1 = context->desc.h264enc.frame_idx[h264->RefPicList1[i].picture_id];
+         context->desc.h264enc.has_ref_pic_list = true;
+      }
+   }
+
+   if (h264->slice_type == 1)
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_B;
+   else if (h264->slice_type == 0)
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_P;
+   else if (h264->slice_type == 2) {
+      if (context->desc.h264enc.is_idr) {
+         context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_IDR;
+         context->desc.h264enc.idr_pic_id++;
+      } else
+         context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_I;
+   } else
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
+
+   return VA_STATUS_SUCCESS;
+}
+
  VAStatus
  vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
  {
@@ -328,6 +463,22 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
           break;
+      case VAEncSequenceParameterBufferType:
+         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
+         break;
+
+      case VAEncMiscParameterBufferType:
+         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
+         break;
+
+      case VAEncPictureParameterBufferType:
+         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
+         break;
+
+      case VAEncSliceParameterBufferType:
+         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
+         break;
+
        default:
           break;
        }
@@ -342,6 +493,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
  {
     vlVaDriver *drv;
     vlVaContext *context;
+   vlVaBuffer *coded_buf;
+   unsigned int coded_size;
+   void *feedback;
if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -365,7 +519,19 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
     }
context->mpeg4.frame_num++;
-   context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
+
+   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
+      coded_buf = context->coded_buf;
+      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
+      context->decoder->encode_bitstream(context->decoder, context->target,
+                                         coded_buf->derived_surface.resource, &feedback);
+      context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
+      context->decoder->flush(context->decoder);
+      context->decoder->get_feedback(context->decoder, feedback, &coded_size);
+      coded_buf->coded_size = coded_size;

This is really inefficient to do here.

Basically you drain the pipeline and wait for the just-sent picture to be completely encoded before you return.

In addition to that, we should keep the calls to begin_frame() and end_frame() where they are. If some parameters are not yet filled in at that point, we should modify the backend to handle that instead.
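
Just to sketch what I have in mind (rough and untested; the "feedback" member on vlVaBuffer is hypothetical, and MapBuffer would also need some way to reach the decoder, e.g. a context pointer stored alongside it): begin_frame() stays in vlVaBeginPicture() as it is today, EndPicture only submits and flushes, and the blocking get_feedback() is deferred until the application actually maps the coded buffer:

   /* vlVaEndPicture(): submit the frame, but don't block on it. */
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      coded_buf = context->coded_buf;
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource,
                                         &coded_buf->feedback);
   }

   context->decoder->end_frame(context->decoder, context->target,
                               &context->desc.base);

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->decoder->flush(context->decoder);

   /* vlVaMapBuffer(): only wait here, when the encoded data is really
    * needed (how the buffer finds its decoder is left out of the sketch). */
   if (buf->type == VAEncCodedBufferType && buf->feedback) {
      context->decoder->get_feedback(context->decoder, buf->feedback,
                                     &buf->coded_size);
      buf->feedback = NULL;
   }
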

Christian.

+   }
+   else
+      context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
return VA_STATUS_SUCCESS;
  }
diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
index ad9010a..6d3ac38 100644
--- a/src/gallium/state_trackers/va/va_private.h
+++ b/src/gallium/state_trackers/va/va_private.h
@@ -229,6 +229,7 @@ typedef struct {
        struct pipe_vc1_picture_desc vc1;
        struct pipe_h264_picture_desc h264;
        struct pipe_h265_picture_desc h265;
+      struct pipe_h264_enc_picture_desc h264enc;
     } desc;
struct {
@@ -241,6 +242,7 @@ typedef struct {
     } mpeg4;
struct vl_deint_filter *deint;
+   struct vlVaBuffer *coded_buf;
  } vlVaContext;
typedef struct {
@@ -260,6 +262,7 @@ typedef struct {
     } derived_surface;
     unsigned int export_refcount;
     VABufferInfo export_state;
+   unsigned int coded_size;
  } vlVaBuffer;
typedef struct {
