Please split that up into multiple parts, e.g. adding the config structure, new image format support, etc.
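The first part could then just add the config object and its handle table plumbing, roughly something like this (only a sketch, reusing the names from your patch below):

typedef struct {
   VAEntrypoint entrypoint;
   enum pipe_video_profile profile;
   enum pipe_h264_enc_rate_control_method rc;
} vlVaConfig;

VAStatus
vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoint,
                 VAConfigAttrib *attrib_list, int num_attribs, VAConfigID *config_id)
{
   vlVaDriver *drv;
   vlVaConfig *config;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   config = CALLOC(1, sizeof(vlVaConfig));
   if (!config)
      return VA_STATUS_ERROR_ALLOCATION_FAILED;

   /* fill config->profile/entrypoint/rc from the arguments here, then hand
    * out a handle instead of the raw pipe profile */
   *config_id = handle_table_add(drv->htab, config);
   return VA_STATUS_SUCCESS;
}

Keeping that separate from the actual encode bits makes the series much easier to review and to bisect.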

Julien, can you take a look as well if you have time and work a bit with Boyuan to get this cleaned up?

Thanks in advance,
Christian.

On 14.06.2016 at 22:14, Boyuan Zhang wrote:
Signed-off-by: Boyuan Zhang <boyuan.zh...@amd.com>
---
  src/gallium/state_trackers/va/buffer.c     |   6 ++
  src/gallium/state_trackers/va/config.c     | 104 +++++++++++++++---
  src/gallium/state_trackers/va/context.c    |  72 ++++++++-----
  src/gallium/state_trackers/va/image.c      | 126 +++++++++++++++++++---
  src/gallium/state_trackers/va/picture.c    | 165 ++++++++++++++++++++++++++++-
  src/gallium/state_trackers/va/surface.c    |  16 ++-
  src/gallium/state_trackers/va/va_private.h |   9 ++
  7 files changed, 441 insertions(+), 57 deletions(-)

diff --git a/src/gallium/state_trackers/va/buffer.c b/src/gallium/state_trackers/va/buffer.c
index 7d3167b..dfcebbe 100644
--- a/src/gallium/state_trackers/va/buffer.c
+++ b/src/gallium/state_trackers/va/buffer.c
@@ -133,6 +133,12 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
        if (!buf->derived_surface.transfer || !*pbuff)
           return VA_STATUS_ERROR_INVALID_BUFFER;
+ if (buf->type == VAEncCodedBufferType) {
+         ((VACodedBufferSegment*)buf->data)->buf = *pbuff;
+         ((VACodedBufferSegment*)buf->data)->size = buf->coded_size;
+         ((VACodedBufferSegment*)buf->data)->next = NULL;
+         *pbuff = buf->data;
+      }
     } else {
        pipe_mutex_unlock(drv->mutex);
        *pbuff = buf->data;
diff --git a/src/gallium/state_trackers/va/config.c b/src/gallium/state_trackers/va/config.c
index 9ca0aa8..04d214d 100644
--- a/src/gallium/state_trackers/va/config.c
+++ b/src/gallium/state_trackers/va/config.c
@@ -34,6 +34,8 @@
 #include "va_private.h"
 
+#include "util/u_handle_table.h"
+
  DEBUG_GET_ONCE_BOOL_OPTION(mpeg4, "VAAPI_MPEG4_ENABLED", false)
VAStatus
@@ -72,6 +74,7 @@ vlVaQueryConfigEntrypoints(VADriverContextP ctx, VAProfile profile,
  {
     struct pipe_screen *pscreen;
     enum pipe_video_profile p;
+   int va_status = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -88,12 +91,18 @@ vlVaQueryConfigEntrypoints(VADriverContextP ctx, VAProfile profile,
        return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
pscreen = VL_VA_PSCREEN(ctx);
-   if (!pscreen->get_video_param(pscreen, p, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_SUPPORTED))
-      return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
-
-   entrypoint_list[(*num_entrypoints)++] = VAEntrypointVLD;
+   if (pscreen->get_video_param(pscreen, p, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_SUPPORTED)) {
+      entrypoint_list[(*num_entrypoints)++] = VAEntrypointVLD;
+      va_status = VA_STATUS_SUCCESS;
+   }
+   if (pscreen->get_video_param(pscreen, p, PIPE_VIDEO_ENTRYPOINT_ENCODE, PIPE_VIDEO_CAP_SUPPORTED) &&
+       p == PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE) {
+      entrypoint_list[(*num_entrypoints)++] = VAEntrypointEncSlice;
+      entrypoint_list[(*num_entrypoints)++] = VAEntrypointEncPicture;
+      va_status = VA_STATUS_SUCCESS;
+   }
- return VA_STATUS_SUCCESS;
+   return va_status;
  }
VAStatus
@@ -112,7 +121,7 @@ vlVaGetConfigAttributes(VADriverContextP ctx, VAProfile profile, VAEntrypoint en
           value = VA_RT_FORMAT_YUV420;
           break;
        case VAConfigAttribRateControl:
-         value = VA_RC_NONE;
+         value = VA_RC_CQP | VA_RC_CBR;
           break;
        default:
           value = VA_ATTRIB_NOT_SUPPORTED;
@@ -128,14 +137,27 @@ VAStatus
  vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoint,
                   VAConfigAttrib *attrib_list, int num_attribs, VAConfigID *config_id)
  {
+   vlVaDriver *drv;
+   vlVaConfig *config;
     struct pipe_screen *pscreen;
     enum pipe_video_profile p;
if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
+ drv = VL_VA_DRIVER(ctx);
+
+   if (!drv)
+      return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+   config = CALLOC(1, sizeof(vlVaConfig));
+   if (!config)
+      return VA_STATUS_ERROR_ALLOCATION_FAILED;
+
     if (profile == VAProfileNone && entrypoint == VAEntrypointVideoProc) {
-      *config_id = PIPE_VIDEO_PROFILE_UNKNOWN;
+      config->entrypoint = VAEntrypointVideoProc;
+      config->profile = PIPE_VIDEO_PROFILE_UNKNOWN;
+      *config_id = handle_table_add(drv->htab, config);
        return VA_STATUS_SUCCESS;
     }
@@ -144,13 +166,36 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
        return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
pscreen = VL_VA_PSCREEN(ctx);
-   if (!pscreen->get_video_param(pscreen, p, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_SUPPORTED))
-      return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
-
-   if (entrypoint != VAEntrypointVLD)
+   if (entrypoint == VAEntrypointVLD) {
+      if (!pscreen->get_video_param(pscreen, p, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_SUPPORTED))
+         return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
+   }
+   else if (entrypoint == VAEntrypointEncSlice) {
+      if (!pscreen->get_video_param(pscreen, p, PIPE_VIDEO_ENTRYPOINT_ENCODE, PIPE_VIDEO_CAP_SUPPORTED))
+         return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
+   }
+   else
        return VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT;
- *config_id = p;
+   if (entrypoint == VAEntrypointEncSlice || entrypoint == VAEntrypointEncPicture)
+      config->entrypoint = PIPE_VIDEO_ENTRYPOINT_ENCODE;
+   else
+      config->entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
+
+   config->profile = p;
+
+   for (int i = 0; i <num_attribs ; i++) {
+      if (attrib_list[i].type == VAConfigAttribRateControl) {
+         if (attrib_list[i].value == VA_RC_CBR)
+            config->rc = PIPE_H264_ENC_RATE_CONTROL_METHOD_CONSTANT;
+         else if (attrib_list[i].value == VA_RC_VBR)
+            config->rc = PIPE_H264_ENC_RATE_CONTROL_METHOD_VARIABLE;
+         else
+            config->rc = PIPE_H264_ENC_RATE_CONTROL_METHOD_DISABLE;
+      }
+   }
+
+   *config_id = handle_table_add(drv->htab, config);
return VA_STATUS_SUCCESS;
  }
@@ -158,9 +203,25 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
  VAStatus
  vlVaDestroyConfig(VADriverContextP ctx, VAConfigID config_id)
  {
+   vlVaDriver *drv;
+   vlVaConfig *config;
+
     if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
+ drv = VL_VA_DRIVER(ctx);
+
+   if (!drv)
+      return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+   config = handle_table_get(drv->htab, config_id);
+
+   if (!config)
+      return VA_STATUS_ERROR_INVALID_CONFIG;
+
+   FREE(config);
+   handle_table_remove(drv->htab, config_id);
+
     return VA_STATUS_SUCCESS;
  }
@@ -168,18 +229,31 @@ VAStatus
  vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile *profile,
                            VAEntrypoint *entrypoint, VAConfigAttrib *attrib_list, int *num_attribs)
  {
+   vlVaDriver *drv;
+   vlVaConfig *config;
+
     if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
- *profile = PipeToProfile(config_id);
+   drv = VL_VA_DRIVER(ctx);
+
+   if (!drv)
+      return VA_STATUS_ERROR_INVALID_CONTEXT;
+
+   config = handle_table_get(drv->htab, config_id);
+
+   if (!config)
+      return VA_STATUS_ERROR_INVALID_CONFIG;
+
+   *profile = PipeToProfile(config->profile);
- if (config_id == PIPE_VIDEO_PROFILE_UNKNOWN) {
+   if (config->profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
        *entrypoint = VAEntrypointVideoProc;
        *num_attribs = 0;
        return VA_STATUS_SUCCESS;
     }
- *entrypoint = VAEntrypointVLD;
+   *entrypoint = config->entrypoint;
*num_attribs = 1;
     attrib_list[0].type = VAConfigAttribRTFormat;
diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
index 402fbb2..154f584 100644
--- a/src/gallium/state_trackers/va/context.c
+++ b/src/gallium/state_trackers/va/context.c
@@ -195,18 +195,21 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
  {
     vlVaDriver *drv;
     vlVaContext *context;
+   vlVaConfig *config;
     int is_vpp;
if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
- is_vpp = config_id == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
+   drv = VL_VA_DRIVER(ctx);
+   config = handle_table_get(drv->htab, config_id);
+
+   is_vpp = config->profile == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
             !picture_height && !flag && !render_targets && !num_render_targets;
if (!(picture_width && picture_height) && !is_vpp)
        return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
- drv = VL_VA_DRIVER(ctx);
     context = CALLOC(1, sizeof(vlVaContext));
     if (!context)
        return VA_STATUS_ERROR_ALLOCATION_FAILED;
@@ -218,13 +221,27 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
           return VA_STATUS_ERROR_INVALID_CONTEXT;
        }
     } else {
-      context->templat.profile = config_id;
-      context->templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
+      context->templat.profile = config->profile;
+      context->templat.entrypoint = config->entrypoint;
        context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
        context->templat.width = picture_width;
        context->templat.height = picture_height;
        context->templat.expect_chunked_decode = true;
+ if ((config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
+          && (u_reduce_video_profile(context->templat.profile) ==
+          PIPE_VIDEO_FORMAT_MPEG4_AVC)) {
+         context->templat.max_references = num_render_targets;
+         context->templat.level = u_get_h264_level(context->templat.width,
+                                     context->templat.height,
+                                     &context->templat.max_references);
+
+         context->decoder = drv->pipe->create_video_codec(drv->pipe,
+                                                          &context->templat);
+         if (!context->decoder)
+            return VA_STATUS_ERROR_ALLOCATION_FAILED;
+      }
+
        switch (u_reduce_video_profile(context->templat.profile)) {
        case PIPE_VIDEO_FORMAT_MPEG12:
        case PIPE_VIDEO_FORMAT_VC1:
@@ -234,16 +251,18 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
case PIPE_VIDEO_FORMAT_MPEG4_AVC:
           context->templat.max_references = 0;
-         context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
-         if (!context->desc.h264.pps) {
-            FREE(context);
-            return VA_STATUS_ERROR_ALLOCATION_FAILED;
-         }
-         context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
-         if (!context->desc.h264.pps->sps) {
-            FREE(context->desc.h264.pps);
-            FREE(context);
-            return VA_STATUS_ERROR_ALLOCATION_FAILED;
+         if (config->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE) {
+            context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
+            if (!context->desc.h264.pps) {
+               FREE(context);
+               return VA_STATUS_ERROR_ALLOCATION_FAILED;
+            }
+            context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
+            if (!context->desc.h264.pps->sps) {
+               FREE(context->desc.h264.pps);
+               FREE(context);
+               return VA_STATUS_ERROR_ALLOCATION_FAILED;
+            }
           }
           break;
@@ -267,7 +286,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
        }
     }
- context->desc.base.profile = config_id;
+   context->desc.base.profile = config->profile;
+   context->desc.base.entry_point = config->entrypoint;
+   if (config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
+      context->desc.h264enc.rate_ctrl.rate_ctrl_method = config->rc;
     pipe_mutex_lock(drv->mutex);
     *context_id = handle_table_add(drv->htab, context);
     pipe_mutex_unlock(drv->mutex);
@@ -293,15 +315,17 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
     }
if (context->decoder) {
-      if (u_reduce_video_profile(context->decoder->profile) ==
-            PIPE_VIDEO_FORMAT_MPEG4_AVC) {
-         FREE(context->desc.h264.pps->sps);
-         FREE(context->desc.h264.pps);
-      }
-      if (u_reduce_video_profile(context->decoder->profile) ==
-            PIPE_VIDEO_FORMAT_HEVC) {
-         FREE(context->desc.h265.pps->sps);
-         FREE(context->desc.h265.pps);
+      if (context->desc.base.entry_point != PIPE_VIDEO_ENTRYPOINT_ENCODE) {
+         if (u_reduce_video_profile(context->decoder->profile) ==
+               PIPE_VIDEO_FORMAT_MPEG4_AVC) {
+            FREE(context->desc.h264.pps->sps);
+            FREE(context->desc.h264.pps);
+         }
+         if (u_reduce_video_profile(context->decoder->profile) ==
+               PIPE_VIDEO_FORMAT_HEVC) {
+            FREE(context->desc.h265.pps->sps);
+            FREE(context->desc.h265.pps);
+         }
        }
        context->decoder->destroy(context->decoder);
     }
diff --git a/src/gallium/state_trackers/va/image.c b/src/gallium/state_trackers/va/image.c
index 1b956e3..168f077 100644
--- a/src/gallium/state_trackers/va/image.c
+++ b/src/gallium/state_trackers/va/image.c
@@ -185,10 +185,12 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
     vlVaSurface *surf;
     vlVaBuffer *img_buf;
     VAImage *img;
+   struct pipe_sampler_view **views;
     struct pipe_surface **surfaces;
     int w;
     int h;
     int i;
+   int pitch[3];
if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -200,7 +202,7 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
    surf = handle_table_get(drv->htab, surface);
 
-   if (!surf || !surf->buffer || surf->buffer->interlaced)
+   if (!surf || !surf->buffer)
        return VA_STATUS_ERROR_INVALID_SURFACE;
surfaces = surf->buffer->get_surfaces(surf->buffer);
@@ -220,6 +222,51 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
     w = align(surf->buffer->width, 2);
     h = align(surf->buffer->height, 2);
+ switch (img->format.fourcc) {
+      case VA_FOURCC('N','V','1','2'):
+         img->num_planes = 2;
+         break;
+
+      case VA_FOURCC('I','4','2','0'):
+      case VA_FOURCC('Y','V','1','2'):
+         img->num_planes = 3;
+         break;
+
+      case VA_FOURCC('U','Y','V','Y'):
+      case VA_FOURCC('Y','U','Y','V'):
+      case VA_FOURCC('B','G','R','A'):
+      case VA_FOURCC('R','G','B','A'):
+      case VA_FOURCC('B','G','R','X'):
+      case VA_FOURCC('R','G','B','X'):
+         img->num_planes = 1;
+         break;
+
+      default:
+         /* VaDeriveImage is designed for contiguous planes. */
+         FREE(img);
+         return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
+   }
+
+   views = surf->buffer->get_sampler_view_planes(surf->buffer);
+   if (!views)
+      return VA_STATUS_ERROR_OPERATION_FAILED;
+
+   for (i = 0; i < img->num_planes; i++) {
+      unsigned width, height;
+      if (!views[i]) continue;
+      vlVaVideoSurfaceSize(surf, i, &width, &height);
+      struct pipe_box box = {0, 0, 0, width, height, 1};
+      struct pipe_transfer *transfer;
+      uint8_t *map;
+      map = drv->pipe->transfer_map(drv->pipe, views[i]->texture, 0,
+                                    PIPE_TRANSFER_READ, &box, &transfer);
+      if (!map)
+         return VA_STATUS_ERROR_OPERATION_FAILED;
+
+      pitch[i] = transfer->stride;
+      pipe_transfer_unmap(drv->pipe, transfer);
+   }
+
     for (i = 0; i < ARRAY_SIZE(formats); ++i) {
        if (img->format.fourcc == formats[i].fourcc) {
           img->format = formats[i];
@@ -228,12 +275,33 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
     }
switch (img->format.fourcc) {
+   case VA_FOURCC('N','V','1','2'):
+      img->num_planes = 2;
+      img->pitches[0] = pitch[0];
+      img->offsets[0] = 0;
+      img->pitches[1] = pitch[1];
+      img->offsets[1] = pitch[0] * h;
+      img->data_size  = pitch[0] * h + pitch[1] * h / 2;
+      break;
+
+   case VA_FOURCC('I','4','2','0'):
+   case VA_FOURCC('Y','V','1','2'):
+      img->num_planes = 3;
+      img->pitches[0] = pitch[0];
+      img->offsets[0] = 0;
+      img->pitches[1] = pitch[1];
+      img->offsets[1] = pitch[0] * h;
+      img->pitches[2] = pitch[2];
+      img->offsets[2] = pitch[0] * h + pitch[1] * h / 4;
+      img->data_size  = pitch[0] * h + pitch[1] * h / 4 + pitch[2] * h / 4;
+      break;
+
     case VA_FOURCC('U','Y','V','Y'):
     case VA_FOURCC('Y','U','Y','V'):
        img->num_planes = 1;
-      img->pitches[0] = w * 2;
+      img->pitches[0] = pitch[0] * 2;
        img->offsets[0] = 0;
-      img->data_size  = w * h * 2;
+      img->data_size  = pitch[0] * h * 2;
        break;
case VA_FOURCC('B','G','R','A'):
@@ -241,9 +309,9 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
     case VA_FOURCC('B','G','R','X'):
     case VA_FOURCC('R','G','B','X'):
        img->num_planes = 1;
-      img->pitches[0] = w * 4;
+      img->pitches[0] = pitch[0] * 4;
        img->offsets[0] = 0;
-      img->data_size  = w * h * 4;
+      img->data_size  = pitch[0] * h * 4;
        break;
default:
@@ -431,7 +499,7 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
     VAImage *vaimage;
     struct pipe_sampler_view **views;
     enum pipe_format format;
-   void *data[3];
+   uint8_t *data[3];
     unsigned pitches[3], i, j;
if (!ctx)
@@ -471,7 +539,9 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
        return VA_STATUS_ERROR_OPERATION_FAILED;
     }
- if (format != surf->buffer->buffer_format) {
+   if ((format != surf->buffer->buffer_format) &&
+      ((format != PIPE_FORMAT_YV12) || (surf->buffer->buffer_format != PIPE_FORMAT_NV12)) &&
+      ((format != PIPE_FORMAT_IYUV) || (surf->buffer->buffer_format != PIPE_FORMAT_NV12))) {
        struct pipe_video_buffer *tmp_buf;
        struct pipe_video_buffer templat = surf->templat;
@@ -513,12 +583,42 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
        unsigned width, height;
        if (!views[i]) continue;
        vlVaVideoSurfaceSize(surf, i, &width, &height);
-      for (j = 0; j < views[i]->texture->array_size; ++j) {
-         struct pipe_box dst_box = {0, 0, j, width, height, 1};
-         drv->pipe->transfer_inline_write(drv->pipe, views[i]->texture, 0,
-            PIPE_TRANSFER_WRITE, &dst_box,
-            data[i] + pitches[i] * j,
-            pitches[i] * views[i]->texture->array_size, 0);
+      if ((format == PIPE_FORMAT_YV12) || (format == PIPE_FORMAT_IYUV) &&
+         (surf->buffer->buffer_format == PIPE_FORMAT_NV12) && (i == 1)) {
+         struct pipe_transfer *transfer = NULL;
+         uint8_t *map = NULL;
+         struct pipe_box dst_box_1 = {0, 0, 0, width, height, 1};
+         map = drv->pipe->transfer_map(drv->pipe,
+                                       views[i]->texture,
+                                       0,
+                                       PIPE_TRANSFER_DISCARD_RANGE,
+                                       &dst_box_1, &transfer);
+         if (map == NULL)
+            return VA_STATUS_ERROR_OPERATION_FAILED;
+
+         bool odd = false;
+         for (unsigned int k = 0; k < ((vaimage->offsets[1])/2) ; k++){
+            if (odd == false) {
+               map[k] = data[i][k/2];
+               odd = true;
+            }
+            else {
+               map[k] = data[i+1][k/2];
+               odd = false;
+            }
+         }
+         pipe_transfer_unmap(drv->pipe, transfer);
+         pipe_mutex_unlock(drv->mutex);
+         return VA_STATUS_SUCCESS;
+      }
+      else {
+         for (j = 0; j < views[i]->texture->array_size; ++j) {
+            struct pipe_box dst_box = {0, 0, j, width, height, 1};
+            drv->pipe->transfer_inline_write(drv->pipe, views[i]->texture, 0,
+               PIPE_TRANSFER_WRITE, &dst_box,
+               data[i] + pitches[i] * j,
+               pitches[i] * views[i]->texture->array_size, 0);
+         }
        }
     }
     pipe_mutex_unlock(drv->mutex);
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index 89ac024..973b862 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -78,7 +78,8 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
        return VA_STATUS_SUCCESS;
     }
- context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
+   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
+      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
return VA_STATUS_SUCCESS;
  }
@@ -278,6 +279,134 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
        num_buffers, (const void * const*)buffers, sizes);
  }
+static VAStatus
+handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
+{
+   VAEncMiscParameterRateControl *rc = (VAEncMiscParameterRateControl *)misc->data;
+   if (context->desc.h264enc.rate_ctrl.rate_ctrl_method ==
+       PIPE_H264_ENC_RATE_CONTROL_METHOD_CONSTANT)
+      context->desc.h264enc.rate_ctrl.target_bitrate = rc->bits_per_second;
+   else
+      context->desc.h264enc.rate_ctrl.target_bitrate = rc->bits_per_second * rc->target_percentage;
+   context->desc.h264enc.rate_ctrl.peak_bitrate = rc->bits_per_second;
+   if (context->desc.h264enc.rate_ctrl.target_bitrate < 2000000)
+      context->desc.h264enc.rate_ctrl.vbv_buffer_size = MIN2((context->desc.h264enc.rate_ctrl.target_bitrate * 2.75), 2000000);
+   else
+      context->desc.h264enc.rate_ctrl.vbv_buffer_size = context->desc.h264enc.rate_ctrl.target_bitrate;
+
+   return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
+{
+   VAEncSequenceParameterBufferH264 *h264 = (VAEncSequenceParameterBufferH264 *)buf->data;
+   context->decoder->max_references = h264->max_num_ref_frames;
+   context->desc.h264enc.gop_size = h264->intra_idr_period;
+   return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
+{
+   VAStatus vaStatus = VA_STATUS_SUCCESS;
+   VAEncMiscParameterBuffer *misc;
+   misc = buf->data;
+
+   switch (misc->type) {
+   case VAEncMiscParameterTypeRateControl:
+      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
+      break;
+
+   default:
+      break;
+   }
+
+   return vaStatus;
+}
+
+static VAStatus
+handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
+{
+   VAEncPictureParameterBufferH264 *h264;
+   vlVaBuffer *coded_buf;
+
+   h264 = buf->data;
+   context->desc.h264enc.frame_num = h264->frame_num;
+   context->desc.h264enc.not_referenced = false;
+   context->desc.h264enc.is_idr = (h264->pic_fields.bits.idr_pic_flag == 1);
+   context->desc.h264enc.pic_order_cnt = h264->CurrPic.TopFieldOrderCnt / 2;
+   if (context->desc.h264enc.is_idr)
+      context->desc.h264enc.i_remain = 1;
+   else
+      context->desc.h264enc.i_remain = 0;
+
+   context->desc.h264enc.p_remain = context->desc.h264enc.gop_size - context->desc.h264enc.gop_cnt - context->desc.h264enc.i_remain;
+
+   coded_buf = handle_table_get(drv->htab, h264->coded_buf);
+   coded_buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
+                                         PIPE_USAGE_STREAM, coded_buf->size);
+   context->coded_buf = coded_buf;
+
+   context->desc.h264enc.frame_idx[h264->CurrPic.picture_id] = h264->frame_num;
+   if (context->desc.h264enc.is_idr)
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_IDR;
+   else
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_P;
+
+   context->desc.h264enc.frame_num_cnt++;
+   context->desc.h264enc.gop_cnt++;
+   if (context->desc.h264enc.gop_cnt == context->desc.h264enc.gop_size)
+      context->desc.h264enc.gop_cnt = 0;
+
+   return VA_STATUS_SUCCESS;
+}
+
+static VAStatus
+handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
+{
+   VAEncSliceParameterBufferH264 *h264;
+
+   h264 = buf->data;
+   context->desc.h264enc.ref_idx_l0 = VA_INVALID_ID;
+   context->desc.h264enc.ref_idx_l1 = VA_INVALID_ID;
+   context->desc.h264enc.has_ref_pic_list = false;
+
+   for (int i = 0; i < 32; i++) {
+      if (h264->RefPicList0[i].picture_id == VA_INVALID_ID)
+         context->desc.h264enc.ref_pic_list_0[i] = VA_INVALID_ID;
+      else {
+         context->desc.h264enc.ref_pic_list_0[i] = context->desc.h264enc.frame_idx[h264->RefPicList0[i].picture_id];
+         if (context->desc.h264enc.ref_idx_l0 == VA_INVALID_ID)
+            context->desc.h264enc.ref_idx_l0 = context->desc.h264enc.frame_idx[h264->RefPicList0[i].picture_id];
+         context->desc.h264enc.has_ref_pic_list = true;
+      }
+      if (h264->RefPicList1[i].picture_id == VA_INVALID_ID || h264->slice_type != 1)
+         context->desc.h264enc.ref_pic_list_1[i] = VA_INVALID_ID;
+      else {
+         context->desc.h264enc.ref_pic_list_1[i] = context->desc.h264enc.frame_idx[h264->RefPicList1[i].picture_id];
+         if (context->desc.h264enc.ref_idx_l1 == VA_INVALID_ID)
+            context->desc.h264enc.ref_idx_l1 = context->desc.h264enc.frame_idx[h264->RefPicList1[i].picture_id];
+         context->desc.h264enc.has_ref_pic_list = true;
+      }
+   }
+
+   if (h264->slice_type == 1)
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_B;
+   else if (h264->slice_type == 0)
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_P;
+   else if (h264->slice_type == 2) {
+      if (context->desc.h264enc.is_idr){
+         context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_IDR;
+         context->desc.h264enc.idr_pic_id++;
+          } else
+         context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_I;
+   } else
+      context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
+
+   return VA_STATUS_SUCCESS;
+}
+
  VAStatus
  vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
  {
@@ -328,6 +457,22 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
           break;
+ case VAEncSequenceParameterBufferType:
+         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
+         break;
+
+      case VAEncMiscParameterBufferType:
+         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
+         break;
+
+      case VAEncPictureParameterBufferType:
+         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
+         break;
+
+      case VAEncSliceParameterBufferType:
+         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
+         break;
+
        default:
           break;
        }
@@ -342,6 +487,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
  {
     vlVaDriver *drv;
     vlVaContext *context;
+   vlVaBuffer *coded_buf;
+   unsigned int coded_size;
+   void *feedback;
if (!ctx)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -365,7 +513,20 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
     }
context->mpeg4.frame_num++;
-   context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
+
+   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
+      coded_buf = context->coded_buf;
+      context->desc.h264enc.enable_low_level_control = true;
+      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
+      context->decoder->encode_bitstream(context->decoder, context->target,
+                                         coded_buf->derived_surface.resource, &feedback);
+      context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
+      context->decoder->flush(context->decoder);
+      context->decoder->get_feedback(context->decoder, feedback, &coded_size);
+      coded_buf->coded_size = coded_size;
+   }
+   else
+      context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
return VA_STATUS_SUCCESS;
  }
diff --git a/src/gallium/state_trackers/va/surface.c b/src/gallium/state_trackers/va/surface.c
index 5efb893..1c56757 100644
--- a/src/gallium/state_trackers/va/surface.c
+++ b/src/gallium/state_trackers/va/surface.c
@@ -43,6 +43,8 @@
 #include "va_private.h"
 
+DEBUG_GET_ONCE_BOOL_OPTION(nointerlace, "DISABLE_INTERLACE", FALSE);
+
  #include <va/va_drmcommon.h>
static const enum pipe_format vpp_surface_formats[] = {
@@ -315,17 +317,18 @@ vlVaUnlockSurface(VADriverContextP ctx, VASurfaceID surface)
  }
VAStatus
-vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config,
+vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config_id,
                            VASurfaceAttrib *attrib_list, unsigned int *num_attribs)
  {
     vlVaDriver *drv;
+   vlVaConfig *config;
     VASurfaceAttrib *attribs;
     struct pipe_screen *pscreen;
     int i, j;
    STATIC_ASSERT(ARRAY_SIZE(vpp_surface_formats) <= VL_VA_MAX_IMAGE_FORMATS);
 
-   if (config == VA_INVALID_ID)
+   if (config_id == VA_INVALID_ID)
        return VA_STATUS_ERROR_INVALID_CONFIG;
if (!attrib_list && !num_attribs)
@@ -344,6 +347,11 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config,
     if (!drv)
        return VA_STATUS_ERROR_INVALID_CONTEXT;
+ config = handle_table_get(drv->htab, config_id);
+
+   if (!config)
+      return VA_STATUS_ERROR_INVALID_CONFIG;
+
     pscreen = VL_VA_PSCREEN(ctx);
if (!pscreen)
@@ -359,7 +367,7 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config,
/* vlVaCreateConfig returns PIPE_VIDEO_PROFILE_UNKNOWN
      * only for VAEntrypointVideoProc. */
-   if (config == PIPE_VIDEO_PROFILE_UNKNOWN) {
+   if (config->profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
        for (j = 0; j < ARRAY_SIZE(vpp_surface_formats); ++j) {
           attribs[i].type = VASurfaceAttribPixelFormat;
           attribs[i].value.type = VAGenericValueTypeInteger;
@@ -608,6 +616,8 @@ vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format,
templat.width = width;
     templat.height = height;
+   if (debug_get_option_nointerlace())
+      templat.interlaced = false;
    memset(surfaces, VA_INVALID_ID, num_surfaces * sizeof(VASurfaceID));
 
diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
index d91de44..6d3ac38 100644
--- a/src/gallium/state_trackers/va/va_private.h
+++ b/src/gallium/state_trackers/va/va_private.h
@@ -229,6 +229,7 @@ typedef struct {
        struct pipe_vc1_picture_desc vc1;
        struct pipe_h264_picture_desc h264;
        struct pipe_h265_picture_desc h265;
+      struct pipe_h264_enc_picture_desc h264enc;
     } desc;
struct {
@@ -241,9 +242,16 @@ typedef struct {
     } mpeg4;
struct vl_deint_filter *deint;
+   struct vlVaBuffer *coded_buf;
  } vlVaContext;
typedef struct {
+   VAEntrypoint entrypoint;
+   enum pipe_video_profile profile;
+   enum pipe_h264_enc_rate_control_method rc;
+} vlVaConfig;
+
+typedef struct {
     VABufferType type;
     unsigned int size;
     unsigned int num_elements;
@@ -254,6 +262,7 @@ typedef struct {
     } derived_surface;
     unsigned int export_refcount;
     VABufferInfo export_state;
+   unsigned int coded_size;
  } vlVaBuffer;
typedef struct {
