virglrenderer has logic to validate both stride and layer_stride, but
the guest kernel currently always sends zero for both. The host's
fallback for that case is:

stride = width * bytes_per_pixel
layer_stride = stride * num_layers
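
Illustratively, that fallback amounts to the following (a minimal sketch
with made-up helper names, not virglrenderer's actual code):

  #include <stdint.h>

  /* Rows and layers are assumed to be tightly packed. */
  static inline uint32_t fallback_stride(uint32_t width, uint32_t bytes_per_pixel)
  {
          return width * bytes_per_pixel;
  }

  static inline uint32_t fallback_layer_stride(uint32_t stride, uint32_t num_layers)
  {
          return stride * num_layers;
  }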

However, this assumption causes trouble in the following cases:

1) When allocating host-compatible buffers for the planned Wayland integration.
2) Certain YUV buffers, which Gallium imports as three R8 buffers with
   per-plane strides. For example, HAL_PIXEL_FORMAT_YV12 requires the
   chroma planes to be aligned to 16 bytes (see the sketch below).
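
To make case 2 concrete, here is a stand-alone sketch (illustrative only,
not part of this patch) comparing the width * bytes_per_pixel fallback
with the chroma stride that the HAL_PIXEL_FORMAT_YV12 layout rules
actually require:

  #include <stdio.h>

  /* Round x up to the next multiple of a (a must be a power of two). */
  #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          /* Example luma width in pixels; Gallium imports each plane as R8
           * (1 byte per pixel), so a chroma plane is width / 2 bytes wide. */
          unsigned int width = 112;

          /* What the zero-stride fallback assumes for a chroma plane. */
          unsigned int fallback_chroma_stride = width / 2;

          /* What HAL_PIXEL_FORMAT_YV12 requires: 16-byte aligned chroma rows. */
          unsigned int yv12_chroma_stride = ALIGN(width / 2, 16);

          printf("fallback chroma stride: %u bytes\n", fallback_chroma_stride);
          printf("YV12 chroma stride:     %u bytes\n", yv12_chroma_stride);
          return 0;
  }

For width = 112 the fallback computes 56 bytes while YV12 requires 64, so
a transfer that relies on the fallback would use the wrong row pitch for
the chroma planes.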

This commit doesn't fix the discrepancy yet; it only adds the plumbing
so the correct stride and layer_stride can be filled in later (see the
TODOs in the diff).

Signed-off-by: Gurchetan Singh <gurchetansi...@chromium.org>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   | 2 ++
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 9 ++++++---
 drivers/gpu/drm/virtio/virtgpu_vq.c    | 6 ++++++
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 314e02f94d9c..c1c9a9b8e25c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -312,12 +312,14 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
+                                         uint32_t stride, uint32_t layer_stride,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
+                                       uint32_t stride, uint32_t layer_stride,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9af1ec62434f..98b72dead962 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -324,8 +324,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                ret = -ENOMEM;
                goto err_unlock;
        }
+
+       /* TODO: add the correct stride / layer_stride. */
        virtio_gpu_cmd_transfer_from_host_3d
-               (vgdev, vfpriv->ctx_id, offset, args->level,
+               (vgdev, vfpriv->ctx_id, offset, args->level, 0, 0,
                 &box, objs, fence);
        dma_fence_put(&fence->f);
        return 0;
@@ -369,10 +371,11 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                if (!fence)
                        goto err_unlock;
 
+               /* TODO: add the correct stride / layer_stride. */
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev,
-                        vfpriv ? vfpriv->ctx_id : 0, offset,
-                        args->level, &box, objs, fence);
+                        vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+                        0, 0, &box, objs, fence);
                dma_fence_put(&fence->f);
        }
        return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 80176f379ad5..9fb3c8c3b687 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -965,6 +965,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
+                                       uint32_t stride, uint32_t layer_stride,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
@@ -990,6 +991,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
@@ -997,6 +1000,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
+                                         uint32_t stride, uint32_t layer_stride,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
@@ -1016,6 +1020,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
-- 
2.23.0.444.g18eeb5a265-goog
