From: Jocelyn Falempe <jfale...@redhat.com>

Virtio-gpu supports the drm_panic module, which displays a message on
the screen when a kernel panic occurs.

Signed-off-by: Ryosuke Yasuoka <ryasu...@redhat.com>
Signed-off-by: Jocelyn Falempe <jfale...@redhat.com>
---
v5:
Based on Dmitry's comments, fix the following:
- Rename virtio_panic_buffer to panic_vbuf
- Remove unnecessary dummy ret variables and return directly.
- Reject the bo if it is a VRAM BO
- Remove virtio_gpu_panic_put_vbuf() before notify
- Add a description of the panic buffer allocation
- Remove virtio_gpu_panic_object_array and use
  virtio_gpu_panic_array_alloc() to allocate objs instead of allocating
  them statically on the stack.

v4:
https://lkml.org/lkml/2024/11/13/346

- As per Dmitry's comment, make virtio_panic_buffer private to
  virtio_gpu_device.

v3:
https://lkml.org/lkml/2024/11/8/155

- As per Jocelyn's comment, add a finite timeout of 500 usec in
  virtio_gpu_panic_put_vbuf() to avoid an infinite loop

v2:
https://lkml.org/lkml/2024/11/6/668

- Remove unnecessary virtio_gpu_vbuffer_inline
- Remove reclaim_list and just call drm_gem_object_put() if there is an
   obj
- Don't wait for an event in virtio_gpu_panic_queue_ctrl_sgs and just
   return -ENOMEM. Also add error handling for this case.
- Use virtio_gpu_panic_queue_fenced_ctrl_buffer() in
   virtio_gpu_panic_cmd_resource_flush
- Remove fence and objs arguments because these are always NULL in the
   panic handler.
- Rename virtio_gpu_panic_queue_fenced_ctrl_buffer to
   ..._queue_ctrl_buffer
- Rename virtio_gpu_panic_alloc_cmd to ..._panic_init_cmd

v1:
https://lkml.org/lkml/2024/10/31/154


 drivers/gpu/drm/virtio/virtgpu_drv.h   |  20 ++++
 drivers/gpu/drm/virtio/virtgpu_gem.c   |  14 +++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 139 ++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_vq.c    | 143 +++++++++++++++++++++++--
 4 files changed, 310 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 64c236169db8..b82950ce75ba 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -125,6 +125,12 @@ struct virtio_gpu_object_array {
        struct drm_gem_object *objs[] __counted_by(total);
 };
 
+#define MAX_INLINE_CMD_SIZE   96
+#define MAX_INLINE_RESP_SIZE  24
+#define VBUFFER_SIZE         (sizeof(struct virtio_gpu_vbuffer) \
+                             + MAX_INLINE_CMD_SIZE              \
+                             + MAX_INLINE_RESP_SIZE)
+
 struct virtio_gpu_vbuffer;
 struct virtio_gpu_device;
 
@@ -267,6 +273,7 @@ struct virtio_gpu_device {
        spinlock_t resource_export_lock;
        /* protects map state and host_visible_mm */
        spinlock_t host_visible_lock;
+       void *panic_vbuf;
 };
 
 struct virtio_gpu_fpriv {
@@ -305,6 +312,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
                              struct drm_device *dev,
                              uint32_t handle, uint64_t *offset_p);
 
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
 struct virtio_gpu_object_array*
 virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
@@ -329,12 +337,23 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+                                            uint64_t offset,
+                                            uint32_t width, uint32_t height,
+                                            uint32_t x, uint32_t y,
+                                            struct virtio_gpu_object_array *objs,
+                                            struct virtio_gpu_vbuffer *vbuf);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence);
+int virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+                                       struct virtio_gpu_vbuffer *vbuf,
+                                       uint32_t resource_id,
+                                       uint32_t x, uint32_t y,
+                                       uint32_t width, uint32_t height);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
@@ -399,6 +418,7 @@ void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
 int
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..9f2b47222e0f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -161,6 +161,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
        virtio_gpu_notify(vgdev);
 }
 
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+       struct virtio_gpu_object_array *objs;
+
+       objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+       if (!objs)
+               return NULL;
+
+       objs->nents = 0;
+       objs->total = 1;
+       return objs;
+}
+
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
 {
        struct virtio_gpu_object_array *objs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a72a2dbda031..42079169fb69 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,9 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
+#include <linux/delay.h>
 
 #include "virtgpu_drv.h"
 
@@ -108,6 +111,31 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
        return ret;
 }
 
+/* For drm panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+                                          struct drm_plane_state *state,
+                                          struct drm_rect *rect,
+                                          struct virtio_gpu_vbuffer *vbuf)
+{
+       struct virtio_gpu_object *bo =
+               gem_to_virtio_gpu_obj(state->fb->obj[0]);
+       struct virtio_gpu_object_array *objs;
+       uint32_t w = rect->x2 - rect->x1;
+       uint32_t h = rect->y2 - rect->y1;
+       uint32_t x = rect->x1;
+       uint32_t y = rect->y1;
+       uint32_t off = x * state->fb->format->cpp[0] +
+               y * state->fb->pitches[0];
+
+       objs = virtio_gpu_panic_array_alloc();
+       if (!objs)
+               return -ENOMEM;
+       virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+       return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+                                                       objs, vbuf);
+}
+
 static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
                                      struct drm_plane_state *state,
                                      struct drm_rect *rect)
@@ -131,6 +159,24 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
                                           objs, NULL);
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+                                          struct virtio_gpu_vbuffer *vbuf,
+                                          uint32_t x, uint32_t y,
+                                          uint32_t width, uint32_t height)
+{
+       struct drm_device *dev = plane->dev;
+       struct virtio_gpu_device *vgdev = dev->dev_private;
+       struct virtio_gpu_framebuffer *vgfb;
+       struct virtio_gpu_object *bo;
+
+       vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+       bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+       return virtio_gpu_panic_cmd_resource_flush(vgdev, vbuf, bo->hw_res_handle, x, y,
+                                                  width, height);
+}
+
 static void virtio_gpu_resource_flush(struct drm_plane *plane,
                                      uint32_t x, uint32_t y,
                                      uint32_t width, uint32_t height)
@@ -359,11 +405,95 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
        virtio_gpu_cursor_ping(vgdev, output);
 }
 
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+                                        struct drm_scanout_buffer *sb)
+{
+       struct virtio_gpu_object *bo;
+
+       if (!plane->state || !plane->state->fb || !plane->state->visible)
+               return -ENODEV;
+
+       bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+       if (virtio_gpu_is_vram(bo))
+               return -ENODEV;
+
+       /* try to vmap it if possible */
+       if (!bo->base.vaddr) {
+               int ret;
+
+               ret = drm_gem_shmem_vmap(&bo->base, &sb->map[0]);
+               if (ret)
+                       return ret;
+       } else {
+               iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+       }
+
+       sb->format = plane->state->fb->format;
+       sb->height = plane->state->fb->height;
+       sb->width = plane->state->fb->width;
+       sb->pitch[0] = plane->state->fb->pitches[0];
+       return 0;
+}
+
+static void virtio_gpu_panic_put_vbuf(struct virtqueue *vq,
+                                     struct virtio_gpu_vbuffer *vbuf)
+{
+       unsigned int len;
+       int i;
+
+       /* wait up to 500 usec for the host to consume the vbuf */
+       for (i = 0; i < 500; i++) {
+               if (vbuf == virtqueue_get_buf(vq, &len))
+                       break;
+               udelay(1);
+       }
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+       struct virtio_gpu_object *bo;
+       struct drm_device *dev = plane->dev;
+       struct virtio_gpu_device *vgdev = dev->dev_private;
+       struct drm_rect rect;
+       void *p_vbuf = vgdev->panic_vbuf;
+       struct virtio_gpu_vbuffer *vbuf_dumb_bo = p_vbuf;
+       struct virtio_gpu_vbuffer *vbuf_resource_flush = p_vbuf + VBUFFER_SIZE;
+
+       rect.x1 = 0;
+       rect.y1 = 0;
+       rect.x2 = plane->state->fb->width;
+       rect.y2 = plane->state->fb->height;
+
+       bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+       if (bo->dumb) {
+               if (virtio_gpu_panic_update_dumb_bo(vgdev,
+                                                   plane->state,
+                                                   &rect,
+                                                   vbuf_dumb_bo))
+                       return;
+       }
+
+       virtio_gpu_panic_resource_flush(plane, vbuf_resource_flush,
+                                       plane->state->src_x >> 16,
+                                       plane->state->src_y >> 16,
+                                       plane->state->src_w >> 16,
+                                       plane->state->src_h >> 16);
+       virtio_gpu_panic_notify(vgdev);
+
+       virtio_gpu_panic_put_vbuf(vgdev->ctrlq.vq,
+                                 vbuf_dumb_bo);
+       virtio_gpu_panic_put_vbuf(vgdev->ctrlq.vq,
+                                 vbuf_resource_flush);
+}
+
 static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
        .prepare_fb             = virtio_gpu_plane_prepare_fb,
        .cleanup_fb             = virtio_gpu_plane_cleanup_fb,
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_primary_plane_update,
+       .get_scanout_buffer     = virtio_drm_get_scanout_buffer,
+       .panic_flush            = virtio_panic_flush,
 };
 
 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
@@ -383,6 +513,15 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
        const uint32_t *formats;
        int nformats;
 
+       /* Allocate panic buffers. Each plane is handled sequentially by the drm
+        * panic handler, so we can re-use this buffer for all planes safely.
+        */
+       if (index == 0 && type == DRM_PLANE_TYPE_PRIMARY) {
+               vgdev->panic_vbuf = drmm_kzalloc(dev, 2 * VBUFFER_SIZE, GFP_KERNEL);
+               if (!vgdev->panic_vbuf)
+                       return ERR_PTR(-ENOMEM);
+       }
+
        if (type == DRM_PLANE_TYPE_CURSOR) {
                formats = virtio_gpu_cursor_formats;
                nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 0d3d0d09f39b..450cfa937f80 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -36,12 +36,6 @@
 #include "virtgpu_drv.h"
 #include "virtgpu_trace.h"
 
-#define MAX_INLINE_CMD_SIZE   96
-#define MAX_INLINE_RESP_SIZE  24
-#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
-                              + MAX_INLINE_CMD_SIZE             \
-                              + MAX_INLINE_RESP_SIZE)
-
 static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
 {
@@ -311,6 +305,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
        return sgt;
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+                                          struct virtio_gpu_vbuffer *vbuf,
+                                          int elemcnt,
+                                          struct scatterlist **sgs,
+                                          int outcnt,
+                                          int incnt)
+{
+       struct virtqueue *vq = vgdev->ctrlq.vq;
+       int ret;
+
+       if (vgdev->has_indirect)
+               elemcnt = 1;
+
+       if (vq->num_free < elemcnt)
+               return -ENOMEM;
+
+       ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+       WARN_ON(ret);
+
+       vbuf->seqno = ++vgdev->ctrlq.seqno;
+       trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+       atomic_inc(&vgdev->pending_commands);
+
+       return 0;
+}
+
 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf,
                                     struct virtio_gpu_fence *fence,
@@ -368,6 +390,32 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
        return 0;
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+                                             struct virtio_gpu_vbuffer *vbuf)
+{
+       struct scatterlist *sgs[3], vcmd, vresp;
+       int elemcnt = 0, outcnt = 0, incnt = 0;
+
+       /* set up vcmd */
+       sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+       elemcnt++;
+       sgs[outcnt] = &vcmd;
+       outcnt++;
+
+       /* set up vresp */
+       if (vbuf->resp_size) {
+               sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+               elemcnt++;
+               sgs[outcnt + incnt] = &vresp;
+               incnt++;
+       }
+
+       return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+                                              elemcnt, sgs,
+                                              outcnt, incnt);
+}
+
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_fence *fence)
@@ -422,6 +470,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
        return ret;
 }
 
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+       bool notify;
+
+       if (!atomic_read(&vgdev->pending_commands))
+               return;
+
+       atomic_set(&vgdev->pending_commands, 0);
+       notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+       if (notify)
+               virtqueue_notify(vgdev->ctrlq.vq);
+}
+
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
        bool notify;
@@ -567,6 +630,42 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
+/* For drm_panic */
+static void virtio_gpu_panic_init_cmd(struct virtio_gpu_device *vgdev,
+                                     struct virtio_gpu_vbuffer *vbuf,
+                                     int cmd_size)
+{
+       vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+       vbuf->size = cmd_size;
+       vbuf->resp_cb = NULL;
+       vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+       vbuf->resp_buf = (void *)vbuf->buf + cmd_size;
+}
+
+/* For drm_panic */
+int virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+                                       struct virtio_gpu_vbuffer *vbuf,
+                                       uint32_t resource_id,
+                                       uint32_t x, uint32_t y,
+                                       uint32_t width, uint32_t height)
+{
+       struct virtio_gpu_resource_flush *cmd_p;
+
+       virtio_gpu_panic_init_cmd(vgdev, vbuf,
+                                 sizeof(struct virtio_gpu_resource_flush));
+       cmd_p = (void *)vbuf->buf;
+       vbuf->objs = NULL;
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+       cmd_p->resource_id = cpu_to_le32(resource_id);
+       cmd_p->r.width = cpu_to_le32(width);
+       cmd_p->r.height = cpu_to_le32(height);
+       cmd_p->r.x = cpu_to_le32(x);
+       cmd_p->r.y = cpu_to_le32(y);
+
+       return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
@@ -591,6 +690,38 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+                                            uint64_t offset,
+                                            uint32_t width, uint32_t height,
+                                            uint32_t x, uint32_t y,
+                                            struct virtio_gpu_object_array *objs,
+                                            struct virtio_gpu_vbuffer *vbuf)
+{
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+       struct virtio_gpu_transfer_to_host_2d *cmd_p;
+       bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+       if (virtio_gpu_is_shmem(bo) && use_dma_api)
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+                                           bo->base.sgt, DMA_TO_DEVICE);
+
+       virtio_gpu_panic_init_cmd(vgdev, vbuf,
+                                 sizeof(struct virtio_gpu_transfer_to_host_2d));
+       cmd_p = (void *)vbuf->buf;
+       vbuf->objs = objs;
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->offset = cpu_to_le64(offset);
+       cmd_p->r.width = cpu_to_le32(width);
+       cmd_p->r.height = cpu_to_le32(height);
+       cmd_p->r.x = cpu_to_le32(x);
+       cmd_p->r.y = cpu_to_le32(y);
+
+       return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
-- 
2.47.0
