> -----Original Message-----
> From: Stefano Garzarella [mailto:sgarz...@redhat.com]
> Sent: Monday, March 28, 2022 11:00 PM
> To: Longpeng (Mike, Cloud Infrastructure Service Product Dept.)
> <longpe...@huawei.com>
> Cc: Stefan Hajnoczi <stefa...@redhat.com>; Michael Tsirkin <m...@redhat.com>;
> Cornelia Huck <coh...@redhat.com>; Paolo Bonzini <pbonz...@redhat.com>;
> Gonglei (Arei) <arei.gong...@huawei.com>; Yechuan <yech...@huawei.com>;
> Huangzhichao <huangzhic...@huawei.com>; qemu devel list
> <qemu-devel@nongnu.org>
> Subject: Re: [PATCH v3 05/10] vdpa-dev: implement the realize interface
>
> On Sat, Mar 19, 2022 at 03:20:07PM +0800, Longpeng(Mike) wrote:
> >From: Longpeng <longpe...@huawei.com>
> >
> >Implements the .realize interface.
> >
> >Signed-off-by: Longpeng <longpe...@huawei.com>
> >---
> > hw/virtio/vdpa-dev-pci.c | 18 ++++-
> > hw/virtio/vdpa-dev.c | 132 +++++++++++++++++++++++++++++++++++
> > include/hw/virtio/vdpa-dev.h | 10 +++
> > 3 files changed, 159 insertions(+), 1 deletion(-)
> >
> >diff --git a/hw/virtio/vdpa-dev-pci.c b/hw/virtio/vdpa-dev-pci.c
> >index 9eb590ce8c..31bd17353a 100644
> >--- a/hw/virtio/vdpa-dev-pci.c
> >+++ b/hw/virtio/vdpa-dev-pci.c
> >@@ -51,10 +51,26 @@ static Property vhost_vdpa_device_pci_properties[] = {
> >     DEFINE_PROP_END_OF_LIST(),
> > };
> >
> >+static int vhost_vdpa_device_pci_post_init(VhostVdpaDevice *v, Error **errp)
> >+{
> >+    VhostVdpaDevicePCI *dev = container_of(v, VhostVdpaDevicePCI, vdev);
> >+    VirtIOPCIProxy *vpci_dev = &dev->parent_obj;
> >+
> >+    vpci_dev->class_code = virtio_pci_get_class_id(v->vdev_id);
> >+    vpci_dev->trans_devid = virtio_pci_get_trans_devid(v->vdev_id);
> >+    /* one for config vector */
> >+    vpci_dev->nvectors = v->num_queues + 1;
> >+
> >+    return 0;
> >+}
> >+
> > static void
> > vhost_vdpa_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
> > {
> >-    return;
> >+    VhostVdpaDevicePCI *dev = VHOST_VDPA_DEVICE_PCI(vpci_dev);
> >+
> >+    dev->vdev.post_init = vhost_vdpa_device_pci_post_init;
> >+    qdev_realize(DEVICE(&dev->vdev), BUS(&vpci_dev->bus), errp);
> > }
> >
> > static void vhost_vdpa_device_pci_class_init(ObjectClass *klass, void *data)
> >diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c
> >index 993cbc7d11..4defe6c33d 100644
> >--- a/hw/virtio/vdpa-dev.c
> >+++ b/hw/virtio/vdpa-dev.c
> >@@ -29,9 +29,140 @@
> > #include "sysemu/sysemu.h"
> > #include "sysemu/runstate.h"
> >
> >+static void
> >+vhost_vdpa_device_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> >+{
> >+    /* Nothing to do */
> >+}
> >+
> >+static uint32_t
> >+vhost_vdpa_device_get_u32(int fd, unsigned long int cmd, Error **errp)
> >+{
> >+    uint32_t val = (uint32_t)-1;
> >+
> >+    if (ioctl(fd, cmd, &val) < 0) {
> >+        error_setg(errp, "vhost-vdpa-device: cmd 0x%lx failed: %s",
> >+                   cmd, strerror(errno));
> >+    }
> >+
> >+    return val;
> >+}
> >+
> > static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
> > {
> >+    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> >+    VhostVdpaDevice *v = VHOST_VDPA_DEVICE(vdev);
> >+    uint32_t max_queue_size;
> >+    struct vhost_virtqueue *vqs;
> >+    int i, ret;
> >+
> >+    if (!v->vdpa_dev || v->vdpa_dev_fd == -1) {
>                          ^
> Should we use the && operator here?
>
> I can't start QEMU; how did you test this series?
>
Oh! I changed it to the && operator in my local environment when I tested
this series, but forgot to include that change in the patch.
I'll fix it in the next version.
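Something like this (an untested sketch: the two properties stay mutually
exclusive, at least one must be given, and the vpda->vdpa typo is fixed):

    if (!v->vdpa_dev && v->vdpa_dev_fd == -1) {
        error_setg(errp, "both vdpa-dev and vdpa-dev-fd are missing");
        return;
    }

    if (v->vdpa_dev && v->vdpa_dev_fd != -1) {
        error_setg(errp, "both vdpa-dev and vdpa-dev-fd are set");
        return;
    }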
> $ ./qemu-system-x86_64 -m 512M -smp 2 -M q35,accel=kvm \
> ...
> -device vhost-vdpa-device-pci,vdpa-dev=/dev/vhost-vdpa-0
> qemu-system-x86_64: -device vhost-vdpa-device-pci,vdpa-dev=/dev/vhost-vdpa-0:
> both vpda-dev and vpda-dev-fd are missing
>
> >+ error_setg(errp, "both vpda-dev and vpda-dev-fd are missing");
>
> Typo s/vpda/vdpa
>
OK.
> >+        return;
> >+    }
> >+
> >+    if (v->vdpa_dev && v->vdpa_dev_fd != -1) {
> >+        error_setg(errp, "both vpda-dev and vpda-dev-fd are set");
>
> Ditto
>
OK.
> >+        return;
> >+    }
> >+
> >+    if (v->vdpa_dev_fd == -1) {
> >+        v->vdpa_dev_fd = qemu_open(v->vdpa_dev, O_RDWR, errp);
> >+        if (*errp) {
> >+            return;
> >+        }
> >+    }
> >+    v->vdpa.device_fd = v->vdpa_dev_fd;
> >+
> >+    v->vdev_id = vhost_vdpa_device_get_u32(v->vdpa_dev_fd,
> >+                                           VHOST_VDPA_GET_DEVICE_ID, errp);
> >+    if (*errp) {
> >+        goto out;
> >+    }
> >+
> >+    max_queue_size = vhost_vdpa_device_get_u32(v->vdpa_dev_fd,
> >+                                               VHOST_VDPA_GET_VRING_NUM,
> >+                                               errp);
> >+    if (*errp) {
> >+        goto out;
> >+    }
> >+
> >+    if (v->queue_size > max_queue_size) {
> >+        error_setg(errp, "vhost-vdpa-device: invalid queue_size: %u (max:%u)",
> >+                   v->queue_size, max_queue_size);
> >+        goto out;
> >+    } else if (!v->queue_size) {
> >+        v->queue_size = max_queue_size;
> >+    }
> >+
> >+    v->num_queues = vhost_vdpa_device_get_u32(v->vdpa_dev_fd,
> >+                                              VHOST_VDPA_GET_VQS_COUNT,
> >+                                              errp);
> >+    if (*errp) {
> >+        goto out;
> >+    }
> >+
> >+    if (!v->num_queues || v->num_queues > VIRTIO_QUEUE_MAX) {
> >+        error_setg(errp, "invalid number of virtqueues: %u (max:%u)",
> >+                   v->num_queues, VIRTIO_QUEUE_MAX);
> >+        goto out;
> >+    }
> >+
> >+    v->dev.nvqs = v->num_queues;
> >+    vqs = g_new0(struct vhost_virtqueue, v->dev.nvqs);
> >+    v->dev.vqs = vqs;
> >+    v->dev.vq_index = 0;
> >+    v->dev.vq_index_end = v->dev.nvqs;
> >+    v->dev.backend_features = 0;
> >+    v->started = false;
> >+
> >+    ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL);
> >+    if (ret < 0) {
> >+        error_setg(errp, "vhost-vdpa-device: vhost initialization failed: %s",
> >+                   strerror(-ret));
> >+        goto free_vqs;
> >+    }
> >+
> >+    v->config_size = vhost_vdpa_device_get_u32(v->vdpa_dev_fd,
> >+                                               VHOST_VDPA_GET_CONFIG_SIZE,
> >+                                               errp);
> >+    if (*errp) {
> >+        goto vhost_cleanup;
> >+    }
> >+    v->config = g_malloc0(v->config_size);
> >+
> >+    ret = vhost_dev_get_config(&v->dev, v->config, v->config_size, NULL);
> >+    if (ret < 0) {
> >+        error_setg(errp, "vhost-vdpa-device: get config failed");
> >+        goto free_config;
> >+    }
> >+
> >+    virtio_init(vdev, "vhost-vdpa", v->vdev_id, v->config_size);
> >+
> >+    v->virtqs = g_new0(VirtQueue *, v->dev.nvqs);
> >+    for (i = 0; i < v->dev.nvqs; i++) {
> >+        v->virtqs[i] = virtio_add_queue(vdev, v->queue_size,
> >+                                        vhost_vdpa_device_dummy_handle_output);
> >+    }
> >+
> >+    if (v->post_init && v->post_init(v, errp) < 0) {
> >+        goto free_virtio;
> >+    }
> >+
> >     return;
> >+
> >+free_virtio:
> >+    for (i = 0; i < v->num_queues; i++) {
> >+        virtio_delete_queue(v->virtqs[i]);
> >+    }
> >+    g_free(v->virtqs);
> >+    virtio_cleanup(vdev);
> >+free_config:
> >+    g_free(v->config);
> >+vhost_cleanup:
> >+    vhost_dev_cleanup(&v->dev);
> >+free_vqs:
> >+    g_free(vqs);
> >+out:
> >+    qemu_close(v->vdpa_dev_fd);
> >+    v->vdpa_dev_fd = -1;
> > }
> >
> > static void vhost_vdpa_device_unrealize(DeviceState *dev)
> >@@ -66,6 +197,7 @@ static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
> > static Property vhost_vdpa_device_properties[] = {
> >     DEFINE_PROP_STRING("vdpa-dev", VhostVdpaDevice, vdpa_dev),
> >     DEFINE_PROP_INT32("vdpa-dev-fd", VhostVdpaDevice, vdpa_dev_fd, -1),
>
> Other vhost devices use the `vhostfd` property; maybe we should use the
> same name.
>
> If we go for this change, then maybe we also need to change `vdpa-dev`
> to `vhostpath` or something like that.
>
> Thanks,
> Stefano
>
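For illustration, the renamed properties might then look roughly like this
(reusing the existing struct fields; the `vhostfd`/`vhostpath` names are just
the ones suggested above, nothing is settled yet):

    DEFINE_PROP_STRING("vhostpath", VhostVdpaDevice, vdpa_dev),
    DEFINE_PROP_INT32("vhostfd", VhostVdpaDevice, vdpa_dev_fd, -1),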
> >+    DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0),
> >     DEFINE_PROP_END_OF_LIST(),
> > };
> >
> >diff --git a/include/hw/virtio/vdpa-dev.h b/include/hw/virtio/vdpa-dev.h
> >index 476bda0873..cf11abd0f7 100644
> >--- a/include/hw/virtio/vdpa-dev.h
> >+++ b/include/hw/virtio/vdpa-dev.h
> >@@ -28,6 +28,16 @@ struct VhostVdpaDevice {
> >     char *vdpa_dev;
> >     int vdpa_dev_fd;
> >     int32_t bootindex;
> >+    uint32_t vdev_id;
> >+    uint32_t num_queues;
> >+    struct vhost_dev dev;
> >+    struct vhost_vdpa vdpa;
> >+    VirtQueue **virtqs;
> >+    uint8_t *config;
> >+    int config_size;
> >+    uint16_t queue_size;
> >+    bool started;
> >+    int (*post_init)(VhostVdpaDevice *v, Error **errp);
> > };
> >
> > #endif
> >--
> >2.23.0
> >