On 07/19/2018 09:13 PM, Nikolay Nikolaev wrote:
Each vhost-user message handling function now returns an int result
described by the new enum vh_result: error, OK and reply.
All functions now take the same two arguments: a virtio_net double
pointer and a VhostUserMsg pointer.

Signed-off-by: Nikolay Nikolaev <nicknickol...@gmail.com>
---
  lib/librte_vhost/vhost_user.c |  219 ++++++++++++++++++++++++-----------------
  1 file changed, 130 insertions(+), 89 deletions(-)

Adding Ilya in cc, who has submitted a patch that seems to be
complementary to yours: http://patches.dpdk.org/patch/44168/

Other than that, it looks good to me:
Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>

Thanks,
Maxime
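
For anyone skimming the series, here is a minimal, self-contained sketch of
the calling convention this patch introduces. The struct definitions and the
example_get_features() handler are simplified stand-ins invented for
illustration; only the vh_result values and the
(struct virtio_net **, VhostUserMsg *) handler signature mirror the patch,
and the dispatch on the return code is just one way a caller could consume it.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real DPDK types, for illustration only. */
struct virtio_net {
	int vid;
	uint64_t features;
};

typedef struct {
	uint64_t u64;
	uint32_t size;
} VhostUserMsg;

/* Result values matching the patch's enum vh_result. */
enum vh_result {
	VH_RESULT_ERR   = -1,	/* message handling failed */
	VH_RESULT_OK    =  0,	/* message handling successful */
	VH_RESULT_REPLY =  1,	/* handling successful, reply prepared in msg */
};

/* Every handler shares one prototype: (struct virtio_net **, VhostUserMsg *). */
static int
example_get_features(struct virtio_net **pdev, VhostUserMsg *msg)
{
	struct virtio_net *dev = *pdev;

	msg->u64 = dev->features;	/* fill in the reply payload */
	msg->size = sizeof(msg->u64);
	return VH_RESULT_REPLY;		/* tell the caller a reply is ready */
}

int
main(void)
{
	struct virtio_net dev = { .vid = 0, .features = 0x3ULL };
	struct virtio_net *pdev = &dev;
	VhostUserMsg msg = { 0, 0 };

	/* A dispatcher can act on the unified return code. */
	switch (example_get_features(&pdev, &msg)) {
	case VH_RESULT_ERR:
		fprintf(stderr, "handler failed\n");
		return 1;
	case VH_RESULT_REPLY:
		printf("reply ready: u64=0x%" PRIx64 " size=%" PRIu32 "\n",
		       msg.u64, msg.size);
		break;
	default:	/* VH_RESULT_OK: nothing more to do */
		break;
	}

	return 0;
}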

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index b408460f0..6b39d1e30 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -71,6 +71,16 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
  };
+/* The possible results of a message handling function */
+enum vh_result {
+       /* Message handling failed */
+       VH_RESULT_ERR   = -1,
+       /* Message handling successful */
+       VH_RESULT_OK    =  0,
+       /* Message handling successful and reply prepared */
+       VH_RESULT_REPLY =  1,
+};
+
  static uint64_t
  get_blk_size(int fd)
  {
@@ -127,27 +137,31 @@ vhost_backend_cleanup(struct virtio_net *dev)
   * the device hasn't been initialised.
   */
  static int
-vhost_user_set_owner(void)
+vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
+               VhostUserMsg * msg __rte_unused)
  {
-       return 0;
+       return VH_RESULT_OK;
  }
static int
-vhost_user_reset_owner(struct virtio_net *dev)
+vhost_user_reset_owner(struct virtio_net **pdev,
+               VhostUserMsg * msg __rte_unused)
  {
+       struct virtio_net *dev = *pdev;
        vhost_destroy_device_notify(dev);
cleanup_device(dev, 0);
        reset_device(dev);
-       return 0;
+       return VH_RESULT_OK;
  }
/*
   * The features that we support are requested.
   */
-static uint64_t
-vhost_user_get_features(struct virtio_net *dev, VhostUserMsg *msg)
+static int
+vhost_user_get_features(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        uint64_t features = 0;
rte_vhost_driver_get_features(dev->ifname, &features);
@@ -155,15 +169,16 @@ vhost_user_get_features(struct virtio_net *dev, VhostUserMsg *msg)
        msg->payload.u64 = features;
        msg->size = sizeof(msg->payload.u64);
- return features;
+       return VH_RESULT_REPLY;
  }
/*
   * The queue number that we support are requested.
   */
-static uint32_t
-vhost_user_get_queue_num(struct virtio_net *dev, VhostUserMsg *msg)
+static int
+vhost_user_get_queue_num(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;
rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
@@ -171,15 +186,17 @@ vhost_user_get_queue_num(struct virtio_net *dev, VhostUserMsg *msg)
        msg->payload.u64 = (uint64_t)queue_num;
        msg->size = sizeof(msg->payload.u64);
- return queue_num;
+       return VH_RESULT_REPLY;
  }
/*
   * We receive the negotiated features supported by us and the virtio device.
   */
  static int
-vhost_user_set_features(struct virtio_net *dev, uint64_t features)
+vhost_user_set_features(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
+       uint64_t features = msg->payload.u64;
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
@@ -189,12 +206,12 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
-               return -1;
+               return VH_RESULT_ERR;
        }
if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
-                       return 0;
+                       return VH_RESULT_OK;
/*
                 * Error out if master tries to change features while device is
@@ -205,7 +222,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is 
running.\n",
                                dev->vid);
-                       return -1;
+                       return VH_RESULT_ERR;
                }
if (dev->notify_ops->features_changed)
@@ -250,16 +267,17 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);
- return 0;
+       return VH_RESULT_OK;
  }
/*
   * The virtio device sends us the size of the descriptor ring.
   */
  static int
-vhost_user_set_vring_num(struct virtio_net *dev,
+vhost_user_set_vring_num(struct virtio_net **pdev,
                         VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
vq->size = msg->payload.state.num;
@@ -272,7 +290,7 @@ vhost_user_set_vring_num(struct virtio_net *dev,
        if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid virtqueue size %u\n", vq->size);
-               return -1;
+               return VH_RESULT_ERR;
        }
if (dev->dequeue_zero_copy) {
@@ -298,7 +316,7 @@ vhost_user_set_vring_num(struct virtio_net *dev,
                if (!vq->shadow_used_packed) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used 
ring.\n");
-                       return -1;
+                       return VH_RESULT_ERR;
                }
} else {
@@ -308,7 +326,7 @@ vhost_user_set_vring_num(struct virtio_net *dev,
                if (!vq->shadow_used_split) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used 
ring.\n");
-                       return -1;
+                       return VH_RESULT_ERR;
                }
        }
@@ -318,10 +336,10 @@ vhost_user_set_vring_num(struct virtio_net *dev,
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
-               return -1;
+               return VH_RESULT_ERR;
        }
- return 0;
+       return VH_RESULT_OK;
  }
/*
@@ -619,12 +637,12 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
  static int
  vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
-       struct virtio_net *dev = *pdev;
if (dev->mem == NULL)
-               return -1;
+               return VH_RESULT_ERR;
/* addr->index refers to the queue index. The txq 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];
@@ -641,27 +659,28 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
-                       return -1;
+                       return VH_RESULT_ERR;
*pdev = dev;
        }
- return 0;
+       return VH_RESULT_OK;
  }
/*
   * The virtio device sends us the available ring last used index.
   */
  static int
-vhost_user_set_vring_base(struct virtio_net *dev,
+vhost_user_set_vring_base(struct virtio_net **pdev,
                          VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;
- return 0;
+       return VH_RESULT_OK;
  }
static int
@@ -802,7 +821,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, VhostUserMsg *msg)
        if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory.nregions);
-               return -1;
+               return VH_RESULT_ERR;
        }
if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
@@ -812,7 +831,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, VhostUserMsg *msg)
                for (i = 0; i < memory.nregions; i++)
                        close(msg->fds[i]);
- return 0;
+               return VH_RESULT_OK;
        }
if (dev->mem) {
@@ -831,7 +850,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, VhostUserMsg *msg)
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
-                       return -1;
+                       return VH_RESULT_ERR;
                }
        }
@@ -841,7 +860,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, VhostUserMsg *msg)
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
-               return -1;
+               return VH_RESULT_ERR;
        }
        dev->mem->nregions = memory.nregions;
@@ -938,7 +957,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, VhostUserMsg *msg)
                        dev = translate_ring_addresses(dev, i);
                        if (!dev)
-                               return -1;
+                               return VH_RESULT_ERR;
*pdev = dev;
                }
@@ -946,13 +965,13 @@ vhost_user_set_mem_table(struct virtio_net **pdev, VhostUserMsg *msg)
        dump_guest_pages(dev);

-       return 0;
+       return VH_RESULT_OK;
err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
-       return -1;
+       return VH_RESULT_ERR;
  }
static bool
@@ -994,9 +1013,10 @@ virtio_is_ready(struct virtio_net *dev)
        return 1;
  }
-static void
-vhost_user_set_vring_call(struct virtio_net *dev, VhostUserMsg *msg)
+static int
+vhost_user_set_vring_call(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
@@ -1013,22 +1033,26 @@ vhost_user_set_vring_call(struct virtio_net *dev, VhostUserMsg *msg)
                close(vq->callfd);
vq->callfd = file.fd;
+
+       return VH_RESULT_OK;
  }
-static void vhost_user_set_vring_err(struct virtio_net *dev __rte_unused,
+static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
                        VhostUserMsg *msg)
  {
        if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
+
+       return VH_RESULT_OK;
  }
-static void
+static int
  vhost_user_set_vring_kick(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
-       struct virtio_net *dev = *pdev;
file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
@@ -1041,7 +1065,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, VhostUserMsg *msg)
        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
-               return;
+               return VH_RESULT_OK;
        *pdev = dev;
@@ -1058,6 +1082,8 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, VhostUserMsg *msg)
        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
+
+       return VH_RESULT_OK;
  }
static void
@@ -1080,9 +1106,10 @@ free_zmbufs(struct vhost_virtqueue *vq)
   * when virtio is stopped, qemu will send us the GET_VRING_BASE message.
   */
  static int
-vhost_user_get_vring_base(struct virtio_net *dev,
+vhost_user_get_vring_base(struct virtio_net **pdev,
                          VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
/* We have to stop the queue (virtio) if it is running. */
@@ -1127,7 +1154,7 @@ vhost_user_get_vring_base(struct virtio_net *dev,
        msg->size = sizeof(msg->payload.state);

-       return 0;
+       return VH_RESULT_OK;
  }
/*
@@ -1135,9 +1162,10 @@ vhost_user_get_vring_base(struct virtio_net *dev,
   * enable the virtio queue pair.
   */
  static int
-vhost_user_set_vring_enable(struct virtio_net *dev,
+vhost_user_set_vring_enable(struct virtio_net **pdev,
                            VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
        struct rte_vdpa_device *vdpa_dev;
@@ -1158,13 +1186,14 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
        dev->virtqueue[index]->enabled = enable;

-       return 0;
+       return VH_RESULT_OK;
  }
-static void
-vhost_user_get_protocol_features(struct virtio_net *dev,
+static int
+vhost_user_get_protocol_features(struct virtio_net **pdev,
                                 VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        uint64_t features, protocol_features;
rte_vhost_driver_get_features(dev->ifname, &features);
@@ -1181,35 +1210,42 @@ vhost_user_get_protocol_features(struct virtio_net *dev,
msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
+
+       return VH_RESULT_OK;
  }
-static void
-vhost_user_set_protocol_features(struct virtio_net *dev,
-                                uint64_t protocol_features)
+static int
+vhost_user_set_protocol_features(struct virtio_net **pdev,
+                                VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
+       uint64_t protocol_features = msg->payload.u64;
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
-               return;
+               return VH_RESULT_OK;
dev->protocol_features = protocol_features;
+
+       return VH_RESULT_OK;
  }
static int
-vhost_user_set_log_base(struct virtio_net *dev, VhostUserMsg *msg)
+vhost_user_set_log_base(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;
if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
-               return -1;
+               return VH_RESULT_ERR;
        }
if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRId32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
-               return -1;
+               return VH_RESULT_ERR;
        }
size = msg->payload.log.mmap_size;
@@ -1220,7 +1256,7 @@ vhost_user_set_log_base(struct virtio_net *dev, VhostUserMsg *msg)
                RTE_LOG(ERR, VHOST_CONFIG,
                        "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
                        off, size);
-               return -1;
+               return VH_RESULT_ERR;
        }
RTE_LOG(INFO, VHOST_CONFIG,
@@ -1235,7 +1271,7 @@ vhost_user_set_log_base(struct virtio_net *dev, VhostUserMsg *msg)
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
-               return -1;
+               return VH_RESULT_ERR;
        }
/*
@@ -1251,14 +1287,16 @@ vhost_user_set_log_base(struct virtio_net *dev, VhostUserMsg *msg)
        msg->size = sizeof(msg->payload.u64);

-       return 0;
+       return VH_RESULT_OK;
  }
-static void
-vhost_user_set_log_fd(struct virtio_net *dev __rte_unused, VhostUserMsg *msg)
+static int
+vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, VhostUserMsg *msg)
  {
        close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
+
+       return VH_RESULT_OK;
  }
/*
@@ -1270,8 +1308,9 @@ vhost_user_set_log_fd(struct virtio_net *dev __rte_unused, VhostUserMsg *msg)
   * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
   */
  static int
-vhost_user_send_rarp(struct virtio_net *dev, VhostUserMsg *msg)
+vhost_user_send_rarp(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        uint8_t *mac = (uint8_t *)&msg->payload.u64;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
@@ -1295,40 +1334,42 @@ vhost_user_send_rarp(struct virtio_net *dev, VhostUserMsg *msg)
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);
- return 0;
+       return VH_RESULT_OK;
  }
static int
-vhost_user_net_set_mtu(struct virtio_net *dev, VhostUserMsg *msg)
+vhost_user_net_set_mtu(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);
- return -1;
+               return VH_RESULT_ERR;
        }
        dev->mtu = msg->payload.u64;

-       return 0;
+       return VH_RESULT_OK;
  }
static int
-vhost_user_set_req_fd(struct virtio_net *dev, VhostUserMsg *msg)
+vhost_user_set_req_fd(struct virtio_net **pdev, VhostUserMsg *msg)
  {
+       struct virtio_net *dev = *pdev;
        int fd = msg->fds[0];
if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel 
(%d)\n",
                                fd);
-               return -1;
+               return VH_RESULT_ERR;
        }
        dev->slave_req_fd = fd;

-       return 0;
+       return VH_RESULT_OK;
  }
static int
@@ -1393,7 +1434,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, VhostUserMsg *msg)
                len = imsg->size;
                vva = qva_to_vva(dev, imsg->uaddr, &len);
                if (!vva)
-                       return -1;
+                       return VH_RESULT_ERR;
for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];
@@ -1419,10 +1460,10 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, VhostUserMsg *msg)
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
-               return -1;
+               return VH_RESULT_ERR;
        }
- return 0;
+       return VH_RESULT_OK;
  }
/* return bytes# of read on success or negative val on failure. */
@@ -1671,28 +1712,28 @@ vhost_user_msg_handler(int vid, int fd)
switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
-               vhost_user_get_features(dev, &msg);
+               ret = vhost_user_get_features(&dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
-               ret = vhost_user_set_features(dev, msg.payload.u64);
+               ret = vhost_user_set_features(&dev, &msg);
                if (ret)
                        return -1;
                break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
-               vhost_user_get_protocol_features(dev, &msg);
+               ret = vhost_user_get_protocol_features(&dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
-               vhost_user_set_protocol_features(dev, msg.payload.u64);
+               ret = vhost_user_set_protocol_features(&dev, &msg);
                break;
case VHOST_USER_SET_OWNER:
-               vhost_user_set_owner();
+               ret = vhost_user_set_owner(&dev, &msg);
                break;
        case VHOST_USER_RESET_OWNER:
-               vhost_user_reset_owner(dev);
+               ret = vhost_user_reset_owner(&dev, &msg);
                break;
case VHOST_USER_SET_MEM_TABLE:
@@ -1700,57 +1741,57 @@ vhost_user_msg_handler(int vid, int fd)
                break;
case VHOST_USER_SET_LOG_BASE:
-               vhost_user_set_log_base(dev, &msg);
+               ret = vhost_user_set_log_base(&dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
-               vhost_user_set_log_fd(dev, &msg);
+               ret = vhost_user_set_log_fd(&dev, &msg);
                break;
case VHOST_USER_SET_VRING_NUM:
-               vhost_user_set_vring_num(dev, &msg);
+               ret = vhost_user_set_vring_num(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
-               vhost_user_set_vring_addr(&dev, &msg);
+               ret = vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
-               vhost_user_set_vring_base(dev, &msg);
+               ret = vhost_user_set_vring_base(&dev, &msg);
                break;
case VHOST_USER_GET_VRING_BASE:
-               vhost_user_get_vring_base(dev, &msg);
+               ret = vhost_user_get_vring_base(&dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
case VHOST_USER_SET_VRING_KICK:
-               vhost_user_set_vring_kick(&dev, &msg);
+               ret = vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
-               vhost_user_set_vring_call(dev, &msg);
+               ret = vhost_user_set_vring_call(&dev, &msg);
                break;
case VHOST_USER_SET_VRING_ERR:
-               vhost_user_set_vring_err(dev, &msg);
+               ret = vhost_user_set_vring_err(&dev, &msg);
                break;
case VHOST_USER_GET_QUEUE_NUM:
-               vhost_user_get_queue_num(dev, &msg);
+               ret = vhost_user_get_queue_num(&dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
case VHOST_USER_SET_VRING_ENABLE:
-               vhost_user_set_vring_enable(dev, &msg);
+               ret = vhost_user_set_vring_enable(&dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
-               vhost_user_send_rarp(dev, &msg);
+               ret = vhost_user_send_rarp(&dev, &msg);
                break;
case VHOST_USER_NET_SET_MTU:
-               ret = vhost_user_net_set_mtu(dev, &msg);
+               ret = vhost_user_net_set_mtu(&dev, &msg);
                break;
case VHOST_USER_SET_SLAVE_REQ_FD:
-               ret = vhost_user_set_req_fd(dev, &msg);
+               ret = vhost_user_set_req_fd(&dev, &msg);
                break;
case VHOST_USER_IOTLB_MSG:
