A malicious Vhost-user master could send, in a loop, hand-crafted
vhost-user messages containing more file descriptors than the
vhost-user slave expects. Doing so causes the application using the
vhost-user library to run out of FDs.
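
The fix is to have every message handler verify how many ancillary FDs
were actually received and to close them all on mismatch: FDs passed
over the Unix socket via SCM_RIGHTS are already open in the slave
process once the message is read, so ignoring unexpected ones leaks
them. A minimal sketch of that pattern (generic names, not the
librte_vhost types or APIs):

    #include <stdio.h>
    #include <unistd.h>

    #define SKETCH_MAX_FDS 8

    struct sketch_msg {
            int fds[SKETCH_MAX_FDS]; /* FDs received via SCM_RIGHTS */
            int fd_num;              /* how many were actually attached */
    };

    /* Close every FD attached to the message so none can leak. */
    static void sketch_close_fds(struct sketch_msg *m)
    {
            int i;

            for (i = 0; i < m->fd_num; i++)
                    close(m->fds[i]);
    }

    /* Return 0 when the FD count matches what the request allows;
     * otherwise close them all and report the mismatch. */
    static int sketch_check_fds(struct sketch_msg *m, int expected)
    {
            if (m->fd_num == expected)
                    return 0;

            fprintf(stderr, "expected %d FDs, received %d\n",
                    expected, m->fd_num);
            sketch_close_fds(m);
            return -1;
    }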

This issue has been assigned CVE-2019-14818.

Fixes: 8f972312b8f4 ("vhost: support vhost-user")

Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 lib/librte_vhost/vhost_user.c | 119 ++++++++++++++++++++++++++++++++--
 1 file changed, 115 insertions(+), 4 deletions(-)

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 6d2431e604..049e37c6de 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -83,6 +83,36 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
 static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
 
+static void
+close_msg_fds(struct VhostUserMsg *msg)
+{
+       int i;
+
+       for (i = 0; i < msg->fd_num; i++)
+               close(msg->fds[i]);
+}
+
+/*
+ * Ensure the expected number of FDs is received,
+ * close all FDs and return an error if this is not the case.
+ */
+static int
+validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
+{
+       if (msg->fd_num == expected_fds)
+               return 0;
+
+       RTE_LOG(ERR, VHOST_CONFIG,
+               " Expect %d FDs for request %s, received %d\n",
+               expected_fds,
+               vhost_message_str[msg->request.master],
+               msg->fd_num);
+
+       close_msg_fds(msg);
+
+       return -1;
+}
+
 static uint64_t
 get_blk_size(int fd)
 {
@@ -179,18 +209,25 @@ vhost_backend_cleanup(struct virtio_net *dev)
  */
 static int
 vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
-                       struct VhostUserMsg *msg __rte_unused,
+                       struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        return RTE_VHOST_MSG_RESULT_OK;
 }
 
 static int
 vhost_user_reset_owner(struct virtio_net **pdev,
-                       struct VhostUserMsg *msg __rte_unused,
+                       struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
        struct virtio_net *dev = *pdev;
+
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        vhost_destroy_device_notify(dev);
 
        cleanup_device(dev, 0);
@@ -208,6 +245,9 @@ vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        uint64_t features = 0;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_features(dev->ifname, &features);
 
        msg->payload.u64 = features;
@@ -227,6 +267,9 @@ vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
 
        msg->payload.u64 = (uint64_t)queue_num;
@@ -249,6 +292,9 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
@@ -331,6 +377,9 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        vq->size = msg->payload.state.num;
 
        /* VIRTIO 1.0, 2.4 Virtqueues says:
@@ -718,6 +767,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct vhost_vring_addr *addr = &msg->payload.addr;
        bool access_ok;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (dev->mem == NULL)
                return RTE_VHOST_MSG_RESULT_ERR;
 
@@ -759,6 +811,9 @@ vhost_user_set_vring_base(struct virtio_net **pdev,
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val = msg->payload.state.num;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (vq_is_packed(dev)) {
                /*
                 * Bit[0:14]: avail index
@@ -920,6 +975,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
        int populate;
        int fd;
 
+       if (validate_msg_fds(msg, memory->nregions) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory->nregions);
@@ -930,8 +988,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);
 
-               for (i = 0; i < memory->nregions; i++)
-                       close(msg->fds[i]);
+               close_msg_fds(msg);
 
                return RTE_VHOST_MSG_RESULT_OK;
        }
@@ -1074,6 +1131,10 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                                "Failed to read qemu ack on postcopy 
set-mem-table\n");
                        goto err_mmap;
                }
+
+               if (validate_msg_fds(&ack_msg, 0) != 0)
+                       goto err_mmap;
+
                if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "Bad qemu ack on postcopy set-mem-table (%d)\n",
@@ -1194,6 +1255,9 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
@@ -1215,6 +1279,9 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
@@ -1230,6 +1297,9 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
@@ -1286,6 +1356,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        /* We have to stop the queue (virtio) if it is running. */
        vhost_destroy_device_notify(dev);
 
@@ -1361,6 +1434,9 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);
@@ -1391,6 +1467,9 @@ vhost_user_get_protocol_features(struct virtio_net **pdev,
        struct virtio_net *dev = *pdev;
        uint64_t features, protocol_features;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
 
@@ -1419,6 +1498,9 @@ vhost_user_set_protocol_features(struct virtio_net **pdev,
        uint64_t protocol_features = msg->payload.u64;
        uint64_t slave_protocol_features = 0;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        rte_vhost_driver_get_protocol_features(dev->ifname,
                        &slave_protocol_features);
        if (protocol_features & ~slave_protocol_features) {
@@ -1445,6 +1527,9 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
        uint64_t size, off;
        void *addr;
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -1508,6 +1593,9 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
 
@@ -1531,6 +1619,9 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
@@ -1558,6 +1649,10 @@ vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
 {
        struct virtio_net *dev = *pdev;
+
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
@@ -1578,6 +1673,9 @@ vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        int fd = msg->fds[0];
 
+       if (validate_msg_fds(msg, 1) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel 
(%d)\n",
@@ -1663,6 +1761,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
        uint16_t i;
        uint64_t vva, len;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                len = imsg->size;
@@ -1709,6 +1810,9 @@ vhost_user_set_postcopy_advise(struct virtio_net **pdev,
 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
        struct uffdio_api api_struct;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 
        if (dev->postcopy_ufd == -1) {
@@ -1744,6 +1848,9 @@ vhost_user_set_postcopy_listen(struct virtio_net **pdev,
 {
        struct virtio_net *dev = *pdev;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        if (dev->mem && dev->mem->nregions) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Regions already registered at postcopy-listen\n");
@@ -1760,6 +1867,9 @@ vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
 {
        struct virtio_net *dev = *pdev;
 
+       if (validate_msg_fds(msg, 0) != 0)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
        dev->postcopy_listening = 0;
        if (dev->postcopy_ufd >= 0) {
                close(dev->postcopy_ufd);
@@ -2112,6 +2222,7 @@ vhost_user_msg_handler(int vid, int fd)
        if (!handled) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "vhost message (req: %d) was not handled.\n", request);
+               close_msg_fds(&msg);
                ret = RTE_VHOST_MSG_RESULT_ERR;
        }
 
-- 
2.21.0
