Vhost backends of different device types have different features.
Add an API to get the vDPA device type (currently net device or
blk device), so users can set different features for different
kinds of devices.

Signed-off-by: Andy Pei <andy....@intel.com>
Reviewed-by: Chenbo Xia <chenbo....@intel.com>
---
 doc/guides/prog_guide/vhost_lib.rst    |  5 ++++
 doc/guides/rel_notes/release_22_07.rst |  4 ++++
 lib/vhost/rte_vhost.h                  | 17 +++++++++++++
 lib/vhost/socket.c                     | 44 ++++++++++++++++++++++++++++++++++
 lib/vhost/vdpa_driver.h                |  3 +++
 lib/vhost/version.map                  |  1 +
 6 files changed, 74 insertions(+)

diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index f287b76..0337b38 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -282,6 +282,11 @@ The following is an overview of some key Vhost API functions:
   Clear inflight packets which are submitted to DMA engine in vhost async data
   path. Completed packets are returned to applications through ``pkts``.
 
+* ``rte_vhost_driver_get_vdpa_dev_type(path, type)``
+
+  Get device type of vDPA device, such as VDPA_DEVICE_TYPE_NET,
+  VDPA_DEVICE_TYPE_BLK.
+
 Vhost-user Implementations
 --------------------------
 
diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index e49cace..63875b7 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -60,6 +60,10 @@ New Features
   Added an API which can get the number of in-flight packets in
   vhost async data path without using lock.
 
+* **Added vhost API to get the device type of a vDPA device.**
+
+  Added an API which can get the device type of vDPA device.
+
 * **Updated Intel iavf driver.**
 
   * Added Tx QoS queue rate limitation support.
diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h
index c733f85..2f130ec 100644
--- a/lib/vhost/rte_vhost.h
+++ b/lib/vhost/rte_vhost.h
@@ -117,6 +117,9 @@
 
 #define RTE_MAX_VHOST_DEVICE   1024
 
+#define RTE_VHOST_VDPA_DEVICE_TYPE_NET 0
+#define RTE_VHOST_VDPA_DEVICE_TYPE_BLK 1
+
 struct rte_vdpa_device;
 
 /**
@@ -486,6 +489,20 @@ struct rte_vdpa_device *
 rte_vhost_driver_get_vdpa_device(const char *path);
 
 /**
+ * Get the device type of the vdpa device.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param type
+ *  the device type of the vdpa device
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_driver_get_vdpa_dev_type(const char *path, uint32_t *type);
+
+/**
  * Set the feature bits the vhost-user driver supports.
  *
  * @param path
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index b304339..baef4d2 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -619,6 +619,50 @@ struct rte_vdpa_device *
 }
 
 int
+rte_vhost_driver_get_vdpa_dev_type(const char *path, uint32_t *type)
+{
+       struct vhost_user_socket *vsocket;
+       struct rte_vdpa_device *vdpa_dev;
+       uint32_t vdpa_type = 0;
+       int ret = 0;
+
+       pthread_mutex_lock(&vhost_user.mutex);
+       vsocket = find_vhost_user_socket(path);
+       if (!vsocket) {
+               VHOST_LOG_CONFIG(ERR,
+                                "(%s) socket file is not registered yet.\n",
+                                path);
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       vdpa_dev = vsocket->vdpa_dev;
+       if (!vdpa_dev) {
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       if (vdpa_dev->ops->get_dev_type) {
+               ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
+               if (ret) {
+                       VHOST_LOG_CONFIG(ERR,
+                                        "(%s) failed to get vdpa dev type for socket file.\n",
+                                        path);
+                       ret = -1;
+                       goto unlock_exit;
+               }
+       } else {
+               vdpa_type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;
+       }
+
+       *type = vdpa_type;
+
+unlock_exit:
+       pthread_mutex_unlock(&vhost_user.mutex);
+       return ret;
+}
+
+int
 rte_vhost_driver_disable_features(const char *path, uint64_t features)
 {
        struct vhost_user_socket *vsocket;
diff --git a/lib/vhost/vdpa_driver.h b/lib/vhost/vdpa_driver.h
index c4233a6..8b88a53 100644
--- a/lib/vhost/vdpa_driver.h
+++ b/lib/vhost/vdpa_driver.h
@@ -78,6 +78,9 @@ struct rte_vdpa_dev_ops {
        /** Set the device configuration space */
        int (*set_config)(int vid, uint8_t *config, uint32_t offset,
                      uint32_t size, uint32_t flags);
+
+       /** get device type: net device, blk device... */
+       int (*get_dev_type)(struct rte_vdpa_device *dev, uint32_t *type);
 };
 
 /**
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 5841315..583b4f3 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -90,6 +90,7 @@ EXPERIMENTAL {
 
        # added in 22.07
        rte_vhost_async_get_inflight_thread_unsafe;
+       rte_vhost_driver_get_vdpa_dev_type;
 
 };
 
-- 
1.8.3.1

Reply via email to