This patch adds dma-vfio argument parsing to the async vhost driver.
The argument determines whether the IOMMU needs to be programmed for
guest memory.
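
For example, a hypothetical invocation (the core list, port mask,
socket path and DMA channel below are placeholders, not a tested
configuration) could look like:

    ./dpdk-vhost -l 0-3 -n 4 -- -p 0x1 --socket-file /tmp/vhost-net \
        --dma-type ioat --dmas [txd0@00:04.0] --dma-vfio 1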

Signed-off-by: Xuan Ding <xuan.d...@intel.com>
---
 doc/guides/sample_app_ug/vhost.rst |  7 +++++++
 examples/vhost/main.c              | 16 +++++++++++++++-
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index 63dcf181e1..c54aebc504 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -176,6 +176,13 @@ operation. The index of the device corresponds to the socket file in order,
 that means vhost device 0 is created through the first socket file, vhost
 device 1 is created through the second socket file, and so on.
 
+**--dma-vfio**
+This parameter specifies whether the IOMMU needs to be programmed.
+If the DMA device is bound to vfio, an IOMMU DMA mapping will be set up
+for guest memory. If the DMA device is bound to igb_uio, there is no need
+to set up an IOMMU DMA mapping. It is a supplementary parameter for the
+async vhost-user driver and it is disabled by default.
+
 Common Issues
 -------------
 
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 81d7e4cbd3..53bb8cfe80 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -89,6 +89,8 @@ static uint32_t enable_tx_csum;
 /* Disable TSO offload */
 static uint32_t enable_tso;
 
+static uint32_t dma_use_vfio;
+
 static int client_mode;
 
 static int builtin_net_driver;
@@ -472,7 +474,8 @@ us_vhost_usage(const char *prgname)
        "               --tso [0|1] disable/enable TCP segment offload.\n"
        "               --client register a vhost-user socket as client mode.\n"
        "               --dma-type register dma type for your vhost async 
driver. For example \"ioat\" for now.\n"
-       "               --dmas register dma channel for specific vhost 
device.\n",
+       "               --dmas register dma channel for specific vhost 
device.\n"
+       "               --dma-vfio [0|1]: 0: DMA device uses igb_uio, 1: DMA 
device uses vfio\n",
               prgname);
 }
 
@@ -503,6 +506,8 @@ enum {
        OPT_DMA_TYPE_NUM,
 #define OPT_DMAS                "dmas"
        OPT_DMAS_NUM,
+#define OPT_DMA_VFIO            "dma-vfio"
+       OPT_DMA_VFIO_NUM,
 };
 
 /*
@@ -542,6 +547,8 @@ us_vhost_parse_args(int argc, char **argv)
                                NULL, OPT_DMA_TYPE_NUM},
                {OPT_DMAS, required_argument,
                                NULL, OPT_DMAS_NUM},
+               {OPT_DMA_VFIO, required_argument,
+                               NULL, OPT_DMA_VFIO_NUM},
                {NULL, 0, 0, 0},
        };
 
@@ -679,6 +686,10 @@ us_vhost_parse_args(int argc, char **argv)
                        }
                        break;
 
+               case OPT_DMA_VFIO_NUM:
+                       dma_use_vfio = !!atoi(optarg);
+                       break;
+
                case OPT_CLIENT_NUM:
                        client_mode = 1;
                        break;
@@ -1788,6 +1799,9 @@ main(int argc, char *argv[])
        if (client_mode)
                flags |= RTE_VHOST_USER_CLIENT;
 
+       if (dma_use_vfio)
+               flags |= RTE_VHOST_USER_ASYNC_USE_VFIO;
+
        /* Register vhost user driver to handle vhost messages. */
        for (i = 0; i < nb_sockets; i++) {
                char *file = socket_files + i * PATH_MAX;
-- 
2.17.1
