Hi Maxime,

The replies are inline.
> -----Original Message----- > From: Maxime Coquelin <maxime.coque...@redhat.com> > Sent: Wednesday, October 14, 2020 5:24 PM > To: Jiang, Cheng1 <cheng1.ji...@intel.com>; Xia, Chenbo > <chenbo....@intel.com>; Wang, Zhihong <zhihong.w...@intel.com> > Cc: dev@dpdk.org; Fu, Patrick <patrick...@intel.com> > Subject: Re: [PATCH v4 1/4] example/vhost: add async vhost args parsing > function > > > > On 10/12/20 6:54 AM, Cheng Jiang wrote: > > This patch is to add async vhost driver arguments parsing function for > > CBDMA channel, DMA initiation function and args description. > > The meson build file is changed to fix dependency problem. With these > > arguments vhost device can be set to use CBDMA or CPU for enqueue > > operation and bind vhost device with specific CBDMA channel to > > accelerate data copy. > > > > Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com> > > --- > > examples/vhost/ioat.c | 117 > +++++++++++++++++++++++++++++++++++++ > > examples/vhost/main.c | 37 +++++++++++- > > examples/vhost/main.h | 2 + > > examples/vhost/meson.build | 4 +- > > 4 files changed, 157 insertions(+), 3 deletions(-) create mode > > 100644 examples/vhost/ioat.c > > > > diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c new file > > mode 100644 index 000000000..c3158d3c3 > > --- /dev/null > > +++ b/examples/vhost/ioat.c > > @@ -0,0 +1,117 @@ > > +/* SPDX-License-Identifier: BSD-3-Clause > > + * Copyright(c) 2010-2017 Intel Corporation */ > > + > > +#include <rte_vhost.h> > > +#include <rte_rawdev.h> > > +#include <rte_ioat_rawdev.h> > > +#include <rte_pci.h> > > + > > +#include "main.h" > > + > > +#define MAX_VHOST_DEVICE 1024 > > +#define IOAT_RING_SIZE 4096 > > + > > +struct dma_info { > > + struct rte_pci_addr addr; > > + uint16_t dev_id; > > + bool is_valid; > > +}; > > + > > +struct dma_for_vhost { > > + struct dma_info dmas[RTE_MAX_QUEUES_PER_PORT * 2]; > > + uint16_t nr; > > +}; > > + > > +struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE]; > > + > > +int > > 
+open_ioat(const char *value) > > +{ > > + struct dma_for_vhost *dma_info = dma_bind; > > + char *input = strndup(value, strlen(value) + 1); > > + char *addrs = input; > > + char *ptrs[2]; > > + char *start, *end, *substr; > > + int64_t vid, vring_id; > > + struct rte_ioat_rawdev_config config; > > + struct rte_rawdev_info info = { .dev_private = &config }; > > + char name[32]; > > + int dev_id; > > + int ret = 0; > > + uint16_t i = 0; > > + char *dma_arg[MAX_VHOST_DEVICE]; > > + uint8_t args_nr; > > + > > + while (isblank(*addrs)) > > + addrs++; > > + if (*addrs == '\0') { > > + ret = -1; > > + goto out; > > + } > > + > > + /* process DMA devices within bracket. */ > > + addrs++; > > + substr = strtok(addrs, ";]"); > > + if (!substr) { > > + ret = -1; > > + goto out; > > + } > > + args_nr = rte_strsplit(substr, strlen(substr), > > + dma_arg, MAX_VHOST_DEVICE, ','); > > + do { > > + char *arg_temp = dma_arg[i]; > > + rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@'); > > + > > + start = strstr(ptrs[0], "txd"); > > + if (start == NULL) { > > + ret = -1; > > + goto out; > > + } > > + > > + start += 3; > > + vid = strtol(start, &end, 0); > > + if (end == start) { > > + ret = -1; > > + goto out; > > + } > > + > > + vring_id = 0 + VIRTIO_RXQ; > > + if (rte_pci_addr_parse(ptrs[1], > > + &(dma_info + vid)->dmas[vring_id].addr) < 0) > { > > + ret = -1; > > + goto out; > > + } > > + > > + rte_pci_device_name(&(dma_info + vid)- > >dmas[vring_id].addr, > > + name, sizeof(name)); > > + dev_id = rte_rawdev_get_dev_id(name); > > + if (dev_id == (uint16_t)(-ENODEV) || > > + dev_id == (uint16_t)(-EINVAL)) { > > + ret = -1; > > + goto out; > > + } > > + > > + if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 || > > + strstr(info.driver_name, "ioat") == NULL) { > > + ret = -1; > > + goto out; > > + } > > + > > + (dma_info + vid)->dmas[vring_id].dev_id = dev_id; > > + (dma_info + vid)->dmas[vring_id].is_valid = true; > > + config.ring_size = IOAT_RING_SIZE; > > + 
config.hdls_disable = true; > > + if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) { > > + ret = -1; > > + goto out; > > + } > > + rte_rawdev_start(dev_id); > > + > > + dma_info->nr++; > > + i++; > > + } while (i < args_nr); > > +out: > > + free(input); > > + return ret; > > +} > > diff --git a/examples/vhost/main.c b/examples/vhost/main.c index > > 959c0c283..4806419d6 100644 > > --- a/examples/vhost/main.c > > +++ b/examples/vhost/main.c > > @@ -95,6 +95,10 @@ static int client_mode; > > > > static int builtin_net_driver; > > > > +static int async_vhost_driver; > > + > > +static char dma_type[MAX_LONG_OPT_SZ]; > > + > > /* Specify timeout (in useconds) between retries on RX. */ static > > uint32_t burst_rx_delay_time = BURST_RX_WAIT_US; > > /* Specify the number of retries on RX. */ @@ -181,6 +185,15 @@ > > struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE]; > > / US_PER_S * BURST_TX_DRAIN_US) > > #define VLAN_HLEN 4 > > > > +static inline int > > +open_dma(const char *value) > > +{ > > + if (strncmp(dma_type, "IOAT", 4) == 0) > > I think it is better to have it in lower case. Sure, it will be changed in v5. > > > + return open_ioat(value); > > + else > > + return -1; > > +} > > + > > /* > > * Builds up the correct configuration for VMDQ VLAN pool map > > * according to the pool & queue limits. > > @@ -446,7 +459,9 @@ us_vhost_usage(const char *prgname) > > " --socket-file: The path of the socket file.\n" > > " --tx-csum [0|1] disable/enable TX checksum > offload.\n" > > " --tso [0|1] disable/enable TCP segment offload.\n" > > - " --client register a vhost-user socket as client > mode.\n", > > + " --client register a vhost-user socket as client > mode.\n" > > + " --dma-type register dma type for your vhost async > driver.\n" > > I think you should mention possible DMA types, i.e. "ioat" for now. it will be added in v5. 
> > > + " --dmas register dma channel for specific vhost > device.\n", > > prgname); > > } > > > > @@ -472,6 +487,8 @@ us_vhost_parse_args(int argc, char **argv) > > {"tso", required_argument, NULL, 0}, > > {"client", no_argument, &client_mode, 1}, > > {"builtin-net-driver", no_argument, &builtin_net_driver, 1}, > > + {"dma-type", required_argument, NULL, 0}, > > + {"dmas", required_argument, NULL, 0}, > > {NULL, 0, 0, 0}, > > }; > > > > @@ -614,6 +631,24 @@ us_vhost_parse_args(int argc, char **argv) > > } > > } > > > > + if (!strncmp(long_option[option_index].name, > > + "dma-type", > MAX_LONG_OPT_SZ)) { > > + strcpy(dma_type, optarg); > > + } > > + > > + if (!strncmp(long_option[option_index].name, > > + "dmas", > MAX_LONG_OPT_SZ)) { > > + if (open_dma(optarg) == -1) { > > + if (*optarg == -1) { > > + RTE_LOG(INFO, > VHOST_CONFIG, > > + "Wrong DMA args\n"); > > + us_vhost_usage(prgname); > > + } > > + return -1; > > + } > > + async_vhost_driver = 1; > > + } > > + > > break; > > > > /* Invalid option - print options. */ diff --git > > a/examples/vhost/main.h b/examples/vhost/main.h index > > 7cba0edbf..eac18824b 100644 > > --- a/examples/vhost/main.h > > +++ b/examples/vhost/main.h > > @@ -90,3 +90,5 @@ uint16_t vs_dequeue_pkts(struct vhost_dev *dev, > uint16_t queue_id, > > struct rte_mempool *mbuf_pool, > > struct rte_mbuf **pkts, uint16_t count); #endif /* > _MAIN_H_ */ > > + > > +int open_ioat(const char *value); > > diff --git a/examples/vhost/meson.build b/examples/vhost/meson.build > > index 872d51153..cb11edd78 100644 > > --- a/examples/vhost/meson.build > > +++ b/examples/vhost/meson.build > > @@ -9,8 +9,8 @@ > > if not is_linux > > build = false > > endif > > -deps += 'vhost' > > +deps += ['vhost', 'rawdev_ioat'] > > It is breaking build on other platforms than X86: > https://travis-ci.com/github/ovsrobot/dpdk/builds/189405820 > > It should be done the other way, which is to enable ioat support in this > example if rawdev_ioat is supported. 
Agreed, it will be changed in v5. Thanks, Cheng > > > allow_experimental_apis = true > > sources = files( > > - 'main.c', 'virtio_net.c' > > + 'main.c', 'virtio_net.c', 'ioat.c' > > ) > >