[PATCH] net/i40e: add outer VLAN processing

2021-11-18 Thread Robin Zhang
Outer VLAN processing is supported after firmware v8.4; the kernel driver
also changed its default behavior to support this feature. To align with
the kernel driver, add support for outer VLAN processing in DPDK. This will
not impact old firmware.

Signed-off-by: Robin Zhang 
---
 drivers/net/i40e/i40e_ethdev.c | 58 +++---
 drivers/net/i40e/i40e_ethdev.h |  3 ++
 2 files changed, 56 insertions(+), 5 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 344cbd25d3..6e6c0d51ac 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -2591,11 +2591,31 @@ i40e_dev_close(struct rte_eth_dev *dev)
int ret;
uint8_t aq_fail = 0;
int retries = 0;
+   int mask = 0;
+   struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
 
+   /*
+* To avoid global register conflict with kernel driver, need set
+* switch configuration back to default, disable double vlan and
+* clear the VLAN filters when dev close.
+*/
+   if (pf->is_outer_vlan_processing &&
+   (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)) {
+   mask = RTE_ETH_VLAN_EXTEND_MASK;
+   rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+
+   if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+   mask |= RTE_ETH_VLAN_FILTER_MASK;
+   rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+   }
+
+   i40e_vlan_offload_set(dev, mask);
+   }
+
ret = rte_eth_switch_domain_free(pf->switch_domain_id);
if (ret)
PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
@@ -3918,6 +3938,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
int qinq = dev->data->dev_conf.rxmode.offloads &
   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
+   u16 sw_flags = 0, valid_flags = 0;
 
if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
@@ -3935,15 +3956,28 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
/* 802.1ad frames ability is added in NVM API 1.7*/
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
if (qinq) {
+   if (pf->is_outer_vlan_processing) {
+   sw_flags = I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN;
+   valid_flags = I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN;
+   }
if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
hw->first_tag = rte_cpu_to_le_16(tpid);
else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
hw->second_tag = rte_cpu_to_le_16(tpid);
} else {
-   if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
-   hw->second_tag = rte_cpu_to_le_16(tpid);
+   if (pf->is_outer_vlan_processing) {
+   sw_flags = 0;
+   valid_flags = I40E_AQ_SET_SWITCH_CFG_OUTER_VLAN;
+   }
+   if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
+   if (pf->is_outer_vlan_processing)
+   hw->first_tag = rte_cpu_to_le_16(tpid);
+   else
+   hw->second_tag = rte_cpu_to_le_16(tpid);
+   }
}
-   ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
+   ret = i40e_aq_set_switch_config(hw, sw_flags,
+   valid_flags, 0, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Set switch config failed aq_err: %d",
@@ -4022,9 +4056,12 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
   RTE_ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
   RTE_ETHER_TYPE_VLAN);
-   }
-   else
+   } else {
+   if (pf->is_outer_vlan_processing)
+   i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
+  RTE_ETHER_TYPE_QINQ);
i40e_vsi_config_double_vlan(vsi, FALSE);
+   }
}
 
if (mask & RTE_ETH_QINQ_STRIP_MASK) {
@@ -4854,6 +4891,17 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
return -EINVAL;
}
 
+   /**
+* Enable outer VLAN processing if firmware version is greater
+* than v8.3
+*/
+  

[PATCH v1] doc: arm64 cross docs improvements/fixes

2021-11-18 Thread Juraj Linkeš
Numactl cross compilation doesn't work with clang, remove it and fix the
gcc cross compiler executable name.

Remove CFLAGS and LDFLAGS since Meson doesn't support them well enough.
Add alternatives.

The names of the downloaded gcc binaries differ from those in cross
files, so point this out in docs.

Fixes: eb0e12c0c299 ("doc: add clang to aarch64 cross build guide")

Signed-off-by: Juraj Linkeš 
---
Let me know if I should split the patch.
---
 .../linux_gsg/cross_build_dpdk_for_arm64.rst  | 69 ---
 1 file changed, 60 insertions(+), 9 deletions(-)

diff --git a/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst 
b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
index d59af58235..71f3f6c878 100644
--- a/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
+++ b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
@@ -35,13 +35,14 @@ NUMA is required by most modern machines, not needed for 
non-NUMA architectures.
git checkout v2.0.13 -b v2.0.13
./autogen.sh
autoconf -i
-   ./configure --host=aarch64-linux-gnu CC= --prefix=
+   ./configure --host=aarch64-linux-gnu 
CC=aarch64-none-linux-gnu-gcc--prefix=
make install
 
 .. note::
 
-   The compiler above can be either aarch64-linux-gnu-gcc or clang.
-   See below for information on how to get specific compilers.
+   The compiler is aarch64-none-linux-gnu-gcc if you download gcc using the
+   below guide. If you're using a different compiler, make sure you're using
+   the proper executable name.
 
 The numa header files and lib file is generated in the include and lib folder
 respectively under .
@@ -98,10 +99,6 @@ For aarch32::
 Augment the GNU toolchain with NUMA support
 ~~~
 
-.. note::
-
-   This way is optional, an alternative is to use extra CFLAGS and LDFLAGS.
-
 Copy the NUMA header files and lib to the cross compiler's directories:
 
 .. code-block:: console
@@ -110,9 +107,62 @@ Copy the NUMA header files and lib to the cross compiler's 
directories:
cp /lib/libnuma.a 
/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu/lib/gcc/aarch64-none-linux-gnu/9.2.1/
cp /lib/libnuma.so 
/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu/lib/gcc/aarch64-none-linux-gnu/9.2.1/
 
+.. note::
+
+   Using LDFLAGS and CFLAGS is not a viable alternative to copying the files.
+   The Meson docs say it is not recommended, as there are many caveats to their
+   use with Meson, especially when rebuilding the project. A viable alternative
+   would be to use the ``c_args`` and ``c_link_args`` options with Meson 0.51.0
+   and higher:
+
+.. code-block:: console
+
+   -Dc_args=-I/include -Dc_link_args=-L/lib
+
+   For Meson versions lower than 0.51.0, the ``c_args`` and ``c_link_args``
+   options don't apply to cross compilation. However, the compiler/linker flags
+   may be added to cross files under [properties]:
+
+.. code-block:: console
+
+   c_args = ['-I/include']
+   c_link_args = ['-L/lib']
+
 Cross Compiling DPDK with GNU toolchain using Meson
 ~~~
 
+.. note::
+
+   The names of gcc binaries in cross files differ from the downloaded
+   ones, which have an extra "-none-" in their name. Please modify the cross
+   file binaries accordingly when using the downloaded cross compilers.
+
+   An example cross file with modified names and added numa paths would look
+   like this:
+
+.. code-block:: console
+
+   [binaries]
+   c = 'aarch64-none-linux-gnu-gcc'
+   cpp = 'aarch64-none-linux-gnu-cpp'
+   ar = 'aarch64-none-linux-gnu-gcc-ar'
+   strip = 'aarch64-none-linux-gnu-strip'
+   pkgconfig = 'aarch64-linux-gnu-pkg-config' # the downloaded binaries
+  # don't contain a pkgconfig binary, so it's not modified
+   pcap-config = ''
+
+   [host_machine]
+   system = 'linux'
+   cpu_family = 'aarch64'
+   cpu = 'armv8-a'
+   endian = 'little'
+
+   [properties]
+   # Generate binaries that are portable across all Armv8 machines
+   platform = 'generic'
+   c_args = ['-I/include']  # replace 
+   c_link_args = ['-L/lib'] # with your path
+
 To cross-compile DPDK on a desired target machine we can use the following
 command::
 
@@ -120,12 +170,13 @@ command::
ninja -C cross-build
 
 For example if the target machine is aarch64 we can use the following
-command::
+command, provided the cross file has been modified accordingly::
 
meson aarch64-build-gcc --cross-file config/arm/arm64_armv8_linux_gcc
ninja -C aarch64-build-gcc
 
-If the target machine is aarch32 we can use the following command::
+If the target machine is aarch32 we can use the following command, provided
+the cross file has been modified accordingly::
 
meson aarch32-build --cross-file config/arm/arm32_armv8_linux_gcc
ninja -C aarch32-build
-- 
2.20.1



Re: [PATCH v1 1/1] app/test-gpudev: introduce ethdev to rx/tx packets using GPU memory

2021-11-18 Thread Elena Agostini
> From: Jerin Jacob 
> Date: Thursday, 18 November 2021 at 07:17
> To: Elena Agostini 
> Cc: dpdk-dev 
> Subject: Re: [PATCH v1 1/1] app/test-gpudev: introduce ethdev to rx/tx 
> packets using GPU memory
> External email: Use caution opening links or attachments>
>

> On Thu, Nov 18, 2021 at 12:28 AM  wrote:
> >
> > From: Elena Agostini 
> >
> > This patch introduces ethdev in test-gpudev app to provide:
> > - an example to show how GPU memory can be used to send and receive packets
> > - a useful tool to measure network metrics when using GPU memory with
> > io forwarding
> >
> > With this feature test-gpudev can:
> > - RX packets in CPU or GPU memory
> > - Store packets in the gpudev communication list
> > - TX receive packets from the communication list
> >
> > It's a simulation of a multi-core application.
> >
> > Signed-off-by: Elena Agostini 
> > ---
> >  app/test-gpudev/main.c | 471 +++--
> >  1 file changed, 452 insertions(+), 19 deletions(-)
> >
> > diff --git a/app/test-gpudev/main.c b/app/test-gpudev/main.c
> > index 250fba6427..daa586c64e 100644
> > --- a/app/test-gpudev/main.c
> > +++ b/app/test-gpudev/main.c
> > @@ -10,6 +10,8 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> > +#include 
> >
> >  #include 
> >  #include 
> > @@ -19,22 +21,98 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> > +#include 
> > +#include 
> >
> >  #include 
> >
> > +#ifndef ACCESS_ONCE
> > +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&x)
> > +#endif
> > +
> > +#ifndef WRITE_ONCE
> > +#define WRITE_ONCE(x, v) (ACCESS_ONCE(x) = (v))
> > +#endif>

> Better to have a public version of this macro as it uses just in this
> test application.

Thanks for taking time to review this patch.
I can actually use the RTE_GPU_VOLATILE macro exposed in the gpudev library
to replace both of them.


RE: Probing the expected state/support of DPDK@armhf

2021-11-18 Thread Juraj Linkeš
> > > > > What I see when building DPDK 21.11 is
> > > > > 2973 ../config/meson.build:364:1: ERROR: Problem encountered:
> > > > > Number of CPU cores not specified.
> > > > >
> > > > > Right now this seems to be broken the same everywhere - Suse
> > > > > [1], fedora [2], Debian/Ubuntu [3]
> > > > Looks like this happens with native build on armv7 machine.
> > > > RTE_MAX_LCORE
> > > is not set for the build.
> > >
> >
> > What do we want to do with armv7 native build, Ruifeng? For aarch64,
> > we detect which machine we're building on and we set everything
> > accordingly, unless the generic build is enabled. Do we want to add
> > support for just the generic build for armv7 (i.e. regardless of
> > what's set in -Dplatform)? What values of RTE_MAX_LCORE and
> > RTE_MAX_NUMA_NODES make sense for an
> > armv7 generic build?
> 
> Yes, support just the generic build for armv7.
> I think we should have RTE_MAX_LCORE=128, RTE_MAX_NUMA_NODES=8 for
> the build.
> These values are consistent with values used in stable branch where armv7 
> still
> have Make build support.

Ok, I'll submit a patch.


[PATCH v2 0/1] app/test-gpudev: introduce ethdev to rx/tx packets using GPU memory

2021-11-18 Thread eagostini
From: Elena Agostini 

This patch introduces ethdev in test-gpudev app to provide:
- an example to show how GPU memory can be used to send and receive packets
- a useful tool to measure network metrics when using GPU memory with
io forwarding

With this feature test-gpudev can:
- RX packets in CPU or GPU memory
- Store packets in the gpudev communication list
- TX receive packets from the communication list

It's a simulation of a multi-core application.

Changelog:
- Address review comments
- Minor improvements

Elena Agostini (1):
  app/test-gpudev: introduce ethdev to rx/tx packets using GPU memory

 app/test-gpudev/main.c | 477 +++--
 1 file changed, 458 insertions(+), 19 deletions(-)

-- 
2.17.1



[PATCH v2 1/1] app/test-gpudev: introduce ethdev to rx/tx packets using GPU memory

2021-11-18 Thread eagostini
From: Elena Agostini 

This patch introduces ethdev in test-gpudev app to provide:
- an example to show how GPU memory can be used to send and receive packets
- a useful tool to measure network metrics when using GPU memory with
io forwarding

With this feature test-gpudev can:
- RX packets in CPU or GPU memory
- Store packets in the gpudev communication list
- TX receive packets from the communication list

It's a simulation of a multi-core application.

Signed-off-by: Elena Agostini 
---
 app/test-gpudev/main.c | 477 +++--
 1 file changed, 458 insertions(+), 19 deletions(-)

diff --git a/app/test-gpudev/main.c b/app/test-gpudev/main.c
index 250fba6427..18de023208 100644
--- a/app/test-gpudev/main.c
+++ b/app/test-gpudev/main.c
@@ -10,6 +10,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include 
 #include 
@@ -19,22 +21,90 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
 
 #include 
 
+#define GPU_PAGE_SHIFT   16
+#define GPU_PAGE_SIZE(1UL << GPU_PAGE_SHIFT)
+#define GPU_PAGE_OFFSET  (GPU_PAGE_SIZE-1)
+#define GPU_PAGE_MASK(~GPU_PAGE_OFFSET)
+
+#define MAX_QUEUES 16
+#define NUM_COMM_ITEMS 2048
+#define PKT_GAP 4
+
+// #define DEBUG_PRINT 1
+
 enum app_args {
ARG_HELP,
-   ARG_MEMPOOL
+   ARG_BURST,
+   ARG_GPU,
+   ARG_MBUFD,
+   ARG_MEMORY,
+   ARG_QUEUES,
+   ARG_TESTAPI,
+};
+
+enum mem_type {
+   MEMORY_CPU,
+   MEMORY_GPU
+};
+
+/* Options configurable from cmd line */
+static uint32_t conf_burst = 64;
+static uint16_t conf_gpu_id = 0;
+static enum mem_type conf_mtype = MEMORY_CPU;
+static uint32_t conf_mbuf_dataroom = 2048;
+static uint32_t conf_queues = 1;
+static bool conf_testapi = false;
+static uint16_t conf_nb_descriptors = 2048;
+
+/* Options statically defined */
+static uint32_t conf_nb_mbuf = 16384;
+static uint16_t conf_port_id = 0;
+
+/* Other variables */
+static volatile bool force_quit;
+static struct rte_mempool *mpool;
+static struct rte_pktmbuf_extmem ext_mem;
+struct rte_gpu_comm_list *comm_list_fwd[MAX_QUEUES];
+struct rte_ether_addr port_eth_addr;
+static struct rte_eth_conf port_conf = {
+   .rxmode = {
+   .mq_mode = ETH_MQ_RX_RSS,
+   .split_hdr_size = 0,
+   .offloads = 0,
+   },
+   .txmode = {
+   .mq_mode = ETH_MQ_TX_NONE,
+   .offloads = 0,
+   },
+   .rx_adv_conf = {
+   .rss_conf = {
+   .rss_key = NULL,
+   .rss_hf = ETH_RSS_IP
+   },
+   },
 };
 
 static void
 usage(const char *prog_name)
 {
-   printf("%s [EAL options] --\n",
+   printf("%s [EAL options] --\n"
+   " --help\n"
+   " --burst N: number of packets per rx burst\n"
+   " --gpu N: GPU ID to use\n"
+   " --memory N: external mempool memory type, 0 CPU, 1 GPU\n"
+   " --mbufd N: mbuf dataroom size\n"
+   " --testapi: test gpudev function\n"
+   " --queues N: number of RX queues\n",
prog_name);
 }
 
-static void
+static int
 args_parse(int argc, char **argv)
 {
char **argvopt;
@@ -42,7 +112,19 @@ args_parse(int argc, char **argv)
int opt_idx;
 
static struct option lgopts[] = {
-   { "help", 0, 0, ARG_HELP},
+   { "help",  0, 0, ARG_HELP},
+   /* Packets per burst. */
+   { "burst",  1, 0, ARG_BURST},
+   /* GPU to use. */
+   { "gpu",  1, 0, ARG_GPU},
+   /* Type of memory for the mempool. */
+   { "memory",  1, 0, ARG_MEMORY},
+   /* Size of mbufs dataroom */
+   { "mbufd", 1, 0, ARG_MBUFD},
+   /* Number of RX queues */
+   { "queues", 1, 0, ARG_QUEUES},
+   /* Test only gpudev functions */
+   { "testapi", 0, 0, ARG_TESTAPI},
/* End of options */
{ 0, 0, 0, 0 }
};
@@ -51,6 +133,24 @@ args_parse(int argc, char **argv)
while ((opt = getopt_long(argc, argvopt, "",
lgopts, &opt_idx)) != EOF) {
switch (opt) {
+   case ARG_BURST:
+   conf_burst = (uint32_t) atoi(optarg);
+   break;
+   case ARG_GPU:
+   conf_gpu_id = (uint16_t) atoi(optarg);
+   break;
+   case ARG_MEMORY:
+   conf_mtype = (atoi(optarg) == 1 ? MEMORY_GPU : 
MEMORY_CPU);
+   break;
+   case ARG_MBUFD:
+   conf_mbuf_dataroom = (uint32_t) atoi(optarg);
+   break;
+   case ARG_QUEUES:
+   conf_queues = (uint32_t) atoi(optarg);
+   break;
+   case ARG_TESTAPI:
+   conf_testapi = (atoi(optarg) == 1 ? true

[PATCH v1] config/arm: add armv7 native config

2021-11-18 Thread Juraj Linkeš
Armv7 native build fails with this error:
../config/meson.build:364:1: ERROR: Problem encountered:
Number of CPU cores not specified.

This is because RTE_MAX_LCORE is not set. We also need to set
RTE_MAX_NUMA_NODES in armv7 native builds.

Fixes: 8ef09fdc506b ("build: add optional NUMA and CPU counts detection")

Signed-off-by: Juraj Linkeš 
---
 config/arm/meson.build | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 213324d262..57980661b2 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -432,6 +432,8 @@ if dpdk_conf.get('RTE_ARCH_32')
 else
 # armv7 build
 dpdk_conf.set('RTE_ARCH_ARMv7', true)
+dpdk_conf.set('RTE_MAX_LCORE', 128)
+dpdk_conf.set('RTE_MAX_NUMA_NODES', 8)
 # the minimum architecture supported, armv7-a, needs the following,
 machine_args += '-mfpu=neon'
 endif
-- 
2.20.1



[PATCH v1] gpudev: return EINVAL if invalid input pointer for free and unregister

2021-11-18 Thread eagostini
From: Elena Agostini 

Signed-off-by: Elena Agostini 
---
 lib/gpudev/gpudev.c | 10 ++
 lib/gpudev/rte_gpudev.h |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c
index 2b174d8bd5..97575ed979 100644
--- a/lib/gpudev/gpudev.c
+++ b/lib/gpudev/gpudev.c
@@ -576,6 +576,11 @@ rte_gpu_mem_free(int16_t dev_id, void *ptr)
return -rte_errno;
}
 
+   if (ptr == NULL) {
+   rte_errno = EINVAL;
+   return -rte_errno;
+   }
+
if (dev->ops.mem_free == NULL) {
rte_errno = ENOTSUP;
return -rte_errno;
@@ -619,6 +624,11 @@ rte_gpu_mem_unregister(int16_t dev_id, void *ptr)
return -rte_errno;
}
 
+   if (ptr == NULL) {
+   rte_errno = EINVAL;
+   return -rte_errno;
+   }
+
if (dev->ops.mem_unregister == NULL) {
rte_errno = ENOTSUP;
return -rte_errno;
diff --git a/lib/gpudev/rte_gpudev.h b/lib/gpudev/rte_gpudev.h
index fa3f3aad4f..02014328f6 100644
--- a/lib/gpudev/rte_gpudev.h
+++ b/lib/gpudev/rte_gpudev.h
@@ -394,6 +394,7 @@ __rte_alloc_size(2);
  *   0 on success, -rte_errno otherwise:
  *   - ENODEV if invalid dev_id
  *   - ENOTSUP if operation not supported by the driver
+ *   - EINVAL if input ptr is invalid
  *   - EPERM if driver error
  */
 __rte_experimental
@@ -442,6 +443,7 @@ int rte_gpu_mem_register(int16_t dev_id, size_t size, void 
*ptr);
  *   0 on success, -rte_errno otherwise:
  *   - ENODEV if invalid dev_id
  *   - ENOTSUP if operation not supported by the driver
+ *   - EINVAL if input ptr is invalid
  *   - EPERM if driver error
  */
 __rte_experimental
-- 
2.17.1



[PATCH v2] gpudev: free and unregister return gracefully if input pointer is NULL

2021-11-18 Thread eagostini
From: Elena Agostini 

Signed-off-by: Elena Agostini 
---
 drivers/gpu/cuda/cuda.c | 4 ++--
 lib/gpudev/gpudev.c | 6 ++
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/cuda/cuda.c b/drivers/gpu/cuda/cuda.c
index 24ae630d04..f68e2b20b9 100644
--- a/drivers/gpu/cuda/cuda.c
+++ b/drivers/gpu/cuda/cuda.c
@@ -765,7 +765,7 @@ cuda_mem_free(struct rte_gpu *dev, void *ptr)
return -ENODEV;
 
if (ptr == NULL)
-   return -EINVAL;
+   return 0;
 
hk = get_hash_from_ptr((void *)ptr);
 
@@ -804,7 +804,7 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)
return -ENODEV;
 
if (ptr == NULL)
-   return -EINVAL;
+   return 0;
 
hk = get_hash_from_ptr((void *)ptr);
 
diff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c
index 2b174d8bd5..b41c43016a 100644
--- a/lib/gpudev/gpudev.c
+++ b/lib/gpudev/gpudev.c
@@ -569,6 +569,9 @@ rte_gpu_mem_free(int16_t dev_id, void *ptr)
 {
struct rte_gpu *dev;
 
+   if (ptr == NULL)
+   return 0;
+
dev = gpu_get_by_id(dev_id);
if (dev == NULL) {
GPU_LOG(ERR, "free mem for invalid device ID %d", dev_id);
@@ -612,6 +615,9 @@ rte_gpu_mem_unregister(int16_t dev_id, void *ptr)
 {
struct rte_gpu *dev;
 
+   if (ptr == NULL)
+   return 0;
+
dev = gpu_get_by_id(dev_id);
if (dev == NULL) {
GPU_LOG(ERR, "unregister mem for invalid device ID %d", dev_id);
-- 
2.17.1



Re: [dpdk-dev] [PATCH] config/x86: add support for AMD platform

2021-11-18 Thread Thomas Monjalon
I request a techboard decision for this patch.


02/11/2021 20:04, Thomas Monjalon:
> 02/11/2021 19:45, David Marchand:
> > On Tue, Nov 2, 2021 at 3:53 PM Aman Kumar  wrote:
> > >
> > > -Dcpu_instruction_set=znverX meson option can be used
> > > to build dpdk for AMD platforms. Supported options are
> > > znver1, znver2 and znver3.
> > >
> > > Signed-off-by: Aman Kumar 
> > > ---
> > >  dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64)
> > >  dpdk_conf.set('RTE_MAX_LCORE', 128)
> > >  dpdk_conf.set('RTE_MAX_NUMA_NODES', 32)
> > > +
> > > +# AMD platform support
> > > +if get_option('cpu_instruction_set') == 'znver1'
> > > +dpdk_conf.set('RTE_MAX_LCORE', 256)
> > > +elif get_option('cpu_instruction_set') == 'znver2'
> > > +dpdk_conf.set('RTE_MAX_LCORE', 512)
> > > +elif get_option('cpu_instruction_set') == 'znver3'
> > > +dpdk_conf.set('RTE_MAX_LCORE', 512)
> > > +endif
> > 
> > I already replied to a similar patch earlier in this release.
> > https://inbox.dpdk.org/dev/cajfav8z-5amvenr3mazktqh-7szx_c6eqcua6udmxxhgrcm...@mail.gmail.com/
> > 
> > So repeating the same: do you actually _need_ more than 128 lcores in
> > a single DPDK application?

We did not receive an answer to this question.

> Yes I forgot this previous discussion concluding that we should not increase
> more than 128 threads.

We had a discussion yesterday in techboard meeting.
The consensus is that we didn't hear for real need of more than 128 threads,
except for configuration usability convenience.

Now looking again at the code, this is how it is defined:

option('max_lcores', type: 'string', value: 'default', description:
   'Set maximum number of cores/threads supported by EAL;
   "default" is different per-arch, "detect" detects the number of cores on 
the build machine.')
config/x86/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128)
config/ppc/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128)
config/arm/meson.build: it goes from 4 to 1280!

So I feel it is not fair to reject this AMD patch if we allow Arm to go beyond.
Techboard, let's have a quick decision please for 21.11-rc4.


> The --lcores syntax and David's work on rte_thread_register should unblock
> most of use cases.





[PATCH v1] gpu/cuda: properly set rte_errno

2021-11-18 Thread eagostini
From: Elena Agostini 

Signed-off-by: Elena Agostini 
---
 drivers/gpu/cuda/cuda.c | 229 +++-
 1 file changed, 153 insertions(+), 76 deletions(-)

diff --git a/drivers/gpu/cuda/cuda.c b/drivers/gpu/cuda/cuda.c
index 24ae630d04..9991f9b9f1 100644
--- a/drivers/gpu/cuda/cuda.c
+++ b/drivers/gpu/cuda/cuda.c
@@ -464,8 +464,10 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
CUcontext current_ctx;
CUcontext input_ctx;
 
-   if (dev == NULL)
-   return -ENODEV;
+   if (dev == NULL) {
+   rte_errno = ENODEV;
+   return -rte_errno;
+   }
 
/* Child initialization time probably called by rte_gpu_add_child() */
if (dev->mpshared->info.parent != RTE_GPU_ID_NONE &&
@@ -476,7 +478,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
err_string);
-   return -EPERM;
+   rte_errno = EPERM;
+   return -rte_errno;
}
 
/* Set child ctx as current ctx */
@@ -486,7 +489,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent input failed with 
%s",
err_string);
-   return -EPERM;
+   rte_errno = EPERM;
+   return -rte_errno;
}
 
/*
@@ -505,8 +509,10 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
(uint32_t)affinityPrm.param.smCount.val;
 
ret = rte_gpu_info_get(dev->mpshared->info.parent, 
&parent_info);
-   if (ret)
-   return -ENODEV;
+   if (ret) {
+   rte_errno = ENODEV;
+   return -rte_errno;
+   }
dev->mpshared->info.total_memory = parent_info.total_memory;
 
/*
@@ -517,7 +523,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
RTE_CACHE_LINE_SIZE);
if (dev->mpshared->dev_private == NULL) {
rte_cuda_log(ERR, "Failed to allocate memory for GPU 
process private");
-   return -EPERM;
+   rte_errno = EPERM;
+   return -rte_errno;
}
 
private = (struct cuda_info *)dev->mpshared->dev_private;
@@ -527,7 +534,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetDevice failed with %s",
err_string);
-   return -EPERM;
+   rte_errno = EPERM;
+   return -rte_errno;
}
 
res = pfn_cuDeviceGetName(private->gpu_name,
@@ -536,7 +544,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetName failed with %s",
err_string);
-   return -EPERM;
+   rte_errno = EPERM;
+   return -rte_errno;
}
 
/* Restore original ctx as current ctx */
@@ -545,7 +554,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info 
*info)
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent current failed with 
%s",
err_string);
-   return -EPERM;
+   rte_errno = EPERM;
+   return -rte_errno;
}
}
 
@@ -567,10 +577,14 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void 
**ptr)
CUcontext input_ctx;
unsigned int flag = 1;
 
-   if (dev == NULL)
-   return -ENODEV;
-   if (size == 0)
-   return -EINVAL;
+   if (dev == NULL) {
+   rte_errno = ENODEV;
+   return -rte_errno;
+   }
+   if (size == 0) {
+   rte_errno = EINVAL;
+   return -rte_errno;
+   }
 
/* Store current ctx */
res = pfn_cuCtxGetCurrent(¤t_ctx);
@@ -578,7 +592,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
err_str

Re: [dpdk-dev] [PATCH] config/x86: add support for AMD platform

2021-11-18 Thread Bruce Richardson
On Thu, Nov 18, 2021 at 01:25:38PM +0100, Thomas Monjalon wrote:
> I request a techboard decision for this patch.
> 
> 
> 02/11/2021 20:04, Thomas Monjalon:
> > 02/11/2021 19:45, David Marchand:
> > > On Tue, Nov 2, 2021 at 3:53 PM Aman Kumar  wrote:
> > > >
> > > > -Dcpu_instruction_set=znverX meson option can be used
> > > > to build dpdk for AMD platforms. Supported options are
> > > > znver1, znver2 and znver3.
> > > >
> > > > Signed-off-by: Aman Kumar 
> > > > ---
> > > >  dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64)
> > > >  dpdk_conf.set('RTE_MAX_LCORE', 128)
> > > >  dpdk_conf.set('RTE_MAX_NUMA_NODES', 32)
> > > > +
> > > > +# AMD platform support
> > > > +if get_option('cpu_instruction_set') == 'znver1'
> > > > +dpdk_conf.set('RTE_MAX_LCORE', 256)
> > > > +elif get_option('cpu_instruction_set') == 'znver2'
> > > > +dpdk_conf.set('RTE_MAX_LCORE', 512)
> > > > +elif get_option('cpu_instruction_set') == 'znver3'
> > > > +dpdk_conf.set('RTE_MAX_LCORE', 512)
> > > > +endif
> > > 
> > > I already replied to a similar patch earlier in this release.
> > > https://inbox.dpdk.org/dev/cajfav8z-5amvenr3mazktqh-7szx_c6eqcua6udmxxhgrcm...@mail.gmail.com/
> > > 
> > > So repeating the same: do you actually _need_ more than 128 lcores in
> > > a single DPDK application?
> 
> We did not receive an answer to this question.
> 
> > Yes I forgot this previous discussion concluding that we should not increase
> > more than 128 threads.
> 
> We had a discussion yesterday in techboard meeting.
> The consensus is that we didn't hear for real need of more than 128 threads,
> except for configuration usability convenience.
> 
> Now looking again at the code, this is how it is defined:
> 
> option('max_lcores', type: 'string', value: 'default', description:
>'Set maximum number of cores/threads supported by EAL;
>"default" is different per-arch, "detect" detects the number of cores 
> on the build machine.')
> config/x86/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128)
> config/ppc/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128)
> config/arm/meson.build: it goes from 4 to 1280!
> 
> So I feel it is not fair to reject this AMD patch if we allow Arm to go 
> beyond.
> Techboard, let's have a quick decision please for 21.11-rc4.
> 
I would support increasing the default value for x86 in this release.

I believe Dave H. had some patches to decrease the memory footprint
overhead of such a change. I don't believe that they were merged, and while
it's a bit late for 21.11 now, those should be considered for 22.03 release
and then maybe for backport.

/Bruce


Re: [dpdk-dev] [PATCH] config/x86: add support for AMD platform

2021-11-18 Thread Thomas Monjalon
18/11/2021 14:52, Bruce Richardson:
> On Thu, Nov 18, 2021 at 01:25:38PM +0100, Thomas Monjalon wrote:
> > I request a techboard decision for this patch.
> > 
> > 
> > 02/11/2021 20:04, Thomas Monjalon:
> > > 02/11/2021 19:45, David Marchand:
> > > > On Tue, Nov 2, 2021 at 3:53 PM Aman Kumar  
> > > > wrote:
> > > > >
> > > > > -Dcpu_instruction_set=znverX meson option can be used
> > > > > to build dpdk for AMD platforms. Supported options are
> > > > > znver1, znver2 and znver3.
> > > > >
> > > > > Signed-off-by: Aman Kumar 
> > > > > ---
> > > > >  dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64)
> > > > >  dpdk_conf.set('RTE_MAX_LCORE', 128)
> > > > >  dpdk_conf.set('RTE_MAX_NUMA_NODES', 32)
> > > > > +
> > > > > +# AMD platform support
> > > > > +if get_option('cpu_instruction_set') == 'znver1'
> > > > > +dpdk_conf.set('RTE_MAX_LCORE', 256)
> > > > > +elif get_option('cpu_instruction_set') == 'znver2'
> > > > > +dpdk_conf.set('RTE_MAX_LCORE', 512)
> > > > > +elif get_option('cpu_instruction_set') == 'znver3'
> > > > > +dpdk_conf.set('RTE_MAX_LCORE', 512)
> > > > > +endif
> > > > 
> > > > I already replied to a similar patch earlier in this release.
> > > > https://inbox.dpdk.org/dev/cajfav8z-5amvenr3mazktqh-7szx_c6eqcua6udmxxhgrcm...@mail.gmail.com/
> > > > 
> > > > So repeating the same: do you actually _need_ more than 128 lcores in
> > > > a single DPDK application?
> > 
> > We did not receive an answer to this question.
> > 
> > > Yes I forgot this previous discussion concluding that we should not 
> > > increase
> > > more than 128 threads.
> > 
> > We had a discussion yesterday in techboard meeting.
> > The consensus is that we didn't hear for real need of more than 128 threads,
> > except for configuration usability convenience.
> > 
> > Now looking again at the code, this is how it is defined:
> > 
> > option('max_lcores', type: 'string', value: 'default', description:
> >'Set maximum number of cores/threads supported by EAL;
> >"default" is different per-arch, "detect" detects the number of 
> > cores on the build machine.')
> > config/x86/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128)
> > config/ppc/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128)
> > config/arm/meson.build: it goes from 4 to 1280!
> > 
> > So I feel it is not fair to reject this AMD patch if we allow Arm to go 
> > beyond.
> > Techboard, let's have a quick decision please for 21.11-rc4.
> > 
> I would support increasing the default value for x86 in this release.

This patch is not increasing the default for all x86,
only for some CPUs as given at compilation time.
I think it is the same logic as Arm CPU-specific compilation.

> I believe Dave H. had some patches to decrease the memory footprint
> overhead of such a change. I don't believe that they were merged, and while
> it's a bit late for 21.11 now, those should be considered for 22.03 release
> and then maybe for backport.





Re: [PATCH] ci: add Fedora 35 container in GHA

2021-11-18 Thread Aaron Conole
David Marchand  writes:

> On Tue, Nov 16, 2021 at 3:44 PM Aaron Conole  wrote:
>> > Build DPDK with Fedora 35 containers.
>> > Differences with the Ubuntu GHA vm images:
>> > - tasks are run as root in containers, no need for sudo,
>> > - compiler must be explicitly installed: clang is not in container images,
>> > - GHA artifacts can't contain a ':' in their name, and must be filtered,
>> >
>> > Signed-off-by: David Marchand 
>> Acked-by: Aaron Conole 
>
> Unfortunately, while testing it a bit more (having it in my tree), I
> got some random failures when pulling container images.
> I suspect docker limits image pulls... but I am not sure yet.
> I'll have to investigate this.

I do see that there were warnings even with the github robot's run - but
those builds eventually went through.  I guess if it isn't stable, it
makes sense to not deploy.

One alternative is to create a separate "workflow" for github action
that can be called "fedora" and that will be recorded differently by the
robot and then if there's a failure at least it will be a different
"test."  This probably isn't preferable, though - since it creates
another test in the UI.

>
> --
> David Marchand



Re: [PATCH] common/mlx5: decrease log level for hlist creation

2021-11-18 Thread David Marchand
Hi Slava,

On Wed, Nov 17, 2021 at 3:46 PM David Marchand
 wrote:
>
> On Wed, Nov 17, 2021 at 2:28 PM Slava Ovsiienko  
> wrote:
> >
> > I've re-checked the mlx5_hlist_create() call tree.
> >
> > And it seems all the calls are done with hardcoded  const values for "size" 
> > argument,
> > and all these values are powers-of-2.
> >
> > We had an issue in the past, but then I was not seeing this warning for a 
> > long time
> > on my setup.
>
> I'll double check with Maxime.
> There might be a misunderstanding between us.

Maxime passed me his setup with a CX6.
I confirm there is no warning in main and the problem has been fixed
in v20.11 LTS.
Sorry for the noise, I'll withdraw this patch.

Thanks.


For the record:
- v20.11 and v20.11.1 has logs about:
mlx5_pci: Size 0x is not power of 2, will be aligned to 0x1.
mlx5_pci: Failed to init cache list FDB_ingress_0_matcher_cache entry (nil).

- v20.11.2 has only:
mlx5_pci: Failed to init cache list FDB_ingress_0_matcher_cache entry (nil).

- v20.11.3 has no warning


-- 
David Marchand



[PATCH] doc: support IPsec Multi-buffer lib v1.1

2021-11-18 Thread Pablo de Lara
Updated AESNI MB and AESNI GCM, KASUMI, ZUC and SNOW3G PMD documentation
guides with information about the latest Intel IPSec Multi-buffer
library supported.

Signed-off-by: Pablo de Lara 
---
 doc/guides/cryptodevs/aesni_gcm.rst | 8 
 doc/guides/cryptodevs/aesni_mb.rst  | 8 
 doc/guides/cryptodevs/kasumi.rst| 8 
 doc/guides/cryptodevs/snow3g.rst| 8 
 doc/guides/cryptodevs/zuc.rst   | 8 
 5 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst 
b/doc/guides/cryptodevs/aesni_gcm.rst
index bbe9d99840..99a8979d49 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -40,8 +40,8 @@ Installation
 To build DPDK with the AESNI_GCM_PMD the user is required to download the 
multi-buffer
 library from `here `_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v1.0, which
-can be downloaded in 
``_.
+The latest version of the library supported by this PMD is v1.1, which
+can be downloaded in 
``_.
 
 .. code-block:: console
 
@@ -84,8 +84,8 @@ and the external crypto libraries supported by them:
17.08 - 18.02  Multi-buffer library 0.46 - 0.48
18.05 - 19.02  Multi-buffer library 0.49 - 0.52
19.05 - 20.08  Multi-buffer library 0.52 - 0.55
-   20.11 - 21.08  Multi-buffer library 0.53 - 1.0*
-   21.11+ Multi-buffer library 1.0*
+   20.11 - 21.08  Multi-buffer library 0.53 - 1.1*
+   21.11+ Multi-buffer library 1.0  - 1.1*
=  
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build 
system.
diff --git a/doc/guides/cryptodevs/aesni_mb.rst 
b/doc/guides/cryptodevs/aesni_mb.rst
index 948128ae9b..1777eb25c1 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -86,8 +86,8 @@ Installation
 To build DPDK with the AESNI_MB_PMD the user is required to download the 
multi-buffer
 library from `here `_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v1.0, which
-can be downloaded from 
``_.
+The latest version of the library supported by this PMD is v1.1, which
+can be downloaded from 
``_.
 
 .. code-block:: console
 
@@ -132,8 +132,8 @@ and the Multi-Buffer library version supported by them:
18.05 - 19.02   0.49 - 0.52
19.05 - 19.08   0.52
19.11 - 20.08   0.52 - 0.55
-   20.11 - 21.08   0.53 - 1.0*
-   21.11+  1.0*
+   20.11 - 21.08   0.53 - 1.1*
+   21.11+  1.0  - 1.1*
==  
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build 
system.
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index bc82744fcc..72318f2bd4 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -36,8 +36,8 @@ Installation
 To build DPDK with the KASUMI_PMD the user is required to download the 
multi-buffer
 library from `here `_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v1.0, which
-can be downloaded from 
``_.
+The latest version of the library supported by this PMD is v1.1, which
+can be downloaded from 
``_.
 
 After downloading the library, the user needs to unpack and compile it
 on their system before building DPDK:
@@ -78,8 +78,8 @@ and the external crypto libraries supported by them:
DPDK version   Crypto library version
=  
16.11 - 19.11  LibSSO KASUMI
-   20.02 - 21.08  Multi-buffer library 0.53 - 1.0*
-   21.11+ Multi-buffer library 1.0*
+   20.02 - 21.08  Multi-buffer library 0.53 - 1.1*
+   21.11+ Multi-buffer library 1.0  - 1.1*
=  
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build 
system.
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 4ba71d66ce..68a1462c13 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -35,8 +35,8 @@ Installation
 To build DPDK with the SNOW3G_PMD the user is required to download the 
multi-buffer
 library from `here `_
 and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v1.0, which
-can

[PATCH v1 0/3] Fix typo's and capitalise PMD

2021-11-18 Thread Sean Morrissey
This patchset cleans up the use of the phrase PMD
throughout dpdk by capitalising pmd and also
removing the use of the word driver following PMD.

This patchset also removes some unnecessary
duplication of the word "the" in comments and
docs.

Sean Morrissey (3):
  fix PMD wording typo
  fix 'the the' typo
  doc: capitalise PMD

 app/test-pmd/cmdline.c|  4 +--
 app/test/test_barrier.c   |  2 +-
 doc/guides/bbdevs/turbo_sw.rst|  2 +-
 doc/guides/cryptodevs/ccp.rst |  2 +-
 doc/guides/cryptodevs/openssl.rst |  2 +-
 doc/guides/cryptodevs/overview.rst|  2 +-
 doc/guides/cryptodevs/virtio.rst  |  2 +-
 doc/guides/eventdevs/opdl.rst |  2 +-
 doc/guides/linux_gsg/build_sample_apps.rst|  2 +-
 doc/guides/nics/af_packet.rst |  2 +-
 doc/guides/nics/af_xdp.rst|  2 +-
 doc/guides/nics/avp.rst   |  4 +--
 doc/guides/nics/cnxk.rst  |  2 +-
 doc/guides/nics/enetfec.rst   |  2 +-
 doc/guides/nics/fm10k.rst |  4 +--
 doc/guides/nics/intel_vf.rst  |  2 +-
 doc/guides/nics/netvsc.rst|  2 +-
 doc/guides/nics/nfp.rst   |  2 +-
 doc/guides/nics/octeontx.rst  |  4 +--
 doc/guides/nics/octeontx2.rst |  6 ++--
 doc/guides/nics/thunderx.rst  |  4 +--
 doc/guides/nics/virtio.rst|  4 +--
 doc/guides/prog_guide/compressdev.rst |  6 ++--
 .../prog_guide/writing_efficient_code.rst |  4 +--
 doc/guides/rel_notes/known_issues.rst |  2 +-
 doc/guides/rel_notes/release_16_04.rst|  2 +-
 doc/guides/rel_notes/release_18_02.rst|  4 +--
 doc/guides/rel_notes/release_19_05.rst|  6 ++--
 doc/guides/rel_notes/release_19_11.rst|  2 +-
 doc/guides/rel_notes/release_20_11.rst|  4 +--
 doc/guides/rel_notes/release_21_02.rst|  2 +-
 doc/guides/rel_notes/release_21_05.rst|  2 +-
 doc/guides/rel_notes/release_21_08.rst|  2 +-
 doc/guides/rel_notes/release_21_11.rst|  2 +-
 doc/guides/rel_notes/release_2_1.rst  |  2 +-
 doc/guides/rel_notes/release_2_2.rst  |  4 +--
 doc/guides/sample_app_ug/bbdev_app.rst|  2 +-
 doc/guides/sample_app_ug/keep_alive.rst   |  2 +-
 .../sample_app_ug/l3_forward_access_ctrl.rst  |  2 +-
 doc/guides/sample_app_ug/vhost.rst|  4 +--
 doc/guides/tools/testeventdev.rst |  2 +-
 doc/guides/vdpadevs/ifc.rst   |  4 +--
 drivers/bus/vmbus/rte_bus_vmbus.h |  2 +-
 drivers/common/cpt/cpt_hw_types.h |  2 +-
 drivers/common/sfc_efx/efsys.h|  2 +-
 drivers/compress/qat/qat_comp_pmd.h   |  2 +-
 drivers/crypto/qat/qat_asym_pmd.h |  2 +-
 drivers/crypto/qat/qat_sym_pmd.h  |  2 +-
 drivers/net/bnx2x/elink.c |  2 +-
 drivers/net/bnxt/hsi_struct_def_dpdk.h|  2 +-
 drivers/net/bonding/rte_eth_bond_pmd.c|  2 +-
 drivers/net/fm10k/fm10k_ethdev.c  |  2 +-
 drivers/net/hinic/base/hinic_pmd_cmdq.h   |  2 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c  |  2 +-
 drivers/net/hns3/hns3_ethdev.c|  6 ++--
 drivers/net/hns3/hns3_ethdev.h|  6 ++--
 drivers/net/hns3/hns3_ethdev_vf.c | 28 +--
 drivers/net/hns3/hns3_rss.c   |  4 +--
 drivers/net/hns3/hns3_rxtx.c  |  8 +++---
 drivers/net/hns3/hns3_rxtx.h  |  4 +--
 drivers/net/i40e/base/i40e_common.c   |  2 +-
 drivers/net/i40e/i40e_ethdev.c|  2 +-
 drivers/net/ice/ice_ethdev.h  |  2 +-
 drivers/net/ionic/ionic_if.h  |  2 +-
 drivers/net/mlx5/mlx5_trigger.c   |  2 +-
 drivers/net/nfp/nfp_common.h  |  2 +-
 drivers/net/nfp/nfp_ethdev.c  |  2 +-
 drivers/net/nfp/nfp_ethdev_vf.c   |  2 +-
 drivers/raw/ifpga/base/README |  2 +-
 lib/bbdev/rte_bbdev.h | 12 
 lib/bbdev/rte_bbdev_pmd.h |  2 +-
 lib/compressdev/rte_compressdev_pmd.h |  4 +--
 lib/cryptodev/cryptodev_pmd.h |  4 +--
 lib/dmadev/rte_dmadev_core.h  |  2 +-
 lib/eal/include/rte_dev.h |  2 +-
 lib/eal/include/rte_devargs.h |  4 +--
 lib/ethdev/rte_ethdev.h   | 18 ++--
 lib/eventdev/eventdev_pmd.h   |  2 +-
 lib/ip_frag/rte_ipv6_fragmentation.c  |  2 +-
 lib/rawdev/rte_rawdev_pmd.h   |  2 +-
 80 files changed, 136 insertions(+), 136 deletions(-)

-- 
2.25.1



[PATCH v1 2/3] fix 'the the' typo

2021-11-18 Thread Sean Morrissey
Remove the use of double "the" as it does not
make sense.

Signed-off-by: Sean Morrissey 
Signed-off-by: Conor Fogarty 
---
 app/test/test_barrier.c  | 2 +-
 doc/guides/sample_app_ug/keep_alive.rst  | 2 +-
 drivers/bus/vmbus/rte_bus_vmbus.h| 2 +-
 drivers/common/cpt/cpt_hw_types.h| 2 +-
 drivers/net/bnx2x/elink.c| 2 +-
 drivers/net/bnxt/hsi_struct_def_dpdk.h   | 2 +-
 drivers/net/bonding/rte_eth_bond_pmd.c   | 2 +-
 drivers/net/hinic/base/hinic_pmd_hwdev.c | 2 +-
 drivers/net/i40e/base/i40e_common.c  | 2 +-
 drivers/net/ice/ice_ethdev.h | 2 +-
 drivers/net/ionic/ionic_if.h | 2 +-
 drivers/net/mlx5/mlx5_trigger.c  | 2 +-
 lib/bbdev/rte_bbdev_pmd.h| 2 +-
 lib/compressdev/rte_compressdev_pmd.h| 2 +-
 lib/cryptodev/cryptodev_pmd.h| 2 +-
 lib/eventdev/eventdev_pmd.h  | 2 +-
 lib/ip_frag/rte_ipv6_fragmentation.c | 2 +-
 lib/rawdev/rte_rawdev_pmd.h  | 2 +-
 18 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/app/test/test_barrier.c b/app/test/test_barrier.c
index 9641133ccf..6d6d48749c 100644
--- a/app/test/test_barrier.c
+++ b/app/test/test_barrier.c
@@ -6,7 +6,7 @@
   * This is a simple functional test for rte_smp_mb() implementation.
   * I.E. make sure that LOAD and STORE operations that precede the
   * rte_smp_mb() call are globally visible across the lcores
-  * before the the LOAD and STORE operations that follows it.
+  * before the LOAD and STORE operations that follows it.
   * The test uses simple implementation of Peterson's lock algorithm
   * (https://en.wikipedia.org/wiki/Peterson%27s_algorithm)
   * for two execution units to make sure that rte_smp_mb() prevents
diff --git a/doc/guides/sample_app_ug/keep_alive.rst 
b/doc/guides/sample_app_ug/keep_alive.rst
index b6d75c8a80..a907ff36a1 100644
--- a/doc/guides/sample_app_ug/keep_alive.rst
+++ b/doc/guides/sample_app_ug/keep_alive.rst
@@ -78,7 +78,7 @@ options.
 Explanation
 ---
 
-The following sections provide some explanation of the The
+The following sections provide some explanation of the
 Keep-Alive/'Liveliness' conceptual scheme. As mentioned in the
 overview section, the initialization and run-time paths are very
 similar to those of the :doc:`l2_forward_real_virtual`.
diff --git a/drivers/bus/vmbus/rte_bus_vmbus.h 
b/drivers/bus/vmbus/rte_bus_vmbus.h
index 466d42d277..a24bad831d 100644
--- a/drivers/bus/vmbus/rte_bus_vmbus.h
+++ b/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -291,7 +291,7 @@ struct iova_list {
  * @param data
  * Pointer to the buffer additional data to send
  * @param dlen
- *  Maximum size of what the the buffer will hold
+ *  Maximum size of what the buffer will hold
  * @param xact
  * Identifier of the request
  * @param flags
diff --git a/drivers/common/cpt/cpt_hw_types.h 
b/drivers/common/cpt/cpt_hw_types.h
index a1f969eb14..522844c351 100644
--- a/drivers/common/cpt/cpt_hw_types.h
+++ b/drivers/common/cpt/cpt_hw_types.h
@@ -466,7 +466,7 @@ typedef union {
uint64_t dbell_cnt : 20;
/** [ 19:  0](R/W/H) Number of instruction queue 64-bit words
 * to add to the CPT instruction doorbell count. Readback value
-* is the the current number of pending doorbell requests.
+* is the current number of pending doorbell requests.
 *
 * If counter overflows CPT()_VQ()_MISC_INT[DBELL_DOVF] is set.
 *
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index b65126d718..2093d8f373 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -15013,7 +15013,7 @@ static void elink_check_kr2_wa(struct elink_params 
*params,
 
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
 * Since some switches tend to reinit the AN process and clear the
-* the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+* advertised BP/NP after ~2 seconds causing the KR2 to be disabled
 * and recovered many times
 */
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h 
b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 0c08171dec..88624f8129 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -14998,7 +14998,7 @@ struct hwrm_func_resource_qcaps_output {
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL 
\
UINT32_C(0x1)
/*
-* The PF driver should not reserve any resources for each VF until the
+* The PF driver should not reserve any resources for each VF until
 * the VF interface is brought up.
 */
#define 
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c 
b/drivers/net/bonding/rte_eth

[PATCH v1 3/3] doc: capitalise PMD

2021-11-18 Thread Sean Morrissey
The docs contain references to pmd but the proper
use is PMD.

Signed-off-by: Sean Morrissey 
---
 doc/guides/cryptodevs/ccp.rst  | 2 +-
 doc/guides/cryptodevs/openssl.rst  | 2 +-
 doc/guides/cryptodevs/overview.rst | 2 +-
 doc/guides/eventdevs/opdl.rst  | 2 +-
 doc/guides/nics/cnxk.rst   | 2 +-
 doc/guides/nics/octeontx.rst   | 4 ++--
 doc/guides/nics/octeontx2.rst  | 6 +++---
 doc/guides/nics/thunderx.rst   | 2 +-
 doc/guides/prog_guide/compressdev.rst  | 6 +++---
 doc/guides/rel_notes/release_18_02.rst | 4 ++--
 doc/guides/rel_notes/release_2_1.rst   | 2 +-
 doc/guides/sample_app_ug/vhost.rst | 4 ++--
 doc/guides/vdpadevs/ifc.rst| 4 ++--
 13 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
index 36dae090f9..52e98b0859 100644
--- a/doc/guides/cryptodevs/ccp.rst
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -100,7 +100,7 @@ The following parameters (all optional) can be provided in 
the previous two call
 
 * ccp_auth_opt: Specify authentication operations to perform on CPU using 
openssl APIs.
 
-To validate ccp pmd, l2fwd-crypto example can be used with following command:
+To validate ccp PMD, l2fwd-crypto example can be used with following command:
 
 .. code-block:: console
 
diff --git a/doc/guides/cryptodevs/openssl.rst 
b/doc/guides/cryptodevs/openssl.rst
index 848a2e8eb8..03041ceda1 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -69,7 +69,7 @@ use version 1.1.1g or newer.
 Initialization
 --
 
-User can use app/test application to check how to use this pmd and to verify
+User can use app/test application to check how to use this PMD and to verify
 crypto processing.
 
 Test name is cryptodev_openssl_autotest.
diff --git a/doc/guides/cryptodevs/overview.rst 
b/doc/guides/cryptodevs/overview.rst
index 1172297915..d754b0cfc6 100644
--- a/doc/guides/cryptodevs/overview.rst
+++ b/doc/guides/cryptodevs/overview.rst
@@ -19,7 +19,7 @@ Supported Feature Flags
 
- "OOP SGL In SGL Out" feature flag stands for
  "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
- which means pmd supports different scatter-gather styled input and output 
buffers
+ which means PMD supports different scatter-gather styled input and output 
buffers
  (i.e. both can consists of multiple segments).
 
- "OOP SGL In LB Out" feature flag stands for
diff --git a/doc/guides/eventdevs/opdl.rst b/doc/guides/eventdevs/opdl.rst
index cbfd1f11b7..f220959249 100644
--- a/doc/guides/eventdevs/opdl.rst
+++ b/doc/guides/eventdevs/opdl.rst
@@ -87,7 +87,7 @@ due to the static nature of the underlying queues. It is 
because of this
 that the implementation can achieve such high throughput and low latency
 
 The following list is a comprehensive outline of the what is supported and
-the limitations / restrictions imposed by the opdl pmd
+the limitations / restrictions imposed by the opdl PMD
 
  - The order in which packets moved between queues is static and fixed \
(dynamic scheduling is not supported).
diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 837ffc02b4..84f9865654 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -289,7 +289,7 @@ Limitations
 
 
 The OCTEON CN9K/CN10K SoC family NIC has inbuilt HW assisted external mempool 
manager.
-``net_cnxk`` pmd only works with ``mempool_cnxk`` mempool handler
+``net_cnxk`` PMD only works with ``mempool_cnxk`` mempool handler
 as it is performance wise most effective way for packet allocation and Tx 
buffer
 recycling on OCTEON TX2 SoC platform.
 
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
index 8236cc3e93..092120815d 100644
--- a/doc/guides/nics/octeontx.rst
+++ b/doc/guides/nics/octeontx.rst
@@ -108,7 +108,7 @@ for details.
 Initialization
 --
 
-The OCTEON TX ethdev pmd is exposed as a vdev device which consists of a set
+The OCTEON TX ethdev PMD is exposed as a vdev device which consists of a set
 of PKI and PKO PCIe VF devices. On EAL initialization,
 PKI/PKO PCIe VF devices will be probed and then the vdev device can be created
 from the application code, or from the EAL command line based on
@@ -126,7 +126,7 @@ the number of interesting ports with ``nr_ports`` argument.
 
 Dependency
 ~~
-``eth_octeontx`` pmd is depend on ``event_octeontx`` eventdev device and
+``eth_octeontx`` PMD is depend on ``event_octeontx`` eventdev device and
 ``octeontx_fpavf`` external mempool handler.
 
 Example:
diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
index eae32f0afe..4ce067f2c5 100644
--- a/doc/guides/nics/octeontx2.rst
+++ b/doc/guides/nics/octeontx2.rst
@@ -256,7 +256,7 @@ Limitations
 ~
 
 The OCTEON TX2 SoC family NIC has inbu

[PATCH v1 1/3] fix PMD wording typo

2021-11-18 Thread Sean Morrissey
Removing the use of the word driver following PMD as it's
unnecessary.

Signed-off-by: Sean Morrissey 
Signed-off-by: Conor Fogarty 
---
 app/test-pmd/cmdline.c|  4 +--
 doc/guides/bbdevs/turbo_sw.rst|  2 +-
 doc/guides/cryptodevs/virtio.rst  |  2 +-
 doc/guides/linux_gsg/build_sample_apps.rst|  2 +-
 doc/guides/nics/af_packet.rst |  2 +-
 doc/guides/nics/af_xdp.rst|  2 +-
 doc/guides/nics/avp.rst   |  4 +--
 doc/guides/nics/enetfec.rst   |  2 +-
 doc/guides/nics/fm10k.rst |  4 +--
 doc/guides/nics/intel_vf.rst  |  2 +-
 doc/guides/nics/netvsc.rst|  2 +-
 doc/guides/nics/nfp.rst   |  2 +-
 doc/guides/nics/thunderx.rst  |  2 +-
 doc/guides/nics/virtio.rst|  4 +--
 .../prog_guide/writing_efficient_code.rst |  4 +--
 doc/guides/rel_notes/known_issues.rst |  2 +-
 doc/guides/rel_notes/release_16_04.rst|  2 +-
 doc/guides/rel_notes/release_19_05.rst|  6 ++--
 doc/guides/rel_notes/release_19_11.rst|  2 +-
 doc/guides/rel_notes/release_20_11.rst|  4 +--
 doc/guides/rel_notes/release_21_02.rst|  2 +-
 doc/guides/rel_notes/release_21_05.rst|  2 +-
 doc/guides/rel_notes/release_21_08.rst|  2 +-
 doc/guides/rel_notes/release_21_11.rst|  2 +-
 doc/guides/rel_notes/release_2_2.rst  |  4 +--
 doc/guides/sample_app_ug/bbdev_app.rst|  2 +-
 .../sample_app_ug/l3_forward_access_ctrl.rst  |  2 +-
 doc/guides/tools/testeventdev.rst |  2 +-
 drivers/common/sfc_efx/efsys.h|  2 +-
 drivers/compress/qat/qat_comp_pmd.h   |  2 +-
 drivers/crypto/qat/qat_asym_pmd.h |  2 +-
 drivers/crypto/qat/qat_sym_pmd.h  |  2 +-
 drivers/net/fm10k/fm10k_ethdev.c  |  2 +-
 drivers/net/hinic/base/hinic_pmd_cmdq.h   |  2 +-
 drivers/net/hns3/hns3_ethdev.c|  6 ++--
 drivers/net/hns3/hns3_ethdev.h|  6 ++--
 drivers/net/hns3/hns3_ethdev_vf.c | 28 +--
 drivers/net/hns3/hns3_rss.c   |  4 +--
 drivers/net/hns3/hns3_rxtx.c  |  8 +++---
 drivers/net/hns3/hns3_rxtx.h  |  4 +--
 drivers/net/i40e/i40e_ethdev.c|  2 +-
 drivers/net/nfp/nfp_common.h  |  2 +-
 drivers/net/nfp/nfp_ethdev.c  |  2 +-
 drivers/net/nfp/nfp_ethdev_vf.c   |  2 +-
 drivers/raw/ifpga/base/README |  2 +-
 lib/bbdev/rte_bbdev.h | 12 
 lib/compressdev/rte_compressdev_pmd.h |  2 +-
 lib/cryptodev/cryptodev_pmd.h |  2 +-
 lib/dmadev/rte_dmadev_core.h  |  2 +-
 lib/eal/include/rte_dev.h |  2 +-
 lib/eal/include/rte_devargs.h |  4 +--
 lib/ethdev/rte_ethdev.h   | 18 ++--
 52 files changed, 97 insertions(+), 97 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index c43c85c591..6e10afeedd 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -2701,7 +2701,7 @@ cmd_config_rxtx_queue_parsed(void *parsed_result,
ret = rte_eth_dev_tx_queue_stop(res->portid, res->qid);
 
if (ret == -ENOTSUP)
-   fprintf(stderr, "Function not supported in PMD driver\n");
+   fprintf(stderr, "Function not supported in PMD\n");
 }
 
 cmdline_parse_token_string_t cmd_config_rxtx_queue_port =
@@ -14700,7 +14700,7 @@ cmd_ddp_info_parsed(
free(proto);
 #endif
if (ret == -ENOTSUP)
-   fprintf(stderr, "Function not supported in PMD driver\n");
+   fprintf(stderr, "Function not supported in PMD\n");
close_file(pkg);
 }
 
diff --git a/doc/guides/bbdevs/turbo_sw.rst b/doc/guides/bbdevs/turbo_sw.rst
index 43c5129fd7..1e23e37027 100644
--- a/doc/guides/bbdevs/turbo_sw.rst
+++ b/doc/guides/bbdevs/turbo_sw.rst
@@ -149,7 +149,7 @@ Example:
 
 * For AVX512 machines with SDK libraries installed then both 4G and 5G can be 
enabled for full real time FEC capability.
   For AVX2 machines it is possible to only enable the 4G libraries and the PMD 
capabilities will be limited to 4G FEC.
-  If no library is present then the PMD driver will still build but its 
capabilities will be limited accordingly.
+  If no library is present then the PMD will still build but its capabilities 
will be limited accordingly.
 
 
 To use the PMD in an application, user must:
diff --git a/doc/guides/cryptodevs/virtio.rst b/doc/guides/cryptodevs/virtio.rst
index 8b96446ff2..ce4d43519a 100644
--- a/doc/guides/cryptodevs/virtio.rst
+++ b/doc/guides/cryptodevs/virtio.rst
@@ -73,7 +73,7 @@ number of the virtio-crypto device:
 echo -n :00:04.0 > /sys/bus/pci/drivers/virtio-pci/unbind
 echo "1af4 1054" > /sys/bu

RE: [dpdk-dev] Minutes of Technical Board Meeting, 2021-Oct-27

2021-11-18 Thread Honnappa Nagarahalli


> > > There was a comment to remove the TLV length. I will do that next
> > > version with implementation.
> > >
> > > Identified the following set of work for this.
> > >
> > > 1) Common code at lib/dwa/
> > > 2) Marvell DPU based driver at drivers/dwa/cnxk/
> > > 3) Test application at app/test-dwa/
> > > 4) It is possible to have an SW driver(To allow non-specialized HW
> > > to use the
> > > framework) for this by:
> > > a) Emulate DWA HW as a separate DPDK process
> > > b) Add drivers/dwa/sw/ and use memif driver so to create a
> > > communication channel between emulated DWA HW process and DPDK
> application.
> > Why use memif driver? Why not ring-pmd?
> 
> Planning to emulate the DWA accelerator functional model as a separate DPDK
> process in the SW case.
You mean the primary and secondary processes correct?

> Therefore memif is the ideal choice as it supports zero-copy of the data as
> well.
> https://doc.dpdk.org/guides/nics/memif.html
Zero-copy in memif is nothing but exchanging pointers to shared data.
The rings work across the primary and secondary processes and are always 
zero-copy.
We are doing some perf comparisons between memif and rings, will let you know 
once we have the data.

> 
> >
> > > c) Add drivers/dwa/sw/profiles//l3fwd - To implement l3fwd profile
> > > using DPDK libraries for SW driver.
> > >
> > > I think, Item (4) aka SW drivers as useful(We don't need to
> > > implement for all profiles, I think, just for  l3fwd it make sense
> > > to add, to allow to use of the framework in just SW mode).
> > > Is there any opinion on adding item (4) in DPDK? I saw mixed
> > > opinions earlier on this. I would like to understand, Is there any
> > > objection to doing
> > > item(4) in DPDK as it needs a good amount of work and  I don't want
> > > to do throw it away if the community doesn't like this.
> > > Any opinion?
> > >
> > > [1]
> > > http://mails.dpdk.org/archives/dev/2021-October/226070.html
> > >
> > 


RE: [EXT] [PATCH -v1] bbdev: update num_ops type to be uint32_t in rte_bbdev_enc_op_alloc_bulk

2021-11-18 Thread Chautru, Nicolas
Hi Akhil, 
The change below from Mingshan is not for 21.11; this is targeting the following 
release. It should have been mentioned in the cover letter. 
Thanks
Nic


> -Original Message-
> From: Akhil Goyal 
> Sent: Wednesday, November 17, 2021 11:21 PM
> To: Zhang, Mingshan ; dev@dpdk.org;
> akhil.go...@nxp.com
> Cc: Chautru, Nicolas 
> Subject: RE: [EXT] [PATCH -v1] bbdev: update num_ops type to be uint32_t in
> rte_bbdev_enc_op_alloc_bulk
> 
> > From: Mingshan Zhang 
> 
> Title is too big. Please run ./devtools/check-git-log.sh.
> >
> > update num_ops type to be uint32_t in rte_bbdev_enc_op_alloc_bulk
> Explain the need for this change. We only take critical fixes in the APIs for
> RC4.
> Bbdev APIs are not experimental anymore. Please rebase on TOT.
> This change is an API change which may need deprecation notice from next
> release.
> 
> >
> > Signed-off-by: Mingshan Zhang 
> > ---
> >  doc/guides/prog_guide/bbdev.rst | 4 ++--
> >  lib/bbdev/rte_bbdev_op.h| 4 ++--
> >  2 files changed, 4 insertions(+), 4 deletions(-)
> >
> > diff --git a/doc/guides/prog_guide/bbdev.rst
> > b/doc/guides/prog_guide/bbdev.rst index 70fa01a..c330e08 100644
> > --- a/doc/guides/prog_guide/bbdev.rst
> > +++ b/doc/guides/prog_guide/bbdev.rst
> > @@ -387,10 +387,10 @@ allocate bbdev operations of a specific type
> > from a given bbdev operation mempoo  .. code-block:: c
> >
> >  int rte_bbdev_enc_op_alloc_bulk(struct rte_mempool *mempool,
> > -struct rte_bbdev_enc_op **ops, uint16_t num_ops)
> > +struct rte_bbdev_enc_op **ops, uint32_t num_ops)
> >
> >  int rte_bbdev_dec_op_alloc_bulk(struct rte_mempool *mempool,
> > -struct rte_bbdev_dec_op **ops, uint16_t num_ops)
> > +struct rte_bbdev_dec_op **ops, uint32_t num_ops)
> >
> >  ``rte_bbdev_*_op_free_bulk()`` is called by the application to return
> > an  operation to its allocating pool.
> > diff --git a/lib/bbdev/rte_bbdev_op.h b/lib/bbdev/rte_bbdev_op.h index
> > 5512859..f074b35 100644
> > --- a/lib/bbdev/rte_bbdev_op.h
> > +++ b/lib/bbdev/rte_bbdev_op.h
> > @@ -867,7 +867,7 @@ struct rte_mempool *  __rte_experimental  static
> > inline int  rte_bbdev_enc_op_alloc_bulk(struct rte_mempool *mempool,
> > -   struct rte_bbdev_enc_op **ops, uint16_t num_ops)
> > +   struct rte_bbdev_enc_op **ops, uint32_t num_ops)
> >  {
> > struct rte_bbdev_op_pool_private *priv;
> > int ret;
> > @@ -904,7 +904,7 @@ struct rte_mempool *  __rte_experimental  static
> > inline int  rte_bbdev_dec_op_alloc_bulk(struct rte_mempool *mempool,
> > -   struct rte_bbdev_dec_op **ops, uint16_t num_ops)
> > +   struct rte_bbdev_dec_op **ops, uint32_t num_ops)
> >  {
> > struct rte_bbdev_op_pool_private *priv;
> > int ret;
> > --
> > 1.8.3.1



[PATCH v2] common/mlx5: fix mempool registration

2021-11-18 Thread Dmitry Kozlyuk
Mempool registration was not correctly processing
mempools with RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF flag set
("pinned mempools" for short), because it is not known
at registration time whether the mempool is a pktmbuf one,
and its elements may not yet be initialized to analyze them.
Attempts had been made to recognize such pools,
but there was no robust solution; only the owner of a mempool
(the application or a device) knows its type.
This patch extends common/mlx5 registration code
to accept a hint that the mempool is a pinned one
and uses this capability from net/mlx5 driver.

1. Remove all code assuming pktmbuf pool type
   or trying to recognize the type of a pool.
2. Register pinned mempools used for Rx
   and their external memory on port start.
   Populate the MR cache with all their MRs.
3. Change Tx slow path logic as follows:
   3.1. Search the mempool database for a memory region (MR)
by the mbuf pool and its buffer address.
   3.2. If no MR for the address is found for the mempool,
and the mempool contains only pinned external buffers,
perform the mempool registration of the mempool
and its external pinned memory.
   3.3. Fall back to using page-based MRs in other cases
(for example, a buffer with externally attached memory,
but not from a pinned mempool).

Fixes: 690b2a88c2f7 ("common/mlx5: add mempool registration facilities")
Fixes: fec28ca0e3a9 ("net/mlx5: support mempool registration")

Signed-off-by: Dmitry Kozlyuk 
Reviewed-by: Matan Azrad 
Reviewed-by: Viacheslav Ovsiienko 
---
v2: 1) rebase on ToT
2) fix MR cache population

 drivers/common/mlx5/mlx5_common.c|  11 +-
 drivers/common/mlx5/mlx5_common_mp.c |   4 +-
 drivers/common/mlx5/mlx5_common_mp.h |  10 +-
 drivers/common/mlx5/mlx5_common_mr.c | 166 ---
 drivers/common/mlx5/mlx5_common_mr.h |  15 +--
 drivers/common/mlx5/version.map  |   1 +
 drivers/net/mlx5/linux/mlx5_mp_os.c  |   3 +-
 drivers/net/mlx5/mlx5_rxq.c  |   2 +-
 drivers/net/mlx5/mlx5_trigger.c  |  40 ++-
 9 files changed, 158 insertions(+), 94 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_common.c 
b/drivers/common/mlx5/mlx5_common.c
index 66c2c08b7d..f1650f94c6 100644
--- a/drivers/common/mlx5/mlx5_common.c
+++ b/drivers/common/mlx5/mlx5_common.c
@@ -317,9 +317,9 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char 
*addr, size_t size)
  */
 static int
 mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
- struct rte_mempool *mp)
+ struct rte_mempool *mp, bool is_extmem)
 {
-   return mlx5_mr_mempool_register(cdev, mp);
+   return mlx5_mr_mempool_register(cdev, mp, is_extmem);
 }
 
 /**
@@ -353,7 +353,7 @@ mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void 
*arg)
struct mlx5_common_device *cdev = arg;
int ret;
 
-   ret = mlx5_dev_mempool_register(cdev, mp);
+   ret = mlx5_dev_mempool_register(cdev, mp, false);
if (ret < 0 && rte_errno != EEXIST)
DRV_LOG(ERR,
"Failed to register existing mempool %s for PD %p: %s",
@@ -390,13 +390,10 @@ mlx5_dev_mempool_event_cb(enum rte_mempool_event event, 
struct rte_mempool *mp,
  void *arg)
 {
struct mlx5_common_device *cdev = arg;
-   bool extmem = mlx5_mempool_is_extmem(mp);
 
switch (event) {
case RTE_MEMPOOL_EVENT_READY:
-   if (extmem)
-   break;
-   if (mlx5_dev_mempool_register(cdev, mp) < 0)
+   if (mlx5_dev_mempool_register(cdev, mp, false) < 0)
DRV_LOG(ERR,
"Failed to register new mempool %s for PD %p: 
%s",
mp->name, cdev->pd, rte_strerror(rte_errno));
diff --git a/drivers/common/mlx5/mlx5_common_mp.c 
b/drivers/common/mlx5/mlx5_common_mp.c
index 536d61f66c..a7a671b7c5 100644
--- a/drivers/common/mlx5/mlx5_common_mp.c
+++ b/drivers/common/mlx5/mlx5_common_mp.c
@@ -65,7 +65,8 @@ mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, 
uintptr_t addr)
  */
 int
 mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
-   struct rte_mempool *mempool, bool reg)
+   struct rte_mempool *mempool, bool reg,
+   bool is_extmem)
 {
struct rte_mp_msg mp_req;
struct rte_mp_msg *mp_res;
@@ -82,6 +83,7 @@ mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
 MLX5_MP_REQ_MEMPOOL_UNREGISTER;
mp_init_port_agnostic_msg(&mp_req, type);
arg->mempool = mempool;
+   arg->is_extmem = is_extmem;
arg->cdev = cdev;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
diff --git a/drivers/common/mlx5/mlx5_common_mp.h 
b/drivers/common/mlx5/mlx5_common_mp.h
index b1e3a41a20..4599ba8f92 100644
--- a/drivers/common/mlx5/mlx5_common_mp.h
+++ b/drivers/common/mlx5/m

Re: [PATCH v1] gpudev: return EINVAL if invalid input pointer for free and unregister

2021-11-18 Thread Stephen Hemminger
On Thu, 18 Nov 2021 19:28:02 +
 wrote:

> diff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c
> index 2b174d8bd5..97575ed979 100644
> --- a/lib/gpudev/gpudev.c
> +++ b/lib/gpudev/gpudev.c
> @@ -576,6 +576,11 @@ rte_gpu_mem_free(int16_t dev_id, void *ptr)
>   return -rte_errno;
>   }
>  
> + if (ptr == NULL) {
> + rte_errno = EINVAL;
> + return -rte_errno;
> + }
> +

The convention for free(), and rte_free() is that calling free
with a NULL pointer is a nop. Why not follow those?

This would keep programmers from having to view GPU as a
special case.


Re: [PATCH v1] gpudev: return EINVAL if invalid input pointer for free and unregister

2021-11-18 Thread Elena Agostini
> From: Stephen Hemminger 
> Date: Thursday, 18 November 2021 at 17:21
> To: Elena Agostini 
> Cc: dev@dpdk.org 
> Subject: Re: [PATCH v1] gpudev: return EINVAL if invalid input pointer for 
> free and unregister
> External email: Use caution opening links or attachments>
>
> On Thu, 18 Nov 2021 19:28:02 +
>  wrote:>
> > diff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c
> > index 2b174d8bd5..97575ed979 100644
> > --- a/lib/gpudev/gpudev.c
> > +++ b/lib/gpudev/gpudev.c
> > @@ -576,6 +576,11 @@ rte_gpu_mem_free(int16_t dev_id, void *ptr)
> >   return -rte_errno;
> >   }
> >
> > + if (ptr == NULL) {
> > + rte_errno = EINVAL;
> > + return -rte_errno;
> > + }
> > +>
> The convention for free(), and rte_free() is that calling free
> with a NULL pointer is a nop. Why not follow those?>
> This would keep programmers from having to view GPU as a
> special case.

Please look at v2 here 
https://patches.dpdk.org/project/dpdk/patch/2028203354.25355-1-eagost...@nvidia.com/


Re: [dpdk-dev] Minutes of Technical Board Meeting, 2021-Oct-27

2021-11-18 Thread Jerin Jacob
On Thu, Nov 18, 2021 at 8:44 PM Honnappa Nagarahalli
 wrote:
>
> 
>
> > > > There was a comment to remove the TLV length. I will do that next
> > > > version with implementation.
> > > >
> > > > Identified the following set of work for this.
> > > >
> > > > 1) Common code at lib/dwa/
> > > > 2) Marvell DPU based driver at drivers/dwa/cnxk/
> > > > 3) Test application at app/test-dwa/
> > > > 4) It is possible to have an SW driver(To allow non-specialized HW
> > > > to use the
> > > > framework) for this by:
> > > > a) Emulate DWA HW as a separate DPDK process
> > > > b) Add drivers/dwa/sw/ and use memif driver so to create a
> > > > communication channel between emulated DWA HW process and DPDK
> > application.
> > > Why use memif driver? Why not ring-pmd?
> >
> > Planning to emulate the DWA accelerator functional model as a separate DPDK
> > process in the SW case.
> You mean the primary and secondary processes correct?

Primary and Primary. (DWA emulation as a separate primary process to
mimic the real-world scenario)

>
> > Therefore memif is the ideal choice as it supports zero-copy of the data as
> > well.
> > https://doc.dpdk.org/guides/nics/memif.html
> Zero-copy in memif is nothing but exchanging pointers to shared data.
> The rings work across the primary and secondary processes and are always 
> zero-copy.
> We are doing some perf comparisons between memif and rings, will let you know 
> once we have the data.

Ok.
I think between primary to primary memif will be required.

>
> >
> > >
> > > > c) Add drivers/dwa/sw/profiles//l3fwd - To implement l3fwd profile
> > > > using DPDK libraries for SW driver.
> > > >
> > > > I think, Item (4) aka SW drivers as useful(We don't need to
> > > > implement for all profiles, I think, just for  l3fwd it make sense
> > > > to add, to allow to use of the framework in just SW mode).
> > > > Is there any opinion on adding item (4) in DPDK? I saw mixed
> > > > opinions earlier on this. I would like to understand, Is there any
> > > > objection to doing
> > > > item(4) in DPDK as it needs a good amount of work and  I don't want
> > > > to do throw it away if the community doesn't like this.
> > > > Any opinion?
> > > >
> > > > [1]
> > > > http://mails.dpdk.org/archives/dev/2021-October/226070.html
> > > >
> > > 


[Bug 888] cannot close memif port

2021-11-18 Thread bugzilla
https://bugs.dpdk.org/show_bug.cgi?id=888

Bug ID: 888
   Summary: cannot close memif port
   Product: DPDK
   Version: 21.11
  Hardware: x86
OS: Linux
Status: UNCONFIRMED
  Severity: normal
  Priority: Normal
 Component: ethdev
  Assignee: dev@dpdk.org
  Reporter: sunnyla...@gmail.com
  Target Milestone: ---

Created attachment 179
  --> https://bugs.dpdk.org/attachment.cgi?id=179&action=edit
code to reproduce bug

I'm trying out DPDK 21.11-rc3 and noticed a regression.

If a port using net_memif driver has been started, it is not possible to close
the port.
This is because the rte_eth_dev_close function has a check that the port must
be stopped, but net_memif driver does not support dev_ops->dev_stop operation.
Consequently, it becomes impossible to stop and close the memif port.

-- 
You are receiving this mail because:
You are the assignee for the bug.

[PATCH] net/memif: allow stopping and closing device

2021-11-18 Thread Junxiao Shi
Bugzilla ID: 888
Fixes: febc855b358e ("ethdev: forbid closing started device")

Signed-off-by: Junxiao Shi 
---
 drivers/net/memif/rte_eth_memif.c | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/net/memif/rte_eth_memif.c 
b/drivers/net/memif/rte_eth_memif.c
index 43d7378329..e3d523af57 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -1260,6 +1260,13 @@ memif_dev_start(struct rte_eth_dev *dev)
return ret;
 }
 
+static int
+memif_dev_stop(struct rte_eth_dev *dev)
+{
+   memif_disconnect(dev);
+   return 0;
+}
+
 static int
 memif_dev_close(struct rte_eth_dev *dev)
 {
@@ -1268,7 +1275,6 @@ memif_dev_close(struct rte_eth_dev *dev)
 
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
memif_msg_enq_disconnect(pmd->cc, "Device closed", 0);
-   memif_disconnect(dev);
 
for (i = 0; i < dev->data->nb_rx_queues; i++)
(*dev->dev_ops->rx_queue_release)(dev, i);
@@ -1276,8 +1282,6 @@ memif_dev_close(struct rte_eth_dev *dev)
(*dev->dev_ops->tx_queue_release)(dev, i);
 
memif_socket_remove_device(dev);
-   } else {
-   memif_disconnect(dev);
}
 
rte_free(dev->process_private);
@@ -1515,6 +1519,7 @@ memif_rx_queue_intr_disable(struct rte_eth_dev *dev, 
uint16_t qid __rte_unused)
 
 static const struct eth_dev_ops ops = {
.dev_start = memif_dev_start,
+   .dev_stop = memif_dev_stop,
.dev_close = memif_dev_close,
.dev_infos_get = memif_dev_info,
.dev_configure = memif_dev_configure,
-- 
2.17.1



RE: [dpdk-dev] Minutes of Technical Board Meeting, 2021-Oct-27

2021-11-18 Thread Honnappa Nagarahalli

> >
> > > > > There was a comment to remove the TLV length. I will do that
> > > > > next version with implementation.
> > > > >
> > > > > Identified the following set of work for this.
> > > > >
> > > > > 1) Common code at lib/dwa/
> > > > > 2) Marvell DPU based driver at drivers/dwa/cnxk/
> > > > > 3) Test application at app/test-dwa/
> > > > > 4) It is possible to have an SW driver(To allow non-specialized
> > > > > HW to use the
> > > > > framework) for this by:
> > > > > a) Emulate DWA HW as a separate DPDK process
> > > > > b) Add drivers/dwa/sw/ and use memif driver so to create a
> > > > > communication channel between emulated DWA HW process and
> DPDK
> > > application.
> > > > Why use memif driver? Why not ring-pmd?
> > >
> > > Planning to emulation DWA accelerator functional model as a separate
> > > DPDK process in SW case.
> > You mean the primary and secondary processes correct?
> 
> Primary and Primary. (DWA emulation as a separate primary process to mimic
> the real-world scenario)
> 
> >
> > > Therefore memif is the ideal choice as it supports zero-copy of the
> > > data as well.
> > > https://doc.dpdk.org/guides/nics/memif.html
> > Zero-copy in memif is nothing but exchanging pointers to shared data.
> > The rings work across the primary and secondary processes and are always
> zero-copy.
> > We are doing some perf comparisons between memif and rings, will let you
> know once we have the data.
> 
> Ok.
> I think between primary to primary memif will be required.
Agree, memif is easier/required between primary to primary. But, using it with 
zero-copy would need additional code on the application part in this scenario. 
The memory being shared needs to be mapped in both the processes.

> 
> >
> > >
> > > >
> > > > > c) Add drivers/dwa/sw/profiles//l3fwd - To implement l3fwd
> > > > > profile using DPDK libraries for SW driver.
> > > > >
> > > > > I think, Item (4) aka SW drivers as useful(We don't need to
> > > > > implement for all profiles, I think, just for  l3fwd it make
> > > > > sense to add, to allow to use of the framework in just SW mode).
> > > > > Is there any opinion on adding item (4) in DPDK? I saw mixed
> > > > > opinions earlier on this. I would like to understand, Is there
> > > > > any objection to doing
> > > > > item(4) in DPDK as it needs a good amount of work and  I don't
> > > > > want to do throw it away if the community doesn't like this.
> > > > > Any opinion?
> > > > >
> > > > > [1]
> > > > > http://mails.dpdk.org/archives/dev/2021-October/226070.html
> > > > >
> > > > 


Re: [PATCH v1] gpudev: return EINVAL if invalid input pointer for free and unregister

2021-11-18 Thread Tyler Retzlaff
On Thu, Nov 18, 2021 at 07:28:02PM +, eagost...@nvidia.com wrote:
> From: Elena Agostini 
> 
> Signed-off-by: Elena Agostini 
> ---
>  lib/gpudev/gpudev.c | 10 ++
>  lib/gpudev/rte_gpudev.h |  2 ++
>  2 files changed, 12 insertions(+)
> 
> diff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c
> index 2b174d8bd5..97575ed979 100644
> --- a/lib/gpudev/gpudev.c
> +++ b/lib/gpudev/gpudev.c
> @@ -576,6 +576,11 @@ rte_gpu_mem_free(int16_t dev_id, void *ptr)
>   return -rte_errno;
>   }
>  
> + if (ptr == NULL) {
> + rte_errno = EINVAL;
> + return -rte_errno;
> + }

in general dpdk has real problems with how it indicates that an error
occurred and what error occurred consistently.

some api's return 0 on success
  and maybe return -errno if ! 0
  and maybe return errno if ! 0
  and maybe set rte_errno if ! 0

some api's return -1 on failure
  and set rte_errno if -1

some api's return < 0 on failure
  and maybe set rte_errno
  and maybe return -errno
  and maybe set rte_errno and return -rte_errno

this isn't isiolated to only this change but since additions and context
in this patch highlight it maybe it's a good time to bring it up.

it's frustrating to have to carefully read the implementation every time
you want to make a function call to make sure you're handling the flavor
of error reporting for a particular function.

if this is new code could we please clearly identify the current best
practice and follow it as a standard going forward for all new public
apis.

thanks!


RE: [dpdk-dev] [Bug 826] red_autotest random failures

2021-11-18 Thread Liguzinski, WojciechX
Hi,

I was trying to reproduce this test failure, but for me RED tests are passing. 
I was running the exact test command like the one described in Bug 826 - 
'red_autotest' on the current main branch.

Here is an example when DPDK is build without RTE_SCHED_CMAN enabled, but with 
this flag set to true tests are also not failing.

root@silpixa00400629:~/wojtek/dpdk/build/app/test# ./dpdk-test '-l 0-15' 
--file-prefix=red_autotest
EAL: Detected CPU lcores: 96
EAL: Detected NUMA nodes: 2
EAL: Detected static linkage of DPDK
EAL: Multi-process socket /var/run/dpdk/red_autotest/mp_socket
EAL: Selected IOVA mode 'VA'
EAL: VFIO support initialized
TELEMETRY: No legacy callbacks, legacy socket not created
APP: HPET is not enabled, using TSC as default timer
RTE>>red_autotest


functional test 1 : use one rte_red configuration,
increase average queue size to various levels,
compare drop rate to drop probability

avg queue size enqueued   droppeddrop prob %
drop rate %diff % tolerance %
6  1  0  0. 
0. 0. 50.
12 1  0  0. 
0. 0. 50.
18 1  0  0. 
0. 0. 50.
24 1  0  0. 
0. 0. 50.
30 1  0  0. 
0. 0. 50.
36 9961   39 0.4167 
0.3900 0. 50.
42 9898   1021.0417 
1.0200 0. 50.
48 9835   1651.6667 
1.6500 0. 50.
54 9785   2152.2917 
2.1500 0. 50.
60 9703   2972.9167 
2.9700 0. 50.
66 9627   3733.5417 
3.7300 0. 50.
72 9580   4204.1667 
4.2000 0. 50.
78 9511   4894.7917 
4.8900 0. 50.
84 9462   5385.4167 
5.3800 0. 50.
90 9398   6026.0417 
6.0200 0. 50.
96 9366   6346.6667 
6.3400 0. 50.
1029267   7337.2917 
7.3300 0. 50.
1089212   7887.9167 
7.8800 0. 50.
1149146   8548.5417 
8.5400 0. 50.
1209102   8989.1667 
8.9800 0. 50.
1268984   1016   9.7917 
10.16000. 50.
1320  1  100.   
100.   0. 50.
1380  1  100.   
100.   0. 50.
1440  1  100.   
100.   0. 50.
--


functional test 2 : use several RED configurations,
increase average queue size to just below maximum threshold,
compare drop rate to drop probability

RED config avg queue size min threshold  max threshold  drop prob %drop 
rate %diff % tolerance %
0  12732 1289.8958 
10.01000. 50.
1  12732 1284.9479 
4.9700 0. 50.
2  12732 1283.2986 
2.6800 0. 50.
3  12732 128

[PATCH] net/bnxt: fix crash caused by error recovery

2021-11-18 Thread Somnath Kotur
bnxt_stop_rxtx() does not stop data path processing as intended
as it does not update the recently introduced fast-path pointers
'(struct rte_eth_fp_ops)->rx_pkt_burst'. Since both the burst routines
only use the fast-path pointer, the real burst routines get invoked
instead of the dummy ones set by bnxt_stop_rxtx() leading to crashes
in the data path (e.g. dereferencing freed structures)

Fix the segfault by updating the fast-path pointer as well

Fixes: c87d435a4d79 ("ethdev: copy fast-path API into separate structure")

Signed-off-by: Somnath Kotur 
Reviewed-by: Ajit Khaparde 
---
 drivers/net/bnxt/bnxt_cpr.c| 9 +
 drivers/net/bnxt/bnxt_ethdev.c | 7 ++-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 6bb70d516e..a43b22a8f8 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -387,4 +387,13 @@ void bnxt_stop_rxtx(struct bnxt *bp)
 {
bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+
+   rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst =
+   bp->eth_dev->rx_pkt_burst;
+   rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst =
+   bp->eth_dev->tx_pkt_burst;
+   rte_mb();
+
+   /* Allow time for threads to exit the real burst functions. */
+   rte_delay_ms(100);
 }
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 4413b5d72e..c1bdf9a921 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4323,6 +4323,8 @@ static void bnxt_dev_recover(void *arg)
 
/* Clear Error flag so that device re-init should happen */
bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
+   PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n",
+   bp->eth_dev->data->port_id);
 
rc = bnxt_check_fw_ready(bp);
if (rc)
@@ -4347,7 +4349,8 @@ static void bnxt_dev_recover(void *arg)
if (rc)
goto err_start;
 
-   PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
+   PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n",
+   bp->eth_dev->data->port_id);
pthread_mutex_unlock(&bp->err_recovery_lock);
 
return;
@@ -4372,6 +4375,8 @@ void bnxt_dev_reset_and_resume(void *arg)
int rc;
 
bnxt_dev_cleanup(bp);
+   PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n",
+   bp->eth_dev->data->port_id);
 
bnxt_wait_for_device_shutdown(bp);
 
-- 
2.28.0.497.g54e85e7



Re: [dpdk-dev] Minutes of Technical Board Meeting, 2021-Oct-27

2021-11-18 Thread Jerin Jacob
On Fri, Nov 19, 2021 at 1:39 AM Honnappa Nagarahalli
 wrote:
>
> 
> > >
> > > > > > There was a comment to remove the TLV length. I will do that
> > > > > > next version with implementation.
> > > > > >
> > > > > > Identified the following set of work for this.
> > > > > >
> > > > > > 1) Common code at lib/dwa/
> > > > > > 2) Marvell DPU based driver at drivers/dwa/cnxk/
> > > > > > 3) Test application at app/test-dwa/
> > > > > > 4) It is possible to have an SW driver(To allow non-specialized
> > > > > > HW to use the
> > > > > > framework) for this by:
> > > > > > a) Emulate DWA HW as a separate DPDK process
> > > > > > b) Add drivers/dwa/sw/ and use memif driver so to create a
> > > > > > communication channel between emulated DWA HW process and
> > DPDK
> > > > application.
> > > > > Why use memif driver? Why not ring-pmd?
> > > >
> > > > Planning to emulation DWA accelerator functional model as a separate
> > > > DPDK process in SW case.
> > > You mean the primary and secondary processes correct?
> >
> > Primary and Primary. (DWA emulation as a separate primary process to mimic
> > the real-world scenario)
> >
> > >
> > > > Therefore memif is the ideal choice as it supports zero-copy of the
> > > > data as well.
> > > > https://doc.dpdk.org/guides/nics/memif.html
> > > Zero-copy in memif is nothing but exchanging pointers to shared data.
> > > The rings work across the primary and secondary processes and are always
> > zero-copy.
> > > We are doing some perf comparisons between memif and rings, will let you
> > know once we have the data.
> >
> > Ok.
> > I think between primary to primary memif will be required.
> Agree, memif is easier/required between primary to primary. But, using it 
> with zero-copy would need additional code on the application part in this 
> scenario. The memory being shared needs to be mapped in both the processes.

The existing memif driver does it internally for all memsegs [1]. Even
if it is not, it will be abstracted in profile and driver, to make the
application transparent on transport aspects.

[1]

static int
memif_regions_init(struct rte_eth_dev *dev)
{
struct pmd_internals *pmd = dev->data->dev_private;
int ret;

/*
 * Zero-copy exposes dpdk memory.
 * Each memseg list will be represented by memif region.
 * Zero-copy regions indexing: memseg list idx + 1,
 * as we already have region 0 reserved for descriptors.
 */
if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) {
/* create region idx 0 containing descriptors */
ret = memif_region_init_shm(dev, 0);
if (ret < 0)
return ret;
ret = rte_memseg_walk(memif_region_init_zc, (void
*)dev->process_private);
if (ret < 0)
return ret;
} else {
                /* create one memory region containing rings and buffers */
ret = memif_region_init_shm(dev, /* has buffers */ 1);
if (ret < 0)
return ret;
}

return 0;
}

>
> >
> > >
> > > >
> > > > >
> > > > > > c) Add drivers/dwa/sw/profiles//l3fwd - To implement l3fwd
> > > > > > profile using DPDK libraries for SW driver.
> > > > > >
> > > > > > I think, Item (4) aka SW drivers as useful(We don't need to
> > > > > > implement for all profiles, I think, just for  l3fwd it make
> > > > > > sense to add, to allow to use of the framework in just SW mode).
> > > > > > Is there any opinion on adding item (4) in DPDK? I saw mixed
> > > > > > opinions earlier on this. I would like to understand, Is there
> > > > > > any objection to doing
> > > > > > item(4) in DPDK as it needs a good amount of work and  I don't
> > > > > > want to do throw it away if the community doesn't like this.
> > > > > > Any opinion?
> > > > > >
> > > > > > [1]
> > > > > > http://mails.dpdk.org/archives/dev/2021-October/226070.html
> > > > > >
> > > > > 


[dpdk-dev] [PATCH] doc: fix memif driver acronyms

2021-11-18 Thread jerinj
From: Jerin Jacob 

The commit d250589d5702 ("net/memif: replace master/slave arguments")
replaced master/slave terms to server/client terms.
Fix the documentation to reflect the same.

Fixes: d250589d5702 ("net/memif: replace master/slave arguments")
Cc: sta...@dpdk.org

Signed-off-by: Jerin Jacob 
---
 doc/guides/nics/memif.rst | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/doc/guides/nics/memif.rst b/doc/guides/nics/memif.rst
index d783f2d4a4..aca843640b 100644
--- a/doc/guides/nics/memif.rst
+++ b/doc/guides/nics/memif.rst
@@ -107,13 +107,13 @@ region n (no-zero-copy):
 
+---+-+
 | Rings | Buffers  
   |
 
+---+---+-+---+---+
-| S2M rings | M2S rings | packet buffer 0 | . | pb ((1 << 
pmd->run.log2_ring_size)*(s2m + m2s))-1 |
+| C2S rings | S2C rings | packet buffer 0 | . | pb ((1 << 
pmd->run.log2_ring_size)*(c2s + s2c))-1 |
 
+---+---+-+---+---+
 
-S2M OR M2S Rings:
+C2S OR S2C Rings:
 
 +++---+
-| ring 0 | ring 1 | ring num_s2m_rings - 1|
+| ring 0 | ring 1 | ring num_c2s_rings - 1|
 +++---+
 
 ring 0:
@@ -123,8 +123,8 @@ ring 0:
 +-+---+
 
 Descriptors are assigned packet buffers in order of rings creation. If we have 
one ring
-in each direction and ring size is 1024, then first 1024 buffers will belong 
to S2M ring and
-last 1024 will belong to M2S ring. In case of zero-copy, buffers are dequeued 
and
+in each direction and ring size is 1024, then first 1024 buffers will belong 
to C2S ring and
+last 1024 will belong to S2C ring. In case of zero-copy, buffers are dequeued 
and
 enqueued as needed.
 
 **Descriptor format**
@@ -193,7 +193,7 @@ region 0:
 +---+
 | Rings |
 +---+---+
-| S2M rings | M2S rings |
+| C2S rings | S2C rings |
 +---+---+
 
 region n:
-- 
2.34.0



[PATCH] net/bnxt: fail init when mbuf allocation fails

2021-11-18 Thread Ajit Khaparde
Fix driver init when Rx mbuf allocation fails.
If we continue to use the driver with whatever rings were
created successfully, it can cause unexpected behavior.

Signed-off-by: Ajit Khaparde 
Reviewed-by: Somnath Kotur 
Reviewed-by: Kalesh AP 
---
 drivers/net/bnxt/bnxt_hwrm.c | 41 ++--
 drivers/net/bnxt/bnxt_ring.c |  5 +++--
 drivers/net/bnxt/bnxt_rxr.c  |  8 +++
 3 files changed, 32 insertions(+), 22 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 7f51c61097..f53f8632fe 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2633,6 +2633,8 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
cpr = bp->rx_queues[i]->cp_ring;
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[i].fw_stats_ctx = -1;
+   if (cpr == NULL)
+   continue;
rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
if (rc)
return rc;
@@ -2640,6 +2642,8 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
cpr = bp->tx_queues[i]->cp_ring;
+   if (cpr == NULL)
+   continue;
rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
if (rc)
return rc;
@@ -2697,16 +2701,17 @@ void bnxt_free_cp_ring(struct bnxt *bp, struct 
bnxt_cp_ring_info *cpr)
 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 {
struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
-   struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-   struct bnxt_ring *ring = rxr->rx_ring_struct;
-   struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+   struct bnxt_rx_ring_info *rxr = rxq ? rxq->rx_ring : NULL;
+   struct bnxt_ring *ring = rxr ? rxr->rx_ring_struct : NULL;
+   struct bnxt_cp_ring_info *cpr = rxq ? rxq->cp_ring : NULL;
 
if (BNXT_HAS_RING_GRPS(bp))
bnxt_hwrm_ring_grp_free(bp, queue_index);
 
-   bnxt_hwrm_ring_free(bp, ring,
-   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
-   cpr->cp_ring_struct->fw_ring_id);
+   if (ring != NULL && cpr != NULL)
+   bnxt_hwrm_ring_free(bp, ring,
+   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+   cpr->cp_ring_struct->fw_ring_id);
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
 
@@ -2715,22 +2720,26 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int 
queue_index)
 * but we may have to deal with agg ring struct before the offload
 * flags are updated.
 */
-   if (!bnxt_need_agg_ring(bp->eth_dev) || rxr->ag_ring_struct == NULL)
+   if (!bnxt_need_agg_ring(bp->eth_dev) ||
+   (rxr && rxr->ag_ring_struct == NULL))
goto no_agg;
 
-   ring = rxr->ag_ring_struct;
-   bnxt_hwrm_ring_free(bp, ring,
-   BNXT_CHIP_P5(bp) ?
-   HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
-   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
-   cpr->cp_ring_struct->fw_ring_id);
+   ring = rxr ? rxr->ag_ring_struct : NULL;
+   if (ring != NULL && cpr != NULL) {
+   bnxt_hwrm_ring_free(bp, ring,
+   BNXT_CHIP_P5(bp) ?
+   HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
+   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+   cpr->cp_ring_struct->fw_ring_id);
+   }
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
 
 no_agg:
-   bnxt_hwrm_stat_ctx_free(bp, cpr);
-
-   bnxt_free_cp_ring(bp, cpr);
+   if (cpr != NULL) {
+   bnxt_hwrm_stat_ctx_free(bp, cpr);
+   bnxt_free_cp_ring(bp, cpr);
+   }
 
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 7940d489a1..dc437f314e 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -648,8 +648,9 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int 
queue_index)
 
if (rxq->rx_started) {
if (bnxt_init_one_rx_ring(rxq)) {
-   PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
-   bnxt_rx_queue_release_op(bp->eth_dev, queue_index);
+   PMD_DRV_LOG(ERR,
+   "ring%d bnxt_init_one_rx_ring failed!\n",
+   queue_index);
rc = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/

Re: [PATCH] net/bnxt: fix crash caused by error recovery

2021-11-18 Thread Ajit Khaparde
On Thu, Nov 18, 2021 at 7:57 PM Somnath Kotur
 wrote:
>
> bnxt_stop_rxtx() does not stop data path processing as intended
> as it does not update the recently introduced fast-path pointers
> '(struct rte_eth_fp_ops)->rx_pkt_burst'. Since both the burst routines
> only use the fast-path pointer, the real burst routines get invoked
> instead of the dummy ones set by bnxt_stop_rxtx() leading to crashes
> in the data path (e.g. dereferencing freed structures)
>
> Fix the segfault by updating the fast-path pointer as well
>
> Fixes: c87d435a4d79 ("ethdev: copy fast-path API into separate structure")
>
> Signed-off-by: Somnath Kotur 
> Reviewed-by: Ajit Khaparde 
Patch applied to dpdk-next-net-brcm.

> ---
>  drivers/net/bnxt/bnxt_cpr.c| 9 +
>  drivers/net/bnxt/bnxt_ethdev.c | 7 ++-
>  2 files changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
> index 6bb70d516e..a43b22a8f8 100644
> --- a/drivers/net/bnxt/bnxt_cpr.c
> +++ b/drivers/net/bnxt/bnxt_cpr.c
> @@ -387,4 +387,13 @@ void bnxt_stop_rxtx(struct bnxt *bp)
>  {
> bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
> bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
> +
> +   rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst =
> +   bp->eth_dev->rx_pkt_burst;
> +   rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst =
> +   bp->eth_dev->tx_pkt_burst;
> +   rte_mb();
> +
> +   /* Allow time for threads to exit the real burst functions. */
> +   rte_delay_ms(100);
>  }
> diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
> index 4413b5d72e..c1bdf9a921 100644
> --- a/drivers/net/bnxt/bnxt_ethdev.c
> +++ b/drivers/net/bnxt/bnxt_ethdev.c
> @@ -4323,6 +4323,8 @@ static void bnxt_dev_recover(void *arg)
>
> /* Clear Error flag so that device re-init should happen */
> bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
> +   PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n",
> +   bp->eth_dev->data->port_id);
>
> rc = bnxt_check_fw_ready(bp);
> if (rc)
> @@ -4347,7 +4349,8 @@ static void bnxt_dev_recover(void *arg)
> if (rc)
> goto err_start;
>
> -   PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
> +   PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n",
> +   bp->eth_dev->data->port_id);
> pthread_mutex_unlock(&bp->err_recovery_lock);
>
> return;
> @@ -4372,6 +4375,8 @@ void bnxt_dev_reset_and_resume(void *arg)
> int rc;
>
> bnxt_dev_cleanup(bp);
> +   PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n",
> +   bp->eth_dev->data->port_id);
>
> bnxt_wait_for_device_shutdown(bp);
>
> --
> 2.28.0.497.g54e85e7
>


Re: [PATCH] net/bnxt: fail init when mbuf allocation fails

2021-11-18 Thread Ajit Khaparde
On Thu, Nov 18, 2021 at 9:40 PM Ajit Khaparde
 wrote:
>
> Fix driver init when Rx mbuf allocation fails.
> If we continue to use the driver with whatever rings were
> created successfully, it can cause unexpected behavior.
>
> Signed-off-by: Ajit Khaparde 
> Reviewed-by: Somnath Kotur 
> Reviewed-by: Kalesh AP 
Patch applied to dpdk-next-net-brcm.


> ---
>  drivers/net/bnxt/bnxt_hwrm.c | 41 ++--
>  drivers/net/bnxt/bnxt_ring.c |  5 +++--
>  drivers/net/bnxt/bnxt_rxr.c  |  8 +++
>  3 files changed, 32 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
> index 7f51c61097..f53f8632fe 100644
> --- a/drivers/net/bnxt/bnxt_hwrm.c
> +++ b/drivers/net/bnxt/bnxt_hwrm.c
> @@ -2633,6 +2633,8 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
> cpr = bp->rx_queues[i]->cp_ring;
> if (BNXT_HAS_RING_GRPS(bp))
> bp->grp_info[i].fw_stats_ctx = -1;
> +   if (cpr == NULL)
> +   continue;
> rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
> if (rc)
> return rc;
> @@ -2640,6 +2642,8 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
>
> for (i = 0; i < bp->tx_cp_nr_rings; i++) {
> cpr = bp->tx_queues[i]->cp_ring;
> +   if (cpr == NULL)
> +   continue;
> rc = bnxt_hwrm_stat_ctx_free(bp, cpr);
> if (rc)
> return rc;
> @@ -2697,16 +2701,17 @@ void bnxt_free_cp_ring(struct bnxt *bp, struct 
> bnxt_cp_ring_info *cpr)
>  void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
>  {
> struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
> -   struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
> -   struct bnxt_ring *ring = rxr->rx_ring_struct;
> -   struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
> +   struct bnxt_rx_ring_info *rxr = rxq ? rxq->rx_ring : NULL;
> +   struct bnxt_ring *ring = rxr ? rxr->rx_ring_struct : NULL;
> +   struct bnxt_cp_ring_info *cpr = rxq ? rxq->cp_ring : NULL;
>
> if (BNXT_HAS_RING_GRPS(bp))
> bnxt_hwrm_ring_grp_free(bp, queue_index);
>
> -   bnxt_hwrm_ring_free(bp, ring,
> -   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
> -   cpr->cp_ring_struct->fw_ring_id);
> +   if (ring != NULL && cpr != NULL)
> +   bnxt_hwrm_ring_free(bp, ring,
> +   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
> +   cpr->cp_ring_struct->fw_ring_id);
> if (BNXT_HAS_RING_GRPS(bp))
> bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
>
> @@ -2715,22 +2720,26 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int 
> queue_index)
>  * but we may have to deal with agg ring struct before the offload
>  * flags are updated.
>  */
> -   if (!bnxt_need_agg_ring(bp->eth_dev) || rxr->ag_ring_struct == NULL)
> +   if (!bnxt_need_agg_ring(bp->eth_dev) ||
> +   (rxr && rxr->ag_ring_struct == NULL))
> goto no_agg;
>
> -   ring = rxr->ag_ring_struct;
> -   bnxt_hwrm_ring_free(bp, ring,
> -   BNXT_CHIP_P5(bp) ?
> -   HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
> -   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
> -   cpr->cp_ring_struct->fw_ring_id);
> +   ring = rxr ? rxr->ag_ring_struct : NULL;
> +   if (ring != NULL && cpr != NULL) {
> +   bnxt_hwrm_ring_free(bp, ring,
> +   BNXT_CHIP_P5(bp) ?
> +   HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
> +   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
> +   cpr->cp_ring_struct->fw_ring_id);
> +   }
> if (BNXT_HAS_RING_GRPS(bp))
> bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
>
>  no_agg:
> -   bnxt_hwrm_stat_ctx_free(bp, cpr);
> -
> -   bnxt_free_cp_ring(bp, cpr);
> +   if (cpr != NULL) {
> +   bnxt_hwrm_stat_ctx_free(bp, cpr);
> +   bnxt_free_cp_ring(bp, cpr);
> +   }
>
> if (BNXT_HAS_RING_GRPS(bp))
> bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
> diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
> index 7940d489a1..dc437f314e 100644
> --- a/drivers/net/bnxt/bnxt_ring.c
> +++ b/drivers/net/bnxt/bnxt_ring.c
> @@ -648,8 +648,9 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int 
> queue_index)
>
> if (rxq->rx_started) {
> if (bnxt_init_one_rx_ring(rxq)) {
> -   PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
> -   bnxt_rx_queue_release_op(bp->eth_dev, queue_index);
> +   PM

RE: [PATCH v1] net/vhost: add queue status check

2021-11-18 Thread Li, Miao
Hi

> -Original Message-
> From: Maxime Coquelin 
> Sent: Tuesday, November 16, 2021 5:36 PM
> To: Li, Miao ; dev@dpdk.org
> Cc: Xia, Chenbo 
> Subject: Re: [PATCH v1] net/vhost: add queue status check
> 
> 
> 
> On 11/16/21 10:34, Maxime Coquelin wrote:
> >
> >
> > On 11/16/21 17:44, Miao Li wrote:
> >> This patch adds queue status check to make sure that vhost monitor
> >> address will not be got until the link between backend and frontend
> > s/got/gone/?
> >> up and the packets are allowed to be queued.
> >
> > It needs a fixes tag.

If we don't add this check, rte_vhost_get_monitor_addr will return -EINVAL when 
checking whether dev is null. But before returning, get_device() will be called and 
will print the error log "device not found". So we want to add this check and return 
-EINVAL before calling rte_vhost_get_monitor_addr. Without this check, the vhost 
monitor address will still not be obtained, but vhost will print the error log 
continuously. It has no functional impact, so I think it is not a fix. 

> >
> >> Signed-off-by: Miao Li 
> >> ---
> >>   drivers/net/vhost/rte_eth_vhost.c | 2 ++
> >>   1 file changed, 2 insertions(+)
> >>
> >> diff --git a/drivers/net/vhost/rte_eth_vhost.c
> >> b/drivers/net/vhost/rte_eth_vhost.c
> >> index 070f0e6dfd..9d600054d8 100644
> >> --- a/drivers/net/vhost/rte_eth_vhost.c
> >> +++ b/drivers/net/vhost/rte_eth_vhost.c
> >> @@ -1415,6 +1415,8 @@ vhost_get_monitor_addr(void *rx_queue, struct
> >> rte_power_monitor_cond *pmc)
> >>   int ret;
> >>   if (vq == NULL)
> >>   return -EINVAL;
> >> +    if (unlikely(rte_atomic32_read(&vq->allow_queuing) == 0))
> >> +    return -EINVAL;
> 
> Also, EINVAL might not be the right return value here.

I don't know which return value will be better. Do you have any suggestions? 
Thanks!

> 
> > How does it help?
> > What does prevent allow_queuing to become zero between the check and the
> > call to rte_vhost_get_monitor_addr?

This check will prevent vhost from printing the error log continuously.

> >
> > I think you need to implement the same logic as in eth_vhost_rx(), i.e.
> > check allow_queueing, set while_queueing, check allow_queueing, do your
> > stuff and clear while_queuing.

I think while_queuing is unnecessary because we only read the value in the vq, 
and this API will only be called as an RX callback.

Thanks,
Miao

> >
> >>   ret = rte_vhost_get_monitor_addr(vq->vid, vq->virtqueue_id,
> >>   &vhost_pmc);
> >>   if (ret < 0)
> >>
> >
> > Maxime



Re: [dpdk-dev] [Bug 826] red_autotest random failures

2021-11-18 Thread Thomas Monjalon
18/11/2021 23:10, Liguzinski, WojciechX:
> Hi,
> 
> I was trying to reproduce this test failure, but for me RED tests are 
> passing. 
> I was running the exact test command like the one described in Bug 826 - 
> 'red_autotest' on the current main branch.

The test is not always failing.
There are some failing conditions, please find them.
I think you should try in a container with more limited resources.