Re: [PATCH v2] ethdev: add Linux ethtool link mode conversion

2024-03-03 Thread Thomas Monjalon
01/03/2024 19:00, Stephen Hemminger:
> On Fri, 01 Mar 2024 16:20:56 +0100
> Thomas Monjalon  wrote:
> 
> > > > 
> > > > The use case is to get capabilities from the kernel driver via ethtool 
> > > > ioctl.
> > > >   
> > > 
> > > Sure, as it is adding kernel ethtool conversion, DPDK driver will get
> > > link from kernel driver, thanks for clarification.  
> > 
> > Yes the PMD uses ethtool API to get device capabilities.
> 
> Is this the old ioctl interface, or the new (and preferred) ethtool over
> netlink API?

mlx5 is using ioctl commands ETHTOOL_GSET and ETHTOOL_GLINKSETTINGS





[PATCH v2] eal/x86: improve rte_memcpy const size 16 performance

2024-03-03 Thread Morten Brørup
When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
In the case where the size is known to be 16 at build time, omit the
duplicate copy.

Reduced the amount of effectively copy-pasted code by using #ifdef
inside functions instead of outside functions.

Suggested-by: Stephen Hemminger 
Signed-off-by: Morten Brørup 
---
v2:
* For GCC, version 11 is required for proper AVX handling;
  if older GCC version, treat AVX as SSE.
  Clang does not have this issue.
  Note: Original code always treated AVX as SSE, regardless of compiler.
* Do not add copyright. (Stephen Hemminger)
---
 lib/eal/x86/include/rte_memcpy.h | 231 ---
 1 file changed, 56 insertions(+), 175 deletions(-)

diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h
index 72a92290e0..d1df841f5e 100644
--- a/lib/eal/x86/include/rte_memcpy.h
+++ b/lib/eal/x86/include/rte_memcpy.h
@@ -91,14 +91,6 @@ rte_mov15_or_less(void *dst, const void *src, size_t n)
return ret;
 }
 
-#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
-
-#define ALIGNMENT_MASK 0x3F
-
-/**
- * AVX512 implementation below
- */
-
 /**
  * Copy 16 bytes from one location to another,
  * locations should not overlap.
@@ -119,10 +111,16 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov32(uint8_t *dst, const uint8_t *src)
 {
+#if (defined __AVX512F__ && defined RTE_MEMCPY_AVX512) || defined __AVX2__ || \
+   (defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && 
(GCC_VERSION < 11)))
__m256i ymm0;
 
ymm0 = _mm256_loadu_si256((const __m256i *)src);
_mm256_storeu_si256((__m256i *)dst, ymm0);
+#else /* SSE implementation */
+   rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+   rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+#endif
 }
 
 /**
@@ -132,10 +130,15 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov64(uint8_t *dst, const uint8_t *src)
 {
+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
__m512i zmm0;
 
zmm0 = _mm512_loadu_si512((const void *)src);
_mm512_storeu_si512((void *)dst, zmm0);
+#else /* AVX2, AVX & SSE implementation */
+   rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+   rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+#endif
 }
 
 /**
@@ -156,12 +159,18 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov256(uint8_t *dst, const uint8_t *src)
 {
-   rte_mov64(dst + 0 * 64, src + 0 * 64);
-   rte_mov64(dst + 1 * 64, src + 1 * 64);
-   rte_mov64(dst + 2 * 64, src + 2 * 64);
-   rte_mov64(dst + 3 * 64, src + 3 * 64);
+   rte_mov128(dst + 0 * 128, src + 0 * 128);
+   rte_mov128(dst + 1 * 128, src + 1 * 128);
 }
 
+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
+
+/**
+ * AVX512 implementation below
+ */
+
+#define ALIGNMENT_MASK 0x3F
+
 /**
  * Copy 128-byte blocks from one location to another,
  * locations should not overlap.
@@ -231,12 +240,22 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
/**
 * Fast way when copy size doesn't exceed 512 bytes
 */
+   if (__builtin_constant_p(n) && n == 32) {
+   rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+   return ret;
+   }
if (n <= 32) {
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+   if (__builtin_constant_p(n) && n == 16)
+   return ret; /* avoid (harmless) duplicate copy */
rte_mov16((uint8_t *)dst - 16 + n,
  (const uint8_t *)src - 16 + n);
return ret;
}
+   if (__builtin_constant_p(n) && n == 64) {
+   rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+   return ret;
+   }
if (n <= 64) {
rte_mov32((uint8_t *)dst, (const uint8_t *)src);
rte_mov32((uint8_t *)dst - 32 + n,
@@ -313,80 +332,14 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
goto COPY_BLOCK_128_BACK63;
 }
 
-#elif defined __AVX2__
-
-#define ALIGNMENT_MASK 0x1F
+#elif defined __AVX2__ || \
+   (defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && 
(GCC_VERSION < 11)))
 
 /**
- * AVX2 implementation below
+ * AVX2 (and AVX, unless too old GCC version) implementation below
  */
 
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-   __m128i xmm0;
-
-   xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
-   _mm_storeu_si128((__m128i *)(void *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-   __m25

[PATCH v3] ethdev: add Linux ethtool link mode conversion

2024-03-03 Thread Thomas Monjalon
Speed capabilities of a NIC may be discovered through its Linux
kernel driver. It is especially useful for bifurcated drivers,
so they don't have to duplicate the same logic in the DPDK driver.

Parsing ethtool speed capabilities is made easy thanks to
the functions added in ethdev for internal usage only.
Of course these functions work only on Linux,
so they are not compiled in other environments.

In order to ease parsing, the ethtool macro names are parsed
externally in a shell command which generates a C array
included in this patch.
It also avoids depending on a kernel version.
This C array should be updated in future to get latest ethtool bits.
Note it is easier to update this array than to add new cases
in parsing code.

The types in the functions are following the ethtool type:
uint32_t for bitmaps, and int8_t for the number of 32-bitmaps.

Signed-off-by: Thomas Monjalon 
---

A follow-up patch will be sent to use these functions in mlx5.
I suspect mana could use this parsing as well.

v2: fix style and Windows build
v3: make table const
---
 lib/ethdev/ethdev_linux_ethtool.c | 161 ++
 lib/ethdev/ethdev_linux_ethtool.h |  41 
 lib/ethdev/meson.build|   9 ++
 lib/ethdev/version.map|   3 +
 4 files changed, 214 insertions(+)
 create mode 100644 lib/ethdev/ethdev_linux_ethtool.c
 create mode 100644 lib/ethdev/ethdev_linux_ethtool.h

diff --git a/lib/ethdev/ethdev_linux_ethtool.c 
b/lib/ethdev/ethdev_linux_ethtool.c
new file mode 100644
index 00..e792204b01
--- /dev/null
+++ b/lib/ethdev/ethdev_linux_ethtool.c
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates
+ */
+
+#include 
+
+#include "rte_ethdev.h"
+#include "ethdev_linux_ethtool.h"
+
+/* Link modes sorted with index as defined in ethtool.
+ * Values are speed in Mbps with LSB indicating duplex.
+ *
+ * The ethtool bits definition should not change as it is a kernel API.
+ * Using raw numbers directly avoids checking API availability
+ * and allows to compile with new bits included even on an old kernel.
+ *
+ * The array below is built from bit definitions with this shell command:
+ *   sed -rn 's;.*(ETHTOOL_LINK_MODE_)([0-9]+)([0-9a-zA-Z_]*).*= *([0-9]*).*;'\
+ *   '[\4] = \2, /\* \1\2\3 *\/;p' /usr/include/linux/ethtool.h |
+ *   awk '/_Half_/{$3=$3+1","}1'
+ */
+static const uint32_t link_modes[] = {
+ [0] =  11, /* ETHTOOL_LINK_MODE_10baseT_Half_BIT */
+ [1] =  10, /* ETHTOOL_LINK_MODE_10baseT_Full_BIT */
+ [2] = 101, /* ETHTOOL_LINK_MODE_100baseT_Half_BIT */
+ [3] = 100, /* ETHTOOL_LINK_MODE_100baseT_Full_BIT */
+ [4] =1001, /* ETHTOOL_LINK_MODE_1000baseT_Half_BIT */
+ [5] =1000, /* ETHTOOL_LINK_MODE_1000baseT_Full_BIT */
+[12] =   1, /* ETHTOOL_LINK_MODE_1baseT_Full_BIT */
+[15] =2500, /* ETHTOOL_LINK_MODE_2500baseX_Full_BIT */
+[17] =1000, /* ETHTOOL_LINK_MODE_1000baseKX_Full_BIT */
+[18] =   1, /* ETHTOOL_LINK_MODE_1baseKX4_Full_BIT */
+[19] =   1, /* ETHTOOL_LINK_MODE_1baseKR_Full_BIT */
+[20] =   1, /* ETHTOOL_LINK_MODE_1baseR_FEC_BIT */
+[21] =   2, /* ETHTOOL_LINK_MODE_2baseMLD2_Full_BIT */
+[22] =   2, /* ETHTOOL_LINK_MODE_2baseKR2_Full_BIT */
+[23] =   4, /* ETHTOOL_LINK_MODE_4baseKR4_Full_BIT */
+[24] =   4, /* ETHTOOL_LINK_MODE_4baseCR4_Full_BIT */
+[25] =   4, /* ETHTOOL_LINK_MODE_4baseSR4_Full_BIT */
+[26] =   4, /* ETHTOOL_LINK_MODE_4baseLR4_Full_BIT */
+[27] =   56000, /* ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT */
+[28] =   56000, /* ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT */
+[29] =   56000, /* ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT */
+[30] =   56000, /* ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT */
+[31] =   25000, /* ETHTOOL_LINK_MODE_25000baseCR_Full_BIT */
+[32] =   25000, /* ETHTOOL_LINK_MODE_25000baseKR_Full_BIT */
+[33] =   25000, /* ETHTOOL_LINK_MODE_25000baseSR_Full_BIT */
+[34] =   5, /* ETHTOOL_LINK_MODE_5baseCR2_Full_BIT */
+[35] =   5, /* ETHTOOL_LINK_MODE_5baseKR2_Full_BIT */
+[36] =  10, /* ETHTOOL_LINK_MODE_10baseKR4_Full_BIT */
+[37] =  10, /* ETHTOOL_LINK_MODE_10baseSR4_Full_BIT */
+[38] =  10, /* ETHTOOL_LINK_MODE_10baseCR4_Full_BIT */
+[39] =  10, /* ETHTOOL_LINK_MODE_10baseLR4_ER4_Full_BIT */
+[40] =   5, /* ETHTOOL_LINK_MODE_5baseSR2_Full_BIT */
+[41] =1000, /* ETHTOOL_LINK_MODE_1000baseX_Full_BIT */
+[42] =   1, /* ETHTOOL_LINK_MODE_1baseCR_Full_BIT */
+[43] =   1, /* ETHTOOL_LINK_MODE_1baseSR_Full_BIT */
+[44] =   1, /* ETHTOOL_LINK_MODE_1baseLR_Full_BIT */
+[45] =   1, /* ETH

RE: [PATCH] eal/x86: improve rte_memcpy const size 16 performance

2024-03-03 Thread Morten Brørup
> From: Stephen Hemminger [mailto:step...@networkplumber.org]
> Sent: Sunday, 3 March 2024 06.58
> 
> On Sat, 2 Mar 2024 21:40:03 -0800
> Stephen Hemminger  wrote:
> 
> > On Sun,  3 Mar 2024 00:48:12 +0100
> > Morten Brørup  wrote:
> >
> > > When the rte_memcpy() size is 16, the same 16 bytes are copied
> twice.
> > > In the case where the size is known to be 16 at build time, omit the
> > > duplicate copy.
> > >
> > > Reduced the amount of effectively copy-pasted code by using #ifdef
> > > inside functions instead of outside functions.
> > >
> > > Suggested-by: Stephen Hemminger 
> > > Signed-off-by: Morten Brørup 
> > > ---
> >
> > Looks good, let me see how it looks in goldbolt vs Gcc.
> >
> > One other issue is that for the non-constant case, rte_memcpy has an
> excessively
> > large inline code footprint. That is one of the reasons Gcc doesn't
> always
> > inline.  For > 128 bytes, it really should be a function.

Yes, the code footprint is significant for the non-constant case.
I suppose Intel considered the cost and benefits when they developed this.
Or perhaps they just wanted a showcase for their new and shiny vector 
instructions. ;-)

Inlining might provide significant branch prediction benefits in cases where 
the size is not build-time constant, but run-time constant.

> 
> For size of 4,6,8,16, 32, 64, up to 128 Gcc inline and rte_memcpy match.
> 
> For size 128. It looks gcc is simpler.
> 
> rte_copy_addr:
> vmovdqu ymm0, YMMWORD PTR [rsi]
> vextracti128XMMWORD PTR [rdi+16], ymm0, 0x1
> vmovdqu XMMWORD PTR [rdi], xmm0
> vmovdqu ymm0, YMMWORD PTR [rsi+32]
> vextracti128XMMWORD PTR [rdi+48], ymm0, 0x1
> vmovdqu XMMWORD PTR [rdi+32], xmm0
> vmovdqu ymm0, YMMWORD PTR [rsi+64]
> vextracti128XMMWORD PTR [rdi+80], ymm0, 0x1
> vmovdqu XMMWORD PTR [rdi+64], xmm0
> vmovdqu ymm0, YMMWORD PTR [rsi+96]
> vextracti128XMMWORD PTR [rdi+112], ymm0, 0x1
> vmovdqu XMMWORD PTR [rdi+96], xmm0
> vzeroupper
> ret

Interesting. Playing around with Godbolt revealed that GCC version < 11 creates 
the above from rte_memcpy, whereas GCC version >= 11 does it correctly. Clang 
doesn't have this issue.
I guess that's why the original code treated AVX as SSE.
Fixed in v2.

> copy_addr:
> vmovdqu ymm0, YMMWORD PTR [rsi]
> vmovdqu YMMWORD PTR [rdi], ymm0
> vmovdqu ymm1, YMMWORD PTR [rsi+32]
> vmovdqu YMMWORD PTR [rdi+32], ymm1
> vmovdqu ymm2, YMMWORD PTR [rsi+64]
> vmovdqu YMMWORD PTR [rdi+64], ymm2
> vmovdqu ymm3, YMMWORD PTR [rsi+96]
> vmovdqu YMMWORD PTR [rdi+96], ymm3
> vzeroupper
> ret


RE: [PATCH v2 00/11] net/mlx5: flow insertion performance improvements

2024-03-03 Thread Raslan Darawsheh
Hi.

> -Original Message-
> From: Dariusz Sosnowski 
> Sent: Thursday, February 29, 2024 1:52 PM
> To: Slava Ovsiienko ; Ori Kam ;
> Suanming Mou ; Matan Azrad
> 
> Cc: dev@dpdk.org; Raslan Darawsheh ; Bing Zhao
> 
> Subject: [PATCH v2 00/11] net/mlx5: flow insertion performance
> improvements
> 
> Goal of this patchset is to improve the throughput of flow insertion and
> deletion in mlx5 PMD when HW Steering flow engine is used.
> 
> - Patch 1 - Use preallocated per-queue, per-actions template buffer
>   for storing translated flow actions, instead of allocating and
>   filling it on demand, on each flow operation.
> - Patches 2-4 - Make resource index allocation optional. This allocation
>   will be skipped when it is not required by the created template table.
> - Patches 5-7 - Reduce memory footprint of the internal flow queue.
> - Patch 8 - Remove indirection between flow job and flow itself,
>   by using flow as an operation container.
> - Patches 9-10 - Reduce memory footprint of flow struct by moving
>   rarely used flow fields outside of the main flow struct.
>   These fields will be accessed only when needed.
>   Also remove unneeded `zmalloc` usage.
> - Patch 11 - Remove unneeded device status check in flow create.
> 
> In general all of these changes result in the following improvements (all
> numbers are averaged Kflows/sec):
> 
> |  | Insertion |   +%   | Deletion |   +%  |
> |--|:--:|:--:|::|:-:|
> | baseline |   6338.7   ||  9739.6  |   |
> | improvements |   6978.8   | +10.1% |  10432.4 | +7.1% |
> 
> The basic benchmark was run on ConnectX-6 Dx (22.40.1000), on the system
> with Intel Xeon Platinum 8380 CPU.
> 
> v2:
> 
> - Rebased.
> - Applied Acked-by tags from previous version.
> 
> Bing Zhao (2):
>   net/mlx5: skip the unneeded resource index allocation
>   net/mlx5: remove unneeded device status checking
> 
> Dariusz Sosnowski (7):
>   net/mlx5: allocate local DR rule action buffers
>   net/mlx5: remove action params from job
>   net/mlx5: remove flow pattern from job
>   net/mlx5: remove updated flow from job
>   net/mlx5: use flow as operation container
>   net/mlx5: move rarely used flow fields outside
>   net/mlx5: reuse flow fields
> 
> Erez Shitrit (2):
>   net/mlx5/hws: add check for matcher rule update support
>   net/mlx5/hws: add check if matcher contains complex rules
> 
>  drivers/net/mlx5/hws/mlx5dr.h |  16 +
>  drivers/net/mlx5/hws/mlx5dr_action.c  |   6 +
>  drivers/net/mlx5/hws/mlx5dr_action.h  |   2 +
>  drivers/net/mlx5/hws/mlx5dr_matcher.c |  29 +
>  drivers/net/mlx5/mlx5.h   |  29 +-
>  drivers/net/mlx5/mlx5_flow.h  | 128 -
>  drivers/net/mlx5/mlx5_flow_hw.c   | 794 --
>  7 files changed, 666 insertions(+), 338 deletions(-)
> 
> --
> 2.39.2

Series applied to next-net-mlx,
Kindest regards,
Raslan Darawsheh


RE: [PATCH v2] net/mlx5: add HWS support for matching ingress metadata

2024-03-03 Thread Raslan Darawsheh
Hi,

> -Original Message-
> From: Michael Baum 
> Sent: Thursday, February 29, 2024 3:49 PM
> To: dev@dpdk.org
> Cc: Matan Azrad ; Dariusz Sosnowski
> ; Raslan Darawsheh ; Slava
> Ovsiienko ; Ori Kam ;
> Suanming Mou 
> Subject: [PATCH v2] net/mlx5: add HWS support for matching ingress
> metadata
> 
> Add support for matching metadata in HWS ingress rules.
> It uses REG_B matching, which is supported on every device that supports HWS.
> 
> Signed-off-by: Michael Baum 
> ---
> 
> v2:
>  - Rebase.
>  - Fix compilation issue.
>  - Update documentation.

Patch applied to next-net-mlx,
Kindest regards
Raslan Darawsheh



Re: [PATCH v5 21/23] net/cnxk: generalise flow operation APIs

2024-03-03 Thread Jerin Jacob
On Sat, Mar 2, 2024 at 12:55 AM Harman Kalra  wrote:
>
> Flow operations can be performed on cnxk ports as well as representor
> ports. Since representor ports are not cnxk ports but have eswitch as
> base device underneath, special handling is required to align with base
> infra. Introducing a flag to generic flow APIs to discriminate if the
> operation request made on normal or representor ports.
>
> Signed-off-by: Harman Kalra 

Please fix ./devtools/check-doc-vs-code.sh
rte_flow doc out of sync for cnxk
item port_representor
action vxlan_encap
./devtools/check-doc-vs-code.sh failed


Re: [dpdk-dev] [PATCH v2 2/2] common/cnxk: fix possible out-of-bounds access

2024-03-03 Thread Jerin Jacob
On Fri, Mar 1, 2024 at 9:05 AM  wrote:
>
> From: Satheesh Paul 
>
> The subtraction expression in mbox_memcpy() can wrap around
> causing an out-of-bounds access. Added a check on 'size' to
> fix this.
>
> Coverity issue: 384431, 384439
> Fixes: 585bb3e538f9 ("common/cnxk: add VF support to base device class")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Satheesh Paul 
> Reviewed-by: Harman Kalra 


Series applied to dpdk-next-net-mrvl/for-main. Thanks


Re: [PATCH v2] net/cnxk: performance improvement for SW mbuf free

2024-03-03 Thread Jerin Jacob
On Fri, Mar 1, 2024 at 8:47 AM Rahul Bhansali  wrote:
>
> Performance improvement is done for Tx fastpath flag MBUF_NOFF when
> tx_compl_ena is false and mbuf has an external buffer.
> In such a case, instead of freeing each external mbuf individually before
> LMTST, a chain of external mbufs will be created and all freed after LMTST.
> This not only improves the performance but also fixes SQ corruption.
>
> CN10k performance improvement is ~14%.
> CN9k performance improvement is ~20%.
>
> Fixes: 51a636528515 ("net/cnxk: fix crash during Tx completion")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Rahul Bhansali 

Applied to dpdk-next-net-mrvl/for-main. Thanks


Re: [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue

2024-03-03 Thread Jerin Jacob
On Fri, Mar 1, 2024 at 11:02 PM Amit Prakash Shukla
 wrote:
>
> Added cnxk driver support for dma event enqueue and dequeue.
> Also added changes for work queue entry completion status and
> dual workslot DMA event enqueue.
>
> Signed-off-by: Pavan Nikhilesh 
> Signed-off-by: Amit Prakash Shukla 

Fix https://mails.dpdk.org/archives/test-report/2024-March/596514.html


Re: [PATCH] eal/x86: improve rte_memcpy const size 16 performance

2024-03-03 Thread Stephen Hemminger
Another option would be to just do what PPC already does.
The ENA part is because it has some garbage trying to use memcpy
always (which is one of those bad ideas).

From 74e7ab929e61e0481f6e0214d4d06a716b2f7d79 Mon Sep 17 00:00:00 2001
From: Stephen Hemminger 
Date: Sun, 3 Mar 2024 08:02:07 -0800
Subject: [PATCH] rte_memcpy: use builtin memcpy for fixed sizes

This makes x86 arch do same thing as PPC, and also allows
code checkers to see memcpy issues.  It shows a pre-existing
bug in ipsec test now.

Signed-off-by: Stephen Hemminger 
---
 drivers/net/ena/base/ena_plat_dpdk.h |  9 +-
 lib/eal/x86/include/rte_memcpy.h | 45 +++-
 2 files changed, 26 insertions(+), 28 deletions(-)

diff --git a/drivers/net/ena/base/ena_plat_dpdk.h 
b/drivers/net/ena/base/ena_plat_dpdk.h
index 14bf582a451f..997e6aa3dfbd 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -70,14 +70,7 @@ typedef uint64_t dma_addr_t;
 #define ENA_UDELAY(x) rte_delay_us_block(x)
 
 #define ENA_TOUCH(x) ((void)(x))
-/* Redefine memcpy with caution: rte_memcpy can be simply aliased to memcpy, so
- * make the redefinition only if it's safe (and beneficial) to do so.
- */
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64_MEMCPY) || \
-   defined(RTE_ARCH_ARM_NEON_MEMCPY)
-#undef memcpy
-#define memcpy rte_memcpy
-#endif
+
 #define wmb rte_wmb
 #define rmb rte_rmb
 #define mb rte_mb
diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h
index 72a92290e05d..aab30be0eeb9 100644
--- a/lib/eal/x86/include/rte_memcpy.h
+++ b/lib/eal/x86/include/rte_memcpy.h
@@ -27,24 +27,6 @@ extern "C" {
 #pragma GCC diagnostic ignored "-Wstringop-overflow"
 #endif
 
-/**
- * Copy bytes from one location to another. The locations must not overlap.
- *
- * @note This is implemented as a macro, so it's address should not be taken
- * and care is needed as parameter expressions may be evaluated multiple times.
- *
- * @param dst
- *   Pointer to the destination of the data.
- * @param src
- *   Pointer to the source data.
- * @param n
- *   Number of bytes to copy.
- * @return
- *   Pointer to the destination data.
- */
-static __rte_always_inline void *
-rte_memcpy(void *dst, const void *src, size_t n);
-
 /**
  * Copy bytes from one location to another,
  * locations should not overlap.
@@ -859,8 +841,8 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
return ret;
 }
 
-static __rte_always_inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
 {
if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
return rte_memcpy_aligned(dst, src, n);
@@ -868,6 +850,29 @@ rte_memcpy(void *dst, const void *src, size_t n)
return rte_memcpy_generic(dst, src, n);
 }
 
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @note This is implemented as a macro, so it's address should not be taken
+ * and care is needed as parameter expressions may be evaluated multiple times.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ * @param n
+ *   Number of bytes to copy.
+ * @return
+ *   Pointer to the destination data.
+ */
+#define rte_memcpy(dst, src, n)  \
+   __extension__ ({ \
+   (__builtin_constant_p(n)) ?  \
+   memcpy((dst), (src), (n)) :  \
+   rte_memcpy_func((dst), (src), (n)); })
+
+
 #undef ALIGNMENT_MASK
 
 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 10)
-- 
2.43.0





[PATCH v6 00/23] net/cnxk: support for port representors

2024-03-03 Thread Harman Kalra
Introducing port representor support to CNXK drivers by adding virtual ethernet
ports providing a logical representation in DPDK for physical function(PF) or
SR-IOV virtual function (VF) devices for control and monitoring.

These port representor ethdev instances can be spawned on an as needed basis
through configuration parameters passed to the driver of the underlying
base device using devargs ``-a ,representor=pf*vf*``

In case of exception path (i.e. until the flow definition is offloaded to the
hardware), packets transmitted by the VFs shall be received by these
representor ports, while packets transmitted by representor ports shall be
received by respective VFs.

On receiving the VF traffic via these representor ports, applications holding
these representor ports can decide to offload the traffic flow into the HW.
Henceforth the matching traffic shall be directly steered to the respective
VFs without being received by the application.

Current virtual representor port PMD supports following operations:

- Get represented port statistics
- Flow operations - create, validate, destroy, query, flush, dump

Changes since V5:
* Fixed check-doc-vs-code.sh issue

Changes since V4:
* Fixed build issues
* Updated release notes

Changes since V3:
* Added support for more platforms
* Added xstats support for representor port stats

Changes since V2:
* Moved devargs parsing logic to common code and sent as separate series
* Documentation updated
* Addressed comments from V2

Changes since V1:
* Updated communication layer between representor and represented port.
* Added support for native represented ports
* Port representor and represented port item and action support
* Build failure fixes


Harman Kalra (21):
  common/cnxk: add support for representors
  net/cnxk: implementing eswitch device
  net/cnxk: eswitch HW resource configuration
  net/cnxk: eswitch devargs parsing
  net/cnxk: probing representor ports
  common/cnxk: common NPC changes for eswitch
  common/cnxk: interface to update VLAN TPID
  net/cnxk: eswitch flow configurations
  net/cnxk: eswitch fastpath routines
  net/cnxk: add representor control plane
  common/cnxk: representee notification callback
  net/cnxk: handling representee notification
  net/cnxk: representor ethdev ops
  common/cnxk: get representees ethernet stats
  net/cnxk: ethernet statistics for representor
  common/cnxk: base support for eswitch VF
  net/cnxk: eswitch VF as ethernet device
  net/cnxk: add representor port pattern and action
  net/cnxk: generalise flow operation APIs
  net/cnxk: flow create on representor ports
  net/cnxk: other flow operations

Kiran Kumar K (2):
  common/cnxk: support port representor and represented port
  net/cnxk: add represented port pattern and action

 MAINTAINERS |   1 +
 doc/guides/nics/cnxk.rst|  41 ++
 doc/guides/nics/features/cnxk.ini   |   5 +
 doc/guides/nics/features/cnxk_vec.ini   |   6 +
 doc/guides/nics/features/cnxk_vf.ini|   6 +
 doc/guides/rel_notes/release_24_03.rst  |   1 +
 drivers/common/cnxk/meson.build |   1 +
 drivers/common/cnxk/roc_api.h   |   3 +
 drivers/common/cnxk/roc_constants.h |   2 +
 drivers/common/cnxk/roc_dev.c   |  71 ++
 drivers/common/cnxk/roc_dev_priv.h  |   3 +
 drivers/common/cnxk/roc_eswitch.c   | 389 +++
 drivers/common/cnxk/roc_eswitch.h   |  59 ++
 drivers/common/cnxk/roc_mbox.c  |   2 +
 drivers/common/cnxk/roc_mbox.h  |  91 ++-
 drivers/common/cnxk/roc_nix.c   |  46 +-
 drivers/common/cnxk/roc_nix.h   |   4 +
 drivers/common/cnxk/roc_nix_priv.h  |  12 +-
 drivers/common/cnxk/roc_nix_vlan.c  |  23 +-
 drivers/common/cnxk/roc_npc.c   | 110 ++-
 drivers/common/cnxk/roc_npc.h   |  24 +-
 drivers/common/cnxk/roc_npc_mcam.c  |  67 +-
 drivers/common/cnxk/roc_npc_parse.c |  28 +-
 drivers/common/cnxk/roc_npc_priv.h  |   5 +-
 drivers/common/cnxk/roc_platform.c  |   2 +
 drivers/common/cnxk/roc_platform.h  |   4 +
 drivers/common/cnxk/version.map |  14 +
 drivers/net/cnxk/cn10k_ethdev.c |   3 +
 drivers/net/cnxk/cnxk_eswitch.c | 800 ++
 drivers/net/cnxk/cnxk_eswitch.h | 212 ++
 drivers/net/cnxk/cnxk_eswitch_devargs.c | 125 
 drivers/net/cnxk/cnxk_eswitch_flow.c| 454 +
 drivers/net/cnxk/cnxk_eswitch_rxtx.c| 211 ++
 drivers/net/cnxk/cnxk_ethdev.c  |  41 +-
 drivers/net/cnxk/cnxk_ethdev.h  |   3 +
 drivers/net/cnxk/cnxk_ethdev_ops.c  |   4 +
 drivers/net/cnxk/cnxk_flow.c| 826 +++
 drivers/net/cnxk/cnxk_flow.h|  27 +-
 drivers/net/cnxk/cnxk_link.c|   3 +-
 drivers/net/cnxk/cnxk_rep.c | 618 +
 drivers/net/cnxk/cnxk_rep.h | 150 +
 drivers/net/cnxk/cnxk_rep_flow.c| 815 +++
 drivers/net/cnxk/cnxk_rep_msg.c 

[PATCH v6 01/23] common/cnxk: add support for representors

2024-03-03 Thread Harman Kalra
Introducing a new Mailbox for registering base device behind
all representors and also registering debug log type for representors
and base device driver.

Signed-off-by: Harman Kalra 
---
 doc/guides/nics/cnxk.rst|  4 
 drivers/common/cnxk/roc_constants.h |  1 +
 drivers/common/cnxk/roc_mbox.h  |  8 
 drivers/common/cnxk/roc_nix.c   | 31 +
 drivers/common/cnxk/roc_nix.h   |  3 +++
 drivers/common/cnxk/roc_platform.c  |  2 ++
 drivers/common/cnxk/roc_platform.h  |  4 
 drivers/common/cnxk/version.map |  3 +++
 8 files changed, 56 insertions(+)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 39660dba82..1ab8a0ca74 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -654,3 +654,7 @@ Debugging Options
+---++---+
| 2 | NPC| --log-level='pmd\.net.cnxk\.flow,8'   |
+---++---+
+   | 3 | REP| --log-level='pmd\.net.cnxk\.rep,8'|
+   +---++---+
+   | 4 | ESW| --log-level='pmd\.net.cnxk\.esw,8'|
+   +---++---+
diff --git a/drivers/common/cnxk/roc_constants.h 
b/drivers/common/cnxk/roc_constants.h
index 291b6a4bc9..cb4edbea58 100644
--- a/drivers/common/cnxk/roc_constants.h
+++ b/drivers/common/cnxk/roc_constants.h
@@ -43,6 +43,7 @@
 #define PCI_DEVID_CNXK_RVU_NIX_INL_VF 0xA0F1
 #define PCI_DEVID_CNXK_RVU_REE_PF 0xA0f4
 #define PCI_DEVID_CNXK_RVU_REE_VF 0xA0f5
+#define PCI_DEVID_CNXK_RVU_ESWITCH_PF 0xA0E0
 
 #define PCI_DEVID_CN9K_CGX  0xA059
 #define PCI_DEVID_CN10K_RPM 0xA060
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index d8a8494ac4..54956a6a06 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -68,6 +68,7 @@ struct mbox_msghdr {
M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp)   \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req,\
  msg_rsp) \
+   M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp)   \
/* CGX mbox IDs (range 0x200 - 0x3FF) */   \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp)   \
@@ -548,6 +549,13 @@ struct lmtst_tbl_setup_req {
uint64_t __io rsvd[2]; /* Future use */
 };
 
+#define MAX_PFVF_REP 64
+struct get_rep_cnt_rsp {
+   struct mbox_msghdr hdr;
+   uint16_t __io rep_cnt;
+   uint16_t __io rep_pfvf_map[MAX_PFVF_REP];
+};
+
 /* CGX mbox message formats */
 /* CGX mailbox error codes
  * Range 1101 - 1200.
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 90ccb260fb..e68d472f43 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -533,3 +533,34 @@ roc_nix_dev_fini(struct roc_nix *roc_nix)
rc |= dev_fini(&nix->dev, nix->pci_dev);
return rc;
 }
+
+int
+roc_nix_max_rep_count(struct roc_nix *roc_nix)
+{
+   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+   struct dev *dev = &nix->dev;
+   struct mbox *mbox = mbox_get(dev->mbox);
+   struct get_rep_cnt_rsp *rsp;
+   struct msg_req *req;
+   int rc, i;
+
+   req = mbox_alloc_msg_get_rep_cnt(mbox);
+   if (!req) {
+   rc = -ENOSPC;
+   goto exit;
+   }
+
+   req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
+
+   rc = mbox_process_msg(mbox, (void *)&rsp);
+   if (rc)
+   goto exit;
+
+   roc_nix->rep_cnt = rsp->rep_cnt;
+   for (i = 0; i < rsp->rep_cnt; i++)
+   roc_nix->rep_pfvf_map[i] = rsp->rep_pfvf_map[i];
+
+exit:
+   mbox_put(mbox);
+   return rc;
+}
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 4db71544f0..0289ce9820 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -482,6 +482,8 @@ struct roc_nix {
uint32_t buf_sz;
uint64_t meta_aura_handle;
uintptr_t meta_mempool;
+   uint16_t rep_cnt;
+   uint16_t rep_pfvf_map[MAX_PFVF_REP];
TAILQ_ENTRY(roc_nix) next;
 
 #define ROC_NIX_MEM_SZ (6 * 1070)
@@ -1014,4 +1016,5 @@ int __roc_api roc_nix_mcast_list_setup(struct mbox *mbox, 
uint8_t intf, int nb_e
   uint16_t *pf_funcs, uint16_t *channels, 
uint32_t *rqs,
   uint32_t *grp_index, uint32_t 
*start_index);
 int __roc_api roc_nix_mcast_list_free(struct mbox *mbox, uint32_t 
mcast_grp_index);
+int __roc_api roc_nix_max_rep_count(struct roc_nix *roc_nix);

[PATCH v6 02/23] net/cnxk: implementing eswitch device

2024-03-03 Thread Harman Kalra
Eswitch device is a parent or base device behind all the representors,
acting as transport layer between representors and representees

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.c | 379 
 drivers/net/cnxk/cnxk_eswitch.h | 103 +
 drivers/net/cnxk/meson.build|   1 +
 3 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch.c
 create mode 100644 drivers/net/cnxk/cnxk_eswitch.h

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
new file mode 100644
index 00..8f216d7c88
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include 
+
+#define CNXK_NIX_DEF_SQ_COUNT 512
+
+static int
+cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
+{
+   struct cnxk_eswitch_dev *eswitch_dev;
+   int rc = 0;
+
+   PLT_SET_USED(pci_dev);
+   if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+   return 0;
+
+   eswitch_dev = cnxk_eswitch_pmd_priv();
+   if (!eswitch_dev) {
+   rc = -EINVAL;
+   goto exit;
+   }
+
+   rte_free(eswitch_dev);
+exit:
+   return rc;
+}
+
+int
+cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev)
+{
+   int rc;
+
+   /* Enable Rx in NPC */
+   rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true);
+   if (rc) {
+   plt_err("Failed to enable NPC rx %d", rc);
+   goto done;
+   }
+
+   rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1);
+   if (rc) {
+   plt_err("Failed to enable NPC entries %d", rc);
+   goto done;
+   }
+
+done:
+   return 0;
+}
+
+int
+cnxk_eswitch_txq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+   struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
+   int rc = -EINVAL;
+
+   if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STARTED)
+   return 0;
+
+   if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_CONFIGURED) 
{
+   plt_err("Eswitch txq %d not configured yet", qid);
+   goto done;
+   }
+
+   rc = roc_nix_sq_ena_dis(sq, true);
+   if (rc) {
+   plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+   goto done;
+   }
+
+   eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STARTED;
+done:
+   return rc;
+}
+
+int
+cnxk_eswitch_txq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+   struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
+   int rc = -EINVAL;
+
+   if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STOPPED ||
+   eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
+   return 0;
+
+   if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED) {
+   plt_err("Eswitch txq %d not started", qid);
+   goto done;
+   }
+
+   rc = roc_nix_sq_ena_dis(sq, false);
+   if (rc) {
+   plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid, 
rc);
+   goto done;
+   }
+
+   eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STOPPED;
+done:
+   return rc;
+}
+
+int
+cnxk_eswitch_rxq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+   struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
+   int rc = -EINVAL;
+
+   if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STARTED)
+   return 0;
+
+   if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_CONFIGURED) 
{
+   plt_err("Eswitch rxq %d not configured yet", qid);
+   goto done;
+   }
+
+   rc = roc_nix_rq_ena_dis(rq, true);
+   if (rc) {
+   plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+   goto done;
+   }
+
+   eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STARTED;
+done:
+   return rc;
+}
+
+int
+cnxk_eswitch_rxq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+   struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
+   int rc = -EINVAL;
+
+   if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STOPPED ||
+   eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
+   return 0;
+
+   if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED) {
+   plt_err("Eswitch rxq %d not started", qid);
+   goto done;
+   }
+
+   rc = roc_nix_rq_ena_dis(rq, false);
+   if (rc) {
+   plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+   goto done;
+   }
+
+   eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STOPPED;
+done:
+   return rc;
+}
+
+int
+cnxk_eswitch_rxq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+   struct r

[PATCH v6 03/23] net/cnxk: eswitch HW resource configuration

2024-03-03 Thread Harman Kalra
Configuring the hardware resources used by the eswitch device.

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.c | 217 +++-
 1 file changed, 216 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 8f216d7c88..810e7c9c25 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -6,13 +6,53 @@
 
 #define CNXK_NIX_DEF_SQ_COUNT 512
 
+static int
+eswitch_hw_rsrc_cleanup(struct cnxk_eswitch_dev *eswitch_dev, struct 
rte_pci_device *pci_dev)
+{
+   struct roc_nix *nix;
+   int rc = 0;
+
+   nix = &eswitch_dev->nix;
+
+   roc_nix_unregister_queue_irqs(nix);
+   roc_nix_tm_fini(nix);
+   rc = roc_nix_lf_free(nix);
+   if (rc) {
+   plt_err("Failed to cleanup sq, rc %d", rc);
+   goto exit;
+   }
+
+   /* Check if this device is hosting common resource */
+   nix = roc_idev_npa_nix_get();
+   if (!nix || nix->pci_dev != pci_dev) {
+   rc = 0;
+   goto exit;
+   }
+
+   /* Try nix fini now */
+   rc = roc_nix_dev_fini(nix);
+   if (rc == -EAGAIN) {
+   plt_info("Common resource in use by other devices %s", 
pci_dev->name);
+   goto exit;
+   } else if (rc) {
+   plt_err("Failed in nix dev fini, rc=%d", rc);
+   goto exit;
+   }
+
+   rte_free(eswitch_dev->txq);
+   rte_free(eswitch_dev->rxq);
+   rte_free(eswitch_dev->cxq);
+
+exit:
+   return rc;
+}
+
 static int
 cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
 {
struct cnxk_eswitch_dev *eswitch_dev;
int rc = 0;
 
-   PLT_SET_USED(pci_dev);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
 
@@ -22,6 +62,9 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
goto exit;
}
 
+   /* Cleanup HW resources */
+   eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
+
rte_free(eswitch_dev);
 exit:
return rc;
@@ -318,6 +361,170 @@ cnxk_eswitch_txq_setup(struct cnxk_eswitch_dev 
*eswitch_dev, uint16_t qid, uint1
return rc;
 }
 
+static int
+nix_lf_setup(struct cnxk_eswitch_dev *eswitch_dev)
+{
+   uint16_t nb_rxq, nb_txq, nb_cq;
+   struct roc_nix_fc_cfg fc_cfg;
+   struct roc_nix *nix;
+   uint64_t rx_cfg;
+   void *qs;
+   int rc;
+
+   /* Initialize base roc nix */
+   nix = &eswitch_dev->nix;
+   nix->pci_dev = eswitch_dev->pci_dev;
+   nix->hw_vlan_ins = true;
+   nix->reta_sz = ROC_NIX_RSS_RETA_SZ_256;
+   rc = roc_nix_dev_init(nix);
+   if (rc) {
+   plt_err("Failed to init nix eswitch device, rc=%d(%s)", rc, 
roc_error_msg_get(rc));
+   goto fail;
+   }
+
+   /* Get the representors count */
+   rc = roc_nix_max_rep_count(&eswitch_dev->nix);
+   if (rc) {
+   plt_err("Failed to get rep cnt, rc=%d(%s)", rc, 
roc_error_msg_get(rc));
+   goto free_cqs;
+   }
+
+   /* Allocating an NIX LF */
+   nb_rxq = CNXK_ESWITCH_MAX_RXQ;
+   nb_txq = CNXK_ESWITCH_MAX_TXQ;
+   nb_cq = CNXK_ESWITCH_MAX_RXQ;
+   rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
+   rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
+   if (rc) {
+   plt_err("lf alloc failed = %s(%d)", roc_error_msg_get(rc), rc);
+   goto dev_fini;
+   }
+
+   if (nb_rxq) {
+   /* Allocate memory for eswitch rq's and cq's */
+   qs = plt_zmalloc(sizeof(struct cnxk_eswitch_rxq) * nb_rxq, 0);
+   if (!qs) {
+   plt_err("Failed to alloc eswitch rxq");
+   goto lf_free;
+   }
+   eswitch_dev->rxq = qs;
+   }
+
+   if (nb_txq) {
+   /* Allocate memory for roc sq's */
+   qs = plt_zmalloc(sizeof(struct cnxk_eswitch_txq) * nb_txq, 0);
+   if (!qs) {
+   plt_err("Failed to alloc eswitch txq");
+   goto free_rqs;
+   }
+   eswitch_dev->txq = qs;
+   }
+
+   if (nb_cq) {
+   qs = plt_zmalloc(sizeof(struct cnxk_eswitch_cxq) * nb_cq, 0);
+   if (!qs) {
+   plt_err("Failed to alloc eswitch cxq");
+   goto free_sqs;
+   }
+   eswitch_dev->cxq = qs;
+   }
+
+   eswitch_dev->nb_rxq = nb_rxq;
+   eswitch_dev->nb_txq = nb_txq;
+
+   /* Re-enable NIX LF error interrupts */
+   roc_nix_err_intr_ena_dis(nix, true);
+   roc_nix_ras_intr_ena_dis(nix, true);
+
+   rc = roc_nix_lso_fmt_setup(nix);
+   if (rc) {
+   plt_err("lso setup failed = %s(%d)", roc_error_msg_get(rc), rc);
+   goto free_cqs;
+   }
+
+   rc = roc_nix_switch_hdr_set(nix, 0, 0, 0, 0);
+   if (rc) {
+   plt_e

[PATCH v6 04/23] net/cnxk: eswitch devargs parsing

2024-03-03 Thread Harman Kalra
Implementing the devargs parsing logic via which the representor
patterns are provided. These patterns define for which representees
representors shall be created.

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.c |  88 +
 drivers/net/cnxk/cnxk_eswitch.h |  52 ++
 drivers/net/cnxk/cnxk_eswitch_devargs.c | 124 
 drivers/net/cnxk/meson.build|   1 +
 4 files changed, 265 insertions(+)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch_devargs.c

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 810e7c9c25..687bb7d146 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -388,6 +388,7 @@ nix_lf_setup(struct cnxk_eswitch_dev *eswitch_dev)
plt_err("Failed to get rep cnt, rc=%d(%s)", rc, 
roc_error_msg_get(rc));
goto free_cqs;
}
+   eswitch_dev->repr_cnt.max_repr = eswitch_dev->nix.rep_cnt;
 
/* Allocating an NIX LF */
nb_rxq = CNXK_ESWITCH_MAX_RXQ;
@@ -525,11 +526,73 @@ eswitch_hw_rsrc_setup(struct cnxk_eswitch_dev 
*eswitch_dev, struct rte_pci_devic
return rc;
 }
 
+int
+cnxk_eswitch_representor_info_get(struct cnxk_eswitch_dev *eswitch_dev,
+ struct rte_eth_representor_info *info)
+{
+   struct cnxk_eswitch_devargs *esw_da;
+   int rc = 0, n_entries, i, j = 0, k = 0;
+
+   for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+   for (j = 0; j < eswitch_dev->esw_da[i].nb_repr_ports; j++)
+   k++;
+   }
+   n_entries = k;
+
+   if (info == NULL)
+   goto out;
+
+   if ((uint32_t)n_entries > info->nb_ranges_alloc)
+   n_entries = info->nb_ranges_alloc;
+
+   k = 0;
+   info->controller = 0;
+   info->pf = 0;
+   for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+   esw_da = &eswitch_dev->esw_da[i];
+   info->ranges[k].type = esw_da->da.type;
+   switch (esw_da->da.type) {
+   case RTE_ETH_REPRESENTOR_PF:
+   info->ranges[k].controller = 0;
+   info->ranges[k].pf = esw_da->repr_hw_info[0].pfvf;
+   info->ranges[k].vf = 0;
+   info->ranges[k].id_base = info->ranges[i].pf;
+   info->ranges[k].id_end = info->ranges[i].pf;
+   snprintf(info->ranges[k].name, 
sizeof(info->ranges[k].name), "pf%d",
+info->ranges[k].pf);
+   k++;
+   break;
+   case RTE_ETH_REPRESENTOR_VF:
+   for (j = 0; j < esw_da->nb_repr_ports; j++) {
+   info->ranges[k].controller = 0;
+   info->ranges[k].pf = esw_da->da.ports[0];
+   info->ranges[k].vf = 
esw_da->repr_hw_info[j].pfvf;
+   info->ranges[k].id_base = 
esw_da->repr_hw_info[j].port_id;
+   info->ranges[k].id_end = 
esw_da->repr_hw_info[j].port_id;
+   snprintf(info->ranges[k].name, 
sizeof(info->ranges[k].name),
+"pf%dvf%d", info->ranges[k].pf, 
info->ranges[k].vf);
+   k++;
+   }
+   break;
+   default:
+   plt_err("Invalid type %d", esw_da->da.type);
+   rc = 0;
+   goto fail;
+   };
+   }
+   info->nb_ranges = k;
+fail:
+   return rc;
+out:
+   return n_entries;
+}
+
 static int
 cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device 
*pci_dev)
 {
struct cnxk_eswitch_dev *eswitch_dev;
const struct rte_memzone *mz = NULL;
+   uint16_t num_reps;
int rc = -ENOMEM;
 
RTE_SET_USED(pci_drv);
@@ -562,12 +625,37 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, 
struct rte_pci_device *pc
}
}
 
+   if (pci_dev->device.devargs) {
+   rc = cnxk_eswitch_repr_devargs(pci_dev, eswitch_dev);
+   if (rc)
+   goto rsrc_cleanup;
+   }
+
+   if (eswitch_dev->repr_cnt.nb_repr_created > 
eswitch_dev->repr_cnt.max_repr) {
+   plt_err("Representors to be created %d can be greater than max 
allowed %d",
+   eswitch_dev->repr_cnt.nb_repr_created, 
eswitch_dev->repr_cnt.max_repr);
+   rc = -EINVAL;
+   goto rsrc_cleanup;
+   }
+
+   num_reps = eswitch_dev->repr_cnt.nb_repr_created;
+   if (!num_reps) {
+   plt_err("No representors enabled");
+   goto fail;
+   }
+
+   plt_esw_dbg("Max no of reps %d reps to be created %d Eswtch pfunc %x",
+   eswitch_dev->repr_cnt.max_repr, 
e

[PATCH v6 05/23] net/cnxk: probing representor ports

2024-03-03 Thread Harman Kalra
Basic skeleton for probing representor devices. If the PF device is
passed with "representor" devargs, representor ports get probed
as separate ethdev devices.

Signed-off-by: Harman Kalra 
---
 MAINTAINERS |   1 +
 doc/guides/nics/cnxk.rst|  35 +
 drivers/net/cnxk/cnxk_eswitch.c |  12 ++
 drivers/net/cnxk/cnxk_eswitch.h |   8 +-
 drivers/net/cnxk/cnxk_rep.c | 256 
 drivers/net/cnxk/cnxk_rep.h |  50 +++
 drivers/net/cnxk/cnxk_rep_ops.c | 129 
 drivers/net/cnxk/meson.build|   2 +
 8 files changed, 492 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cnxk/cnxk_rep.c
 create mode 100644 drivers/net/cnxk/cnxk_rep.h
 create mode 100644 drivers/net/cnxk/cnxk_rep_ops.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 962c359cdd..062812e7c3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -818,6 +818,7 @@ M: Nithin Dabilpuram 
 M: Kiran Kumar K 
 M: Sunil Kumar Kori 
 M: Satha Rao 
+M: Harman Kalra 
 T: git://dpdk.org/next/dpdk-next-net-mrvl
 F: drivers/common/cnxk/
 F: drivers/net/cnxk/
diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 1ab8a0ca74..93d6db5cb0 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -37,6 +37,7 @@ Features of the CNXK Ethdev PMD are:
 - Inline IPsec processing support
 - Ingress meter support
 - Queue based priority flow control support
+- Port representors
 
 Prerequisites
 -
@@ -640,6 +641,40 @@ Runtime Config Options for inline device
With the above configuration, driver would enable packet inject from ARM 
cores
to crypto to process and send back in Rx path.
 
+Port Representors
+-
+
+The CNXK driver supports port representor model by adding virtual ethernet
+ports providing a logical representation in DPDK for physical function(PF) or
+SR-IOV virtual function (VF) devices for control and monitoring.
+
+Base device or parent device underneath the representor ports is an eswitch
+device which is not a cnxk ethernet device but has NIC RX and TX capabilities.
+Each representor port is represented by a RQ and SQ pair of this eswitch
+device.
+
+Implementation supports representors for both physical function and virtual
+function.
+
+Port representor ethdev instances can be spawned on an as-needed basis
+through configuration parameters passed to the driver of the underlying
+base device using devargs ``-a ,representor=pf*vf*``
+
+.. note::
+
+   Representor ports to be created for respective representees should be
+   defined via standard representor devargs patterns.
+   E.g. to create a representor for representee PF1VF0, the devargs to be passed
+   is ``-a ,representor=pf01vf0``
+
+   Implementation supports creation of multiple port representors with pattern:
+   ``-a ,representor=[pf0vf[1,2],pf1vf[2-5]]``
+
+Port representor PMD supports following operations:
+
+- Get PF/VF statistics
+- Flow operations - create, validate, destroy, query, flush, dump
+
 Debugging Options
 -
 
diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 687bb7d146..599ed149ae 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -3,6 +3,7 @@
  */
 
 #include 
+#include 
 
 #define CNXK_NIX_DEF_SQ_COUNT 512
 
@@ -62,6 +63,10 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
goto exit;
}
 
+   /* Remove representor devices associated with PF */
+   if (eswitch_dev->repr_cnt.nb_repr_created)
+   cnxk_rep_dev_remove(eswitch_dev);
+
/* Cleanup HW resources */
eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
 
@@ -648,6 +653,13 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, 
struct rte_pci_device *pc
eswitch_dev->repr_cnt.max_repr, 
eswitch_dev->repr_cnt.nb_repr_created,
roc_nix_get_pf_func(&eswitch_dev->nix));
 
+   /* Probe representor ports */
+   rc = cnxk_rep_dev_probe(pci_dev, eswitch_dev);
+   if (rc) {
+   plt_err("Failed to probe representor ports");
+   goto rsrc_cleanup;
+   }
+
/* Spinlock for synchronization between representors traffic and control
 * messages
 */
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index 6ff296399e..dcd5add6d0 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -66,6 +66,11 @@ struct cnxk_eswitch_repr_cnt {
uint16_t nb_repr_started;
 };
 
+struct cnxk_eswitch_switch_domain {
+   uint16_t switch_domain_id;
+   uint16_t pf;
+};
+
 struct cnxk_rep_info {
struct rte_eth_dev *rep_eth_dev;
 };
@@ -121,7 +126,8 @@ struct cnxk_eswitch_dev {
 
/* Port representor fields */
rte_spinlock_t rep_lock;
-   uint16_t switch_domain_id;
+   uint16_t nb_switch_domain;
+   struct cnxk_eswitch_switch_domain sw_dom[RTE_MAX_ETHPORTS];
uint16_t es

[PATCH v6 06/23] common/cnxk: common NPC changes for eswitch

2024-03-03 Thread Harman Kalra
Adding a new MCAM API for installing flows using the generic
npc_install_flow mbox and other helper APIs. Also adding RSS action
configuration for the eswitch.

Signed-off-by: Harman Kalra 
---
 drivers/common/cnxk/meson.build|   1 +
 drivers/common/cnxk/roc_api.h  |   3 +
 drivers/common/cnxk/roc_eswitch.c  | 306 +
 drivers/common/cnxk/roc_eswitch.h  |  22 +++
 drivers/common/cnxk/roc_mbox.h |  33 
 drivers/common/cnxk/roc_npc.c  |  26 ++-
 drivers/common/cnxk/roc_npc.h  |   5 +-
 drivers/common/cnxk/roc_npc_mcam.c |   2 +-
 drivers/common/cnxk/roc_npc_priv.h |   3 +-
 drivers/common/cnxk/version.map|   6 +
 10 files changed, 398 insertions(+), 9 deletions(-)
 create mode 100644 drivers/common/cnxk/roc_eswitch.c
 create mode 100644 drivers/common/cnxk/roc_eswitch.h

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 56eea52909..e0e4600989 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -20,6 +20,7 @@ sources = files(
 'roc_cpt_debug.c',
 'roc_dev.c',
 'roc_dpi.c',
+'roc_eswitch.c',
 'roc_hash.c',
 'roc_idev.c',
 'roc_irq.c',
diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h
index f630853088..6a86863c57 100644
--- a/drivers/common/cnxk/roc_api.h
+++ b/drivers/common/cnxk/roc_api.h
@@ -117,4 +117,7 @@
 /* MACsec */
 #include "roc_mcs.h"
 
+/* Eswitch */
+#include "roc_eswitch.h"
+
 #endif /* _ROC_API_H_ */
diff --git a/drivers/common/cnxk/roc_eswitch.c 
b/drivers/common/cnxk/roc_eswitch.c
new file mode 100644
index 00..e480ab1046
--- /dev/null
+++ b/drivers/common/cnxk/roc_eswitch.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include 
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+static int
+eswitch_vlan_rx_cfg(uint16_t pcifunc, struct mbox *mbox)
+{
+   struct nix_vtag_config *vtag_cfg;
+   int rc;
+
+   vtag_cfg = mbox_alloc_msg_nix_vtag_cfg(mbox_get(mbox));
+   if (!vtag_cfg) {
+   rc = -EINVAL;
+   goto exit;
+   }
+
+   /* config strip, capture and size */
+   vtag_cfg->hdr.pcifunc = pcifunc;
+   vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+   vtag_cfg->cfg_type = VTAG_RX; /* rx vlan cfg */
+   vtag_cfg->rx.vtag_type = NIX_RX_VTAG_TYPE0;
+   vtag_cfg->rx.strip_vtag = true;
+   vtag_cfg->rx.capture_vtag = true;
+
+   rc = mbox_process(mbox);
+   if (rc)
+   goto exit;
+
+   rc = 0;
+exit:
+   mbox_put(mbox);
+   return rc;
+}
+
+static int
+eswitch_vlan_tx_cfg(struct roc_npc_flow *flow, uint16_t pcifunc, struct mbox 
*mbox,
+   uint16_t vlan_tci, uint16_t *vidx)
+{
+   struct nix_vtag_config *vtag_cfg;
+   struct nix_vtag_config_rsp *rsp;
+   int rc;
+
+   union {
+   uint64_t reg;
+   struct nix_tx_vtag_action_s act;
+   } tx_vtag_action;
+
+   vtag_cfg = mbox_alloc_msg_nix_vtag_cfg(mbox_get(mbox));
+   if (!vtag_cfg) {
+   rc = -EINVAL;
+   goto exit;
+   }
+
+   /* Insert vlan tag */
+   vtag_cfg->hdr.pcifunc = pcifunc;
+   vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+   vtag_cfg->cfg_type = VTAG_TX; /* tx vlan cfg */
+   vtag_cfg->tx.cfg_vtag0 = true;
+   vtag_cfg->tx.vtag0 = (((uint32_t)ROC_ESWITCH_VLAN_TPID << 16) | 
vlan_tci);
+
+   rc = mbox_process_msg(mbox, (void *)&rsp);
+   if (rc)
+   goto exit;
+
+   if (rsp->vtag0_idx < 0) {
+   plt_err("Failed to config TX VTAG action");
+   rc = -EINVAL;
+   goto exit;
+   }
+
+   *vidx = rsp->vtag0_idx;
+   tx_vtag_action.reg = 0;
+   tx_vtag_action.act.vtag0_def = rsp->vtag0_idx;
+   tx_vtag_action.act.vtag0_lid = NPC_LID_LA;
+   tx_vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
+   tx_vtag_action.act.vtag0_relptr = NIX_TX_VTAGACTION_VTAG0_RELPTR;
+
+   flow->vtag_action = tx_vtag_action.reg;
+
+   rc = 0;
+exit:
+   mbox_put(mbox);
+   return rc;
+}
+
+int
+roc_eswitch_npc_mcam_tx_rule(struct roc_npc *roc_npc, struct roc_npc_flow 
*flow, uint16_t pcifunc,
+uint32_t vlan_tci)
+{
+   struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+   struct npc_install_flow_req *req;
+   struct npc_install_flow_rsp *rsp;
+   struct mbox *mbox = npc->mbox;
+   uint16_t vidx = 0, lbkid;
+   int rc;
+
+   rc = eswitch_vlan_tx_cfg(flow, roc_npc->pf_func, mbox, vlan_tci, &vidx);
+   if (rc) {
+   plt_err("Failed to configure VLAN TX, err %d", rc);
+   goto fail;
+   }
+
+   req = mbox_alloc_msg_npc_install_flow(mbox_get(mbox));
+   if (!req) {
+   rc = -EINVAL;
+   goto exit;
+   }
+
+   lbkid = 0;
+   req->hdr.pcifunc = roc_npc->pf_func; /* Eswitch PF is requester */

[PATCH v6 07/23] common/cnxk: interface to update VLAN TPID

2024-03-03 Thread Harman Kalra
Introducing an eswitch variant of the set VLAN TPID API which can be
used for both PF and VF.

Signed-off-by: Harman Kalra 
---
 drivers/common/cnxk/roc_eswitch.c  | 15 +++
 drivers/common/cnxk/roc_eswitch.h  |  4 
 drivers/common/cnxk/roc_nix_priv.h | 11 +--
 drivers/common/cnxk/roc_nix_vlan.c | 23 ++-
 drivers/common/cnxk/version.map|  1 +
 5 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/drivers/common/cnxk/roc_eswitch.c 
b/drivers/common/cnxk/roc_eswitch.c
index e480ab1046..020a891a32 100644
--- a/drivers/common/cnxk/roc_eswitch.c
+++ b/drivers/common/cnxk/roc_eswitch.c
@@ -304,3 +304,18 @@ roc_eswitch_npc_rss_action_configure(struct roc_npc 
*roc_npc, struct roc_npc_flo
((uint64_t)(rss_grp_idx & NPC_RSS_ACT_GRP_MASK) << 
NPC_RSS_ACT_GRP_OFFSET);
return 0;
 }
+
+int
+roc_eswitch_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t 
tpid, bool is_vf)
+{
+   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+   struct dev *dev = &nix->dev;
+   int rc;
+
+   /* Configuring for PF/VF */
+   rc = nix_vlan_tpid_set(dev->mbox, dev->pf_func | is_vf, type, tpid);
+   if (rc)
+   plt_err("Failed to set tpid for PF, rc %d", rc);
+
+   return rc;
+}
diff --git a/drivers/common/cnxk/roc_eswitch.h 
b/drivers/common/cnxk/roc_eswitch.h
index cdbe808a71..34b75d10ac 100644
--- a/drivers/common/cnxk/roc_eswitch.h
+++ b/drivers/common/cnxk/roc_eswitch.h
@@ -19,4 +19,8 @@ int __roc_api roc_eswitch_npc_mcam_delete_rule(struct roc_npc 
*roc_npc, struct r
 int __roc_api roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc,
   struct roc_npc_flow *flow, 
uint32_t flowkey_cfg,
   uint16_t *reta_tbl);
+
+/* NIX */
+int __roc_api roc_eswitch_nix_vlan_tpid_set(struct roc_nix *nix, uint32_t 
type, uint16_t tpid,
+   bool is_vf);
 #endif /* __ROC_ESWITCH_H__ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h 
b/drivers/common/cnxk/roc_nix_priv.h
index a582b9df33..3d99ade2b4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -469,13 +469,12 @@ void nix_tm_shaper_profile_free(struct 
nix_tm_shaper_profile *profile);
 uint64_t nix_get_blkaddr(struct dev *dev);
 void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx, FILE *file);
 int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
-int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
-uint8_t lf_tx_stats, uint8_t lf_rx_stats);
-int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
-   uint16_t cints);
-int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid,
- __io void **ctx_p);
+int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t 
lf_tx_stats,
+uint8_t lf_rx_stats);
+int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints, 
uint16_t cints);
+int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void 
**ctx_p);
 uint8_t nix_tm_lbk_relchan_get(struct nix *nix);
+int nix_vlan_tpid_set(struct mbox *mbox, uint16_t pcifunc, uint32_t type, 
uint16_t tpid);
 
 /*
  * Telemetry
diff --git a/drivers/common/cnxk/roc_nix_vlan.c 
b/drivers/common/cnxk/roc_nix_vlan.c
index abd2eb0571..db218593ad 100644
--- a/drivers/common/cnxk/roc_nix_vlan.c
+++ b/drivers/common/cnxk/roc_nix_vlan.c
@@ -211,18 +211,17 @@ roc_nix_vlan_insert_ena_dis(struct roc_nix *roc_nix,
 }
 
 int
-roc_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t tpid)
+nix_vlan_tpid_set(struct mbox *mbox, uint16_t pcifunc, uint32_t type, uint16_t 
tpid)
 {
-   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
-   struct dev *dev = &nix->dev;
-   struct mbox *mbox = mbox_get(dev->mbox);
struct nix_set_vlan_tpid *tpid_cfg;
int rc = -ENOSPC;
 
-   tpid_cfg = mbox_alloc_msg_nix_set_vlan_tpid(mbox);
+   /* Configuring for PF */
+   tpid_cfg = mbox_alloc_msg_nix_set_vlan_tpid(mbox_get(mbox));
if (tpid_cfg == NULL)
goto exit;
tpid_cfg->tpid = tpid;
+   tpid_cfg->hdr.pcifunc = pcifunc;
 
if (type & ROC_NIX_VLAN_TYPE_OUTER)
tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
@@ -234,3 +233,17 @@ roc_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t 
type, uint16_t tpid)
mbox_put(mbox);
return rc;
 }
+
+int
+roc_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t tpid)
+{
+   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+   struct dev *dev = &nix->dev;
+   int rc;
+
+   rc = nix_vlan_tpid_set(dev->mbox, dev->pf_func, type, tpid);
+   if (rc)
+   plt_err("Failed to set tpid for PF, rc %d", rc);
+
+   return rc;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/

[PATCH v6 08/23] net/cnxk: eswitch flow configurations

2024-03-03 Thread Harman Kalra
Adding flow rules for eswitch PF and VF and implementing
interfaces to delete and shift flow rules.

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.c |  44 +++
 drivers/net/cnxk/cnxk_eswitch.h |  25 +-
 drivers/net/cnxk/cnxk_eswitch_devargs.c |   1 +
 drivers/net/cnxk/cnxk_eswitch_flow.c| 454 
 drivers/net/cnxk/meson.build|   1 +
 5 files changed, 522 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch_flow.c

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 599ed149ae..25992fddc9 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -2,11 +2,33 @@
  * Copyright(C) 2024 Marvell.
  */
 
+#include 
+
 #include 
 #include 
 
 #define CNXK_NIX_DEF_SQ_COUNT 512
 
+struct cnxk_esw_repr_hw_info *
+cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev, 
uint16_t hw_func)
+{
+   struct cnxk_eswitch_devargs *esw_da;
+   int i, j;
+
+   if (!eswitch_dev)
+   return NULL;
+
+   /* Traversing the initialized represented list */
+   for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+   esw_da = &eswitch_dev->esw_da[i];
+   for (j = 0; j < esw_da->nb_repr_ports; j++) {
+   if (esw_da->repr_hw_info[j].hw_func == hw_func)
+   return &esw_da->repr_hw_info[j];
+   }
+   }
+   return NULL;
+}
+
 static int
 eswitch_hw_rsrc_cleanup(struct cnxk_eswitch_dev *eswitch_dev, struct 
rte_pci_device *pci_dev)
 {
@@ -67,6 +89,10 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
if (eswitch_dev->repr_cnt.nb_repr_created)
cnxk_rep_dev_remove(eswitch_dev);
 
+   /* Cleanup NPC rxtx flow rules */
+   cnxk_eswitch_flow_rules_remove_list(eswitch_dev, 
&eswitch_dev->esw_flow_list,
+   eswitch_dev->npc.pf_func);
+
/* Cleanup HW resources */
eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
 
@@ -87,6 +113,21 @@ cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev 
*eswitch_dev)
goto done;
}
 
+   /* Install eswitch PF mcam rules */
+   rc = cnxk_eswitch_pfvf_flow_rules_install(eswitch_dev, false);
+   if (rc) {
+   plt_err("Failed to install rxtx rules, rc %d", rc);
+   goto done;
+   }
+
+   /* Configure TPID for Eswitch PF LFs */
+   rc = roc_eswitch_nix_vlan_tpid_set(&eswitch_dev->nix, 
ROC_NIX_VLAN_TYPE_OUTER,
+  CNXK_ESWITCH_VLAN_TPID, false);
+   if (rc) {
+   plt_err("Failed to configure tpid, rc %d", rc);
+   goto done;
+   }
+
rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1);
if (rc) {
plt_err("Failed to enable NPC entries %d", rc);
@@ -524,6 +565,9 @@ eswitch_hw_rsrc_setup(struct cnxk_eswitch_dev *eswitch_dev, 
struct rte_pci_devic
if (rc)
goto rsrc_cleanup;
 
+   /* List for eswitch default flows */
+   TAILQ_INIT(&eswitch_dev->esw_flow_list);
+
return rc;
 rsrc_cleanup:
eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index dcd5add6d0..5b4e1b0a71 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -13,11 +13,10 @@
 #include "cn10k_tx.h"
 
 #define CNXK_ESWITCH_CTRL_MSG_SOCK_PATH "/tmp/cxk_rep_ctrl_msg_sock"
+#define CNXK_ESWITCH_VLAN_TPID ROC_ESWITCH_VLAN_TPID
 #define CNXK_REP_ESWITCH_DEV_MZ"cnxk_eswitch_dev"
-#define CNXK_ESWITCH_VLAN_TPID 0x8100
 #define CNXK_ESWITCH_MAX_TXQ   256
 #define CNXK_ESWITCH_MAX_RXQ   256
-#define CNXK_ESWITCH_LBK_CHAN  63
 #define CNXK_ESWITCH_VFPF_SHIFT8
 
 #define CNXK_ESWITCH_QUEUE_STATE_RELEASED   0
@@ -25,6 +24,7 @@
 #define CNXK_ESWITCH_QUEUE_STATE_STARTED2
 #define CNXK_ESWITCH_QUEUE_STATE_STOPPED3
 
+TAILQ_HEAD(eswitch_flow_list, roc_npc_flow);
 enum cnxk_esw_da_pattern_type {
CNXK_ESW_DA_TYPE_LIST = 0,
CNXK_ESW_DA_TYPE_PFVF,
@@ -39,6 +39,9 @@ struct cnxk_esw_repr_hw_info {
uint16_t pfvf;
/* representor port id assigned to representee */
uint16_t port_id;
+   uint16_t num_flow_entries;
+
+   TAILQ_HEAD(flow_list, roc_npc_flow) repr_flow_list;
 };
 
 /* Structure representing per devarg information - this can be per representee
@@ -90,7 +93,6 @@ struct cnxk_eswitch_cxq {
uint8_t state;
 };
 
-TAILQ_HEAD(eswitch_flow_list, roc_npc_flow);
 struct cnxk_eswitch_dev {
/* Input parameters */
struct plt_pci_device *pci_dev;
@@ -116,6 +118,13 @@ struct cnxk_eswitch_dev {
uint16_t rep_cnt;
uint8_t configured;
 
+   /* NPC rxtx rules */
+   struct flow_list esw_flow_list;
+   uint16_t num_entries;
+  

[PATCH v6 09/23] net/cnxk: eswitch fastpath routines

2024-03-03 Thread Harman Kalra
Implementing fast path RX and TX routines which can be
invoked from the respective representors' Rx burst and Tx burst.

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.h  |   5 +
 drivers/net/cnxk/cnxk_eswitch_rxtx.c | 211 +++
 drivers/net/cnxk/meson.build |   1 +
 3 files changed, 217 insertions(+)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch_rxtx.c

diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index 5b4e1b0a71..4edfa91bdc 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -177,4 +177,9 @@ int cnxk_eswitch_pfvf_flow_rules_install(struct 
cnxk_eswitch_dev *eswitch_dev, b
 int cnxk_eswitch_flow_rule_shift(uint16_t hw_func, uint16_t *new_entry);
 int cnxk_eswitch_flow_rules_remove_list(struct cnxk_eswitch_dev *eswitch_dev,
struct flow_list *list, uint16_t 
hw_func);
+/* RX TX fastpath routines */
+uint16_t cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, 
uint16_t qid,
+  struct rte_mbuf **pkts, uint16_t nb_tx, 
const uint16_t flags);
+uint16_t cnxk_eswitch_dev_rx_burst(struct cnxk_eswitch_dev *eswitch_dev, 
uint16_t qid,
+  struct rte_mbuf **pkts, uint16_t nb_pkts);
 #endif /* __CNXK_ESWITCH_H__ */
diff --git a/drivers/net/cnxk/cnxk_eswitch_rxtx.c 
b/drivers/net/cnxk/cnxk_eswitch_rxtx.c
new file mode 100644
index 00..d57e32b091
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_eswitch_rxtx.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include 
+
+static __rte_always_inline struct rte_mbuf *
+eswitch_nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
+{
+   rte_iova_t buff;
+
+   /* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
+   buff = *((rte_iova_t *)((uint64_t *)cq + 9));
+   return (struct rte_mbuf *)(buff - data_off);
+}
+
+static inline uint64_t
+eswitch_nix_rx_nb_pkts(struct roc_nix_cq *cq, const uint64_t wdata, const 
uint32_t qmask)
+{
+   uint64_t reg, head, tail;
+   uint32_t available;
+
+   /* Update the available count if cached value is not enough */
+
+   /* Use LDADDA version to avoid reorder */
+   reg = roc_atomic64_add_sync(wdata, cq->status);
+   /* CQ_OP_STATUS operation error */
+   if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) || reg & 
BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+   return 0;
+
+   tail = reg & 0xF;
+   head = (reg >> 20) & 0xF;
+   if (tail < head)
+   available = tail - head + qmask + 1;
+   else
+   available = tail - head;
+
+   return available;
+}
+
+static inline void
+nix_cn9k_xmit_one(uint64_t *cmd, void *lmt_addr, const plt_iova_t io_addr)
+{
+   uint64_t lmt_status;
+
+   do {
+   roc_lmt_mov(lmt_addr, cmd, 0);
+   lmt_status = roc_lmt_submit_ldeor(io_addr);
+   } while (lmt_status == 0);
+}
+
+uint16_t
+cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid,
+ struct rte_mbuf **pkts, uint16_t nb_xmit, const 
uint16_t flags)
+{
+   struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
+   struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
+   uint64_t aura_handle, cmd[6], data = 0;
+   uint16_t lmt_id, pkt = 0, nb_tx = 0;
+   struct nix_send_ext_s *send_hdr_ext;
+   struct nix_send_hdr_s *send_hdr;
+   uint16_t vlan_tci = qid;
+   union nix_send_sg_s *sg;
+   uintptr_t lmt_base, pa;
+   int64_t fc_pkts, dw_m1;
+   rte_iova_t io_addr;
+
+   if (unlikely(eswitch_dev->txq[qid].state != 
CNXK_ESWITCH_QUEUE_STATE_STARTED))
+   return 0;
+
+   lmt_base = sq->roc_nix->lmt_base;
+   io_addr = sq->io_addr;
+   aura_handle = rq->aura_handle;
+   /* Get LMT base address and LMT ID as per thread ID */
+   lmt_id = roc_plt_control_lmt_id_get();
+   lmt_base += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);
+   /* Double word minus 1: LMTST size-1 in units of 128 bits */
+   /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+   dw_m1 = cn10k_nix_tx_ext_subs(flags) + 1;
+
+   memset(cmd, 0, sizeof(cmd));
+   send_hdr = (struct nix_send_hdr_s *)&cmd[0];
+   send_hdr->w0.sizem1 = dw_m1;
+   send_hdr->w0.sq = sq->qid;
+
+   if (dw_m1 >= 2) {
+   send_hdr_ext = (struct nix_send_ext_s *)&cmd[2];
+   send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+   if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+   send_hdr_ext->w1.vlan0_ins_ena = true;
+   /* 2B before end of l2 header */
+   send_hdr_ext->w1.vlan0_ins_ptr = 12;
+   send_hdr_ext->w1.vlan0_ins_tci = 0;
+   }
+   sg = (union nix_send_sg_s *)&cmd[4];
+   } else {
+   

[PATCH v6 10/23] net/cnxk: add representor control plane

2024-03-03 Thread Harman Kalra
Implementing the control path for representor ports, where represented
ports can be configured using TLV messaging.

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.c |  70 ++-
 drivers/net/cnxk/cnxk_eswitch.h |   8 +
 drivers/net/cnxk/cnxk_rep.c |  52 ++
 drivers/net/cnxk/cnxk_rep.h |   3 +
 drivers/net/cnxk/cnxk_rep_msg.c | 827 
 drivers/net/cnxk/cnxk_rep_msg.h |  95 
 drivers/net/cnxk/meson.build|   1 +
 7 files changed, 1048 insertions(+), 8 deletions(-)
 create mode 100644 drivers/net/cnxk/cnxk_rep_msg.c
 create mode 100644 drivers/net/cnxk/cnxk_rep_msg.h

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 25992fddc9..14d0df8791 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -9,6 +9,27 @@
 
 #define CNXK_NIX_DEF_SQ_COUNT 512
 
+int
+cnxk_eswitch_representor_id(struct cnxk_eswitch_dev *eswitch_dev, uint16_t 
hw_func,
+   uint16_t *rep_id)
+{
+   struct cnxk_esw_repr_hw_info *repr_info;
+   int rc = 0;
+
+   repr_info = cnxk_eswitch_representor_hw_info(eswitch_dev, hw_func);
+   if (!repr_info) {
+   plt_warn("Failed to get representor group for %x", hw_func);
+   rc = -ENOENT;
+   goto fail;
+   }
+
+   *rep_id = repr_info->rep_id;
+
+   return 0;
+fail:
+   return rc;
+}
+
 struct cnxk_esw_repr_hw_info *
 cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev, 
uint16_t hw_func)
 {
@@ -86,8 +107,41 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
}
 
/* Remove representor devices associated with PF */
-   if (eswitch_dev->repr_cnt.nb_repr_created)
+   if (eswitch_dev->repr_cnt.nb_repr_created) {
+   /* Exiting the rep msg ctrl thread */
+   if (eswitch_dev->start_ctrl_msg_thrd) {
+   uint32_t sunlen;
+   struct sockaddr_un sun = {0};
+   int sock_fd = 0;
+
+   eswitch_dev->start_ctrl_msg_thrd = false;
+   if (!eswitch_dev->client_connected) {
+   plt_esw_dbg("Establishing connection for 
teardown");
+   sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+   if (sock_fd == -1) {
+   plt_err("Failed to open socket. err 
%d", -errno);
+   return -errno;
+   }
+   sun.sun_family = AF_UNIX;
+   sunlen = sizeof(struct sockaddr_un);
+   strncpy(sun.sun_path, 
CNXK_ESWITCH_CTRL_MSG_SOCK_PATH,
+   sizeof(sun.sun_path) - 1);
+
+   if (connect(sock_fd, (struct sockaddr *)&sun, 
sunlen) < 0) {
+   plt_err("Failed to connect socket: %s, 
err %d",
+   
CNXK_ESWITCH_CTRL_MSG_SOCK_PATH, errno);
+   close(sock_fd);
+   return -errno;
+   }
+   }
+   rte_thread_join(eswitch_dev->rep_ctrl_msg_thread, NULL);
+   if (!eswitch_dev->client_connected)
+   close(sock_fd);
+   }
+
+   /* Remove representor devices associated with PF */
cnxk_rep_dev_remove(eswitch_dev);
+   }
 
/* Cleanup NPC rxtx flow rules */
cnxk_eswitch_flow_rules_remove_list(eswitch_dev, 
&eswitch_dev->esw_flow_list,
@@ -106,13 +160,6 @@ cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev 
*eswitch_dev)
 {
int rc;
 
-   /* Enable Rx in NPC */
-   rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true);
-   if (rc) {
-   plt_err("Failed to enable NPC rx %d", rc);
-   goto done;
-   }
-
/* Install eswitch PF mcam rules */
rc = cnxk_eswitch_pfvf_flow_rules_install(eswitch_dev, false);
if (rc) {
@@ -128,6 +175,13 @@ cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev 
*eswitch_dev)
goto done;
}
 
+   /* Enable Rx in NPC */
+   rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true);
+   if (rc) {
+   plt_err("Failed to enable NPC rx %d", rc);
+   goto done;
+   }
+
rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1);
if (rc) {
plt_err("Failed to enable NPC entries %d", rc);
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index 4edfa91bdc..ecf10a8e08 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -133,6 +133,12 @@ struct cnxk_eswitch_dev {
/* No of representors */
struct cnxk_e

[PATCH v6 11/23] common/cnxk: representee notification callback

2024-03-03 Thread Harman Kalra
Setting up a callback which gets invoked every time a representee
comes up or goes down. Later this callback gets handled by the network
counterpart.

Signed-off-by: Harman Kalra 
---
 drivers/common/cnxk/roc_dev.c  | 70 ++
 drivers/common/cnxk/roc_dev_priv.h |  3 ++
 drivers/common/cnxk/roc_eswitch.c  | 23 ++
 drivers/common/cnxk/roc_eswitch.h  | 31 +
 drivers/common/cnxk/roc_mbox.c |  2 +
 drivers/common/cnxk/roc_mbox.h | 19 +++-
 drivers/common/cnxk/version.map|  2 +
 7 files changed, 149 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 14aff233d5..867f981423 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -539,6 +539,75 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
}
 }
 
+static int
+mbox_up_handler_rep_repte_notify(struct dev *dev, struct rep_repte_req *req, 
struct msg_rsp *rsp)
+{
+   struct roc_eswitch_repte_notify_msg *notify_msg;
+   int rc = 0;
+
+   plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", 
dev_get_pf(dev->pf_func),
+dev_get_vf(dev->pf_func), req->hdr.id, 
mbox_id2name(req->hdr.id),
+dev_get_pf(req->hdr.pcifunc), 
dev_get_vf(req->hdr.pcifunc));
+
+   plt_base_dbg("repte pcifunc %x, enable %d", req->repte_pcifunc, 
req->enable);
+   if (dev->ops && dev->ops->repte_notify) {
+   notify_msg = plt_zmalloc(sizeof(struct 
roc_eswitch_repte_notify_msg), 0);
+   if (!notify_msg) {
+   plt_err("Failed to allocate memory");
+   rc = -ENOMEM;
+   goto fail;
+   }
+   notify_msg->type = ROC_ESWITCH_REPTE_STATE;
+   notify_msg->state.hw_func = req->repte_pcifunc;
+   notify_msg->state.enable = req->enable;
+
+   rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg);
+   if (rc < 0)
+   plt_err("Failed to sent new representee %x notification 
to %s",
+   req->repte_pcifunc, (req->enable == true) ? 
"enable" : "disable");
+
+   plt_free(notify_msg);
+   }
+fail:
+   rsp->hdr.rc = rc;
+   return rc;
+}
+
+static int
+mbox_up_handler_rep_set_mtu(struct dev *dev, struct rep_mtu *req, struct 
msg_rsp *rsp)
+{
+   struct roc_eswitch_repte_notify_msg *notify_msg;
+   int rc = 0;
+
+   plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", 
dev_get_pf(dev->pf_func),
+dev_get_vf(dev->pf_func), req->hdr.id, 
mbox_id2name(req->hdr.id),
+dev_get_pf(req->hdr.pcifunc), 
dev_get_vf(req->hdr.pcifunc));
+
+   plt_base_dbg("rep pcifunc %x, rep id %d mtu %d", req->rep_pcifunc, 
req->rep_id, req->mtu);
+   if (dev->ops && dev->ops->repte_notify) {
+   notify_msg = plt_zmalloc(sizeof(struct 
roc_eswitch_repte_notify_msg), 0);
+   if (!notify_msg) {
+   plt_err("Failed to allocate memory");
+   rc = -ENOMEM;
+   goto fail;
+   }
+   notify_msg->type = ROC_ESWITCH_REPTE_MTU;
+   notify_msg->mtu.hw_func = req->rep_pcifunc;
+   notify_msg->mtu.rep_id = req->rep_id;
+   notify_msg->mtu.mtu = req->mtu;
+
+   rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg);
+   if (rc < 0)
+   plt_err("Failed to send new mtu notification for 
representee %x ",
+   req->rep_pcifunc);
+
+   plt_free(notify_msg);
+   }
+fail:
+   rsp->hdr.rc = rc;
+   return rc;
+}
+
 static int
 mbox_up_handler_mcs_intr_notify(struct dev *dev, struct mcs_intr_info *info, 
struct msg_rsp *rsp)
 {
@@ -713,6 +782,7 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr 
*req)
}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
+   MBOX_UP_REP_MESSAGES
 #undef M
}
 
diff --git a/drivers/common/cnxk/roc_dev_priv.h 
b/drivers/common/cnxk/roc_dev_priv.h
index 5b2c5096f8..50e12cbf17 100644
--- a/drivers/common/cnxk/roc_dev_priv.h
+++ b/drivers/common/cnxk/roc_dev_priv.h
@@ -36,12 +36,15 @@ typedef void (*q_err_cb_t)(void *roc_nix, void *data);
 /* Link status get callback */
 typedef void (*link_status_get_t)(void *roc_nix,
  struct cgx_link_user_info *link);
+/* Representee notification callback */
+typedef int (*repte_notify_t)(void *roc_nix, void *notify_msg);
 
 struct dev_ops {
link_info_t link_status_update;
ptp_info_t ptp_info_update;
link_status_get_t link_status_get;
q_err_cb_t q_err_cb;
+   repte_notify_t repte_notify;
 };
 
 #define dev_is_vf(dev) ((dev)->hwcap & DEV_HWCAP_F_VF)
diff --git a/drivers/common/cnxk/roc_eswitch.c 
b/drivers/comm

[PATCH v6 12/23] net/cnxk: handling representee notification

2024-03-03 Thread Harman Kalra
In case of any representee coming up or going down, the kernel sends an
mbox up call which signals a thread to process these messages and
enable/disable HW resources accordingly.

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_eswitch.c |   8 +
 drivers/net/cnxk/cnxk_eswitch.h |  19 ++
 drivers/net/cnxk/cnxk_rep.c | 326 
 drivers/net/cnxk/cnxk_rep.h |  37 
 4 files changed, 390 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 14d0df8791..f420d01ef8 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -139,6 +139,14 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
close(sock_fd);
}
 
+   if (eswitch_dev->repte_msg_proc.start_thread) {
+   eswitch_dev->repte_msg_proc.start_thread = false;
+   
pthread_cond_signal(&eswitch_dev->repte_msg_proc.repte_msg_cond);
+   
rte_thread_join(eswitch_dev->repte_msg_proc.repte_msg_thread, NULL);
+   
pthread_mutex_destroy(&eswitch_dev->repte_msg_proc.mutex);
+   
pthread_cond_destroy(&eswitch_dev->repte_msg_proc.repte_msg_cond);
+   }
+
/* Remove representor devices associated with PF */
cnxk_rep_dev_remove(eswitch_dev);
}
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index ecf10a8e08..0275e760fb 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -30,6 +30,22 @@ enum cnxk_esw_da_pattern_type {
CNXK_ESW_DA_TYPE_PFVF,
 };
 
+struct cnxk_esw_repte_msg {
+   struct roc_eswitch_repte_notify_msg *notify_msg;
+
+   TAILQ_ENTRY(cnxk_esw_repte_msg) next;
+};
+
+struct cnxk_esw_repte_msg_proc {
+   bool start_thread;
+   uint8_t msg_avail;
+   rte_thread_t repte_msg_thread;
+   pthread_cond_t repte_msg_cond;
+   pthread_mutex_t mutex;
+
+   TAILQ_HEAD(esw_repte_msg_list, cnxk_esw_repte_msg) msg_list;
+};
+
 struct cnxk_esw_repr_hw_info {
/* Representee pcifunc value */
uint16_t hw_func;
@@ -139,6 +155,9 @@ struct cnxk_eswitch_dev {
bool client_connected;
int sock_fd;
 
+   /* Representee notification */
+   struct cnxk_esw_repte_msg_proc repte_msg_proc;
+
/* Port representor fields */
rte_spinlock_t rep_lock;
uint16_t nb_switch_domain;
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index 5b619ebb9e..dc00cdecc1 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -4,6 +4,8 @@
 #include 
 #include 
 
+#define REPTE_MSG_PROC_THRD_NAME_MAX_LEN 30
+
 #define PF_SHIFT 10
 #define PF_MASK 0x3F
 
@@ -86,6 +88,7 @@ cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev)
 {
int i, rc = 0;
 
+   roc_eswitch_nix_process_repte_notify_cb_unregister(&eswitch_dev->nix);
for (i = 0; i < eswitch_dev->nb_switch_domain; i++) {
rc = 
rte_eth_switch_domain_free(eswitch_dev->sw_dom[i].switch_domain_id);
if (rc)
@@ -95,6 +98,299 @@ cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev)
return rc;
 }
 
+static int
+cnxk_representee_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t 
hw_func)
+{
+   struct cnxk_rep_dev *rep_dev = NULL;
+   struct rte_eth_dev *rep_eth_dev;
+   int i, rc = 0;
+
+   for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) {
+   rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev;
+   if (!rep_eth_dev) {
+   plt_err("Failed to get rep ethdev handle");
+   rc = -EINVAL;
+   goto done;
+   }
+
+   rep_dev = cnxk_rep_pmd_priv(rep_eth_dev);
+   if (rep_dev->hw_func == hw_func &&
+   (!rep_dev->native_repte || rep_dev->is_vf_active)) {
+   rep_dev->is_vf_active = false;
+   rc = cnxk_rep_dev_stop(rep_eth_dev);
+   if (rc) {
+   plt_err("Failed to stop repr port %d, rep id 
%d", rep_dev->port_id,
+   rep_dev->rep_id);
+   goto done;
+   }
+
+   cnxk_rep_rx_queue_release(rep_eth_dev, 0);
+   cnxk_rep_tx_queue_release(rep_eth_dev, 0);
+   plt_rep_dbg("Released representor ID %d representing 
%x", rep_dev->rep_id,
+   hw_func);
+   break;
+   }
+   }
+done:
+   return rc;
+}
+
+static int
+cnxk_representee_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, 
uint16_t rep_id)
+{
+   struct cnxk_rep_dev *rep_dev = NULL;
+   struct rte_eth_dev *rep_eth_dev;
+   int i, rc = 0;
+
+  

[PATCH v6 13/23] net/cnxk: representor ethdev ops

2024-03-03 Thread Harman Kalra
Implementing ethernet device operation callbacks for
port representors PMD

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cnxk_rep.c |  28 +-
 drivers/net/cnxk/cnxk_rep.h |  35 +++
 drivers/net/cnxk/cnxk_rep_msg.h |   8 +
 drivers/net/cnxk/cnxk_rep_ops.c | 495 ++--
 4 files changed, 523 insertions(+), 43 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index dc00cdecc1..ca0637bde5 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -73,6 +73,8 @@ cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, 
uint16_t hw_func, ui
 int
 cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
 {
+   struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
 
@@ -80,6 +82,8 @@ cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
rte_free(ethdev->data->mac_addrs);
ethdev->data->mac_addrs = NULL;
 
+   rep_dev->parent_dev->repr_cnt.nb_repr_probed--;
+
return 0;
 }
 
@@ -432,26 +436,6 @@ cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev)
return rc;
 }
 
-static uint16_t
-cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-   PLT_SET_USED(tx_queue);
-   PLT_SET_USED(tx_pkts);
-   PLT_SET_USED(nb_pkts);
-
-   return 0;
-}
-
-static uint16_t
-cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-   PLT_SET_USED(rx_queue);
-   PLT_SET_USED(rx_pkts);
-   PLT_SET_USED(nb_pkts);
-
-   return 0;
-}
-
 static int
 cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
 {
@@ -481,8 +465,8 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
eth_dev->dev_ops = &cnxk_rep_dev_ops;
 
/* Rx/Tx functions stubs to avoid crashing */
-   eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
-   eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+   eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
+   eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
 
/* Only single queues for representor devices */
eth_dev->data->nb_rx_queues = 1;
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index 5a85d4376e..6a43259980 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -7,6 +7,13 @@
 #ifndef __CNXK_REP_H__
 #define __CNXK_REP_H__
 
+#define CNXK_REP_TX_OFFLOAD_CAPA   
\
+   (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_VLAN_INSERT |   
   \
+RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+
+#define CNXK_REP_RX_OFFLOAD_CAPA   
\
+   (RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_RSS_HASH | 
RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_rep_dev_ops;
 
@@ -58,12 +65,33 @@ struct cnxk_rep_dev {
uint16_t repte_mtu;
 };
 
+/* Inline functions */
+static inline void
+cnxk_rep_lock(struct cnxk_rep_dev *rep)
+{
+   rte_spinlock_lock(&rep->parent_dev->rep_lock);
+}
+
+static inline void
+cnxk_rep_unlock(struct cnxk_rep_dev *rep)
+{
+   rte_spinlock_unlock(&rep->parent_dev->rep_lock);
+}
+
 static inline struct cnxk_rep_dev *
 cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
 {
return eth_dev->data->dev_private;
 }
 
+static __rte_always_inline void
+cnxk_rep_pool_buffer_stats(struct rte_mempool *pool)
+{
+   plt_rep_dbg("pool %s size %d buffer count in use  %d available 
%d\n", pool->name,
+   pool->size, rte_mempool_in_use_count(pool), 
rte_mempool_avail_count(pool));
+}
+
+/* Prototypes */
 int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev 
*eswitch_dev);
 int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev);
 int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev);
@@ -86,5 +114,12 @@ int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct 
rte_eth_stats *stats)
 int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
 int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct 
rte_flow_ops **ops);
 int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t 
hw_func, uint16_t *rep_id);
+int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev);
+int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev);
+int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr 
*addr);
+uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, 
uint16_t nb_pkts);
+uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts);
+void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
+void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
 
 #endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h

[PATCH v6 14/23] common/cnxk: get representees ethernet stats

2024-03-03 Thread Harman Kalra
Implementing an mbox interface to fetch the representees' ethernet
stats from the kernel.

Signed-off-by: Harman Kalra 
---
 drivers/common/cnxk/roc_eswitch.c | 45 +++
 drivers/common/cnxk/roc_eswitch.h |  2 ++
 drivers/common/cnxk/roc_mbox.h| 31 +
 drivers/common/cnxk/version.map   |  1 +
 4 files changed, 79 insertions(+)

diff --git a/drivers/common/cnxk/roc_eswitch.c 
b/drivers/common/cnxk/roc_eswitch.c
index 14819bad75..c67b4090a5 100644
--- a/drivers/common/cnxk/roc_eswitch.c
+++ b/drivers/common/cnxk/roc_eswitch.c
@@ -342,3 +342,48 @@ roc_eswitch_nix_process_repte_notify_cb_unregister(struct 
roc_nix *roc_nix)
 
dev->ops->repte_notify = NULL;
 }
+
+int
+roc_eswitch_nix_repte_stats(struct roc_nix *roc_nix, uint16_t pf_func, struct 
roc_nix_stats *stats)
+{
+   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+   struct dev *dev = &nix->dev;
+   struct nix_get_lf_stats_req *req;
+   struct nix_lf_stats_rsp *rsp;
+   struct mbox *mbox;
+   int rc;
+
+   mbox = mbox_get(dev->mbox);
+   req = mbox_alloc_msg_nix_get_lf_stats(mbox);
+   if (!req) {
+   rc = -ENOSPC;
+   goto exit;
+   }
+
+   req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
+   req->pcifunc = pf_func;
+
+   rc = mbox_process_msg(mbox, (void *)&rsp);
+   if (rc)
+   goto exit;
+
+   stats->rx_octs = rsp->rx.octs;
+   stats->rx_ucast = rsp->rx.ucast;
+   stats->rx_bcast = rsp->rx.bcast;
+   stats->rx_mcast = rsp->rx.mcast;
+   stats->rx_drop = rsp->rx.drop;
+   stats->rx_drop_octs = rsp->rx.drop_octs;
+   stats->rx_drop_bcast = rsp->rx.drop_bcast;
+   stats->rx_drop_mcast = rsp->rx.drop_mcast;
+   stats->rx_err = rsp->rx.err;
+
+   stats->tx_ucast = rsp->tx.ucast;
+   stats->tx_bcast = rsp->tx.bcast;
+   stats->tx_mcast = rsp->tx.mcast;
+   stats->tx_drop = rsp->tx.drop;
+   stats->tx_octs = rsp->tx.octs;
+
+exit:
+   mbox_put(mbox);
+   return rc;
+}
diff --git a/drivers/common/cnxk/roc_eswitch.h 
b/drivers/common/cnxk/roc_eswitch.h
index e0df0038d4..b701ea69ee 100644
--- a/drivers/common/cnxk/roc_eswitch.h
+++ b/drivers/common/cnxk/roc_eswitch.h
@@ -51,6 +51,8 @@ int __roc_api roc_eswitch_npc_rss_action_configure(struct 
roc_npc *roc_npc,
 /* NIX */
 int __roc_api roc_eswitch_nix_vlan_tpid_set(struct roc_nix *nix, uint32_t 
type, uint16_t tpid,
bool is_vf);
+int __roc_api roc_eswitch_nix_repte_stats(struct roc_nix *roc_nix, uint16_t 
pf_func,
+ struct roc_nix_stats *stats);
 int __roc_api roc_eswitch_nix_process_repte_notify_cb_register(struct roc_nix 
*roc_nix,
process_repte_notify_t 
proc_repte_nt);
 void __roc_api roc_eswitch_nix_process_repte_notify_cb_unregister(struct 
roc_nix *roc_nix);
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index d28e3ffd70..f1a3371ef9 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -306,6 +306,7 @@ struct mbox_msghdr {
M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, 
nix_mcast_grp_destroy_req, msg_rsp)\
M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, 
nix_mcast_grp_update_req,\
  nix_mcast_grp_update_rsp) 
   \
+   M(NIX_GET_LF_STATS,0x802e, nix_get_lf_stats, nix_get_lf_stats_req, 
nix_lf_stats_rsp)   \
/* MCS mbox IDs (range 0xa000 - 0xbFFF) */  
   \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, 
   \
  mcs_alloc_rsrc_rsp)   
   \
@@ -1850,6 +1851,36 @@ struct nix_mcast_grp_update_rsp {
uint32_t __io mce_start_index;
 };
 
+struct nix_get_lf_stats_req {
+   struct mbox_msghdr hdr;
+   uint16_t __io pcifunc;
+   uint64_t __io rsvd;
+};
+
+struct nix_lf_stats_rsp {
+   struct mbox_msghdr hdr;
+   uint16_t __io pcifunc;
+   struct {
+   uint64_t __io octs;
+   uint64_t __io ucast;
+   uint64_t __io bcast;
+   uint64_t __io mcast;
+   uint64_t __io drop;
+   uint64_t __io drop_octs;
+   uint64_t __io drop_mcast;
+   uint64_t __io drop_bcast;
+   uint64_t __io err;
+   uint64_t __io rsvd[5];
+   } rx;
+   struct {
+   uint64_t __io ucast;
+   uint64_t __io bcast;
+   uint64_t __io mcast;
+   uint64_t __io drop;
+   uint64_t __io octs;
+   } tx;
+};
+
 /* Global NIX inline IPSec configuration */
 struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
diff --git a/drivers/common/cnxk/vers

[PATCH v6 15/23] net/cnxk: ethernet statistics for representor

2024-03-03 Thread Harman Kalra
Adding representor ethernet statistics support which can fetch stats
for representees which are operating independently or as part of a
companion app.
Adds xstats callback for representor port statistics.

Signed-off-by: Harman Kalra 
Signed-off-by: Ankur Dwivedi 
---
 drivers/net/cnxk/cnxk_rep.h |   8 +
 drivers/net/cnxk/cnxk_rep_msg.h |   7 +
 drivers/net/cnxk/cnxk_rep_ops.c | 275 +++-
 3 files changed, 285 insertions(+), 5 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index 6a43259980..51a2e97624 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -121,5 +121,13 @@ uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct 
rte_mbuf **tx_pkts, uint
 uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts);
 void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
 void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
+int cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat 
*stats, unsigned int n);
+int cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev);
+int cnxk_rep_xstats_get_names(struct rte_eth_dev *eth_dev, struct 
rte_eth_xstat_name *xstats_names,
+ unsigned int n);
+int cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t 
*ids, uint64_t *values,
+ unsigned int n);
+int cnxk_rep_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, const 
uint64_t *ids,
+   struct rte_eth_xstat_name *xstats_names, 
unsigned int n);
 
 #endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h
index 63cfbe3f19..277e25d92a 100644
--- a/drivers/net/cnxk/cnxk_rep_msg.h
+++ b/drivers/net/cnxk/cnxk_rep_msg.h
@@ -21,6 +21,8 @@ typedef enum CNXK_REP_MSG {
CNXK_REP_MSG_EXIT,
/* Ethernet operation msgs */
CNXK_REP_MSG_ETH_SET_MAC,
+   CNXK_REP_MSG_ETH_STATS_GET,
+   CNXK_REP_MSG_ETH_STATS_CLEAR,
/* End of messaging sequence */
CNXK_REP_MSG_END,
 } cnxk_rep_msg_t;
@@ -89,6 +91,11 @@ typedef struct cnxk_rep_msg_eth_mac_set_meta {
uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
 } __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;
 
+/* Ethernet op - get/clear stats */
+typedef struct cnxk_rep_msg_eth_stats_meta {
+   uint16_t portid;
+} __rte_packed cnxk_rep_msg_eth_stats_meta_t;
+
 void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, 
cnxk_rep_msg_t type,
   uint32_t size);
 void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void 
*msg_meta, uint32_t sz,
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
index 97643a50f2..0ba4d55398 100644
--- a/drivers/net/cnxk/cnxk_rep_ops.c
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -10,6 +10,11 @@
 #define RX_DESC_PER_QUEUE  256
 #define NB_REP_VDEV_MBUF   1024
 
+static const struct rte_eth_xstat_name cnxk_rep_xstats_string[] = {
+   {"rep_nb_rx"},
+   {"rep_nb_tx"},
+};
+
 static uint16_t
 cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -24,6 +29,7 @@ cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, 
uint16_t nb_pkts)
plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, 
txq->qid);
n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, 
tx_pkts, nb_pkts,
 NIX_TX_OFFLOAD_VLAN_QINQ_F);
+   txq->stats.pkts += n_tx;
return n_tx;
 }
 
@@ -43,6 +49,7 @@ cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts)
return 0;
 
plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid);
+   rxq->stats.pkts += n_rx;
return n_rx;
 }
 
@@ -486,19 +493,154 @@ cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, 
uint16_t queue_id)
plt_err("Failed to release txq %d, rc=%d", rc, txq->qid);
 }
 
+static int
+process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t 
*adata, cnxk_rep_msg_t msg)
+{
+   cnxk_rep_msg_eth_stats_meta_t msg_st_meta;
+   uint32_t len = 0, rc;
+   void *buffer;
+   size_t size;
+
+   size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+   buffer = plt_zmalloc(size, 0);
+   if (!buffer) {
+   plt_err("Failed to allocate mem");
+   rc = -ENOMEM;
+   goto fail;
+   }
+
+   cnxk_rep_msg_populate_header(buffer, &len);
+
+   msg_st_meta.portid = rep_dev->rep_id;
+   cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,
+  
sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);
+   cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+   rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
+   if (rc) {
+   plt_err("Failed to process the message,

[PATCH v6 16/23] common/cnxk: base support for eswitch VF

2024-03-03 Thread Harman Kalra
Base ROC layer changes for supporting eswitch VF and NIX lbk
changes for ESW

Signed-off-by: Harman Kalra 
---
 drivers/common/cnxk/roc_constants.h |  1 +
 drivers/common/cnxk/roc_dev.c   |  1 +
 drivers/common/cnxk/roc_nix.c   | 15 +--
 drivers/common/cnxk/roc_nix.h   |  1 +
 drivers/common/cnxk/roc_nix_priv.h  |  1 +
 drivers/common/cnxk/version.map |  1 +
 6 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_constants.h 
b/drivers/common/cnxk/roc_constants.h
index cb4edbea58..21b3998cee 100644
--- a/drivers/common/cnxk/roc_constants.h
+++ b/drivers/common/cnxk/roc_constants.h
@@ -44,6 +44,7 @@
 #define PCI_DEVID_CNXK_RVU_REE_PF 0xA0f4
 #define PCI_DEVID_CNXK_RVU_REE_VF 0xA0f5
 #define PCI_DEVID_CNXK_RVU_ESWITCH_PF 0xA0E0
+#define PCI_DEVID_CNXK_RVU_ESWITCH_VF 0xA0E1
 
 #define PCI_DEVID_CN9K_CGX  0xA059
 #define PCI_DEVID_CN10K_RPM 0xA060
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 867f981423..daf7684d8e 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -1272,6 +1272,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, 
struct dev *dev)
case PCI_DEVID_CNXK_RVU_VF:
case PCI_DEVID_CNXK_RVU_SDP_VF:
case PCI_DEVID_CNXK_RVU_NIX_INL_VF:
+   case PCI_DEVID_CNXK_RVU_ESWITCH_VF:
dev->hwcap |= DEV_HWCAP_F_VF;
break;
}
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index e68d472f43..20202788b5 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -13,6 +13,14 @@ roc_nix_is_lbk(struct roc_nix *roc_nix)
return nix->lbk_link;
 }
 
+bool
+roc_nix_is_esw(struct roc_nix *roc_nix)
+{
+   struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+   return nix->esw_link;
+}
+
 int
 roc_nix_get_base_chan(struct roc_nix *roc_nix)
 {
@@ -156,7 +164,7 @@ roc_nix_max_pkt_len(struct roc_nix *roc_nix)
if (roc_model_is_cn9k())
return NIX_CN9K_MAX_HW_FRS;
 
-   if (nix->lbk_link)
+   if (nix->lbk_link || nix->esw_link)
return NIX_LBK_MAX_HW_FRS;
 
return NIX_RPM_MAX_HW_FRS;
@@ -351,7 +359,7 @@ roc_nix_get_hw_info(struct roc_nix *roc_nix)
rc = mbox_process_msg(mbox, (void *)&hw_info);
if (rc == 0) {
nix->vwqe_interval = hw_info->vwqe_delay;
-   if (nix->lbk_link)
+   if (nix->lbk_link || nix->esw_link)
roc_nix->dwrr_mtu = hw_info->lbk_dwrr_mtu;
else if (nix->sdp_link)
roc_nix->dwrr_mtu = hw_info->sdp_dwrr_mtu;
@@ -368,6 +376,7 @@ sdp_lbk_id_update(struct plt_pci_device *pci_dev, struct 
nix *nix)
 {
nix->sdp_link = false;
nix->lbk_link = false;
+   nix->esw_link = false;
 
/* Update SDP/LBK link based on PCI device id */
switch (pci_dev->id.device_id) {
@@ -376,7 +385,9 @@ sdp_lbk_id_update(struct plt_pci_device *pci_dev, struct 
nix *nix)
nix->sdp_link = true;
break;
case PCI_DEVID_CNXK_RVU_AF_VF:
+   case PCI_DEVID_CNXK_RVU_ESWITCH_VF:
nix->lbk_link = true;
+   nix->esw_link = true;
break;
default:
break;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 0289ce9820..bd3e540f45 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -526,6 +526,7 @@ int __roc_api roc_nix_dev_fini(struct roc_nix *roc_nix);
 
 /* Type */
 bool __roc_api roc_nix_is_lbk(struct roc_nix *roc_nix);
+bool __roc_api roc_nix_is_esw(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_is_sdp(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_is_pf(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_is_vf_or_sdp(struct roc_nix *roc_nix);
diff --git a/drivers/common/cnxk/roc_nix_priv.h 
b/drivers/common/cnxk/roc_nix_priv.h
index 3d99ade2b4..275ffc8ea3 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -170,6 +170,7 @@ struct nix {
uintptr_t base;
bool sdp_link;
bool lbk_link;
+   bool esw_link;
bool ptp_en;
bool is_nix1;
 
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 18c2d9d632..424ad7f484 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -278,6 +278,7 @@ INTERNAL {
roc_nix_inl_outb_cpt_lfs_dump;
roc_nix_cpt_ctx_cache_sync;
roc_nix_is_lbk;
+   roc_nix_is_esw;
roc_nix_is_pf;
roc_nix_is_sdp;
roc_nix_is_vf_or_sdp;
-- 
2.18.0



[PATCH v6 17/23] net/cnxk: eswitch VF as ethernet device

2024-03-03 Thread Harman Kalra
Adding support for eswitch VF to probe as a normal cnxk ethernet device

Signed-off-by: Harman Kalra 
---
 drivers/net/cnxk/cn10k_ethdev.c|  3 +++
 drivers/net/cnxk/cnxk_ethdev.c | 41 +-
 drivers/net/cnxk/cnxk_ethdev.h |  3 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c |  4 +++
 drivers/net/cnxk/cnxk_link.c   |  3 ++-
 5 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 05d6d3b53f..55ed54bb0f 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -973,6 +973,9 @@ static const struct rte_pci_id cn10k_pci_nix_map[] = {
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
+   CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_ESWITCH_VF),
+   CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_ESWITCH_VF),
+   CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_ESWITCH_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_VF),
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 7640910782..6b37bd877f 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -390,7 +390,7 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct cnxk_fc_cfg *fc = &dev->fc_cfg;
struct rte_eth_fc_conf fc_cfg = {0};
 
-   if (roc_nix_is_sdp(&dev->nix))
+   if (roc_nix_is_sdp(&dev->nix) || roc_nix_is_esw(&dev->nix))
return 0;
 
/* Don't do anything if PFC is enabled */
@@ -1449,12 +1449,14 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto cq_fini;
 
/* Init flow control configuration */
-   fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
-   fc_cfg.rxchan_cfg.enable = true;
-   rc = roc_nix_fc_config_set(nix, &fc_cfg);
-   if (rc) {
-   plt_err("Failed to initialize flow control rc=%d", rc);
-   goto cq_fini;
+   if (!roc_nix_is_esw(nix)) {
+   fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
+   fc_cfg.rxchan_cfg.enable = true;
+   rc = roc_nix_fc_config_set(nix, &fc_cfg);
+   if (rc) {
+   plt_err("Failed to initialize flow control rc=%d", rc);
+   goto cq_fini;
+   }
}
 
/* Update flow control configuration to PMD */
@@ -1977,11 +1979,21 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&dev->mcs_list);
}
 
-   plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
-   " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
-   eth_dev->data->port_id, roc_nix_get_pf(nix),
-   roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
-   dev->rx_offload_capa, dev->tx_offload_capa);
+   /* Reserve a switch domain for eswitch device */
+   if (pci_dev->id.device_id == PCI_DEVID_CNXK_RVU_ESWITCH_VF) {
+   eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+   rc = rte_eth_switch_domain_alloc(&dev->switch_domain_id);
+   if (rc) {
+   plt_err("Failed to alloc switch domain: %d", rc);
+   goto free_mac_addrs;
+   }
+   }
+
+   plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64 " 
rxoffload_capa=0x%" PRIx64
+   " txoffload_capa=0x%" PRIx64,
+   eth_dev->data->port_id, roc_nix_get_pf(nix), 
roc_nix_get_vf(nix),
+   CNXK_ETH_DEV_PMD_VERSION, dev->hwcap, dev->rx_offload_capa,
+   dev->tx_offload_capa);
return 0;
 
 free_mac_addrs:
@@ -2047,6 +2059,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool 
reset)
}
}
 
+   /* Free switch domain ID reserved for eswitch device */
+   if ((eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) &&
+   rte_eth_switch_domain_free(dev->switch_domain_id))
+   plt_err("Failed to free switch domain");
+
/* Disable and free rte_meter entries */
nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 80a9dc83a1..5d42e1306a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -427,6 +427,9 @@ struct cnxk_eth_dev {
 
/* Inject packets */
struct cnxk_ethdev_inj_cfg inj_cfg;
+
+   /* Eswitch domain ID */
+   uint16_t switch_domain_id;
 };
 
 struct cnxk_eth_rxq_sp {
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c 
b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 56049c5dd

[PATCH v6 18/23] common/cnxk: support port representor and represented port

2024-03-03 Thread Harman Kalra
From: Kiran Kumar K 

Implementing the common infrastructural changes for supporting port
representors and represented ports used as action and pattern in net
layer.

Signed-off-by: Kiran Kumar K 
Signed-off-by: Satheesh Paul 
Signed-off-by: Harman Kalra 
---
 drivers/common/cnxk/roc_npc.c   | 84 +++--
 drivers/common/cnxk/roc_npc.h   | 19 ++-
 drivers/common/cnxk/roc_npc_mcam.c  | 65 +++---
 drivers/common/cnxk/roc_npc_parse.c | 28 +-
 drivers/common/cnxk/roc_npc_priv.h  |  2 +
 drivers/net/cnxk/cnxk_flow.c|  2 +-
 6 files changed, 150 insertions(+), 50 deletions(-)

diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
index 67a660a2bc..d6ad3756bb 100644
--- a/drivers/common/cnxk/roc_npc.c
+++ b/drivers/common/cnxk/roc_npc.c
@@ -570,6 +570,8 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct 
roc_npc_attr *attr,
flow->ctr_id = NPC_COUNTER_NONE;
flow->mtr_id = ROC_NIX_MTR_ID_INVALID;
pf_func = npc->pf_func;
+   if (flow->has_rep)
+   pf_func = flow->rep_pf_func;
 
for (; actions->type != ROC_NPC_ACTION_TYPE_END; actions++) {
switch (actions->type) {
@@ -788,9 +790,12 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct 
roc_npc_attr *attr,
 
if (req_act & ROC_NPC_ACTION_TYPE_DROP) {
flow->npc_action = NIX_TX_ACTIONOP_DROP;
-   } else if ((req_act & ROC_NPC_ACTION_TYPE_COUNT) ||
-  vlan_insert_action) {
+   } else if ((req_act & ROC_NPC_ACTION_TYPE_COUNT) || 
vlan_insert_action) {
flow->npc_action = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+   if (flow->rep_act_rep) {
+   flow->npc_action = NIX_TX_ACTIONOP_UCAST_CHAN;
+   flow->npc_action |= (uint64_t)0x3f << 12;
+   }
} else {
plt_err("Unsupported action for egress");
errcode = NPC_ERR_ACTION_NOTSUP;
@@ -802,7 +807,9 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct 
roc_npc_attr *attr,
flow->mcast_channels[1] = npc->channel;
}
 
-   goto set_pf_func;
+   /* PF func who is sending the packet */
+   flow->tx_pf_func = pf_func;
+   goto done;
} else {
if (vlan_insert_action) {
errcode = NPC_ERR_ACTION_NOTSUP;
@@ -881,10 +888,10 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct 
roc_npc_attr *attr,
if (mark)
flow->npc_action |= (uint64_t)mark << 40;
 
-set_pf_func:
/* Ideally AF must ensure that correct pf_func is set */
flow->npc_action |= (uint64_t)pf_func << 4;
 
+done:
return 0;
 
 err_exit:
@@ -898,10 +905,14 @@ npc_parse_pattern(struct npc *npc, const struct 
roc_npc_item_info pattern[],
  struct roc_npc_flow *flow, struct npc_parse_state *pst)
 {
npc_parse_stage_func_t parse_stage_funcs[] = {
-   npc_parse_meta_items, npc_parse_mark_item, npc_parse_pre_l2, 
npc_parse_cpt_hdr,
-   npc_parse_higig2_hdr, npc_parse_tx_queue,  npc_parse_la, 
npc_parse_lb,
-   npc_parse_lc, npc_parse_ld,npc_parse_le, 
npc_parse_lf,
-   npc_parse_lg, npc_parse_lh,
+   npc_parse_meta_items, npc_parse_port_representor_id,
+   npc_parse_mark_item,  npc_parse_pre_l2,
+   npc_parse_cpt_hdr,npc_parse_higig2_hdr,
+   npc_parse_tx_queue,   npc_parse_la,
+   npc_parse_lb, npc_parse_lc,
+   npc_parse_ld, npc_parse_le,
+   npc_parse_lf, npc_parse_lg,
+   npc_parse_lh,
};
uint8_t layer = 0;
int key_offset;
@@ -1140,15 +1151,20 @@ npc_rss_action_program(struct roc_npc *roc_npc,
   struct roc_npc_flow *flow)
 {
const struct roc_npc_action_rss *rss;
+   struct roc_npc *npc = roc_npc;
uint32_t rss_grp;
uint8_t alg_idx;
int rc;
 
+   if (flow->has_rep) {
+   npc = roc_npc->rep_npc;
+   npc->flowkey_cfg_state = roc_npc->flowkey_cfg_state;
+   }
+
for (; actions->type != ROC_NPC_ACTION_TYPE_END; actions++) {
if (actions->type == ROC_NPC_ACTION_TYPE_RSS) {
rss = (const struct roc_npc_action_rss *)actions->conf;
-   rc = npc_rss_action_configure(roc_npc, rss, &alg_idx,
- &rss_grp, flow->mcam_id);
+   rc = npc_rss_action_configure(npc, rss, &alg_idx, 
&rss_grp, flow->mcam_id);
if (rc)
return rc;
 
@@ -1171,7 +1187,7 @@ npc_vtag_cfg_delete(struct roc_np

[PATCH v6 19/23] net/cnxk: add represented port pattern and action

2024-03-03 Thread Harman Kalra
From: Kiran Kumar K 

Adding support for represented_port item matching and action.

Signed-off-by: Kiran Kumar K 
Signed-off-by: Satheesh Paul 
Signed-off-by: Harman Kalra 
---
 doc/guides/nics/cnxk.rst  |   1 +
 doc/guides/nics/features/cnxk.ini |   1 +
 doc/guides/nics/features/cnxk_vec.ini |   2 +
 doc/guides/nics/features/cnxk_vf.ini  |   2 +
 drivers/net/cnxk/cnxk_flow.c  | 107 ++
 5 files changed, 63 insertions(+), 50 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 93d6db5cb0..e156340cf0 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -38,6 +38,7 @@ Features of the CNXK Ethdev PMD are:
 - Ingress meter support
 - Queue based priority flow control support
 - Port representors
+- Represented port pattern matching and action
 
 Prerequisites
 -
diff --git a/doc/guides/nics/features/cnxk.ini 
b/doc/guides/nics/features/cnxk.ini
index 94e7a6ab8d..8957eba46b 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -73,6 +73,7 @@ mpls = Y
 nvgre= Y
 pppoes   = Y
 raw  = Y
+represented_port = Y
 sctp = Y
 tcp  = Y
 tx_queue = Y
diff --git a/doc/guides/nics/features/cnxk_vec.ini 
b/doc/guides/nics/features/cnxk_vec.ini
index 6086b3d73f..193fd14fbb 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -67,6 +67,7 @@ mpls = Y
 nvgre= Y
 pppoes   = Y
 raw  = Y
+represented_port = Y
 sctp = Y
 tcp  = Y
 tx_queue = Y
@@ -86,6 +87,7 @@ of_set_vlan_pcp  = Y
 of_set_vlan_vid  = Y
 pf   = Y
 queue= Y
+represented_port = Y
 rss  = Y
 security = Y
 vf   = Y
diff --git a/doc/guides/nics/features/cnxk_vf.ini 
b/doc/guides/nics/features/cnxk_vf.ini
index 53aa2a3d0c..7ae49b8107 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -64,6 +64,7 @@ mpls = Y
 nvgre= Y
 pppoes   = Y
 raw  = Y
+represented_port = Y
 sctp = Y
 tcp  = Y
 tx_queue = Y
@@ -85,6 +86,7 @@ of_set_vlan_pcp  = Y
 of_set_vlan_vid  = Y
 pf   = Y
 queue= Y
+represented_port = Y
 rss  = Y
 security = Y
 skip_cman= Y
diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c
index b7e6ebc2e1..7b684eb759 100644
--- a/drivers/net/cnxk/cnxk_flow.c
+++ b/drivers/net/cnxk/cnxk_flow.c
@@ -4,67 +4,48 @@
 #include 
 
 const struct cnxk_rte_flow_term_info term[] = {
-   [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH,
-   sizeof(struct rte_flow_item_eth)},
-   [RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN,
-sizeof(struct rte_flow_item_vlan)},
-   [RTE_FLOW_ITEM_TYPE_E_TAG] = {ROC_NPC_ITEM_TYPE_E_TAG,
- sizeof(struct rte_flow_item_e_tag)},
-   [RTE_FLOW_ITEM_TYPE_IPV4] = {ROC_NPC_ITEM_TYPE_IPV4,
-sizeof(struct rte_flow_item_ipv4)},
-   [RTE_FLOW_ITEM_TYPE_IPV6] = {ROC_NPC_ITEM_TYPE_IPV6,
-sizeof(struct rte_flow_item_ipv6)},
-   [RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT] = {
-   ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT,
-   sizeof(struct rte_flow_item_ipv6_frag_ext)},
-   [RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = {
-   ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4,
-   sizeof(struct rte_flow_item_arp_eth_ipv4)},
-   [RTE_FLOW_ITEM_TYPE_MPLS] = {ROC_NPC_ITEM_TYPE_MPLS,
-sizeof(struct rte_flow_item_mpls)},
-   [RTE_FLOW_ITEM_TYPE_ICMP] = {ROC_NPC_ITEM_TYPE_ICMP,
-sizeof(struct rte_flow_item_icmp)},
-   [RTE_FLOW_ITEM_TYPE_UDP] = {ROC_NPC_ITEM_TYPE_UDP,
-   sizeof(struct rte_flow_item_udp)},
-   [RTE_FLOW_ITEM_TYPE_TCP] = {ROC_NPC_ITEM_TYPE_TCP,
-   sizeof(struct rte_flow_item_tcp)},
-   [RTE_FLOW_ITEM_TYPE_SCTP] = {ROC_NPC_ITEM_TYPE_SCTP,
-sizeof(struct rte_flow_item_sctp)},
-   [RTE_FLOW_ITEM_TYPE_ESP] = {ROC_NPC_ITEM_TYPE_ESP,
-   sizeof(struct rte_flow_item_esp)},
-   [RTE_FLOW_ITEM_TYPE_GRE] = {ROC_NPC_ITEM_TYPE_GRE,
-   sizeof(struct rte_flow_item_gre)},
-   [RTE_FLOW_ITEM_TYPE_NVGRE] = {ROC_NPC_ITEM_TYPE_NVGRE,
- sizeof(struct rte_flow_item_nvgre)},
-   [RTE_FLOW_ITEM_TYPE_VXLAN]

[PATCH v6 20/23] net/cnxk: add representor port pattern and action

2024-03-03 Thread Harman Kalra
Adding support for representor port as item matching and action.

Signed-off-by: Harman Kalra 
---
 doc/guides/nics/cnxk.rst  |   1 +
 doc/guides/nics/features/cnxk.ini |   2 +
 doc/guides/nics/features/cnxk_vec.ini |   2 +
 doc/guides/nics/features/cnxk_vf.ini  |   2 +
 drivers/net/cnxk/cnxk_flow.c  | 219 +++---
 drivers/net/cnxk/cnxk_rep.h   |  14 ++
 6 files changed, 214 insertions(+), 26 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index e156340cf0..e8c64ef34c 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -39,6 +39,7 @@ Features of the CNXK Ethdev PMD are:
 - Queue based priority flow control support
 - Port representors
 - Represented port pattern matching and action
+- Port representor pattern matching and action
 
 Prerequisites
 -
diff --git a/doc/guides/nics/features/cnxk.ini 
b/doc/guides/nics/features/cnxk.ini
index 8957eba46b..9603d2566e 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -95,6 +95,7 @@ of_set_vlan_pcp  = Y
 of_set_vlan_vid  = Y
 pf   = Y
 port_id  = Y
+port_representor = Y
 queue= Y
 represented_port = Y
 rss  = Y
@@ -102,3 +103,4 @@ sample   = Y
 security = Y
 skip_cman= Y
 vf   = Y
+vxlan_decap  = I
diff --git a/doc/guides/nics/features/cnxk_vec.ini 
b/doc/guides/nics/features/cnxk_vec.ini
index 193fd14fbb..96289059ec 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -86,8 +86,10 @@ of_push_vlan = Y
 of_set_vlan_pcp  = Y
 of_set_vlan_vid  = Y
 pf   = Y
+port_representor = Y
 queue= Y
 represented_port = Y
 rss  = Y
 security = Y
 vf   = Y
+vxlan_decap  = I
diff --git a/doc/guides/nics/features/cnxk_vf.ini 
b/doc/guides/nics/features/cnxk_vf.ini
index 7ae49b8107..850c49c5be 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -85,9 +85,11 @@ of_push_vlan = Y
 of_set_vlan_pcp  = Y
 of_set_vlan_vid  = Y
 pf   = Y
+port_representor = Y
 queue= Y
 represented_port = Y
 rss  = Y
 security = Y
 skip_cman= Y
 vf   = Y
+vxlan_decap  = I
diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c
index 7b684eb759..2cd88f0334 100644
--- a/drivers/net/cnxk/cnxk_flow.c
+++ b/drivers/net/cnxk/cnxk_flow.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 #include 
+#include 
 
 const struct cnxk_rte_flow_term_info term[] = {
[RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct 
rte_flow_item_eth)},
@@ -185,11 +186,44 @@ roc_npc_parse_sample_subaction(struct rte_eth_dev 
*eth_dev, const struct rte_flo
return 0;
 }
 
+static int
+representor_portid_action(struct roc_npc_action *in_actions, struct 
rte_eth_dev *portid_eth_dev,
+ uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, 
int *act_cnt)
+{
+   struct rte_eth_dev *rep_eth_dev = portid_eth_dev;
+   struct rte_flow_action_mark *act_mark;
+   struct cnxk_rep_dev *rep_dev;
+   /* For inserting an action in the list */
+   int i = *act_cnt;
+
+   rep_dev = cnxk_rep_pmd_priv(rep_eth_dev);
+   *dst_pf_func = rep_dev->hw_func;
+
+   /* Add Mark action */
+   i++;
+   act_mark = plt_zmalloc(sizeof(struct rte_flow_action_mark), 0);
+   if (!act_mark) {
+   plt_err("Error allocation memory");
+   return -ENOMEM;
+   }
+
+   /* Mark ID format: (tunnel type - VxLAN, Geneve << 6) | Tunnel decap */
+   act_mark->id = has_tunnel_pattern ? ((has_tunnel_pattern << 6) | 5) : 1;
+   in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK;
+   in_actions[i].conf = (struct rte_flow_action_mark *)act_mark;
+
+   *act_cnt = i;
+   plt_rep_dbg("Rep port %d ID %d mark ID is %d rep_dev->hw_func 0x%x", 
rep_dev->port_id,
+   rep_dev->rep_id, act_mark->id, rep_dev->hw_func);
+
+   return 0;
+}
+
 static int
 cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 const struct rte_flow_action actions[], struct roc_npc_action 
in_actions[],
 struct roc_npc_action_sample *in_sample_actions, uint32_t 
*flowkey_cfg,
-uint16_t *dst_pf_func)
+uint16_t *dst_pf_func, uint8_t has_tunnel_pattern)
 {
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct rte_flow_action_queue *act_q = NULL;
@@ -238,6 +272,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct 
rte_flow_attr *attr,
break;
 
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+   

[PATCH v6 21/23] net/cnxk: generalise flow operation APIs

2024-03-03 Thread Harman Kalra
Flow operations can be performed on cnxk ports as well as representor
ports. Since representor ports are not cnxk ports but have eswitch as
base device underneath, special handling is required to align with base
infra. Introducing a flag to generic flow APIs to discriminate if the
operation request made on normal or representor ports.

Signed-off-by: Harman Kalra 
---
 doc/guides/nics/features/cnxk.ini |   1 +
 doc/guides/nics/features/cnxk_vec.ini |   1 +
 doc/guides/nics/features/cnxk_vf.ini  |   1 +
 drivers/net/cnxk/cnxk_flow.c  | 556 ++
 drivers/net/cnxk/cnxk_flow.h  |  18 +
 5 files changed, 492 insertions(+), 85 deletions(-)

diff --git a/doc/guides/nics/features/cnxk.ini 
b/doc/guides/nics/features/cnxk.ini
index 9603d2566e..7d85fb9f93 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -71,6 +71,7 @@ ipv6_routing_ext = Y
 mark = Y
 mpls = Y
 nvgre= Y
+port_representor = Y
 pppoes   = Y
 raw  = Y
 represented_port = Y
diff --git a/doc/guides/nics/features/cnxk_vec.ini 
b/doc/guides/nics/features/cnxk_vec.ini
index 96289059ec..d925933b34 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -65,6 +65,7 @@ ipv6_frag_ext= Y
 mark = Y
 mpls = Y
 nvgre= Y
+port_representor = Y
 pppoes   = Y
 raw  = Y
 represented_port = Y
diff --git a/doc/guides/nics/features/cnxk_vf.ini 
b/doc/guides/nics/features/cnxk_vf.ini
index 850c49c5be..0da0106fa3 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -62,6 +62,7 @@ ipv6_routing_ext = Y
 mark = Y
 mpls = Y
 nvgre= Y
+port_representor = Y
 pppoes   = Y
 raw  = Y
 represented_port = Y
diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c
index 2cd88f0334..d3c20e8315 100644
--- a/drivers/net/cnxk/cnxk_flow.c
+++ b/drivers/net/cnxk/cnxk_flow.c
@@ -4,6 +4,7 @@
 #include 
 #include 
 
+#define IS_REP_BIT 7
 const struct cnxk_rte_flow_term_info term[] = {
[RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct 
rte_flow_item_eth)},
[RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN, sizeof(struct 
rte_flow_item_vlan)},
@@ -186,17 +187,162 @@ roc_npc_parse_sample_subaction(struct rte_eth_dev 
*eth_dev, const struct rte_flo
return 0;
 }
 
+static int
+representor_rep_portid_action(struct roc_npc_action *in_actions, struct 
rte_eth_dev *eth_dev,
+ struct rte_eth_dev *portid_eth_dev,
+ enum rte_flow_action_type act_type, uint8_t 
rep_pattern,
+ uint16_t *dst_pf_func, bool is_rep, uint64_t 
*free_allocs,
+ int *act_cnt)
+{
+   struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+   struct rte_eth_dev *rep_eth_dev = portid_eth_dev;
+   struct rte_flow_action_of_set_vlan_vid *vlan_vid;
+   struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
+   struct rte_flow_action_of_push_vlan *push_vlan;
+   struct rte_flow_action_queue *act_q = NULL;
+   struct cnxk_rep_dev *rep_dev;
+   struct roc_npc *npc;
+   uint16_t vlan_tci;
+   int j = 0;
+
+   /* For inserting an action in the list */
+   int i = *act_cnt;
+
+   rep_dev = cnxk_rep_pmd_priv(rep_eth_dev);
+   if (!is_rep) {
+   dev = cnxk_eth_pmd_priv(eth_dev);
+   npc = &dev->npc;
+   } else {
+   npc = &rep_dev->parent_dev->npc;
+   }
+   if (rep_pattern >> IS_REP_BIT) { /* Check for normal/representor port 
as action */
+   if ((rep_pattern & 0x7f) == 
RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
+   /* Case: Repr port pattern -> Default TX rule -> LBK ->
+*  Pattern RX LBK rule hit -> Action: send to new 
pf_func
+*/
+   if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR) {
+   /* New pf_func corresponds to ESW + queue 
corresponding to rep_id */
+   act_q = plt_zmalloc(sizeof(struct 
rte_flow_action_queue), 0);
+   if (!act_q) {
+   plt_err("Error allocation memory");
+   return -ENOMEM;
+   }
+   act_q->index = rep_dev->rep_id;
+
+   while (free_allocs[j] != 0)
+   j++;
+   free_allocs[j] = (uint64_t)act_q;
+   in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE;
+   in_actions[i].conf = (st

[PATCH v6 22/23] net/cnxk: flow create on representor ports

2024-03-03 Thread Harman Kalra
Implementing base infra for handling flow operations performed on
representor ports, where these representor ports may be representing
native representees or part of companion apps. Also added support for
handling flow create operation

Signed-off-by: Harman Kalra 
---
 doc/guides/nics/features/cnxk.ini |   1 +
 doc/guides/nics/features/cnxk_vec.ini |   1 +
 doc/guides/nics/features/cnxk_vf.ini  |   1 +
 drivers/net/cnxk/cnxk_flow.h  |   9 +-
 drivers/net/cnxk/cnxk_rep.h   |   3 +
 drivers/net/cnxk/cnxk_rep_flow.c  | 401 ++
 drivers/net/cnxk/cnxk_rep_msg.h   |  27 ++
 drivers/net/cnxk/cnxk_rep_ops.c   |   3 +-
 drivers/net/cnxk/meson.build  |   1 +
 9 files changed, 444 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/cnxk/cnxk_rep_flow.c

diff --git a/doc/guides/nics/features/cnxk.ini 
b/doc/guides/nics/features/cnxk.ini
index 7d85fb9f93..b5d9f7e579 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -105,3 +105,4 @@ security = Y
 skip_cman= Y
 vf   = Y
 vxlan_decap  = I
+vxlan_encap  = I
diff --git a/doc/guides/nics/features/cnxk_vec.ini 
b/doc/guides/nics/features/cnxk_vec.ini
index d925933b34..92a486664f 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -94,3 +94,4 @@ rss  = Y
 security = Y
 vf   = Y
 vxlan_decap  = I
+vxlan_encap  = I
diff --git a/doc/guides/nics/features/cnxk_vf.ini 
b/doc/guides/nics/features/cnxk_vf.ini
index 0da0106fa3..a55f0e7ce5 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -94,3 +94,4 @@ security = Y
 skip_cman= Y
 vf   = Y
 vxlan_decap  = I
+vxlan_encap  = I
diff --git a/drivers/net/cnxk/cnxk_flow.h b/drivers/net/cnxk/cnxk_flow.h
index 226694fbed..e51d04b2c9 100644
--- a/drivers/net/cnxk/cnxk_flow.h
+++ b/drivers/net/cnxk/cnxk_flow.h
@@ -16,8 +16,13 @@ struct cnxk_rte_flow_term_info {
uint16_t item_size;
 };
 
-struct roc_npc_flow *cnxk_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
+struct cnxk_rte_flow_action_info {
+   uint16_t conf_size;
+};
+
+extern const struct cnxk_rte_flow_term_info term[];
+
+struct roc_npc_flow *cnxk_flow_create(struct rte_eth_dev *dev, const struct 
rte_flow_attr *attr,
  const struct rte_flow_item pattern[],
  const struct rte_flow_action actions[],
  struct rte_flow_error *error);
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index ab8b3fb152..9bdea47bd4 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -20,6 +20,9 @@
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_rep_dev_ops;
 
+/* Flow ops for representor ports */
+extern struct rte_flow_ops cnxk_rep_flow_ops;
+
 struct cnxk_rep_queue_stats {
uint64_t pkts;
uint64_t bytes;
diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c
new file mode 100644
index 00..2613be5b9e
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep_flow.c
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+#define DEFAULT_DUMP_FILE_NAME "/tmp/fdump"
+#define MAX_BUFFER_SIZE   1500
+
+const struct cnxk_rte_flow_action_info action_info[] = {
+   [RTE_FLOW_ACTION_TYPE_MARK] = {sizeof(struct rte_flow_action_mark)},
+   [RTE_FLOW_ACTION_TYPE_VF] = {sizeof(struct rte_flow_action_vf)},
+   [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = {sizeof(struct 
rte_flow_action_port_id)},
+   [RTE_FLOW_ACTION_TYPE_PORT_ID] = {sizeof(struct 
rte_flow_action_port_id)},
+   [RTE_FLOW_ACTION_TYPE_QUEUE] = {sizeof(struct rte_flow_action_queue)},
+   [RTE_FLOW_ACTION_TYPE_RSS] = {sizeof(struct rte_flow_action_rss)},
+   [RTE_FLOW_ACTION_TYPE_SECURITY] = {sizeof(struct 
rte_flow_action_security)},
+   [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = {sizeof(struct 
rte_flow_action_of_set_vlan_vid)},
+   [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = {sizeof(struct 
rte_flow_action_of_push_vlan)},
+   [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = {sizeof(struct 
rte_flow_action_of_set_vlan_pcp)},
+   [RTE_FLOW_ACTION_TYPE_METER] = {sizeof(struct rte_flow_action_meter)},
+   [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = {sizeof(struct 
rte_flow_action_vxlan_encap)},
+   [RTE_FLOW_ACTION_TYPE_COUNT] = {sizeof(struct rte_flow_action_count)},
+};
+
+static void
+cnxk_flow_params_count(const struct rte_flow_item pattern[], const struct 
rte_flow_action actions[],
+  uint16_t *n_pattern, uint16_t *n_action)
+{
+   int i = 0;
+
+   for 

[PATCH v6 23/23] net/cnxk: other flow operations

2024-03-03 Thread Harman Kalra
Implementing other flow operations - validate, destroy, query,
flush, dump for representor ports

Signed-off-by: Harman Kalra 
---
 doc/guides/rel_notes/release_24_03.rst |   1 +
 drivers/net/cnxk/cnxk_rep_flow.c   | 414 +
 drivers/net/cnxk/cnxk_rep_msg.h|  32 ++
 3 files changed, 447 insertions(+)

diff --git a/doc/guides/rel_notes/release_24_03.rst 
b/doc/guides/rel_notes/release_24_03.rst
index 39ffef11b0..2b160cfd0f 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -112,6 +112,7 @@ New Features
   * Added support for ``RTE_FLOW_ACTION_TYPE_SAMPLE`` flow item.
   * Added support for Rx inject.
   * Optimized SW external mbuf free for better performance and avoid SQ 
corruption.
+  * Added support for port representors.
 
 * **Updated Marvell OCTEON EP driver.**
 
diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c
index 2613be5b9e..d26f5aa12c 100644
--- a/drivers/net/cnxk/cnxk_rep_flow.c
+++ b/drivers/net/cnxk/cnxk_rep_flow.c
@@ -267,6 +267,222 @@ populate_action_data(void *buffer, uint32_t *length, 
const struct rte_flow_actio
*length = len;
 }
 
+static int
+process_flow_destroy(struct cnxk_rep_dev *rep_dev, void *flow, 
cnxk_rep_msg_ack_data_t *adata)
+{
+   cnxk_rep_msg_flow_destroy_meta_t msg_fd_meta;
+   uint32_t len = 0, rc;
+   void *buffer;
+   size_t size;
+
+   /* If representor not representing any active VF, return 0 */
+   if (!rep_dev->is_vf_active)
+   return 0;
+
+   size = MAX_BUFFER_SIZE;
+   buffer = plt_zmalloc(size, 0);
+   if (!buffer) {
+   plt_err("Failed to allocate mem");
+   rc = -ENOMEM;
+   goto fail;
+   }
+
+   cnxk_rep_msg_populate_header(buffer, &len);
+
+   msg_fd_meta.portid = rep_dev->rep_id;
+   msg_fd_meta.flow = (uint64_t)flow;
+   plt_rep_dbg("Flow Destroy: flow 0x%" PRIu64 ", portid %d", 
msg_fd_meta.flow,
+   msg_fd_meta.portid);
+   cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fd_meta,
+  
sizeof(cnxk_rep_msg_flow_destroy_meta_t),
+  CNXK_REP_MSG_FLOW_DESTROY);
+   cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+   rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
+   if (rc) {
+   plt_err("Failed to process the message, err %d", rc);
+   goto fail;
+   }
+
+   return 0;
+fail:
+   return rc;
+}
+
+static int
+copy_flow_dump_file(FILE *target)
+{
+   FILE *source = NULL;
+   int pos;
+   char ch;
+
+   source = fopen(DEFAULT_DUMP_FILE_NAME, "r");
+   if (source == NULL) {
+   plt_err("Failed to read default dump file: %s, err %d", 
DEFAULT_DUMP_FILE_NAME,
+   errno);
+   return errno;
+   }
+
+   fseek(source, 0L, SEEK_END);
+   pos = ftell(source);
+   fseek(source, 0L, SEEK_SET);
+   while (pos--) {
+   ch = fgetc(source);
+   fputc(ch, target);
+   }
+
+   fclose(source);
+
+   /* Remove the default file after reading */
+   remove(DEFAULT_DUMP_FILE_NAME);
+
+   return 0;
+}
+
+static int
+process_flow_dump(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow, FILE 
*file,
+ cnxk_rep_msg_ack_data_t *adata)
+{
+   cnxk_rep_msg_flow_dump_meta_t msg_fp_meta;
+   uint32_t len = 0, rc;
+   void *buffer;
+   size_t size;
+
+   size = MAX_BUFFER_SIZE;
+   buffer = plt_zmalloc(size, 0);
+   if (!buffer) {
+   plt_err("Failed to allocate mem");
+   rc = -ENOMEM;
+   goto fail;
+   }
+
+   cnxk_rep_msg_populate_header(buffer, &len);
+
+   msg_fp_meta.portid = rep_dev->rep_id;
+   msg_fp_meta.flow = (uint64_t)flow;
+   msg_fp_meta.is_stdout = (file == stdout) ? 1 : 0;
+
+   plt_rep_dbg("Flow Dump: flow 0x%" PRIu64 ", portid %d stdout %d", 
msg_fp_meta.flow,
+   msg_fp_meta.portid, msg_fp_meta.is_stdout);
+   cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fp_meta,
+  
sizeof(cnxk_rep_msg_flow_dump_meta_t),
+  CNXK_REP_MSG_FLOW_DUMP);
+   cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+   rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
+   if (rc) {
+   plt_err("Failed to process the message, err %d", rc);
+   goto fail;
+   }
+
+   /* Copy contents from default file to user file */
+   if (file != stdout)
+   copy_flow_dump_file(file);
+
+   return 0;
+fail:
+   return rc;
+}
+
+static int
+process_flow_flush(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t 
*adata)
+{
+   cnxk_rep_msg_flow_flush_meta_t msg_ff_meta;
+   uint32_t len = 0, rc;
+   void *bu

Re: [PATCH v2] lib/hash: feature reclaim defer queue

2024-03-03 Thread Honnappa Nagarahalli
Hello Abdullah,
Thank you for the patch; a few comments inline.

The short commit log could be changed as follows:

"lib/hash: add defer queue reclaim API”

> On Mar 2, 2024, at 3:27 PM, Abdullah Ömer Yamaç  wrote:
> 
> This patch adds a new feature to the hash library to allow the user to
> reclaim the defer queue. This is useful when the user wants to force
> reclaim resources that are not being used. This API is only available
> if the RCU is enabled.
> 
> Signed-off-by: Abdullah Ömer Yamaç 
> Acked-by: Honnappa Nagarahalli 
Please add this only after you get an explicit Ack on the patch.

> ---
> lib/hash/rte_cuckoo_hash.c | 23 +++
> lib/hash/rte_hash.h| 14 ++
> lib/hash/version.map   |  7 +++
> 3 files changed, 44 insertions(+)
> 
> diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
> index 9cf94645f6..254fa80cc5 100644
> --- a/lib/hash/rte_cuckoo_hash.c
> +++ b/lib/hash/rte_cuckoo_hash.c
> @@ -1588,6 +1588,27 @@ rte_hash_rcu_qsbr_add(struct rte_hash *h, struct 
> rte_hash_rcu_config *cfg)
> return 0;
> }
> 
> +int
> +rte_hash_rcu_qsbr_dq_reclaim(struct rte_hash *h)
We need to add freed, pending and available parameters to this API. I think 
this information will be helpful for the users. For ex: in your use case, you 
could use the pending value to calculate the available hash entries.

> +{
> + int ret;
> +
> + if (h->hash_rcu_cfg == NULL || h->dq == NULL) {
We can skip NULL check for h->dq as the RCU reclaim API makes the same check.

> + rte_errno = EINVAL;
> + return -1;
> + }
> +
> + ret = rte_rcu_qsbr_dq_reclaim(h->dq, h->hash_rcu_cfg->max_reclaim_size, 
> NULL, NULL, NULL);
> + if (ret != 0) {
> + HASH_LOG(ERR,
> + "%s: could not reclaim the defer queue in hash table",
> + __func__);
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> static inline void
> remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt,
> unsigned int i)
> diff --git a/lib/hash/rte_hash.h b/lib/hash/rte_hash.h
> index 7ecc02..c119477d50 100644
> --- a/lib/hash/rte_hash.h
> +++ b/lib/hash/rte_hash.h
> @@ -674,6 +674,21 @@ rte_hash_iterate(const struct rte_hash *h, const void 
> **key, void **data, uint32
>  */
> int rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config 
> *cfg);
> 
> +/**
> + * Reclaim resources from the defer queue.
> + * This API reclaim the resources from the defer queue if rcu is enabled.
> + *
> + * @param h
> + *   the hash object to reclaim resources
> + * @return
> + *   On success - 0
> + *   On error - 1 with error code set in rte_errno.
> + *   Possible rte_errno codes are:
> + *   - EINVAL - invalid pointer or invalid rcu mode
We can remove the ‘invalid rcu mode’.

> + */
> +__rte_experimental
> +int rte_hash_rcu_qsbr_dq_reclaim(struct rte_hash *h);
> +
> #ifdef __cplusplus
> }
> #endif
> diff --git a/lib/hash/version.map b/lib/hash/version.map
> index 6b2afebf6b..cec0e8fc67 100644
> --- a/lib/hash/version.map
> +++ b/lib/hash/version.map
> @@ -48,3 +48,9 @@ DPDK_24 {
> 
> local: *;
> };
> +
> +EXPERIMENTAL {
> + global:
> +
> + rte_hash_rcu_qsbr_dq_reclaim;
> +}
> \ No newline at end of file
> -- 
> 2.34.1
> 



[PATCH v5 1/3] common/cnxk: dma result to an offset of the event

2024-03-03 Thread Amit Prakash Shukla
Adds support to configure writing the result to an offset of the DMA
response event.

Signed-off-by: Amit Prakash Shukla 
---
v5:
- Fixed checkpatch warning.

v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 drivers/common/cnxk/roc_dpi.c   |  6 +-
 drivers/common/cnxk/roc_dpi.h   |  2 +-
 drivers/common/cnxk/roc_dpi_priv.h  |  4 
 drivers/common/cnxk/roc_idev.c  | 20 
 drivers/common/cnxk/roc_idev_priv.h |  3 +++
 drivers/dma/cnxk/cnxk_dmadev.c  |  2 +-
 6 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index c241168294..1ee777d779 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -83,6 +83,9 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, 
uint64_t aura, uin
mbox_msg.s.aura = aura;
mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
+   mbox_msg.s.wqecsoff = idev_dma_cs_offset_get();
+   if (mbox_msg.s.wqecsoff)
+   mbox_msg.s.wqecs = 1;
 
rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
sizeof(dpi_mbox_msg_t));
@@ -94,7 +97,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, 
uint64_t aura, uin
 }
 
 int
-roc_dpi_dev_init(struct roc_dpi *roc_dpi)
+roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset)
 {
struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
uint16_t vfid;
@@ -103,6 +106,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 
0x7);
vfid -= 1;
roc_dpi->vfid = vfid;
+   idev_dma_cs_offset_set(offset);
 
return 0;
 }
diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h
index 4ebde5b8a6..978e2badb2 100644
--- a/drivers/common/cnxk/roc_dpi.h
+++ b/drivers/common/cnxk/roc_dpi.h
@@ -11,7 +11,7 @@ struct roc_dpi {
uint16_t vfid;
 } __plt_cache_aligned;
 
-int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);
+int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);
 int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);
 
 int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, 
uint64_t aura,
diff --git a/drivers/common/cnxk/roc_dpi_priv.h 
b/drivers/common/cnxk/roc_dpi_priv.h
index 518a3e7351..52962c8bc0 100644
--- a/drivers/common/cnxk/roc_dpi_priv.h
+++ b/drivers/common/cnxk/roc_dpi_priv.h
@@ -31,6 +31,10 @@ typedef union dpi_mbox_msg_t {
uint64_t sso_pf_func : 16;
/* NPA PF function */
uint64_t npa_pf_func : 16;
+   /* WQE queue DMA completion status enable */
+   uint64_t wqecs : 1;
+   /* WQE queue DMA completion status offset */
+   uint64_t wqecsoff : 8;
} s;
 } dpi_mbox_msg_t;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 48df3518b0..d0307c666c 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -301,6 +301,26 @@ idev_sso_set(struct roc_sso *sso)
__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
 
+void
+idev_dma_cs_offset_set(uint8_t offset)
+{
+   struct idev_cfg *idev = idev_get_cfg();
+
+   if (idev != NULL)
+   idev->dma_cs_offset = offset;
+}
+
+uint8_t
+idev_dma_cs_offset_get(void)
+{
+   struct idev_cfg *idev = idev_get_cfg();
+
+   if (idev != NULL)
+   return idev->dma_cs_offset;
+
+   return 0;
+}
+
 uint64_t
 roc_idev_nix_inl_meta_aura_get(void)
 {
diff --git a/drivers/common/cnxk/roc_idev_priv.h 
b/drivers/common/cnxk/roc_idev_priv.h
index 8dc1cb25bf..6628b18152 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -43,6 +43,7 @@ struct idev_cfg {
struct idev_nix_inl_rx_inj_cfg inl_rx_inj_cfg;
plt_spinlock_t nix_inl_dev_lock;
plt_spinlock_t npa_dev_lock;
+   uint8_t dma_cs_offset;
 };
 
 /* Generic */
@@ -61,6 +62,8 @@ void idev_sso_pffunc_set(uint16_t sso_pf_func);
 uint16_t idev_sso_pffunc_get(void);
 struct roc_sso *idev_sso_get(void);
 void idev_sso_set(struct roc_sso *sso);
+void idev_dma_cs_offset_set(uint8_t offset);
+uint8_t idev_dma_cs_offset_get(void);
 
 /* idev lmt */
 uint16_t idev_lmt_pffunc_get(void);
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 1e7f49792c..48ab09cc38 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -592,7 +592,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv 
__rte_unused, struct rte_pci_de
rdpi = &dpivf->rdpi;
 
rdpi->pci_dev = pci_dev;
-   rc = roc_dpi_dev_init(rdpi);
+   rc = roc_dpi_dev_init(rdpi, 0);
if (rc < 0)
  

[PATCH v5 2/3] dma/cnxk: support for DMA event enqueue dequeue

2024-03-03 Thread Amit Prakash Shukla
Added cnxk driver support for dma event enqueue and dequeue.
Also added changes for work queue entry completion status and
dual workslot DMA event enqueue.

Signed-off-by: Pavan Nikhilesh 
Signed-off-by: Amit Prakash Shukla 
---
v5:
- Fixed checkpatch warning.

v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/eventdevs/cnxk.rst  |   5 +
 doc/guides/rel_notes/release_24_03.rst |   4 +
 drivers/dma/cnxk/cnxk_dma_event_dp.h   |  24 ++
 drivers/dma/cnxk/cnxk_dmadev.c |   3 +-
 drivers/dma/cnxk/cnxk_dmadev.h |  20 +-
 drivers/dma/cnxk/cnxk_dmadev_fp.c  | 290 +
 drivers/dma/cnxk/meson.build   |   9 +-
 drivers/dma/cnxk/version.map   |  10 +
 drivers/event/cnxk/cn9k_eventdev.c |   2 +
 drivers/event/cnxk/cn9k_worker.h   |   1 +
 drivers/event/cnxk/meson.build |   2 +-
 11 files changed, 366 insertions(+), 4 deletions(-)
 create mode 100644 drivers/dma/cnxk/cnxk_dma_event_dp.h
 create mode 100644 drivers/dma/cnxk/version.map

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..9ff1052c53 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -227,3 +227,8 @@ ethernet devices connected to event device to override this 
applications can
 use `force_rx_bp=1` device arguments.
 Using unique mempool per each ethernet device is recommended when they are
 connected to event device.
+
+DMA adapter new mode support
+
+
+DMA driver does not support DMA adapter configured in new mode.
diff --git a/doc/guides/rel_notes/release_24_03.rst 
b/doc/guides/rel_notes/release_24_03.rst
index 39ffef11b0..59a128a0a9 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -142,6 +142,10 @@ New Features
 to support TLS v1.2, TLS v1.3 and DTLS v1.2.
   * Added PMD API to allow raw submission of instructions to CPT.
 
+* **Updated Marvell cnxk DMA driver.**
+
+  * Added support for DMA event enqueue and dequeue.
+  * Added support for dual workslot DMA event enqueue.
 
 Removed Items
 -
diff --git a/drivers/dma/cnxk/cnxk_dma_event_dp.h 
b/drivers/dma/cnxk/cnxk_dma_event_dp.h
new file mode 100644
index 00..5f890ab18b
--- /dev/null
+++ b/drivers/dma/cnxk/cnxk_dma_event_dp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#ifndef _CNXK_DMA_EVENT_DP_H_
+#define _CNXK_DMA_EVENT_DP_H_
+
+#include 
+
+#include 
+#include 
+
+__rte_internal
+uint16_t cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t 
nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t 
nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], 
uint16_t nb_events);
+
+__rte_internal
+uintptr_t cnxk_dma_adapter_dequeue(uintptr_t get_work1);
+#endif /* _CNXK_DMA_EVENT_DP_H_ */
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 48ab09cc38..4ab3cfbdf2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -589,10 +589,11 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv 
__rte_unused, struct rte_pci_de
dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg;
}
 
+   dpivf->mcs_lock = NULL;
rdpi = &dpivf->rdpi;
 
rdpi->pci_dev = pci_dev;
-   rc = roc_dpi_dev_init(rdpi, 0);
+   rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
if (rc < 0)
goto err_out_free;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 350ae73b5c..610a360ba2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -14,11 +14,14 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
 #include 
 
+#include "cnxk_dma_event_dp.h"
+
 #define CNXK_DPI_MAX_POINTER   15
 #define CNXK_DPI_STRM_INC(s, var)  ((s).var = ((s).var + 1) & 
(s).max_cnt)
 #define CNXK_DPI_STRM_DEC(s, var)  ((s).var = ((s).var - 1) == -1 ? 
(s).max_cnt :  \
@@ -40,6 +43,11 @@
  */
 #define CNXK_DPI_REQ_CDATA 0xFF
 
+/* Set Completion data to 0xDEADBEEF when request submitted for SSO.
+ * This helps differentiate if the dequeue is called after cnxk enueue.
+ */
+#define CNXK_DPI_REQ_SSO_CDATA0xDEADBEEF
+
 union cnxk_dpi_instr_cmd {
uint64_t u;
struct cn9k_dpi_instr_cmd {
@@ -85,7 +93,10 @@ union cnxk_dpi_instr_cmd {
 
 struct cnxk_dpi_compl_s {
uint64_t cdata;
-   void *cb_data;
+   void *op;
+   uint16_t dev_id;
+   uint16_t vchan;
+   uint32_t wqecs;
 };
 
 struct cnxk_dpi_cdesc_data_s {
@@ -95,6 +106,11 @@ struct cnxk_dpi_cdesc_data_s {
uint16_t tail;
 };
 
+struct cnxk_dma_adapter_info {
+   b

[PATCH v5 3/3] event/cnxk: support DMA event functions

2024-03-03 Thread Amit Prakash Shukla
Added support of dma driver callback assignment to eventdev
enqueue and dequeue. The change also defines dma adapter
capabilities function.

Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")

Signed-off-by: Amit Prakash Shukla 
---
v5:
- Fixed checkpatch warning.

v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/rel_notes/release_24_03.rst   |  4 +
 drivers/event/cnxk/cn10k_eventdev.c  | 70 +
 drivers/event/cnxk/cn10k_worker.h|  3 +
 drivers/event/cnxk/cn9k_eventdev.c   | 67 
 drivers/event/cnxk/cn9k_worker.h |  2 +
 drivers/event/cnxk/cnxk_eventdev.h   |  3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 97 
 drivers/event/cnxk/meson.build   |  1 -
 8 files changed, 246 insertions(+), 1 deletion(-)

diff --git a/doc/guides/rel_notes/release_24_03.rst 
b/doc/guides/rel_notes/release_24_03.rst
index 59a128a0a9..d3883aadd9 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -147,6 +147,10 @@ New Features
   * Added support for DMA event enqueue and dequeue.
   * Added support for dual workslot DMA event enqueue.
 
+* **Updated Marvell cnxk eventdev driver.**
+
+  * Added support for DMA driver callback assignment to eventdev enqueue and 
dequeue.
+
 Removed Items
 -
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c 
b/drivers/event/cnxk/cn10k_eventdev.c
index 221f419055..18f3b402c9 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include 
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)   
\
deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -477,6 +480,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
else
event_dev->ca_enqueue = 
cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+   event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, 
sso_hws_tx_adptr_enq_seg);
else
@@ -1020,6 +1025,67 @@ cn10k_crypto_adapter_vec_limits(const struct 
rte_eventdev *event_dev,
return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+  const int16_t dma_dev_id, uint32_t *caps)
+{
+   struct rte_dma_dev *dma_dev;
+
+   RTE_SET_USED(event_dev);
+
+   dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+   if (dma_dev == NULL)
+   return -EINVAL;
+
+   CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", 
EINVAL);
+
+   *caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+   return 0;
+}
+
+static int
+cn10k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+   const int16_t dma_dev_id, uint16_t vchan_id,
+   const struct rte_event *event)
+{
+   struct rte_dma_dev *dma_dev;
+   int ret;
+
+   RTE_SET_USED(event);
+   dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+   if (dma_dev == NULL)
+   return -EINVAL;
+
+   CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", 
EINVAL);
+
+   cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+   ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+   cn10k_sso_set_priv_mem(event_dev, NULL);
+
+   return ret;
+}
+
+static int
+cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+   const int16_t dma_dev_id, uint16_t vchan_id)
+{
+   struct rte_dma_dev *dma_dev;
+
+   RTE_SET_USED(event_dev);
+
+   dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+   if (dma_dev == NULL)
+   return -EINVAL;
+
+   CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", 
EINVAL);
+
+   return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
+
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
@@ -1061,6 +1127,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+   .dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+   .dma_adapter_vchan_add = cn10k_dma_adapter_vchan_add,
+   .dma_adapter_vchan_del = cn10k_dma_adapter_vchan_del,
+
.xstats_get = cnxk_sso_xstats_get,
.xstats_reset = cnxk_sso_xstats_reset,
.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drive

RE: [PATCH 1/3] net/nfp: add the elf module

2024-03-03 Thread Chaoyong He
> On 2/28/2024 10:18 PM, Stephen Hemminger wrote:
> > On Tue, 27 Feb 2024 19:15:49 +0800
> > Chaoyong He  wrote:
> >
> >> From: Peng Zhang 
> >>
> >> Add the elf module, which can get mip information from the firmware
> >> ELF file.
> >>
> >> Signed-off-by: Peng Zhang 
> >> Reviewed-by: Chaoyong He 
> >> Reviewed-by: Long Wu 
> >> ---
> >
> > Why are you rolling your own ELF parser?
> > There are libraries to do this such as libelf.
> > Libelf is already used in the BPF part of DPDK.
> >
> 
> There cons and pros to depend external library, as this is in the limited 
> scope of
> the driver I am less concerned about local code.
> 
> Chaoyong, what is your take on the issue, did you consider using libelf 
> library
> option?

Firstly, the nffw firmware file is a custom ELF file, so we are not sure the 
libelf library can meet our needs totally.
Then, we share the same logic with our BSP code, and we don't want to have two 
different logic for the same requirement.


RE: [PATCH] net/ice: fix null pointer dereferences

2024-03-03 Thread Ma, WenwuX



> -Original Message-
> From: Richardson, Bruce 
> Sent: Friday, March 1, 2024 6:35 PM
> To: Ma, WenwuX 
> Cc: dev@dpdk.org; Jiale, SongX 
> Subject: Re: [PATCH] net/ice: fix null pointer dereferences
> 
> On Fri, Mar 01, 2024 at 01:20:29PM +0800, Wenwu Ma wrote:
> > This patch fixes two null pointer dereferences detected by coverity
> > scan.
> >
> > Coverity issue: 414096
> > Fixes: 6ccef90ff5d3 ("net/ice: support VSI level bandwidth config")
> >
> > Signed-off-by: Wenwu Ma 
> > ---
> >  drivers/net/ice/ice_tm.c | 10 --
> >  1 file changed, 8 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c index
> > fbab0b8808..e10ac855f9 100644
> > --- a/drivers/net/ice/ice_tm.c
> > +++ b/drivers/net/ice/ice_tm.c
> > @@ -616,7 +616,10 @@ static int ice_set_node_rate(struct ice_hw *hw,
> >ICE_MAX_BW,
> >rate);
> > if (status) {
> > -   PMD_DRV_LOG(ERR, "Failed to set max bandwidth for
> node %u", tm_node->id);
> > +   if (tm_node != NULL)
> > +   PMD_DRV_LOG(ERR, "Failed to set max bandwidth for
> node %u", tm_node->id);
> > +   else
> > +   PMD_DRV_LOG(ERR, "Failed to set max bandwidth");
> > return -EINVAL;
> > }
> >
> > @@ -630,7 +633,10 @@ static int ice_set_node_rate(struct ice_hw *hw,
> >ICE_MIN_BW,
> >rate);
> > if (status) {
> > -   PMD_DRV_LOG(ERR, "Failed to set min bandwidth for
> node %u", tm_node->id);
> > +   if (tm_node != NULL)
> > +   PMD_DRV_LOG(ERR, "Failed to set min bandwidth for
> node %u", tm_node->id);
> > +   else
> > +   PMD_DRV_LOG(ERR, "Failed to set min bandwidth");
> > return -EINVAL;
> > }
> >
> Hi Wenwu,
> 
> I'm not sure that this is the best fix here, since the error message doesn't 
> seem
> particularly useful without the node id. Looking at the code, this is a static
> function, so non-public, and only called in three places in
> rte_tm.c: from ice_cfg_hw_node, ice_do_hierarchy_commit and
> ice_reset_nolead_nodes. In all three cases, failure of this function is
> immediately followed by a more specific error message from the calling
> function. Therefore, I think we can solve the coverity problem by just 
> deleting
> the error prints from here completely, and let the callers manage error
> reporting.
> 
> What do you think?
> 
Ok, I will submit a new patch later.

> /Bruce


[PATCH v2] net/ice: fix null pointer dereferences

2024-03-03 Thread Wenwu Ma
This patch fixes two null pointer dereferences detected by
coverity scan.

Coverity issue: 414096
Fixes: 6ccef90ff5d3 ("net/ice: support VSI level bandwidth config")
Cc: sta...@dpdk.org

Signed-off-by: Wenwu Ma 
---
v2:
 - deleting rather than modifying the prints that
   cause null pointer dereferences

---
 drivers/net/ice/ice_tm.c | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index fbab0b8808..17f0ca0ce0 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -615,10 +615,8 @@ static int ice_set_node_rate(struct ice_hw *hw,
   sched_node,
   ICE_MAX_BW,
   rate);
-   if (status) {
-   PMD_DRV_LOG(ERR, "Failed to set max bandwidth for node %u", 
tm_node->id);
+   if (status)
return -EINVAL;
-   }
 
if (reset || committed == 0)
rate = ICE_SCHED_DFLT_BW;
@@ -629,10 +627,8 @@ static int ice_set_node_rate(struct ice_hw *hw,
   sched_node,
   ICE_MIN_BW,
   rate);
-   if (status) {
-   PMD_DRV_LOG(ERR, "Failed to set min bandwidth for node %u", 
tm_node->id);
+   if (status)
return -EINVAL;
-   }
 
return 0;
 }
-- 
2.25.1



RE: [EXTERNAL] [PATCH v5] common/qat: add virtual qat device (vQAT)

2024-03-03 Thread Akhil Goyal
> Subject: RE: [EXTERNAL] [PATCH v5] common/qat: add virtual qat device (vQAT)
> 
> > --- a/doc/guides/rel_notes/release_24_03.rst
> > +++ b/doc/guides/rel_notes/release_24_03.rst
> > @@ -146,6 +146,10 @@ New Features
> >  to support TLS v1.2, TLS v1.3 and DTLS v1.2.
> >* Added PMD API to allow raw submission of instructions to CPT.
> >
> > +* **Updated Intel QuickAssist Technology driver.**
> > +
> > +  * Enabled support for virtual QAT - vQAT (0da5) devices in QAT PMD.
> > +
> >
> You should add QAT updates under the same bullet which is already there
> for previous updates.
> I will fix it up while applying but you should take care in future.

Applied to dpdk-next-crypto with above update.
Thanks.


RE: [EXTERNAL] [PATCH v5 1/3] common/qat: isolate parser arguments configuration

2024-03-03 Thread Akhil Goyal
> This commit isolates qat device arguments from the common
> code. Now arguments are defined per service, and only appear
> in the application if the service is compiled-in.
> 
> Depends-on: patch-137678 ("common/qat: add virtual qat device (vQAT)")
> 
> Signed-off-by: Arkadiusz Kusztal 
Applied to dpdk-next-crypto
Thanks.


RE: [PATCH v5 3/3] common/qat: fix incorrectly placed legacy flag

2024-03-03 Thread Akhil Goyal
> > Subject: [PATCH v5 3/3] common/qat: fix incorrectly placed legacy flag
> >
> > This commit fixes a legacy flag, which was placed in a file that may not be
> > included in a building process.
> >
> > Fixes: cffb726b7797 ("crypto/qat: enable insecure algorithms")
> >
> > Signed-off-by: Arkadiusz Kusztal 
Cc: sta...@dpdk.org

Series Applied to dpdk-next-crypto



RE: [PATCH 0/7] add Nitrox compress device support

2024-03-03 Thread Akhil Goyal
> Subject: [PATCH 0/7] add Nitrox compress device support
> 
> Add the Nitrox PMD to support Nitrox compress device.
> ---
> v5:
> * Added missing entry for nitrox folder in compress meson.json
> 
> v4:
> * Fixed checkpatch warnings.
> * Updated release notes.
> 
> v3:
> * Fixed ABI compatibility issue.
> 
> v2:
> * Reformatted patches to minimize number of changes.
> * Removed empty file with only copyright.
> * Updated all feature flags in nitrox.ini file.
> * Added separate gotos in nitrox_pci_probe() function.
> 
> Nagadheeraj Rottela (7):
>   crypto/nitrox: move common code
>   drivers/compress: add Nitrox driver
>   common/nitrox: add compress hardware queue management
>   crypto/nitrox: set queue type during queue pair setup
>   compress/nitrox: add software queue management
>   compress/nitrox: support stateless request
>   compress/nitrox: support stateful request
> 
>  MAINTAINERS   |8 +
>  doc/guides/compressdevs/features/nitrox.ini   |   17 +
>  doc/guides/compressdevs/index.rst |1 +
>  doc/guides/compressdevs/nitrox.rst|   50 +
>  doc/guides/rel_notes/release_24_03.rst|3 +
>  drivers/common/nitrox/meson.build |   19 +
>  .../{crypto => common}/nitrox/nitrox_csr.h|   12 +
>  .../{crypto => common}/nitrox/nitrox_device.c |   51 +-
>  .../{crypto => common}/nitrox/nitrox_device.h |4 +-
>  .../{crypto => common}/nitrox/nitrox_hal.c|  116 ++
>  .../{crypto => common}/nitrox/nitrox_hal.h|  115 ++
>  .../{crypto => common}/nitrox/nitrox_logs.c   |0
>  .../{crypto => common}/nitrox/nitrox_logs.h   |0
>  drivers/{crypto => common}/nitrox/nitrox_qp.c |   56 +-
>  drivers/{crypto => common}/nitrox/nitrox_qp.h |   60 +-
>  drivers/common/nitrox/version.map |9 +
>  drivers/compress/meson.build  |1 +
>  drivers/compress/nitrox/meson.build   |   16 +
>  drivers/compress/nitrox/nitrox_comp.c |  604 +
>  drivers/compress/nitrox/nitrox_comp.h |   35 +
>  drivers/compress/nitrox/nitrox_comp_reqmgr.c  | 1194 +
>  drivers/compress/nitrox/nitrox_comp_reqmgr.h  |   58 +
>  drivers/crypto/nitrox/meson.build |   11 +-
>  drivers/crypto/nitrox/nitrox_sym.c|1 +
>  drivers/meson.build   |1 +
>  25 files changed, 2412 insertions(+), 30 deletions(-)
>  create mode 100644 doc/guides/compressdevs/features/nitrox.ini
>  create mode 100644 doc/guides/compressdevs/nitrox.rst
>  create mode 100644 drivers/common/nitrox/meson.build
>  rename drivers/{crypto => common}/nitrox/nitrox_csr.h (67%)
>  rename drivers/{crypto => common}/nitrox/nitrox_device.c (77%)
>  rename drivers/{crypto => common}/nitrox/nitrox_device.h (81%)
>  rename drivers/{crypto => common}/nitrox/nitrox_hal.c (65%)
>  rename drivers/{crypto => common}/nitrox/nitrox_hal.h (59%)
>  rename drivers/{crypto => common}/nitrox/nitrox_logs.c (100%)
>  rename drivers/{crypto => common}/nitrox/nitrox_logs.h (100%)
>  rename drivers/{crypto => common}/nitrox/nitrox_qp.c (67%)
>  rename drivers/{crypto => common}/nitrox/nitrox_qp.h (55%)
>  create mode 100644 drivers/common/nitrox/version.map
>  create mode 100644 drivers/compress/nitrox/meson.build
>  create mode 100644 drivers/compress/nitrox/nitrox_comp.c
>  create mode 100644 drivers/compress/nitrox/nitrox_comp.h
>  create mode 100644 drivers/compress/nitrox/nitrox_comp_reqmgr.c
>  create mode 100644 drivers/compress/nitrox/nitrox_comp_reqmgr.h
> 
Applied to dpdk-next-crypto.

Reworked and moved release notes changes to last patch.


RE: [EXTERNAL] [PATCH] crypto/mlx5: add virtual function device ID

2024-03-03 Thread Akhil Goyal
> Subject: [EXTERNAL] [PATCH] crypto/mlx5: add virtual function device ID
> 
> This adds the virtual function device ID to the list of
> supported NVIDIA devices that run the MLX5 compress PMD.

Compress PMD or crypto PMD? 

> 
> Signed-off-by: Suanming Mou 
> Acked-by: Matan Azrad 
> ---
>  drivers/crypto/mlx5/mlx5_crypto.c | 4 
>  1 file changed, 4 insertions(+)
> 
> diff --git a/drivers/crypto/mlx5/mlx5_crypto.c
> b/drivers/crypto/mlx5/mlx5_crypto.c
> index 4bac723c8b..26bd4087da 100644
> --- a/drivers/crypto/mlx5/mlx5_crypto.c
> +++ b/drivers/crypto/mlx5/mlx5_crypto.c
> @@ -465,6 +465,10 @@ static const struct rte_pci_id mlx5_crypto_pci_id_map[]
> = {
>   RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
> 
>   PCI_DEVICE_ID_MELLANOX_BLUEFIELD3)
>   },
> + {
> + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
> +
>   PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
> + },
>   {
>   .vendor_id = 0
>   }
> --
> 2.34.1



RE: [EXTERNAL] [PATCH] crypto/mlx5: add virtual function device ID

2024-03-03 Thread Suanming Mou



> -Original Message-
> From: Akhil Goyal 
> Sent: Monday, March 4, 2024 3:24 PM
> To: Suanming Mou ; Matan Azrad
> 
> Cc: dev@dpdk.org
> Subject: RE: [EXTERNAL] [PATCH] crypto/mlx5: add virtual function device ID
> 
> > Subject: [EXTERNAL] [PATCH] crypto/mlx5: add virtual function device
> > ID
> >
> > This adds the virtual function device ID to the list of supported
> > NVIDIA devices that run the MLX5 compress PMD.
> 
> Compress PMD or crypto PMD?

Sorry, it should be crypto.

> 
> >
> > Signed-off-by: Suanming Mou 
> > Acked-by: Matan Azrad 
> > ---
> >  drivers/crypto/mlx5/mlx5_crypto.c | 4 
> >  1 file changed, 4 insertions(+)
> >
> > diff --git a/drivers/crypto/mlx5/mlx5_crypto.c
> > b/drivers/crypto/mlx5/mlx5_crypto.c
> > index 4bac723c8b..26bd4087da 100644
> > --- a/drivers/crypto/mlx5/mlx5_crypto.c
> > +++ b/drivers/crypto/mlx5/mlx5_crypto.c
> > @@ -465,6 +465,10 @@ static const struct rte_pci_id
> > mlx5_crypto_pci_id_map[] = {
> > RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
> >
> > PCI_DEVICE_ID_MELLANOX_BLUEFIELD3)
> > },
> > +   {
> > +   RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
> > +
> > PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
> > +   },
> > {
> > .vendor_id = 0
> > }
> > --
> > 2.34.1



RE: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface

2024-03-03 Thread Akhil Goyal
> > Hi folks,
> >
> > The introduction of a more unified IPsec MB library for DPDK is causing the
> > snow3g tests to fail on ARM. Artifact here:
> > https://lab.dpdk.org/results/dashboard/patchsets/29315/
> > PMDs using the direct API (KASUMI, CHACHA, ZUC, SNOW3G) will use the job
> API,
> > from the AESNI MB PMD code.
> > We have come across a similar issue in the past that related to an offset 
> > issue as
> > SNOW3G uses bits instead of bytes.
> 
> The above link does not seem to be working.
> I believe from now on, since we continue to maintain two separate repos,
> it would be better to get ack from ARM folks as well
> before merging anything onto crypto/ipsec_mb PMD.
> 
> Arm folks, Could you please get the below change tested/incorporated in the
> repo.

Hi Arm folks,
Any update on the below fix?


> 
> 
> >
> > commit a501609ea6466ed8526c0dfadedee332a4d4a451
> > Author: Pablo de Lara pablo.de.lara.gua...@intel.com
> > Date:   Wed Feb 23 16:01:16 2022 +
> >
> > crypto/ipsec_mb: fix length and offset settings
> >
> > KASUMI, SNOW3G and ZUC require lengths and offsets to
> > be set in bits or bytes depending on the algorithm.
> > There were some algorithms that were mixing these two,
> > so this commit is fixing this issue.
> >
> > This bug only appeared recently when the ARM ipsec version was bumped to
> 1.4.
> > It appears there could be a similar scenario happening now and this is a
> potential
> > fix that needs to be made in the ARM IPsec-mb repo:
> >
> > diff --git a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > index 13bca11b..de284ade 100644
> > --- a/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > +++ b/lib/aarch64/mb_mgr_snow3g_submit_flush_common_aarch64.h
> > @@ -94,8 +94,8 @@ static void
> > snow3g_mb_mgr_insert_uea2_job(MB_MGR_SNOW3G_OOO *state, IMB_JOB
> > *job
> >  state->num_lanes_inuse++;
> >  state->args.iv[used_lane_idx] = job->iv;
> >  state->args.keys[used_lane_idx] = job->enc_keys;
> > -state->args.in[used_lane_idx] = job->src + job-
> > >cipher_start_src_offset_in_bytes;
> > -state->args.out[used_lane_idx] = job->dst;
> > +state->args.in[used_lane_idx] = job->src + (job-
> > >cipher_start_src_offset_in_bits / 8);
> > +state->args.out[used_lane_idx] = job->dst + (job-
> > >cipher_start_src_offset_in_bits / 8);
> >  state->args.byte_length[used_lane_idx] = 
> > job->msg_len_to_cipher_in_bits /
> 8;
> >  state->args.INITIALIZED[used_lane_idx] = 0;
> >  state->lens[used_lane_idx] = job->msg_len_to_cipher_in_bits / 8;
> >
> > Thanks,
> > Brian
> >
> > > -Original Message-
> > > From: Dooley, Brian 
> > > Sent: Wednesday, February 28, 2024 11:33 AM
> > > To: Ji, Kai ; De Lara Guarch, Pablo
> > > 
> > > Cc: dev@dpdk.org; gak...@marvell.com; Dooley, Brian
> > > 
> > > Subject: [PATCH v4] crypto/ipsec_mb: unified IPsec MB interface
> > >
> > > Currently IPsec MB provides both the JOB API and direct API.
> > > AESNI_MB PMD is using the JOB API codepath while ZUC, KASUMI, SNOW3G
> > > and CHACHA20_POLY1305 are using the direct API.
> > > Instead of using the direct API for these PMDs, they should now make
> > > use of the JOB API codepath. This would remove all use of the IPsec MB
> > > direct API for these PMDs.
> > >
> > > Signed-off-by: Brian Dooley 
> > > ---
> > > v2:
> > > - Fix compilation failure
> > > v3:
> > > - Remove session configure pointer for each PMD
> > > v4:
> > > - Keep AES GCM PMD and fix extern issue
> > > ---
> > >  doc/guides/rel_notes/release_24_03.rst|   6 +
> > >  drivers/crypto/ipsec_mb/pmd_aesni_mb.c|  10 +-
> > >  drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h   |  15 +-
> > >  drivers/crypto/ipsec_mb/pmd_chacha_poly.c | 338 +--
> > >  .../crypto/ipsec_mb/pmd_chacha_poly_priv.h|  28 -
> > >  drivers/crypto/ipsec_mb/pmd_kasumi.c  | 410 +
> > >  drivers/crypto/ipsec_mb/pmd_kasumi_priv.h |  20 -
> > >  drivers/crypto/ipsec_mb/pmd_snow3g.c  | 543 +-
> > >  drivers/crypto/ipsec_mb/pmd_snow3g_priv.h |  21 -
> > >  drivers/crypto/ipsec_mb/pmd_zuc.c | 347 +--
> > >  drivers/crypto/ipsec_mb/pmd_zuc_priv.h|  20 -
> > >  11 files changed, 48 insertions(+), 1710 deletions(-)
> > >
> > 


Re: [PATCH v6 23/23] net/cnxk: other flow operations

2024-03-03 Thread Jerin Jacob
On Sun, Mar 3, 2024 at 11:11 PM Harman Kalra  wrote:
>
> Implementing other flow operations - validate, destroy, query,
> flush, dump for representor ports
>
> Signed-off-by: Harman Kalra 


Applied following fix on merge
[for-main]dell[dpdk-next-net-mrvl] $ git diff
diff --git a/drivers/net/cnxk/cnxk_rep_msg.c b/drivers/net/cnxk/cnxk_rep_msg.c
index 0af87f0169..f3a62a805e 100644
--- a/drivers/net/cnxk/cnxk_rep_msg.c
+++ b/drivers/net/cnxk/cnxk_rep_msg.c
@@ -41,7 +41,7 @@ receive_control_message(int socketfd, void *data,
uint32_t len)
if (size < 0) {
if (errno == EAGAIN)
return 0;
-   plt_err("recvmsg err %d size %ld", errno, size);
+   plt_err("recvmsg err %d size %zu", errno, size);
return -errno;
} else if (size == 0) {
return 0;


Updated the git commit comments and Series applied to
dpdk-next-net-mrvl/for-main. Thanks