[RFC] net/ice: add E830 support

2024-01-03 Thread Qiming Yang
Add E830 device ids.

Signed-off-by: Qiming Yang 
---
 drivers/net/ice/base/ice_common.c | 33 ---
 drivers/net/ice/base/ice_ddp.c|  6 +
 drivers/net/ice/base/ice_ddp.h|  1 +
 drivers/net/ice/base/ice_devids.h |  8 +++
 drivers/net/ice/base/ice_hw_autogen.h | 14 
 drivers/net/ice/base/ice_nvm.c| 15 +++-
 drivers/net/ice/base/ice_type.h   |  2 ++
 drivers/net/ice/ice_ethdev.c  |  4 
 8 files changed, 69 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ice/base/ice_common.c 
b/drivers/net/ice/base/ice_common.c
index 8867279c28..f161a365ee 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -177,6 +177,12 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E825C_SGMII:
hw->mac_type = ICE_MAC_GENERIC_3K_E825;
break;
+   case ICE_DEV_ID_E830_BACKPLANE:
+   case ICE_DEV_ID_E830_QSFP56:
+   case ICE_DEV_ID_E830_SFP:
+   case ICE_DEV_ID_E830_SFP_DD:
+   hw->mac_type = ICE_MAC_E830;
+   break;
default:
hw->mac_type = ICE_MAC_UNKNOWN;
break;
@@ -810,15 +816,26 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
 */
 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
 
-   /* Retrieve the transmit timer */
-   val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
-   tx_timer_val = val &
-   PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
-   cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+   if ((hw)->mac_type == ICE_MAC_E830) {
+   /* Retrieve the transmit timer */
+   val = rd32(hw, E830_PRTMAC_CL01_PAUSE_QUANTA);
+   tx_timer_val = val & 
E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M;
+   cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
 
-   /* Retrieve the fc threshold */
-   val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
-   fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+   /* Retrieve the fc threshold */
+   val = rd32(hw, E830_PRTMAC_CL01_QUANTA_THRESH);
+   fc_thres_val = val & 
E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M;
+   } else {
+   /* Retrieve the transmit timer */
+   val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+   tx_timer_val = val &
+   
PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+   cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+
+   /* Retrieve the fc threshold */
+   val = rd32(hw, 
PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+   fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+   }
 
cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
 }
diff --git a/drivers/net/ice/base/ice_ddp.c b/drivers/net/ice/base/ice_ddp.c
index ffcd5a9394..e43aab981d 100644
--- a/drivers/net/ice/base/ice_ddp.c
+++ b/drivers/net/ice/base/ice_ddp.c
@@ -439,6 +439,9 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type 
mac_type)
u32 seg_id;
 
switch (mac_type) {
+   case ICE_MAC_E830:
+   seg_id = SEGMENT_TYPE_ICE_E830;
+   break;
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K:
case ICE_MAC_GENERIC_3K_E825:
@@ -459,6 +462,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
u32 sign_type;
 
switch (mac_type) {
+   case ICE_MAC_E830:
+   sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
+   break;
case ICE_MAC_GENERIC_3K:
sign_type = SEGMENT_SIGN_TYPE_RSA3K;
break;
diff --git a/drivers/net/ice/base/ice_ddp.h b/drivers/net/ice/base/ice_ddp.h
index 1e02adf0db..6c87f11972 100644
--- a/drivers/net/ice/base/ice_ddp.h
+++ b/drivers/net/ice/base/ice_ddp.h
@@ -107,6 +107,7 @@ struct ice_generic_seg_hdr {
 #define SEGMENT_TYPE_METADATA  0x0001
 #define SEGMENT_TYPE_ICE_E810  0x0010
 #define SEGMENT_TYPE_SIGNING   0x1001
+#define SEGMENT_TYPE_ICE_E830  0x0017
 #define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x0020
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
diff --git a/drivers/net/ice/base/ice_devids.h 
b/drivers/net/ice/base/ice_devids.h
index 19478d2db1..3e0f9a16a3 100644
--- a/drivers/net/ice/base/ice_devids.h
+++ b/drivers/net/ice/base/ice_devids.h
@@ -15,6 +15,14 @@
 #define ICE_DEV_ID_E823L_1GBE  0x124F
 /* Intel(R) Ethernet Connection E823-L for QSFP */
 #define ICE_DEV_ID_E823L_QSFP  0x151D
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE  0x12D1
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#defi

[Bug 1340] net/i40e do not match packet with non-zero 802.1p priority when flow rule for VLAN ID only is used

2024-01-03 Thread bugzilla
https://bugs.dpdk.org/show_bug.cgi?id=1340

Bug ID: 1340
   Summary: net/i40e do not match packet with non-zero 802.1p
priority when flow rule for VLAN ID only is used
   Product: DPDK
   Version: 23.11
  Hardware: All
OS: All
Status: UNCONFIRMED
  Severity: normal
  Priority: Normal
 Component: ethdev
  Assignee: dev@dpdk.org
  Reporter: andrew.rybche...@oktetlabs.ru
  Target Milestone: ---

net/i40e do not match packet with non-zero 802.1p priority when flow rule for
VLAN ID only is used

For example, add a flow rule to drop packets with VLAN ID 13 and use a mask that
matches the VLAN ID only.

If so, packets with any priority and VLAN ID 13 should be dropped.

However, if I send a packet with priority 6, for example, the packet is
successfully delivered.

Should be easy to reproduce with testpmd.
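
For reference, an illustrative testpmd rule of that shape (intended to match the
VID bits only):

    testpmd> flow create 0 ingress pattern eth / vlan vid is 13 / end actions drop / end

With such a VID-only mask, packets carrying VLAN ID 13 with any 802.1p priority
should hit the rule and be dropped.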

Looking at the net/i40e code, I've failed to find where the VLAN CFI mask is passed
to the HW. It is analyzed, but it looks like it is not really used.

Found using automated test:
https://ts-factory.io/bublik/v2/log/433687?mode=treeAndlog&experimental=true&focusId=440732&lineNumber=1_82

-- 
You are receiving this mail because:
You are the assignee for the bug.

Re: [PATCH v1 1/1] net/thunderx: update dmac control register to appropriately

2024-01-03 Thread Jerin Jacob
On Thu, Dec 21, 2023 at 7:18 PM Hanumanth Pothula  wrote:
>
> By default dmac control register is set to reject packets
> on mac address match, leading all unicast packets to drop.
>
> Update DMAC control register to allow packets on MAC address
> match rather than dropping.
>
> Signed-off-by: Hanumanth Pothula 


Updated the git commit as follows and applied to
dpdk-next-net-mrvl/for-main. Thanks


commit 39711235e17d204496445be57997ed09ef6df019 (HEAD -> for-main)
Author: Hanumanth Pothula 
Date:   Thu Dec 21 16:49:59 2023 +0530

net/thunderx: fix dmac control register update

By default dmac control register is set to reject packets
on mac address match, leading all unicast packets to drop.
Update DMAC control register to allow packets on MAC address
match rather than dropping.

Fixes: e438796617dc ("net/thunderx: add PMD skeleton")
Cc: sta...@dpdk.org

Signed-off-by: Hanumanth Pothula 


RE: [PATCH v3] net/iavf: fix VF startup coredump

2024-01-03 Thread Zhang, Qi Z



> -Original Message-
> From: He, ShiyangX 
> Sent: Wednesday, January 3, 2024 6:42 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming ; He, ShiyangX
> ; sta...@dpdk.org; Wu, Jingjing
> ; Xing, Beilei ; Wang, Liang-
> min ; Zhang, Qi Z 
> Subject: [PATCH v3] net/iavf: fix VF startup coredump
> 
> When the vf starts to request multiple queues, the pf sends a reset command
> to the vf. During the reset process, adminq sends an abnormal message to pf
> for an unknown reason, and the resource request fails resulting in a
> coredump.
> 
> This patch fixes the issue by checking the reset state before resetting.
> 
> v2: fix windows compilation errors
> v3: add fix tag
> 
> Fixes: 3e6a5d2d310a ("net/iavf: add devargs to enable VF auto-reset")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Shiyang He 

Acked-by: Qi Zhang 

Applied to dpdk-next-net-intel.

Thanks
Qi



Re: [PATCH 2/2] common/cnxk: fix VLAN check for inner header

2024-01-03 Thread Jerin Jacob
On Thu, Dec 21, 2023 at 12:09 PM Harman Kalra  wrote:
>
> Adding the has vlan check in inner headers i.e in LF layer. If
> has_vlan is 0 it should be masked out while installing flow rule.
>
> Fixes: c34ea71b878d ("common/cnxk: add NPC parsing API")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Harman Kalra 


Series applied to dpdk-next-net-mrvl/for-main. Thanks.


[PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics

2024-01-03 Thread Mingjin Ye
Fixed Rx/Tx crash in multi-process environment and added
Tx diagnostic feature.

Mingjin Ye (2):
  net/iavf: fix Rx/Tx burst in multi-process
  net/iavf: add diagnostic support in TX path

 doc/guides/nics/intel_vf.rst   |   9 ++
 drivers/net/iavf/iavf.h|  54 ++-
 drivers/net/iavf/iavf_ethdev.c |  76 +
 drivers/net/iavf/iavf_rxtx.c   | 280 ++---
 drivers/net/iavf/iavf_rxtx.h   |   2 +
 5 files changed, 363 insertions(+), 58 deletions(-)

-- 
2.25.1



[PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process

2024-01-03 Thread Mingjin Ye
In a multi-process environment, a secondary process operates on shared
memory and changes the function pointer of the primary process, resulting
in a crash when the primary process cannot find the function address
during an Rx/Tx burst.

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: sta...@dpdk.org

Signed-off-by: Mingjin Ye 
---
v2: Add fix for Rx burst.
---
v3: fix Rx/Tx routing.
---
v4: Fix the ops array.
---
 drivers/net/iavf/iavf.h  |  42 +++-
 drivers/net/iavf/iavf_rxtx.c | 182 ---
 2 files changed, 166 insertions(+), 58 deletions(-)
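
In short, only a burst-type enum is kept in the shared adapter structure, and each
process resolves the function address from its own copy of the ops table; a rough
sketch of the idea (illustrative only, the real ops tables are in the diff below):

    /* Only the small enum value lives in shared adapter memory ... */
    adapter->rx_burst_type = IAVF_RX_SSE;

    /* ... and each process looks up the actual function address locally,
     * so the pointer is always valid in the calling process.
     */
    dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[adapter->rx_burst_type];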

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..73a089c199 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -313,6 +313,44 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_rx_burst_type {
+   IAVF_RX_DEFAULT,
+   IAVF_RX_FLEX_RXD,
+   IAVF_RX_BULK_ALLOC,
+   IAVF_RX_SCATTERED,
+   IAVF_RX_SCATTERED_FLEX_RXD,
+   IAVF_RX_SSE,
+   IAVF_RX_AVX2,
+   IAVF_RX_AVX2_OFFLOAD,
+   IAVF_RX_SSE_FLEX_RXD,
+   IAVF_RX_AVX2_FLEX_RXD,
+   IAVF_RX_AVX2_FLEX_RXD_OFFLOAD,
+   IAVF_RX_SSE_SCATTERED,
+   IAVF_RX_AVX2_SCATTERED,
+   IAVF_RX_AVX2_SCATTERED_OFFLOAD,
+   IAVF_RX_SSE_SCATTERED_FLEX_RXD,
+   IAVF_RX_AVX2_SCATTERED_FLEX_RXD,
+   IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD,
+   IAVF_RX_AVX512,
+   IAVF_RX_AVX512_OFFLOAD,
+   IAVF_RX_AVX512_FLEX_RXD,
+   IAVF_RX_AVX512_FLEX_RXD_OFFLOAD,
+   IAVF_RX_AVX512_SCATTERED,
+   IAVF_RX_AVX512_SCATTERED_OFFLOAD,
+   IAVF_RX_AVX512_SCATTERED_FLEX_RXD,
+   IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+   IAVF_TX_DEFAULT,
+   IAVF_TX_SSE,
+   IAVF_TX_AVX2,
+   IAVF_TX_AVX2_OFFLOAD,
+   IAVF_TX_AVX512,
+   IAVF_TX_AVX512_OFFLOAD,
+   IAVF_TX_AVX512_CTX_OFFLOAD,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
struct iavf_hw hw;
@@ -328,8 +366,8 @@ struct iavf_adapter {
bool stopped;
bool closed;
bool no_poll;
-   eth_rx_burst_t rx_pkt_burst;
-   eth_tx_burst_t tx_pkt_burst;
+   enum iavf_rx_burst_type rx_burst_type;
+   enum iavf_tx_burst_type tx_burst_type;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..89db82c694 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3707,15 +3707,77 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct 
rte_mbuf **tx_pkts,
return i;
 }
 
+static
+const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
+   [IAVF_RX_DEFAULT] = iavf_recv_pkts,
+   [IAVF_RX_FLEX_RXD] = iavf_recv_pkts_flex_rxd,
+   [IAVF_RX_BULK_ALLOC] = iavf_recv_pkts_bulk_alloc,
+   [IAVF_RX_SCATTERED] = iavf_recv_scattered_pkts,
+   [IAVF_RX_SCATTERED_FLEX_RXD] = iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+   [IAVF_RX_SSE] = iavf_recv_pkts_vec,
+   [IAVF_RX_AVX2] = iavf_recv_pkts_vec_avx2,
+   [IAVF_RX_AVX2_OFFLOAD] = iavf_recv_pkts_vec_avx2_offload,
+   [IAVF_RX_SSE_FLEX_RXD] = iavf_recv_pkts_vec_flex_rxd,
+   [IAVF_RX_AVX2_FLEX_RXD] = iavf_recv_pkts_vec_avx2_flex_rxd,
+   [IAVF_RX_AVX2_FLEX_RXD_OFFLOAD] =
+   iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+   [IAVF_RX_SSE_SCATTERED] = iavf_recv_scattered_pkts_vec,
+   [IAVF_RX_AVX2_SCATTERED] = iavf_recv_scattered_pkts_vec_avx2,
+   [IAVF_RX_AVX2_SCATTERED_OFFLOAD] =
+   iavf_recv_scattered_pkts_vec_avx2_offload,
+   [IAVF_RX_SSE_SCATTERED_FLEX_RXD] =
+   iavf_recv_scattered_pkts_vec_flex_rxd,
+   [IAVF_RX_AVX2_SCATTERED_FLEX_RXD] =
+   iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+   [IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD] =
+   iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+   [IAVF_RX_AVX512] = iavf_recv_pkts_vec_avx512,
+   [IAVF_RX_AVX512_OFFLOAD] = iavf_recv_pkts_vec_avx512_offload,
+   [IAVF_RX_AVX512_FLEX_RXD] = iavf_recv_pkts_vec_avx512_flex_rxd,
+   [IAVF_RX_AVX512_FLEX_RXD_OFFLOAD] =
+   iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+   [IAVF_RX_AVX512_SCATTERED] = iavf_recv_scattered_pkts_vec_avx512,
+   [IAVF_RX_AVX512_SCATTERED_OFFLOAD] =
+   iavf_recv_scattered_pkts_vec_avx512_offload,
+   [IAVF_RX_AVX512_SCATTERED_FLEX_RXD] =
+   iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+   [IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD] =
+   iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+   [IAVF_RX_SSE] = iavf_recv_pkts_vec,
+#endif
+};
+
+static
+const eth_tx_burst_t iavf_tx_pkt_burst_ops[] = {
+   [IAVF_TX_DEFAULT] = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+   [IAVF_TX_SSE] = i

[PATCH v8 2/2] net/iavf: add diagnostic support in TX path

2024-01-03 Thread Mingjin Ye
The only way to enable diagnostics for the TX path is to modify the
application source code, which makes it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check number of mbuf segments not exceed hw limitation.
 4. offload: check any unsupported offload flag.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye 
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: remove strict.
---
v7: Modify the description document.
---
 doc/guides/nics/intel_vf.rst   |  9 
 drivers/net/iavf/iavf.h| 12 +
 drivers/net/iavf/iavf_ethdev.c | 76 ++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 197 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ad08198f0f..bda6648726 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,15 @@ For more detail on SR-IOV, please refer to the following 
documents:
 by setting the ``devargs`` parameter like ``-a 
18:01.0,no-poll-on-link-down=1``
 when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 
Series Ethernet device.
 
+When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 
series Ethernet devices.
+Set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For 
example,
+``-a 18:01.0,mbuf_check=mbuf`` or ``-a 18:01.0,mbuf_check=[mbuf,size]``. 
Supported cases:
+
+*   mbuf: Check for corrupted mbuf.
+*   size: Check min/max packet length according to hw spec.
+*   segment: Check number of mbuf segments not exceed hw limitation.
+*   offload: Check any unsupported offload flag.
+
 The PCIE host-interface of Intel Ethernet Switch FM1 Series VF 
infrastructure
 
^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 73a089c199..6535b624cb 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
} ierrors;
 };
 
+struct iavf_mbuf_stats {
+   uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
struct virtchnl_eth_stats eth_stats;
struct iavf_ipsec_crypto_stats ips_stats;
+   struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,6 +314,7 @@ struct iavf_devargs {
uint32_t watchdog_period;
int auto_reset;
int no_poll_on_link_down;
+   int mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -351,6 +357,11 @@ enum iavf_tx_burst_type {
IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF(1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE(1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
struct iavf_hw hw;
@@ -368,6 +379,7 @@ struct iavf_adapter {
bool no_poll;
enum iavf_rx_burst_type rx_burst_type;
enum iavf_tx_burst_type tx_burst_type;
+   uint64_t mc_flags; /* mbuf check flags. */
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..7d1cd9050b 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG"watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG   "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
IAVF_RESET_WATCHDOG_ARG,
IAVF_ENABLE_AUTO_RESET_ARG,
IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+   IAVF_MBUF_CHECK_ARG,
NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off 
rte_iavf_stats_strings[] = {
{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+   {"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
{"inline_ipsec_crypto_i

RE: [PATCH v2] net/iavf: fix no polling mode switch

2024-01-03 Thread Zhang, Qi Z



> -Original Message-
> From: Mingjin Ye 
> Sent: Thursday, December 14, 2023 6:33 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming ; Ye, MingjinX
> ; sta...@dpdk.org; Wu, Jingjing
> ; Xing, Beilei 
> Subject: [PATCH v2] net/iavf: fix no polling mode switch
> 
> PMD does not switch to no polling mode when the PF triggers a reset event
> or the watchdog detects a reset event. In this scenario, data path will access
> the freed resources and cause a core dump.
> 
> This patch fixes this issue by automatically switching modes on VF reset.
> 
> Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Mingjin Ye 

Acked-by: Qi Zhang 

Applied to dpdk-next-net-intel.

Thanks
Qi



[PATCH] net/bonding: fix query-count flags not set

2024-01-03 Thread Mário Kuka
The rte_flow_query_count structure returned from the bonding driver
always indicates that hits and bytes are invalid (bytes_set and
hits_set flags are zero) because bond_flow_query_count() from the
net/bonding driver does not set the bytes_set and hits_set flags.

Fixes: 49dad9028e2a ("net/bonding: support flow API")
Cc: ma...@mellanox.com

Signed-off-by: Mário Kuka 
---
 drivers/net/bonding/rte_eth_bond_flow.c | 6 ++
 1 file changed, 6 insertions(+)
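
For context, this is how an application typically consumes those flags after a
COUNT query; a minimal sketch (port_id and flow are assumed to exist, rte_flow.h
and inttypes.h are needed):

    struct rte_flow_query_count count = { .reset = 0 };
    struct rte_flow_action action = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
    struct rte_flow_error error;

    if (rte_flow_query(port_id, flow, &action, &count, &error) == 0) {
            if (count.hits_set)
                    printf("hits: %" PRIu64 "\n", count.hits);
            if (count.bytes_set)
                    printf("bytes: %" PRIu64 "\n", count.bytes);
    }

Without the fix, the bonding PMD reports both flags as zero, so valid counters
aggregated from the members are treated as invalid.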

diff --git a/drivers/net/bonding/rte_eth_bond_flow.c 
b/drivers/net/bonding/rte_eth_bond_flow.c
index 71a91675f7..5d0be5caf5 100644
--- a/drivers/net/bonding/rte_eth_bond_flow.c
+++ b/drivers/net/bonding/rte_eth_bond_flow.c
@@ -180,6 +180,8 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct 
rte_flow *flow,
 
count->bytes = 0;
count->hits = 0;
+   count->bytes_set = 0;
+   count->hits_set = 0;
rte_memcpy(&member_count, count, sizeof(member_count));
for (i = 0; i < internals->member_count; i++) {
ret = rte_flow_query(internals->members[i].port_id,
@@ -192,8 +194,12 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct 
rte_flow *flow,
}
count->bytes += member_count.bytes;
count->hits += member_count.hits;
+   count->bytes_set |= member_count.bytes_set;
+   count->hits_set |= member_count.hits_set;
member_count.bytes = 0;
member_count.hits = 0;
+   member_count.bytes_set = 0;
+   member_count.hits_set = 0;
}
return 0;
 }
-- 
2.39.3



Re: [PATCH v4 1/7] dts: add required methods to testpmd_shell

2024-01-03 Thread Juraj Linkeš
On Thu, Dec 21, 2023 at 8:38 PM Jeremy Spewock  wrote:
>
>
>
> On Tue, Dec 19, 2023 at 11:45 AM Juraj Linkeš  
> wrote:
>>
>> The subject could be improved. That these methods are required is
>> kinda obvious. We should try to actually include some useful
>> information in the subject, such as "add basic methods to testpmd
>> shell", but even that is not saying much. Maybe "add startup
>> verification and forwarding to testpmd shell" - I actually like
>> something like this.
>
>
> The subject on this commit was something that I was having trouble with for 
> quite some time, I was trying to think of something that would be descriptive 
> enough and not too long so I opted to go for the more vague subject and 
> explain it in the body, but I like this subject much better and will change 
> to using that in the next version.
>
>>
>> On Mon, Dec 18, 2023 at 7:13 PM  wrote:
>> >
>> > From: Jeremy Spewock 
>> >
>> > Added a method within the testpmd interactive shell that polls the
>> > status of ports and verifies that the link status on a given port is
>> > "up." Polling will continue until either the link comes up, or the
>> > timeout is reached. Also added methods for starting and stopping packet
>> > forwarding in testpmd and a method for setting the forwarding mode on
>> > testpmd. The method for starting packet forwarding will also attempt to
>> > verify that forwarding did indeed start by default.
>> >
>>
>> The body should not explain what we're adding, but why we're adding it.
>
>
> Very good point and I'll keep this in mind in the future.
>
>>
>> >
>> > +def start(self, verify: bool = True) -> None:
>> > +"""Start packet forwarding with the current configuration.
>> > +
>> > +Args:
>> > +verify: If :data:`True` , a second start command will be sent 
>> > in an attempt to verify
>> > +packet forwarding started as expected.
>> > +
>>
>> Isn't there a better way to verify this? Like with some show command?
>> Or is this how it's supposed to be used?
>
>
> I looked through documentation and the outputs of many of the show commands 
> and I wasn't able to find one that indicated that forwarding had actually 
> started.  Clearly testpmd holds this state somewhere but I couldn't find 
> where. I agree that this method of verification doesn't seem perfect but I 
> wasn't able to find a more automated method of doing so since the start 
> command has no output otherwise.
>
> One place that does seem to display if forwarding has started is using the 
> command: `show port (port_id) rxq|txq (queue_id) desc (desc_id) status` 
> (specifically the rxq variant) but this seems to take a few seconds to update 
> its state between available and done when you stop forwarding so it seems 
> like it could lead to inaccurate verification. This would also make 
> assumptions on the number of rx and tx queues which I'm unsure if we would 
> want to make.
>

Yea, let's not assume anything if we can avoid it. I guess there
doesn't really have to be a dedicated command for verification since
doing it this way is kinda doing the same thing, except maybe this
could put the testpmd application into a faulty state if we try to
start forwarding a second time if it hasn't already been started by
the first execution. I imagine this is not a risk since it's the way
it was done in the original DTS.

>>
>>
>> > +Raises:
>> > +InteractiveCommandExecutionError: If `verify` is :data:`True` 
>> > and forwarding fails to
>> > +start.
>> > +"""
>> > +self.send_command("start")
>> > +if verify:
>> > +# If forwarding was already started, sending "start" again 
>> > should tell us
>> > +if "Packet forwarding already started" not in 
>> > self.send_command("start"):
>> > +raise InteractiveCommandExecutionError("Testpmd failed to 
>> > start packet forwarding.")
>> > +
>> > +def stop(self) -> None:
>> > +"""Stop packet forwarding."""
>> > +self.send_command("stop")
>> > +
>>
>> Do we want to do verification here as well? Is there a reason to do
>> such verification?
>
>
> I figured there wasn't much of a reason as your two options when you run this 
> command are you aren't already forwarding in which case this command still 
> gets you into the state you want to be in, or you are forwarding and it stops 
> you which also puts you into the correct state.
>
> I guess that assumes that there can't be an error when trying to stop 
> forwarding,

Yes, that would be the reason to do the verification.

> so I could add a verification step for that purpose, but I don't think it 
> would commonly be the case that stopping fails. There isn't really harm in 
> verifying this for safety though, so I'll add it.
>

Yes, let's add it if we're not absolutely sure we don't need it. The
worst case scenario is it would help with debugging if the
verification fails.

>>
>>
>> >  def get_devices(se

Re: [PATCH v4 7/7] dts: add scatter test suite

2024-01-03 Thread Juraj Linkeš
On Thu, Dec 21, 2023 at 10:47 PM Jeremy Spewock  wrote:
>
>
>
> On Tue, Dec 19, 2023 at 12:29 PM Juraj Linkeš  
> wrote:
>>
>> Should we use the full name (pmd_buffer_scatter) in the subject? I
>> lean towards the full name.
>>
>> On Mon, Dec 18, 2023 at 7:13 PM  wrote:
>> >
>> > From: Jeremy Spewock 
>> >
>> > This test suite provides testing the support of scattered packets by
>> > Poll Mode Drivers using testpmd. It incorporates 5 different test cases
>> > which test the sending and receiving of packets with lengths that are
>> > less than the mbuf data buffer size, the same as the mbuf data buffer
>> > size, and the mbuf data buffer size plus 1, 4, and 5. The goal of this
>> > test suite is to align with the existing dts test plan for scattered
>> > packets within DTS.
>> >
>>
>> Again, we need to describe why we're adding this commit. In the case
>> of test suites, the why is obvious, so we should give a good high
>> level description of what the tests test (something like the test
>> suite tests the x feature by doing y, y being the salient part of the
>> tests). The original test plan is actually pretty good, so we can
>> extract the rationale from it.
>
>
> This is a good point, I'll pull more from the test plan to improve this.
>
>>
>>
>> > Signed-off-by: Jeremy Spewock 
>> > ---
>> >  dts/tests/TestSuite_pmd_buffer_scatter.py | 105 ++
>> >  1 file changed, 105 insertions(+)
>> >  create mode 100644 dts/tests/TestSuite_pmd_buffer_scatter.py
>> >
>> > diff --git a/dts/tests/TestSuite_pmd_buffer_scatter.py 
>> > b/dts/tests/TestSuite_pmd_buffer_scatter.py
>> > new file mode 100644
>> > index 00..8e2a32a1aa
>> > --- /dev/null
>> > +++ b/dts/tests/TestSuite_pmd_buffer_scatter.py
>> > @@ -0,0 +1,105 @@
>> > +# SPDX-License-Identifier: BSD-3-Clause
>> > +# Copyright(c) 2023 University of New Hampshire
>> > +
>> > +"""Multi-segment packet scattering testing suite.
>> > +
>> > +Configure the Rx queues to have mbuf data buffers whose sizes are smaller 
>> > than the maximum packet
>> > +size (currently set to 2048 to fit a full 1512-byte ethernet frame) and 
>> > send a total of 5 packets
>> > +with lengths less than, equal to, and greater than the mbuf size (CRC 
>> > included).
>> > +"""
>>
>> Let's expand this. I'll point to the original test plan again, let's
>> use some of it here. I think it makes sense to make this docstring a
>> kind of a test plan with high level description.
>
>
> Sounds good, I'll expand this too.
>
>>
>>
>> > +import struct
>> > +
>> > +from scapy.layers.inet import IP  # type: ignore[import]
>> > +from scapy.layers.l2 import Ether  # type: ignore[import]
>> > +from scapy.packet import Raw  # type: ignore[import]
>> > +from scapy.utils import hexstr  # type: ignore[import]
>> > +
>> > +from framework.remote_session.remote.testpmd_shell import (
>> > +TestPmdForwardingModes,
>> > +TestPmdShell,
>> > +)
>> > +from framework.test_suite import TestSuite
>> > +
>> > +
>> > +class PmdBufferScatter(TestSuite):
>> > +"""DPDK packet scattering test suite.
>> > +
>>
>> And here we could add some more specifics.
>>
>>
>> I'd like to utilize the original test plans and a split like this
>> makes sense at a first glance.
>
>
> I'll add more here in the next version as well. I agree that using the 
> original test plans will help make this more descriptive and better for the 
> documentation.
>
>>
>> > +testpmd.set_forward_mode(TestPmdForwardingModes.mac)
>> > +testpmd.start()
>> > +link_is_up = testpmd.wait_link_status_up(0) and 
>> > testpmd.wait_link_status_up(1)
>>
>> These two calls should probably be inside testpmd.start(). Looks like
>> we're always going to be doing this.
>>
>> Also, this assumes there will be two ports. Ideally, the shell itself
>> will know which ports to check (that should be in EAL parameters), but
>> that may require a bigger refactor (unless we just parse back the -a
>> options, which could be permissible).
>
>
> Collecting the number of ports should be easy enough from the original args 
> and then I can just verify ports 0-num are up so I agree that this is a 
> simple way to change this that adds a good amount of value.
>
> While I don't believe the link status is actually directly related to 
> starting testpmd, I think that if we are going to start forwarding we still 
> are also always going to want to be sure that the links are up and we can 
> properly forward the packets. I'll add this to the verification check in the 
> start method.
>

Right, we don't necessarily need to verify port status when starting
testpmd (that could be optional though, we could use that in a smoke
test). We should always check it when enabling forwarding (and if we
ever need to not do that we can add an option for that later).

>>
>>
>> > +self.verify(link_is_up, "Links never came up in TestPMD.")
>> > +
>> > +for offset in [-1, 0, 1, 4, 5]:
>> > +recv_payload = self.scatter_pktgen_send

RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next operation

2024-01-03 Thread Anoob Joseph
Hi Suanming,

Good catch. Please see inline.

Thanks,
Anoob

> -Original Message-
> From: Suanming Mou 
> Sent: Wednesday, January 3, 2024 9:24 AM
> To: Ciara Power 
> Cc: dev@dpdk.org
> Subject: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next operation
> 
> External Email
> 
> --
> In fill_multi_seg_mbuf(), when remaining_segments is 0, rte_mbuf m's next
> should point to NULL instead of a new rte_mbuf; otherwise the m->next = NULL
> assignment after the while loop is applied to an invalid mbuf.
> 
> This commit fixes the invalid mbuf next operation.
> 
> Fixes: bf9d6702eca9 ("app/crypto-perf: use single mempool")
> 
> Signed-off-by: Suanming Mou 
> ---
>  app/test-crypto-perf/cperf_test_common.c | 12 +++-
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-
> perf/cperf_test_common.c
> index 932aab16df..ad2076dd2e 100644
> --- a/app/test-crypto-perf/cperf_test_common.c
> +++ b/app/test-crypto-perf/cperf_test_common.c
> @@ -72,13 +72,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct
> rte_mempool *mp,
>   rte_mbuf_refcnt_set(m, 1);
>   next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
>   mbuf_hdr_size + segment_sz);
> - m->next = next_mbuf;
> - m = next_mbuf;
> - remaining_segments--;
> 
> + remaining_segments--;
> + if (remaining_segments > 0) {

[Anoob] Would it make sense to move assignment of next_mbuf also to here? That 
way, the checks will become self explanatory.
next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
mbuf_hdr_size + segment_sz);

> + m->next = next_mbuf;
> + m = next_mbuf;
> + } else {
> + m->next = NULL;
> + }
>   } while (remaining_segments > 0);
> -
> - m->next = NULL;
>  }
> 
>  static void
> --
> 2.34.1



RE: [EXT] [PATCH 1/2] app/test-crypto-perf: fix invalid memcmp results

2024-01-03 Thread Anoob Joseph
> The function memcmp() returns an integer less than, equal to, or greater than
> zero. In the current code, if the first memcmp() returns less than zero and the
> second memcmp() returns greater than zero, the sum of the results may still be 0
> and indicate that verification succeeded.
> 
> This commit converts each return value to be zero or greater than zero, which
> makes sure the sum of the results is correct.
> 
> Fixes: df52cb3b6e13 ("app/crypto-perf: move verify as single test type")
> 
> Signed-off-by: Suanming Mou 

Acked-by: Anoob Joseph 
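
A minimal sketch of the described conversion (hypothetical buffer names, not the
applied diff):

    /* Each comparison now contributes 0 (match) or 1 (mismatch), so results
     * of opposite sign can no longer cancel out in the sum.
     */
    int res = 0;

    res += !!memcmp(out_data, ref_data, data_len);
    res += !!memcmp(out_digest, ref_digest, digest_len);

    return res == 0 ? 0 : -1; /* 0 means verification succeeded */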



RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next operation

2024-01-03 Thread Suanming Mou
Hi,

> -Original Message-
> From: Anoob Joseph 
> Sent: Wednesday, January 3, 2024 7:22 PM
> To: Suanming Mou 
> Cc: dev@dpdk.org; Ciara Power 
> Subject: RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next 
> operation
> 
> Hi Suanming,
> 
> Good catch. Please see inline.
> 
> Thanks,
> Anoob
> 
> > -Original Message-
> > From: Suanming Mou 
> > Sent: Wednesday, January 3, 2024 9:24 AM
> > To: Ciara Power 
> > Cc: dev@dpdk.org
> > Subject: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next
> > operation
> >
> > External Email
> >
> > --
> > In fill_multi_seg_mbuf(), when remaining_segments is 0, rte_mbuf m's
> > next should point to NULL instead of a new rte_mbuf; otherwise the
> > m->next = NULL assignment after the while loop is applied to an invalid mbuf.
> >
> > This commit fixes the invalid mbuf next operation.
> >
> > Fixes: bf9d6702eca9 ("app/crypto-perf: use single mempool")
> >
> > Signed-off-by: Suanming Mou 
> > ---
> >  app/test-crypto-perf/cperf_test_common.c | 12 +++-
> >  1 file changed, 7 insertions(+), 5 deletions(-)
> >
> > diff --git a/app/test-crypto-perf/cperf_test_common.c
> > b/app/test-crypto- perf/cperf_test_common.c index
> > 932aab16df..ad2076dd2e 100644
> > --- a/app/test-crypto-perf/cperf_test_common.c
> > +++ b/app/test-crypto-perf/cperf_test_common.c
> > @@ -72,13 +72,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct
> > rte_mempool *mp,
> > rte_mbuf_refcnt_set(m, 1);
> > next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
> > mbuf_hdr_size + segment_sz);
> > -   m->next = next_mbuf;
> > -   m = next_mbuf;
> > -   remaining_segments--;
> >
> > +   remaining_segments--;
> > +   if (remaining_segments > 0) {
> 
> [Anoob] Would it make sense to move assignment of next_mbuf also to here?
> That way, the checks will become self explanatory.
>   next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
>   mbuf_hdr_size + segment_sz);
> 

Makes sense. Maybe just like this:
m->next = (struct rte_mbuf *) ((uint8_t *) m +
mbuf_hdr_size + segment_sz);
m = m->next;

What do you think?
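
With that simplification, the chaining loop would look roughly like this (sketch
of the idea, not the final patch):

    do {
            rte_mbuf_refcnt_set(m, 1);
            /* ... per-segment initialisation unchanged ... */
            remaining_segments--;
            if (remaining_segments > 0) {
                    /* the next segment header sits right after this one */
                    m->next = (struct rte_mbuf *)((uint8_t *)m +
                            mbuf_hdr_size + segment_sz);
                    m = m->next;
            } else {
                    m->next = NULL;
            }
    } while (remaining_segments > 0);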

> > +   m->next = next_mbuf;
> > +   m = next_mbuf;
> > +   } else {
> > +   m->next = NULL;
> > +   }
> > } while (remaining_segments > 0);
> > -
> > -   m->next = NULL;
> >  }
> >
> >  static void
> > --
> > 2.34.1



[PATCH] dts: improve documentation

2024-01-03 Thread Luca Vizzarro
Improve instructions for installing dependencies, configuring and
launching the project. Finally, document the configuration schema
by adding more comments to the example and documenting every
property and definition.

Reviewed-by: Paul Szczepanek 
Signed-off-by: Luca Vizzarro 
---
 .mailmap |   1 +
 doc/guides/tools/dts.rst | 255 +--
 dts/conf.yaml|  31 +++--
 3 files changed, 240 insertions(+), 47 deletions(-)

diff --git a/.mailmap b/.mailmap
index ab0742a382..6326e28d08 100644
--- a/.mailmap
+++ b/.mailmap
@@ -815,6 +815,7 @@ Louise Kilheeney 
 Louis Luo 
 Louis Peens  
 Luca Boccassi   
  
+Luca Vizzarro 
 Luc Pelletier 
 Lukasz Bartosik 
 Lukasz Czapnik 
diff --git a/doc/guides/tools/dts.rst b/doc/guides/tools/dts.rst
index 32c18ee472..31495cad51 100644
--- a/doc/guides/tools/dts.rst
+++ b/doc/guides/tools/dts.rst
@@ -91,7 +91,7 @@ Setting up DTS environment
 
.. code-block:: console
 
-  poetry install
+  poetry install --no-root
   poetry shell
 
 #. **SSH Connection**
@@ -189,72 +189,73 @@ Running DTS
 ---
 
 DTS needs to know which nodes to connect to and what hardware to use on those 
nodes.
-Once that's configured, DTS needs a DPDK tarball and it's ready to run.
+Once that's configured, DTS needs a DPDK tarball or a git ref ID and it's 
ready to run.
 
 Configuring DTS
 ~~~
 
-DTS configuration is split into nodes and executions and build targets within 
executions.
-By default, DTS will try to use the ``dts/conf.yaml`` config file,
-which is a template that illustrates what can be configured in DTS:
-
-  .. literalinclude:: ../../../dts/conf.yaml
- :language: yaml
- :start-at: executions:
-
+DTS configuration is split into nodes and executions and build targets within 
executions,
+and follows a defined schema as described in `Configuration Schema`_.
+By default, DTS will try to use the ``dts/conf.yaml`` :ref:`config file 
`,
+which is a template that illustrates what can be configured in DTS.
 
 The user must have :ref:`administrator privileges `
 which don't require password authentication.
-The other fields are mostly self-explanatory
-and documented in more detail in 
``dts/framework/config/conf_yaml_schema.json``.
 
 DTS Execution
 ~
 
-DTS is run with ``main.py`` located in the ``dts`` directory after entering 
Poetry shell::
+DTS is run with ``main.py`` located in the ``dts`` directory after entering 
Poetry shell:
+
+.. code-block:: console
 
-   usage: main.py [-h] [--config-file CONFIG_FILE] [--output-dir OUTPUT_DIR] 
[-t TIMEOUT]
-  [-v VERBOSE] [-s SKIP_SETUP] [--tarball TARBALL]
-  [--compile-timeout COMPILE_TIMEOUT] [--test-cases TEST_CASES]
-  [--re-run RE_RUN]
+   (dts-py3.10) $ ./main.py --help
+   usage: main.py [-h] [--config-file CONFIG_FILE] [--output-dir OUTPUT_DIR] 
[-t TIMEOUT] [-v VERBOSE]
+  [-s SKIP_SETUP] [--tarball TARBALL] [--compile-timeout 
COMPILE_TIMEOUT]
+  [--test-cases TEST_CASES] [--re-run RE_RUN]
 
-   Run DPDK test suites. All options may be specified with the environment 
variables provided in
-   brackets. Command line arguments have higher priority.
+   Run DPDK test suites. All options may be specified with the environment 
variables provided in brackets.
+   Command line arguments have higher priority.
 
options:
  -h, --helpshow this help message and exit
  --config-file CONFIG_FILE
-   [DTS_CFG_FILE] configuration file that describes 
the test cases, SUTs
-   and targets. (default: conf.yaml)
+   [DTS_CFG_FILE] configuration file that describes 
the test cases, SUTs and targets.
+   (default: conf.yaml)
  --output-dir OUTPUT_DIR, --output OUTPUT_DIR
-   [DTS_OUTPUT_DIR] Output directory where dts logs 
and results are
-   saved. (default: output)
+   [DTS_OUTPUT_DIR] Output directory where dts logs 
and results are saved. (default:
+   output)
  -t TIMEOUT, --timeout TIMEOUT
-   [DTS_TIMEOUT] The default timeout for all DTS 
operations except for
-   compiling DPDK. (default: 15)
+   [DTS_TIMEOUT] The default timeout for all DTS 
operations except for compiling DPDK.
+   (default: 15)
  -v VERBOSE, --verbose VERBOSE
-   [DTS_VERBOSE] Set to 'Y' to enable verbose output, 
logging all
-   messages to the console. (default: N)
+   [DTS_VERBOSE] Set to 'Y' to enable verbose output, 
logging all messages to the
+   console. (default: N)
  -s SKIP_SETUP, --skip-setup SKIP_SETUP
-   [DTS_SKIP_SETUP] Set to 'Y' to skip 

RE: 22.11.4 patches review and test

2024-01-03 Thread Ali Alnubani
> -Original Message-
> From: Xueming(Steven) Li 
> Sent: Wednesday, December 20, 2023 9:19 AM
> To: sta...@dpdk.org
> Cc: Xueming(Steven) Li ; dev@dpdk.org; Abhishek
> Marathe ; Ali Alnubani
> ; benjamin.wal...@intel.com; David Christensen
> ; Hemant Agrawal ;
> Ian Stokes ; Jerin Jacob ; John
> McNamara ; Ju-Hyoung Lee
> ; Kevin Traynor ; Luca
> Boccassi ; Pei Zhang ;
> qian.q...@intel.com; Raslan Darawsheh ; NBU-
> Contact-Thomas Monjalon (EXTERNAL) ; Yanghang
> Liu ; yuan.p...@intel.com; zhaoyan.c...@intel.com
> Subject: 22.11.4 patches review and test
> 
> Hi all,
> 
> Here is a list of patches targeted for stable release 22.11.4.
> 
> The planned date for the final release is 5th January.
> 
> Please help with testing and validation of your use cases and report
> any issues/results with reply-all to this mail. For the final release
> the fixes and reported validations will be added to the release notes.
> 
> A release candidate tarball can be found at:
> 
> https://dpdk.org/browse/dpdk-stable/tag/?id=v22.11.4-rc3
> 
> These patches are located at branch 22.11 of dpdk-stable repo:
> https://dpdk.org/browse/dpdk-stable/
> 
> Thanks.

Hello,

We ran the following functional tests with Nvidia hardware on v22.11.4-rc3:
- Basic functionality:
  Send and receive multiple types of traffic.
- testpmd xstats counter test.
- testpmd timestamp test.
- Changing/checking link status through testpmd.
- rte_flow tests 
(https://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads)
- RSS tests.
- VLAN filtering, stripping, and insertion tests.
- Checksum and TSO tests.
- ptype tests.
- link_status_interrupt example application tests.
- l3fwd-power example application tests.
- Multi-process example applications tests.
- Hardware LRO tests.
- Regex application tests.
- Buffer Split tests.
- Tx scheduling tests.

Functional tests ran on:
- NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-1.1.9.0 
/ Firmware: 22.39.2048
- NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-1.1.9.0 / 
Firmware: 28.39.2048
- DPU: BlueField-2 / DOCA SW version: 2.5.0 / Firmware: 24.39.2048

Additionally, we ran build tests with multiple configurations on the following 
OS/driver combinations (all passed):
- Ubuntu 20.04.6 with MLNX_OFED_LINUX-23.10-1.1.9.0.
- Ubuntu 20.04.6 with rdma-core master (9016f34).
- Ubuntu 20.04.6 with rdma-core v28.0.
- Fedora 38 with rdma-core v44.0.
- Fedora 40 (Rawhide) with rdma-core v48.0.
- OpenSUSE Leap 15.5 with rdma-core v42.0.
- Windows Server 2019 with Clang 16.0.6.

We don't see new issues caused by the changes in this release.

Thanks,
Ali


RE: 21.11.6 patches review and test

2024-01-03 Thread Ali Alnubani
> -Original Message-
> From: Kevin Traynor 
> Sent: Wednesday, December 20, 2023 3:23 PM
> To: sta...@dpdk.org
> Cc: dev@dpdk.org; Abhishek Marathe ;
> Ali Alnubani ; benjamin.wal...@intel.com; David
> Christensen ; Hemant Agrawal
> ; Ian Stokes ; Jerin Jacob
> ; John McNamara ; Ju-
> Hyoung Lee ; Kevin Traynor ;
> Luca Boccassi ; Pei Zhang ;
> qian.q...@intel.com; Raslan Darawsheh ; NBU-
> Contact-Thomas Monjalon (EXTERNAL) ;
> yangh...@redhat.com; yuan.p...@intel.com; zhaoyan.c...@intel.com
> Subject: 21.11.6 patches review and test
> 
> Hi all,
> 
> Here is a list of patches targeted for stable release 21.11.6.
> 
> The planned date for the final release is 12 January.
> 
> Please help with testing and validation of your use cases and report
> any issues/results with reply-all to this mail. For the final release
> the fixes and reported validations will be added to the release notes.
> 
> A release candidate tarball can be found at:
> 
> https://dpdk.org/browse/dpdk-stable/tag/?id=v21.11.6-rc1
> 
> These patches are located at branch 21.11 of dpdk-stable repo:
> https://dpdk.org/browse/dpdk-stable/
> 
> Thanks.
> 

Hello,

We ran the following functional tests with Nvidia hardware on 21.11.6-rc1:
- Basic functionality:
  Send and receive multiple types of traffic.
- testpmd xstats counter test.
- testpmd timestamp test.
- Changing/checking link status through testpmd.
- rte_flow tests 
(https://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads)
- RSS tests.
- VLAN filtering, stripping, and insertion tests.
- Checksum and TSO tests.
- ptype tests.
- link_status_interrupt example application tests.
- l3fwd-power example application tests.
- Multi-process example applications tests.
- Hardware LRO tests.
- Regex application tests.
- Buffer Split tests.
- Tx scheduling tests.

Functional tests ran on:
- NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-1.1.9.0 
/ Firmware: 22.39.2048
- NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-1.1.9.0 / 
Firmware: 28.39.2048
- DPU: BlueField-2 / DOCA SW version: 2.5.0 / Firmware: 24.39.2048

Additionally, we ran build tests with multiple configurations on the following 
OS/driver combinations (all passed):
- Ubuntu 20.04.6 with MLNX_OFED_LINUX-23.10-1.1.9.0.
- Ubuntu 20.04.6 with rdma-core master (9016f34).
- Ubuntu 20.04.6 with rdma-core v28.0.
- Fedora 38 with rdma-core v44.0.
- Fedora 40 (Rawhide) with rdma-core v48.0.
- OpenSUSE Leap 15.5 with rdma-core v42.0.
- Windows Server 2019 with Clang 16.0.6.

We don't see new issues caused by the changes in this release.

Thanks,
Ali


RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next operation

2024-01-03 Thread Anoob Joseph
Hi Suanming,

Please see inline.

Thanks,
Anoob

> -Original Message-
> From: Suanming Mou 
> Sent: Wednesday, January 3, 2024 6:06 PM
> To: Anoob Joseph 
> Cc: dev@dpdk.org; Ciara Power 
> Subject: RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next 
> operation
> 
> Hi,
> 
> > -Original Message-
> > From: Anoob Joseph 
> > Sent: Wednesday, January 3, 2024 7:22 PM
> > To: Suanming Mou 
> > Cc: dev@dpdk.org; Ciara Power 
> > Subject: RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next
> > operation
> >
> > Hi Suanming,
> >
> > Good catch. Please see inline.
> >
> > Thanks,
> > Anoob
> >
> > > -Original Message-
> > > From: Suanming Mou 
> > > Sent: Wednesday, January 3, 2024 9:24 AM
> > > To: Ciara Power 
> > > Cc: dev@dpdk.org
> > > Subject: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next
> > > operation
> > >
> > > External Email
> > >
> > > 
> > > -- In fill_multi_seg_mbuf(), when remaining_segments is 0, rte_mbuf
> > > m's next should point to NULL instead of a new rte_mbuf; otherwise the
> > > m->next = NULL assignment after the while loop is applied to an invalid
> > > mbuf.
> > >
> > > This commit fixes the invalid mbuf next operation.
> > >
> > > Fixes: bf9d6702eca9 ("app/crypto-perf: use single mempool")
> > >
> > > Signed-off-by: Suanming Mou 
> > > ---
> > >  app/test-crypto-perf/cperf_test_common.c | 12 +++-
> > >  1 file changed, 7 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/app/test-crypto-perf/cperf_test_common.c
> > > b/app/test-crypto- perf/cperf_test_common.c index
> > > 932aab16df..ad2076dd2e 100644
> > > --- a/app/test-crypto-perf/cperf_test_common.c
> > > +++ b/app/test-crypto-perf/cperf_test_common.c
> > > @@ -72,13 +72,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct
> > > rte_mempool *mp,
> > >   rte_mbuf_refcnt_set(m, 1);
> > >   next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
> > >   mbuf_hdr_size + segment_sz);
> > > - m->next = next_mbuf;
> > > - m = next_mbuf;
> > > - remaining_segments--;
> > >
> > > + remaining_segments--;
> > > + if (remaining_segments > 0) {
> >
> > [Anoob] Would it make sense to move assignment of next_mbuf also to here?
> > That way, the checks will become self explanatory.
> > next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
> > mbuf_hdr_size + segment_sz);
> >
> 
> Make sense. Maybe just like that:
>   m->next = (struct rte_mbuf *) ((uint8_t *) m +
>   mbuf_hdr_size + segment_sz);
>   m = m->next;
> 
> What do you think?

[Anoob] Yes. That's even better. 

I think we can have line lengths up to 100 characters now, in case you find it
easier to put it in a single line.

> 
> > > + m->next = next_mbuf;
> > > + m = next_mbuf;
> > > + } else {
> > > + m->next = NULL;
> > > + }
> > >   } while (remaining_segments > 0);
> > > -
> > > - m->next = NULL;
> > >  }
> > >
> > >  static void
> > > --
> > > 2.34.1



RE: [RFC] ethdev: fast path async flow API

2024-01-03 Thread Dariusz Sosnowski
Hi Ivan,

> Hi Dariusz,
> 
> I appreciate the proposal. You say that a reference PMD implementation will
> be made available for 24.03 release. What about the applications?
> Is this supposed to go to upstream code of some applications?
No source code changes are required in applications which already use async 
flow APIs.
API signatures are not changed in this proposal.

Only the PMD changes are required.
To be specific - callbacks for async flow APIs should not be put in 
rte_flow_ops,
but registered by calling rte_flow_fp_ops_register(). 

Best regards,
Dariusz Sosnowski


RE: [RFC] ethdev: fast path async flow API

2024-01-03 Thread Ivan Malov

Hi Dariusz,

I appreciate your response. All to the point.

I have to confess my question was inspired by the 23.11 merge commit
in OVS mailing list. I first thought that an obvious consumer for
the async flow API could have been OVS but saw no usage of it in
the current code. It was my impression that there had been some
patches in OVS already, waiting either for approval/testing or
for this particular optimisation to be accepted first.

So far I've been mistaken -- there are no such patches,
hence my question. Do we have real-world examples of
the async flow usage? Should it be tested somehow...

(I apologise in case I'm asking for too many clarifications).

Thank you.

On Wed, 3 Jan 2024, Dariusz Sosnowski wrote:


Hi Ivan,


Hi Dariusz,

I appreciate the proposal. You say that a reference PMD implementation will
be made available for 24.03 release. What about the applications?
Is this supposed to go to upstream code of some applications?

No source code changes are required in applications which already use async 
flow APIs.
API signatures are not changed in this proposal.

Only the PMD changes are required.
To be specific - callbacks for async flow APIs should not be put in 
rte_flow_ops,
but registered by calling rte_flow_fp_ops_register().

Best regards,
Dariusz Sosnowski



RE: [RFC] ethdev: fast path async flow API

2024-01-03 Thread Dariusz Sosnowski
> -Original Message-
> From: Stephen Hemminger 
> Sent: Thursday, December 28, 2023 18:17
> > However, at the moment I see one problem with this approach.
> > It would require DPDK to expose the rte_eth_dev struct definition,
> > because of implied locking implemented in the flow API.
> 
> This is a blocker, showstopper for me.
+1

> Have you considered having something like
>rte_flow_create_bulk()
> 
> or better yet a Linux iouring style API?
> 
> A ring style API would allow for better mixed operations across the board and
> get rid of the I-cache overhead which is the root cause of the needing inline.
The existing async flow API is somewhat close to the io_uring interface.
The difference is that the queue is not directly exposed to the application.
Application interacts with the queue using rte_flow_async_* APIs (e.g., places 
operations in the queue, pushes them to the HW).
Such design has some benefits over a flow API which exposes the queue to the 
user:
- Easier to use - Applications do not manage the queue directly, they do it 
through exposed APIs.
- Consistent with other DPDK APIs - In other libraries, queues are manipulated 
through API, not directly by an application.
- Lower memory usage - only HW primitives are needed (e.g., HW queue on PMD 
side), no need to allocate separate application queues.

Bulking of flow operations is a tricky subject.
Compared to packet processing, where it is desired to keep the manipulation of 
raw packet data to the minimum (e.g., only packet headers are accessed),
during flow rule creation all items and actions must be processed by PMD to 
create a flow rule.
The amount of memory consumed by items and actions themselves during this 
process might be nonnegligible.
If flow rule operations were bulked, the size of working set of memory would 
increase, which could have negative consequences on the cache behavior.
So, it might be the case that by utilizing bulking the I-cache overhead is 
removed, but the D-cache overhead is added.
On the other hand, creating flow rule operations (or enqueuing flow rule 
operations) one by one enables applications to reuse the same memory for 
different flow rules.

In summary, in my opinion extending the async flow API with bulking 
capabilities or exposing the queue directly to the application is not desirable.
This proposal aims to reduce the I-cache overhead in async flow API by reusing 
the existing design pattern in DPDK - fast path functions are inlined to the 
application code and they call cached PMD callbacks.
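
The pattern in question (an inline wrapper calling a per-port cached callback,
similar to what the ethdev Rx/Tx burst functions do today) can be sketched as
below; all names are hypothetical and do not describe the proposed API:

    typedef int (*async_create_cb_t)(void *ctx, uint32_t queue_id, const void *op);

    struct flow_fp_ops {
            async_create_cb_t create; /* registered by the PMD */
            void *ctx;                /* PMD-private context, not rte_eth_dev */
    };

    /* One slot per port, filled when the PMD registers its callbacks. */
    extern struct flow_fp_ops flow_fp_ops_tbl[RTE_MAX_ETHPORTS];

    static inline int
    flow_async_create(uint16_t port_id, uint32_t queue_id, const void *op)
    {
            const struct flow_fp_ops *ops = &flow_fp_ops_tbl[port_id];

            /* No dereference of struct rte_eth_dev, so it can stay private. */
            return ops->create(ops->ctx, queue_id, op);
    }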

Best regards,
Dariusz Sosnowski


RE: [PATCH] net/e1000: support launchtime feature

2024-01-03 Thread Chuanyu Xue
Hi, Simei

Thank you for your guidance on how to test this feature.

>> Following is how I try to test with testpmd. Please let me know if I did
>> something wrong.
>> 
>>   sudo ./dpdk-testpmd -- -i --forward-mode=txonly
>> 
>>   testpmd> port stop 0
>>   testpmd> set burst 1
>>   testpmd> set txtimes 1,0
>>   testpmd> port config 0 tx_offload send_on_timestamp on
>>   testpmd> port start 0
>>   testpmd> start
>
>When testing launch time feature with igc driver, firstly, some code change 
>made in txonly.c:
>pkt->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST; (this flag should be added to 
>forward PTP packet with hardware Tx timestamp)
>
># ./build/app/dpdk-testpmd -a 0000:81:00.0 -c f -n 4 -- -i 
>--tx-offloads=0x20
>testpmd> set burst 1
>testpmd> set fwd txonly
>testpmd> set txtimes 100,0
>testpmd> start
>
>On receiver side (with tcpdump):
># tcpdump -Q in -ttt -ni ens25f3 --time-stamp-precision=nano -j 
>adapter_unsynced -c 32

Now dpdk-testpmd works well with this patch after I added the flag in txonly.c
as you mentioned.

It is worth noting that I also added `rte_eth_timesync_enable(pi);` in the
function `tx_only_begin` in txonly.c to enable the PTP clock. Otherwise, all
scheduled Tx packets are dropped.
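
Roughly, the two local changes in txonly.c were (sketch of my setup, not a
proposed patch):

    /* In tx_only_begin(): start the PTP clock on the port, otherwise every
     * packet carrying a launch time is dropped.
     */
    rte_eth_timesync_enable(pi);

    /* When preparing each packet: request a hardware Tx timestamp. */
    pkt->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;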

Following are the measurement results on the listener. I use the same 
configuration
as you mentioned for dpdk-testpmd on the talker.

➜  ~ sudo tcpdump -Q in -ttt -ni enp1s0 --time-stamp-precision=nano -j 
adapter_unsynced -c 32

tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
listening on enp1s0, link-type EN10MB (Ethernet), snapshot length 262144 
bytes


 00:00:00.0 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00108 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00108 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
 00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
32 packets captured
118 packets received by filter
0 packets dropped by kernel

Above test is based on the patch v2 with Intel i210 NIC.

- Chuanyu


[PATCH v5 0/7] dts: Port scatter suite over

2024-01-03 Thread jspewock
From: Jeremy Spewock 

v5:

Addressed comments and made changes to files where appropriate. Notably,
added verification to testpmd methods and a flag which hides messages
about "link state change events" in testpmd, as such messages change the
expected format of the terminal and make collecting output unreliable.
Link statuses, however, are verified manually to account for this.

Most other changes surrounded modification of commit message
descriptions and documentation in docstrings.

Jeremy Spewock (7):
  dts: add startup verification and forwarding modes to testpmd shell
  dts: limit EAL parameters to DPDK apps and add parameters to all apps
  dts: add optional packet filtering to scapy sniffer
  dts: add pci addresses to EAL parameters
  dts: allow configuring MTU of ports
  dts: add scatter to the yaml schema
  dts: add pmd_buffer_scatter test suite

 dts/framework/config/conf_yaml_schema.json|   3 +-
 dts/framework/exception.py|   4 +
 dts/framework/remote_session/testpmd_shell.py | 148 +-
 dts/framework/test_suite.py   |  15 +-
 dts/framework/testbed_model/linux_session.py  |   8 +
 dts/framework/testbed_model/os_session.py |   9 ++
 dts/framework/testbed_model/sut_node.py   |  28 +++-
 dts/framework/testbed_model/tg_node.py|  14 +-
 .../traffic_generator/__init__.py |   5 +-
 .../capturing_traffic_generator.py|  22 ++-
 .../testbed_model/traffic_generator/scapy.py  |  28 +++-
 dts/tests/TestSuite_pmd_buffer_scatter.py | 115 ++
 12 files changed, 384 insertions(+), 15 deletions(-)
 create mode 100644 dts/tests/TestSuite_pmd_buffer_scatter.py

-- 
2.43.0



[PATCH v5 1/7] dts: add startup verification and forwarding modes to testpmd shell

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Added commonly used methods in testpmd such as starting and stopping
packet forwarding, changing forward modes, and verifying the link status of
ports so that developers can configure testpmd and start forwarding
through the provided class rather than sending commands to the testpmd
session directly.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/exception.py|   7 +
 dts/framework/remote_session/testpmd_shell.py | 149 +-
 2 files changed, 155 insertions(+), 1 deletion(-)

diff --git a/dts/framework/exception.py b/dts/framework/exception.py
index 658eee2c38..cce1e0231a 100644
--- a/dts/framework/exception.py
+++ b/dts/framework/exception.py
@@ -146,6 +146,13 @@ def __str__(self) -> str:
 return f"Command {self.command} returned a non-zero exit code: 
{self._command_return_code}"
 
 
+class InteractiveCommandExecutionError(DTSError):
+"""An unsuccessful execution of a remote command in an interactive 
environment."""
+
+#:
+severity: ClassVar[ErrorSeverity] = ErrorSeverity.REMOTE_CMD_EXEC_ERR
+
+
 class RemoteDirectoryExistsError(DTSError):
 """A directory that exists on a remote node."""
 
diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 0184cc2e71..f310705fac 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -15,9 +15,15 @@
 testpmd_shell.close()
 """
 
+import time
+from enum import auto
 from pathlib import PurePath
 from typing import Callable, ClassVar
 
+from framework.exception import InteractiveCommandExecutionError
+from framework.settings import SETTINGS
+from framework.utils import StrEnum
+
 from .interactive_shell import InteractiveShell
 
 
@@ -43,14 +49,51 @@ def __str__(self) -> str:
 return self.pci_address
 
 
+class TestPmdForwardingModes(StrEnum):
+r"""The supported packet forwarding modes for :class:`~TestPmdShell`\s."""
+
+#:
+io = auto()
+#:
+mac = auto()
+#:
+macswap = auto()
+#:
+flowgen = auto()
+#:
+rxonly = auto()
+#:
+txonly = auto()
+#:
+csum = auto()
+#:
+icmpecho = auto()
+#:
+ieee1588 = auto()
+#:
+noisy = auto()
+#:
+fivetswap = "5tswap"
+#:
+shared_rxq = "shared-rxq"
+#:
+recycle_mbufs = auto()
+
+
 class TestPmdShell(InteractiveShell):
 """Testpmd interactive shell.
 
 The testpmd shell users should never use
 the :meth:`~.interactive_shell.InteractiveShell.send_command` method 
directly, but rather
 call specialized methods. If there isn't one that satisfies a need, it 
should be added.
+
+Attributes:
+number_of_ports: The number of ports which were allowed on the 
command-line when testpmd
+was started.
 """
 
+number_of_ports: int
+
 #: The path to the testpmd executable.
 path: ClassVar[PurePath] = PurePath("app", "dpdk-testpmd")
 
@@ -65,9 +108,66 @@ class TestPmdShell(InteractiveShell):
 _command_extra_chars: ClassVar[str] = "\n"
 
 def _start_application(self, get_privileged_command: Callable[[str], str] 
| None) -> None:
-self._app_args += " -- -i"
+"""Overrides :meth:`~.interactive_shell._start_application`.
+
+Add flags for starting testpmd in interactive mode and disabling 
messages for link state
+change events before starting the application. Link state is verified 
before starting
+packet forwarding and the messages create unexpected newlines in the 
terminal which
+complicates output collection.
+
+Also find the number of pci addresses which were allowed on the 
command line when the app
+was started.
+"""
+self._app_args += " -- -i --mask-event intr_lsc"
+self.number_of_ports = self._app_args.count("-a ")
 super()._start_application(get_privileged_command)
 
+def start(self, verify: bool = True) -> None:
+"""Start packet forwarding with the current configuration.
+
+Args:
+verify: If :data:`True` , a second start command will be sent in 
an attempt to verify
+packet forwarding started as expected.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
forwarding fails to
+start or ports fail to come up.
+"""
+self.send_command("start")
+if verify:
+# If forwarding was already started, sending "start" again should 
tell us
+start_cmd_output = self.send_command("start")
+if "Packet forwarding already started" not in start_cmd_output:
+self._logger.debug(f"Failed to start packet forwarding: 
\n{start_cmd_output}")
+raise InteractiveCommandExecutionError("Testpmd failed to 
start packet forwarding.")
+
+for port_id in range(self.number_of_ports):
+if not s

[PATCH v5 2/7] dts: limit EAL parameters to DPDK apps and add parameters to all apps

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Changed the factory method for creating interactive apps in the SUT Node
so that EAL parameters would only be passed into DPDK apps since
non-DPDK apps wouldn't be able to process them. Also modified
interactive apps to allow for the ability to pass parameters into the
app on startup so that the applications can be started with certain
configuration steps passed on the command line.

Signed-off-by: Jeremy Spewock 
---

I ended up reverting part of this back to allowing the eal_parameters
argument to be a string. This was because it was causing
mypy errors where the method signature of sut_node did not match
that of node.

 dts/framework/remote_session/testpmd_shell.py |  2 +-
 dts/framework/testbed_model/sut_node.py   | 14 +-
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index f310705fac..8f40e8f40e 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -118,7 +118,7 @@ def _start_application(self, get_privileged_command: 
Callable[[str], str] | None
 Also find the number of pci addresses which were allowed on the 
command line when the app
 was started.
 """
-self._app_args += " -- -i --mask-event intr_lsc"
+self._app_args += " -i --mask-event intr_lsc"
 self.number_of_ports = self._app_args.count("-a ")
 super()._start_application(get_privileged_command)
 
diff --git a/dts/framework/testbed_model/sut_node.py 
b/dts/framework/testbed_model/sut_node.py
index c4acea38d1..4df18bc183 100644
--- a/dts/framework/testbed_model/sut_node.py
+++ b/dts/framework/testbed_model/sut_node.py
@@ -431,6 +431,7 @@ def create_interactive_shell(
 timeout: float = SETTINGS.timeout,
 privileged: bool = False,
 eal_parameters: EalParameters | str | None = None,
+app_parameters: str = "",
 ) -> InteractiveShellType:
 """Extend the factory for interactive session handlers.
 
@@ -449,20 +450,23 @@ def create_interactive_shell(
 eal_parameters: List of EAL parameters to use to launch the app. 
If this
 isn't provided or an empty string is passed, it will default 
to calling
 :meth:`create_eal_parameters`.
+app_parameters: Additional arguments to pass into the application 
on the
+command-line.
 
 Returns:
 An instance of the desired interactive application shell.
 """
-if not eal_parameters:
-eal_parameters = self.create_eal_parameters()
-
-# We need to append the build directory for DPDK apps
+# We need to append the build directory and add EAL parameters for 
DPDK apps
 if shell_cls.dpdk_app:
+if not eal_parameters:
+eal_parameters = self.create_eal_parameters()
+app_parameters = f"{eal_parameters} -- {app_parameters}"
+
 shell_cls.path = self.main_session.join_remote_path(
 self.remote_dpdk_build_dir, shell_cls.path
 )
 
-return super().create_interactive_shell(shell_cls, timeout, 
privileged, str(eal_parameters))
+return super().create_interactive_shell(shell_cls, timeout, 
privileged, app_parameters)
 
 def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
 """Bind all ports on the SUT to a driver.
-- 
2.43.0



[PATCH v5 3/7] dts: add optional packet filtering to scapy sniffer

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Added the options to filter out LLDP and ARP packets when
sniffing for packets with scapy. This was done using BPF filters to
ensure that the noise these packets provide does not interfere with test
cases.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/test_suite.py   | 15 +--
 dts/framework/testbed_model/tg_node.py| 14 --
 .../traffic_generator/__init__.py |  7 -
 .../capturing_traffic_generator.py| 22 ++-
 .../testbed_model/traffic_generator/scapy.py  | 27 +++
 5 files changed, 79 insertions(+), 6 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index dfb391ffbd..ffea917690 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -38,6 +38,7 @@
 from .settings import SETTINGS
 from .test_result import BuildTargetResult, Result, TestCaseResult, 
TestSuiteResult
 from .testbed_model import Port, PortLink, SutNode, TGNode
+from .testbed_model.traffic_generator import PacketFilteringConfig
 from .utils import get_packet_summaries
 
 
@@ -208,7 +209,12 @@ def configure_testbed_ipv4(self, restore: bool = False) -> 
None:
 def _configure_ipv4_forwarding(self, enable: bool) -> None:
 self.sut_node.configure_ipv4_forwarding(enable)
 
-def send_packet_and_capture(self, packet: Packet, duration: float = 1) -> 
list[Packet]:
+def send_packet_and_capture(
+self,
+packet: Packet,
+filter_config: PacketFilteringConfig = PacketFilteringConfig(),
+duration: float = 1,
+) -> list[Packet]:
 """Send and receive `packet` using the associated TG.
 
 Send `packet` through the appropriate interface and receive on the 
appropriate interface.
@@ -216,6 +222,7 @@ def send_packet_and_capture(self, packet: Packet, duration: 
float = 1) -> list[P
 
 Args:
 packet: The packet to send.
+filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
 
 Returns:
@@ -223,7 +230,11 @@ def send_packet_and_capture(self, packet: Packet, 
duration: float = 1) -> list[P
 """
 packet = self._adjust_addresses(packet)
 return self.tg_node.send_packet_and_capture(
-packet, self._tg_port_egress, self._tg_port_ingress, duration
+packet,
+self._tg_port_egress,
+self._tg_port_ingress,
+filter_config,
+duration,
 )
 
 def get_expected_packet(self, packet: Packet) -> Packet:
diff --git a/dts/framework/testbed_model/tg_node.py 
b/dts/framework/testbed_model/tg_node.py
index f269d4c585..d3206e87e0 100644
--- a/dts/framework/testbed_model/tg_node.py
+++ b/dts/framework/testbed_model/tg_node.py
@@ -15,7 +15,11 @@
 
 from .node import Node
 from .port import Port
-from .traffic_generator import CapturingTrafficGenerator, 
create_traffic_generator
+from .traffic_generator import (
+CapturingTrafficGenerator,
+PacketFilteringConfig,
+create_traffic_generator,
+)
 
 
 class TGNode(Node):
@@ -53,6 +57,7 @@ def send_packet_and_capture(
 packet: Packet,
 send_port: Port,
 receive_port: Port,
+filter_config: PacketFilteringConfig = PacketFilteringConfig(),
 duration: float = 1,
 ) -> list[Packet]:
 """Send `packet`, return received traffic.
@@ -65,13 +70,18 @@ def send_packet_and_capture(
 packet: The packet to send.
 send_port: The egress port on the TG node.
 receive_port: The ingress port in the TG node.
+filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
 
 Returns:
  A list of received packets. May be empty if no packets are 
captured.
 """
 return self.traffic_generator.send_packet_and_capture(
-packet, send_port, receive_port, duration
+packet,
+send_port,
+receive_port,
+filter_config,
+duration,
 )
 
 def close(self) -> None:
diff --git a/dts/framework/testbed_model/traffic_generator/__init__.py 
b/dts/framework/testbed_model/traffic_generator/__init__.py
index 11e2bd7d97..0eaf0355cd 100644
--- a/dts/framework/testbed_model/traffic_generator/__init__.py
+++ b/dts/framework/testbed_model/traffic_generator/__init__.py
@@ -14,11 +14,16 @@
 and a capturing traffic generator is required.
 """
 
+# pylama:ignore=W0611
+
 from framework.config import ScapyTrafficGeneratorConfig, TrafficGeneratorType
 from framework.exception import ConfigurationError
 from framework.testbed_model.node import Node
 
-from .capturing_traffic_generator import CapturingTrafficGenerator
+from .capturing_traffic_generator import (
+CapturingTrafficGenerator,
+PacketFilteringConfig,
+)
 

[PATCH v5 5/7] dts: allow configuring MTU of ports

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Adds methods in both os_session and linux session to allow for setting
MTU of port interfaces so that suites that require the sending and
receiving of packets of a specific size, or the rejection of packets
over a certain size, can configure this maximum as needed.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/linux_session.py | 8 
 dts/framework/testbed_model/os_session.py| 9 +
 2 files changed, 17 insertions(+)

diff --git a/dts/framework/testbed_model/linux_session.py 
b/dts/framework/testbed_model/linux_session.py
index 0ab59cef85..5d24030c3d 100644
--- a/dts/framework/testbed_model/linux_session.py
+++ b/dts/framework/testbed_model/linux_session.py
@@ -198,6 +198,14 @@ def configure_port_ip_address(
 verify=True,
 )
 
+def configure_port_mtu(self, mtu: int, port: Port) -> None:
+"""Overrides :meth:`~.os_session.OSSession.configure_port_mtu`."""
+self.send_command(
+f"ip link set dev {port.logical_name} mtu {mtu}",
+privileged=True,
+verify=True,
+)
+
 def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Overrides 
:meth:`~.os_session.OSSession.configure_ipv4_forwarding`."""
 state = 1 if enable else 0
diff --git a/dts/framework/testbed_model/os_session.py 
b/dts/framework/testbed_model/os_session.py
index ac6bb5e112..e42f4d752a 100644
--- a/dts/framework/testbed_model/os_session.py
+++ b/dts/framework/testbed_model/os_session.py
@@ -413,6 +413,15 @@ def configure_port_ip_address(
 delete: If :data:`True`, remove the IP address, otherwise 
configure it.
 """
 
+@abstractmethod
+def configure_port_mtu(self, mtu: int, port: Port) -> None:
+"""Configure `mtu` on `port`.
+
+Args:
+mtu: Desired MTU value.
+port: Port to set `mtu` on.
+"""
+
 @abstractmethod
 def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Enable IPv4 forwarding in the operating system.
-- 
2.43.0



[PATCH v5 4/7] dts: add pci addresses to EAL parameters

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Added allow list to the EAL parameters created in DTS to ensure that
only the relevant PCI devices are considered when launching DPDK
applications.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/sut_node.py | 12 
 1 file changed, 12 insertions(+)

diff --git a/dts/framework/testbed_model/sut_node.py 
b/dts/framework/testbed_model/sut_node.py
index 4df18bc183..cc894fb07d 100644
--- a/dts/framework/testbed_model/sut_node.py
+++ b/dts/framework/testbed_model/sut_node.py
@@ -30,6 +30,7 @@
 from .cpu import LogicalCoreCount, LogicalCoreList
 from .node import Node
 from .os_session import InteractiveShellType, OSSession
+from .port import Port
 from .virtual_device import VirtualDevice
 
 
@@ -46,6 +47,7 @@ def __init__(
 prefix: str,
 no_pci: bool,
 vdevs: list[VirtualDevice],
+ports: list[Port],
 other_eal_param: str,
 ):
 """Initialize the parameters according to inputs.
@@ -63,6 +65,7 @@ def __init__(
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+ports: The list of ports to allow.
 other_eal_param: user defined DPDK EAL parameters, e.g.:
 ``other_eal_param='--single-file-segments'``
 """
@@ -73,6 +76,7 @@ def __init__(
 self._prefix = f"--file-prefix={prefix}"
 self._no_pci = "--no-pci" if no_pci else ""
 self._vdevs = " ".join(f"--vdev {vdev}" for vdev in vdevs)
+self._ports = " ".join(f"-a {port.pci}" for port in ports)
 self._other_eal_param = other_eal_param
 
 def __str__(self) -> str:
@@ -83,6 +87,7 @@ def __str__(self) -> str:
 f"{self._prefix} "
 f"{self._no_pci} "
 f"{self._vdevs} "
+f"{self._ports} "
 f"{self._other_eal_param}"
 )
 
@@ -347,6 +352,7 @@ def create_eal_parameters(
 append_prefix_timestamp: bool = True,
 no_pci: bool = False,
 vdevs: list[VirtualDevice] | None = None,
+ports: list[Port] | None = None,
 other_eal_param: str = "",
 ) -> "EalParameters":
 """Compose the EAL parameters.
@@ -370,6 +376,8 @@ def create_eal_parameters(
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+ports: The list of ports to allow. If :data:`None`, all ports 
listed in `self.ports`
+will be allowed.
 other_eal_param: user defined DPDK EAL parameters, e.g.:
 ``other_eal_param='--single-file-segments'``.
 
@@ -388,12 +396,16 @@ def create_eal_parameters(
 if vdevs is None:
 vdevs = []
 
+if ports is None:
+ports = self.ports
+
 return EalParameters(
 lcore_list=lcore_list,
 memory_channels=self.config.memory_channels,
 prefix=prefix,
 no_pci=no_pci,
 vdevs=vdevs,
+ports=ports,
 other_eal_param=other_eal_param,
 )
 
-- 
2.43.0



[PATCH v5 6/7] dts: add scatter to the yaml schema

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Allow for scatter to be specified in the configuration file.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/config/conf_yaml_schema.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json
index 84e45fe3c2..e6dc50ca7f 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -186,7 +186,8 @@
   "type": "string",
   "enum": [
 "hello_world",
-"os_udp"
+"os_udp",
+"pmd_buffer_scatter"
   ]
 },
 "test_target": {
-- 
2.43.0



[PATCH v5 7/7] dts: add pmd_buffer_scatter test suite

2024-01-03 Thread jspewock
From: Jeremy Spewock 

This test suite provides testing of the support of scattered packets by
Poll Mode Drivers using testpmd, verifying the ability to receive and
transmit scattered multi-segment packets made up of multiple
non-contiguous memory buffers. This is tested through 5 different cases
in which the lengths of the packets sent are less than the mbuf size,
equal to the mbuf size, and 1, 4, and 5 bytes greater than the mbuf size
in order to show both the CRC and the packet data are capable of
existing in the first, second, or both buffers.

Naturally, if the PMD is capable of forwarding scattered packets which
it receives as input, this shows it is capable of both receiving and
transmitting scattered packets.

Signed-off-by: Jeremy Spewock 
---
 dts/tests/TestSuite_pmd_buffer_scatter.py | 126 ++
 1 file changed, 126 insertions(+)
 create mode 100644 dts/tests/TestSuite_pmd_buffer_scatter.py

diff --git a/dts/tests/TestSuite_pmd_buffer_scatter.py 
b/dts/tests/TestSuite_pmd_buffer_scatter.py
new file mode 100644
index 00..8838c3404f
--- /dev/null
+++ b/dts/tests/TestSuite_pmd_buffer_scatter.py
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023-2024 University of New Hampshire
+
+"""Multi-segment packet scattering testing suite.
+
+This testing suite tests the support of transmitting and receiving scattered 
packets. This is shown
+by the Poll Mode Driver being able to forward scattered multi-segment packets 
composed of multiple
+non-contiguous memory buffers. To ensure the receipt of scattered packets, the 
DMA rings of the
+port's RX queues must be configured with mbuf data buffers whose size is less 
than the maximum
+length.
+
+If it is the case that the Poll Mode Driver can forward scattered packets 
which it receives, then
+this suffices to show the Poll Mode Driver is capable of both receiving and 
transmitting scattered
+packets.
+"""
+import struct
+
+from scapy.layers.inet import IP  # type: ignore[import]
+from scapy.layers.l2 import Ether  # type: ignore[import]
+from scapy.packet import Raw  # type: ignore[import]
+from scapy.utils import hexstr  # type: ignore[import]
+
+from framework.remote_session.testpmd_shell import TestPmdForwardingModes, 
TestPmdShell
+from framework.test_suite import TestSuite
+
+
+class PmdBufferScatter(TestSuite):
+"""DPDK PMD packet scattering test suite.
+
+Configure the Rx queues to have mbuf data buffers whose sizes are smaller 
than the maximum
+packet size. Specifically, set mbuf data buffers to have a size of 2048 to 
fit a full 1512-byte
+(CRC included) ethernet frame in a mono-segment packet. The testing of 
scattered packets is
+done by sending a packet whose length is greater than the size of the 
configured size of mbuf
+data buffers. There are a total of 5 packets sent within test cases which 
have lengths less
+than, equal to, and greater than the mbuf size. There are multiple packets 
sent with lengths
+greater than the mbuf size in order to test cases such as:
+
+1. A single byte of the CRC being in a second buffer while the remaining 3 
bytes are stored in
+the first buffer alongside packet data.
+2. The entire CRC being stored in a second buffer while all of the packet 
data is stored in the
+first.
+3. Most of the packet data being stored in the first buffer and a single 
byte of packet data
+stored in a second buffer alongside the CRC.
+"""
+
+def set_up_suite(self) -> None:
+"""Set up the test suite.
+
+Setup:
+Verify that we have at least 2 port links in the current execution 
and increase the MTU
+of both ports on the tg_node to 9000 to support larger packet 
sizes.
+"""
+self.verify(
+len(self._port_links) > 1,
+"Must have at least two port links to run scatter",
+)
+
+self.tg_node.main_session.configure_port_mtu(9000, 
self._tg_port_egress)
+self.tg_node.main_session.configure_port_mtu(9000, 
self._tg_port_ingress)
+
+def scatter_pktgen_send_packet(self, pktsize: int) -> str:
+"""Generate and send packet to the SUT.
+
+Functional test for scatter packets.
+
+Args:
+pktsize: Size of the packet to generate and send.
+"""
+packet = Ether() / IP() / Raw()
+packet.getlayer(2).load = ""
+payload_len = pktsize - len(packet) - 4
+payload = ["58"] * payload_len
+# pack the payload
+for X_in_hex in payload:
+packet.load += struct.pack("=B", int("%s%s" % (X_in_hex[0], 
X_in_hex[1]), 16))
+received_packets = self.send_packet_and_capture(packet)
+self.verify(len(received_packets) > 0, "Did not receive any packets.")
+load = hexstr(received_packets[0].getlayer(2), onlyhex=1)
+
+return load
+
+def pmd_scatter(self, mbsize: int) -> None:
+"""Testpmd support of receiving 

[PATCH v6 0/7] dts: Port scatter suite over

2024-01-03 Thread jspewock
From: Jeremy Spewock 

v6:

Fixed spelling mistake that caused checkpatch failure.

Jeremy Spewock (7):
  dts: add startup verification and forwarding modes to testpmd shell
  dts: limit EAL parameters to DPDK apps and add parameters to all apps
  dts: add optional packet filtering to scapy sniffer
  dts: add pci addresses to EAL parameters
  dts: allow configuring MTU of ports
  dts: add scatter to the yaml schema
  dts: add pmd_buffer_scatter test suite

 dts/framework/config/conf_yaml_schema.json|   3 +-
 dts/framework/exception.py|   7 +
 dts/framework/remote_session/testpmd_shell.py | 149 +-
 dts/framework/test_suite.py   |  15 +-
 dts/framework/testbed_model/linux_session.py  |   8 +
 dts/framework/testbed_model/os_session.py |   9 ++
 dts/framework/testbed_model/sut_node.py   |  26 ++-
 dts/framework/testbed_model/tg_node.py|  14 +-
 .../traffic_generator/__init__.py |   7 +-
 .../capturing_traffic_generator.py|  22 ++-
 .../testbed_model/traffic_generator/scapy.py  |  27 
 dts/tests/TestSuite_pmd_buffer_scatter.py | 126 +++
 12 files changed, 400 insertions(+), 13 deletions(-)
 create mode 100644 dts/tests/TestSuite_pmd_buffer_scatter.py

-- 
2.43.0



[PATCH v6 1/7] dts: add startup verification and forwarding modes to testpmd shell

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Added commonly used methods in testpmd such as starting and stopping
packet forwarding, changing forward modes, and verifying link status of
ports so that developers can configure testpmd and start forwarding
through the provided class rather than sending commands to the testpmd
session directly.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/exception.py|   7 +
 dts/framework/remote_session/testpmd_shell.py | 149 +-
 2 files changed, 155 insertions(+), 1 deletion(-)

diff --git a/dts/framework/exception.py b/dts/framework/exception.py
index 658eee2c38..cce1e0231a 100644
--- a/dts/framework/exception.py
+++ b/dts/framework/exception.py
@@ -146,6 +146,13 @@ def __str__(self) -> str:
 return f"Command {self.command} returned a non-zero exit code: 
{self._command_return_code}"
 
 
+class InteractiveCommandExecutionError(DTSError):
+"""An unsuccessful execution of a remote command in an interactive 
environment."""
+
+#:
+severity: ClassVar[ErrorSeverity] = ErrorSeverity.REMOTE_CMD_EXEC_ERR
+
+
 class RemoteDirectoryExistsError(DTSError):
 """A directory that exists on a remote node."""
 
diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 0184cc2e71..f310705fac 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -15,9 +15,15 @@
 testpmd_shell.close()
 """
 
+import time
+from enum import auto
 from pathlib import PurePath
 from typing import Callable, ClassVar
 
+from framework.exception import InteractiveCommandExecutionError
+from framework.settings import SETTINGS
+from framework.utils import StrEnum
+
 from .interactive_shell import InteractiveShell
 
 
@@ -43,14 +49,51 @@ def __str__(self) -> str:
 return self.pci_address
 
 
+class TestPmdForwardingModes(StrEnum):
+r"""The supported packet forwarding modes for :class:`~TestPmdShell`\s."""
+
+#:
+io = auto()
+#:
+mac = auto()
+#:
+macswap = auto()
+#:
+flowgen = auto()
+#:
+rxonly = auto()
+#:
+txonly = auto()
+#:
+csum = auto()
+#:
+icmpecho = auto()
+#:
+ieee1588 = auto()
+#:
+noisy = auto()
+#:
+fivetswap = "5tswap"
+#:
+shared_rxq = "shared-rxq"
+#:
+recycle_mbufs = auto()
+
+
 class TestPmdShell(InteractiveShell):
 """Testpmd interactive shell.
 
 The testpmd shell users should never use
 the :meth:`~.interactive_shell.InteractiveShell.send_command` method 
directly, but rather
 call specialized methods. If there isn't one that satisfies a need, it 
should be added.
+
+Attributes:
+number_of_ports: The number of ports which were allowed on the 
command-line when testpmd
+was started.
 """
 
+number_of_ports: int
+
 #: The path to the testpmd executable.
 path: ClassVar[PurePath] = PurePath("app", "dpdk-testpmd")
 
@@ -65,9 +108,66 @@ class TestPmdShell(InteractiveShell):
 _command_extra_chars: ClassVar[str] = "\n"
 
 def _start_application(self, get_privileged_command: Callable[[str], str] 
| None) -> None:
-self._app_args += " -- -i"
+"""Overrides :meth:`~.interactive_shell._start_application`.
+
+Add flags for starting testpmd in interactive mode and disabling 
messages for link state
+change events before starting the application. Link state is verified 
before starting
+packet forwarding and the messages create unexpected newlines in the 
terminal which
+complicates output collection.
+
+Also find the number of pci addresses which were allowed on the 
command line when the app
+was started.
+"""
+self._app_args += " -- -i --mask-event intr_lsc"
+self.number_of_ports = self._app_args.count("-a ")
 super()._start_application(get_privileged_command)
 
+def start(self, verify: bool = True) -> None:
+"""Start packet forwarding with the current configuration.
+
+Args:
+verify: If :data:`True` , a second start command will be sent in 
an attempt to verify
+packet forwarding started as expected.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
forwarding fails to
+start or ports fail to come up.
+"""
+self.send_command("start")
+if verify:
+# If forwarding was already started, sending "start" again should 
tell us
+start_cmd_output = self.send_command("start")
+if "Packet forwarding already started" not in start_cmd_output:
+self._logger.debug(f"Failed to start packet forwarding: 
\n{start_cmd_output}")
+raise InteractiveCommandExecutionError("Testpmd failed to 
start packet forwarding.")
+
+for port_id in range(self.number_of_ports):
+if not

[PATCH v6 2/7] dts: limit EAL parameters to DPDK apps and add parameters to all apps

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Changed the factory method for creating interactive apps in the SUT Node
so that EAL parameters would only be passed into DPDK apps since
non-DPDK apps wouldn't be able to process them. Also modified
interactive apps to allow for the ability to pass parameters into the
app on startup so that the applications can be started with certain
configuration steps passed on the command line.

Signed-off-by: Jeremy Spewock 
---

I ended up reverting part of this back to allowing the eal_parameters
argument to be a string. This was because it was causing
mypy errors where the method signature of sut_node did not match
that of node.

 dts/framework/remote_session/testpmd_shell.py |  2 +-
 dts/framework/testbed_model/sut_node.py   | 14 +-
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index f310705fac..8f40e8f40e 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -118,7 +118,7 @@ def _start_application(self, get_privileged_command: 
Callable[[str], str] | None
 Also find the number of pci addresses which were allowed on the 
command line when the app
 was started.
 """
-self._app_args += " -- -i --mask-event intr_lsc"
+self._app_args += " -i --mask-event intr_lsc"
 self.number_of_ports = self._app_args.count("-a ")
 super()._start_application(get_privileged_command)
 
diff --git a/dts/framework/testbed_model/sut_node.py 
b/dts/framework/testbed_model/sut_node.py
index c4acea38d1..4df18bc183 100644
--- a/dts/framework/testbed_model/sut_node.py
+++ b/dts/framework/testbed_model/sut_node.py
@@ -431,6 +431,7 @@ def create_interactive_shell(
 timeout: float = SETTINGS.timeout,
 privileged: bool = False,
 eal_parameters: EalParameters | str | None = None,
+app_parameters: str = "",
 ) -> InteractiveShellType:
 """Extend the factory for interactive session handlers.
 
@@ -449,20 +450,23 @@ def create_interactive_shell(
 eal_parameters: List of EAL parameters to use to launch the app. 
If this
 isn't provided or an empty string is passed, it will default 
to calling
 :meth:`create_eal_parameters`.
+app_parameters: Additional arguments to pass into the application 
on the
+command-line.
 
 Returns:
 An instance of the desired interactive application shell.
 """
-if not eal_parameters:
-eal_parameters = self.create_eal_parameters()
-
-# We need to append the build directory for DPDK apps
+# We need to append the build directory and add EAL parameters for 
DPDK apps
 if shell_cls.dpdk_app:
+if not eal_parameters:
+eal_parameters = self.create_eal_parameters()
+app_parameters = f"{eal_parameters} -- {app_parameters}"
+
 shell_cls.path = self.main_session.join_remote_path(
 self.remote_dpdk_build_dir, shell_cls.path
 )
 
-return super().create_interactive_shell(shell_cls, timeout, 
privileged, str(eal_parameters))
+return super().create_interactive_shell(shell_cls, timeout, 
privileged, app_parameters)
 
 def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
 """Bind all ports on the SUT to a driver.
-- 
2.43.0



[PATCH v6 3/7] dts: add optional packet filtering to scapy sniffer

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Added the options to filter out LLDP and ARP packets when
sniffing for packets with scapy. This was done using BPF filters to
ensure that the noise these packets provide does not interfere with test
cases.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/test_suite.py   | 15 +--
 dts/framework/testbed_model/tg_node.py| 14 --
 .../traffic_generator/__init__.py |  7 -
 .../capturing_traffic_generator.py| 22 ++-
 .../testbed_model/traffic_generator/scapy.py  | 27 +++
 5 files changed, 79 insertions(+), 6 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index dfb391ffbd..ffea917690 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -38,6 +38,7 @@
 from .settings import SETTINGS
 from .test_result import BuildTargetResult, Result, TestCaseResult, 
TestSuiteResult
 from .testbed_model import Port, PortLink, SutNode, TGNode
+from .testbed_model.traffic_generator import PacketFilteringConfig
 from .utils import get_packet_summaries
 
 
@@ -208,7 +209,12 @@ def configure_testbed_ipv4(self, restore: bool = False) -> 
None:
 def _configure_ipv4_forwarding(self, enable: bool) -> None:
 self.sut_node.configure_ipv4_forwarding(enable)
 
-def send_packet_and_capture(self, packet: Packet, duration: float = 1) -> 
list[Packet]:
+def send_packet_and_capture(
+self,
+packet: Packet,
+filter_config: PacketFilteringConfig = PacketFilteringConfig(),
+duration: float = 1,
+) -> list[Packet]:
 """Send and receive `packet` using the associated TG.
 
 Send `packet` through the appropriate interface and receive on the 
appropriate interface.
@@ -216,6 +222,7 @@ def send_packet_and_capture(self, packet: Packet, duration: 
float = 1) -> list[P
 
 Args:
 packet: The packet to send.
+filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
 
 Returns:
@@ -223,7 +230,11 @@ def send_packet_and_capture(self, packet: Packet, 
duration: float = 1) -> list[P
 """
 packet = self._adjust_addresses(packet)
 return self.tg_node.send_packet_and_capture(
-packet, self._tg_port_egress, self._tg_port_ingress, duration
+packet,
+self._tg_port_egress,
+self._tg_port_ingress,
+filter_config,
+duration,
 )
 
 def get_expected_packet(self, packet: Packet) -> Packet:
diff --git a/dts/framework/testbed_model/tg_node.py 
b/dts/framework/testbed_model/tg_node.py
index f269d4c585..d3206e87e0 100644
--- a/dts/framework/testbed_model/tg_node.py
+++ b/dts/framework/testbed_model/tg_node.py
@@ -15,7 +15,11 @@
 
 from .node import Node
 from .port import Port
-from .traffic_generator import CapturingTrafficGenerator, 
create_traffic_generator
+from .traffic_generator import (
+CapturingTrafficGenerator,
+PacketFilteringConfig,
+create_traffic_generator,
+)
 
 
 class TGNode(Node):
@@ -53,6 +57,7 @@ def send_packet_and_capture(
 packet: Packet,
 send_port: Port,
 receive_port: Port,
+filter_config: PacketFilteringConfig = PacketFilteringConfig(),
 duration: float = 1,
 ) -> list[Packet]:
 """Send `packet`, return received traffic.
@@ -65,13 +70,18 @@ def send_packet_and_capture(
 packet: The packet to send.
 send_port: The egress port on the TG node.
 receive_port: The ingress port in the TG node.
+filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
 
 Returns:
  A list of received packets. May be empty if no packets are 
captured.
 """
 return self.traffic_generator.send_packet_and_capture(
-packet, send_port, receive_port, duration
+packet,
+send_port,
+receive_port,
+filter_config,
+duration,
 )
 
 def close(self) -> None:
diff --git a/dts/framework/testbed_model/traffic_generator/__init__.py 
b/dts/framework/testbed_model/traffic_generator/__init__.py
index 11e2bd7d97..0eaf0355cd 100644
--- a/dts/framework/testbed_model/traffic_generator/__init__.py
+++ b/dts/framework/testbed_model/traffic_generator/__init__.py
@@ -14,11 +14,16 @@
 and a capturing traffic generator is required.
 """
 
+# pylama:ignore=W0611
+
 from framework.config import ScapyTrafficGeneratorConfig, TrafficGeneratorType
 from framework.exception import ConfigurationError
 from framework.testbed_model.node import Node
 
-from .capturing_traffic_generator import CapturingTrafficGenerator
+from .capturing_traffic_generator import (
+CapturingTrafficGenerator,
+PacketFilteringConfig,
+)
 

[PATCH v6 5/7] dts: allow configuring MTU of ports

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Adds methods in both os_session and linux session to allow for setting
MTU of port interfaces so that suites that require the sending and
receiving of packets of a specific size, or the rejection of packets
over a certain size, can configure this maximum as needed.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/linux_session.py | 8 
 dts/framework/testbed_model/os_session.py| 9 +
 2 files changed, 17 insertions(+)

diff --git a/dts/framework/testbed_model/linux_session.py 
b/dts/framework/testbed_model/linux_session.py
index 0ab59cef85..5d24030c3d 100644
--- a/dts/framework/testbed_model/linux_session.py
+++ b/dts/framework/testbed_model/linux_session.py
@@ -198,6 +198,14 @@ def configure_port_ip_address(
 verify=True,
 )
 
+def configure_port_mtu(self, mtu: int, port: Port) -> None:
+"""Overrides :meth:`~.os_session.OSSession.configure_port_mtu`."""
+self.send_command(
+f"ip link set dev {port.logical_name} mtu {mtu}",
+privileged=True,
+verify=True,
+)
+
 def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Overrides 
:meth:`~.os_session.OSSession.configure_ipv4_forwarding`."""
 state = 1 if enable else 0
diff --git a/dts/framework/testbed_model/os_session.py 
b/dts/framework/testbed_model/os_session.py
index ac6bb5e112..e42f4d752a 100644
--- a/dts/framework/testbed_model/os_session.py
+++ b/dts/framework/testbed_model/os_session.py
@@ -413,6 +413,15 @@ def configure_port_ip_address(
 delete: If :data:`True`, remove the IP address, otherwise 
configure it.
 """
 
+@abstractmethod
+def configure_port_mtu(self, mtu: int, port: Port) -> None:
+"""Configure `mtu` on `port`.
+
+Args:
+mtu: Desired MTU value.
+port: Port to set `mtu` on.
+"""
+
 @abstractmethod
 def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Enable IPv4 forwarding in the operating system.
-- 
2.43.0



[PATCH v6 4/7] dts: add pci addresses to EAL parameters

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Added allow list to the EAL parameters created in DTS to ensure that
only the relevant PCI devices are considered when launching DPDK
applications.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/sut_node.py | 12 
 1 file changed, 12 insertions(+)

diff --git a/dts/framework/testbed_model/sut_node.py 
b/dts/framework/testbed_model/sut_node.py
index 4df18bc183..cc894fb07d 100644
--- a/dts/framework/testbed_model/sut_node.py
+++ b/dts/framework/testbed_model/sut_node.py
@@ -30,6 +30,7 @@
 from .cpu import LogicalCoreCount, LogicalCoreList
 from .node import Node
 from .os_session import InteractiveShellType, OSSession
+from .port import Port
 from .virtual_device import VirtualDevice
 
 
@@ -46,6 +47,7 @@ def __init__(
 prefix: str,
 no_pci: bool,
 vdevs: list[VirtualDevice],
+ports: list[Port],
 other_eal_param: str,
 ):
 """Initialize the parameters according to inputs.
@@ -63,6 +65,7 @@ def __init__(
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+ports: The list of ports to allow.
 other_eal_param: user defined DPDK EAL parameters, e.g.:
 ``other_eal_param='--single-file-segments'``
 """
@@ -73,6 +76,7 @@ def __init__(
 self._prefix = f"--file-prefix={prefix}"
 self._no_pci = "--no-pci" if no_pci else ""
 self._vdevs = " ".join(f"--vdev {vdev}" for vdev in vdevs)
+self._ports = " ".join(f"-a {port.pci}" for port in ports)
 self._other_eal_param = other_eal_param
 
 def __str__(self) -> str:
@@ -83,6 +87,7 @@ def __str__(self) -> str:
 f"{self._prefix} "
 f"{self._no_pci} "
 f"{self._vdevs} "
+f"{self._ports} "
 f"{self._other_eal_param}"
 )
 
@@ -347,6 +352,7 @@ def create_eal_parameters(
 append_prefix_timestamp: bool = True,
 no_pci: bool = False,
 vdevs: list[VirtualDevice] | None = None,
+ports: list[Port] | None = None,
 other_eal_param: str = "",
 ) -> "EalParameters":
 """Compose the EAL parameters.
@@ -370,6 +376,8 @@ def create_eal_parameters(
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+ports: The list of ports to allow. If :data:`None`, all ports 
listed in `self.ports`
+will be allowed.
 other_eal_param: user defined DPDK EAL parameters, e.g.:
 ``other_eal_param='--single-file-segments'``.
 
@@ -388,12 +396,16 @@ def create_eal_parameters(
 if vdevs is None:
 vdevs = []
 
+if ports is None:
+ports = self.ports
+
 return EalParameters(
 lcore_list=lcore_list,
 memory_channels=self.config.memory_channels,
 prefix=prefix,
 no_pci=no_pci,
 vdevs=vdevs,
+ports=ports,
 other_eal_param=other_eal_param,
 )
 
-- 
2.43.0



[PATCH v6 7/7] dts: add pmd_buffer_scatter test suite

2024-01-03 Thread jspewock
From: Jeremy Spewock 

This test suite provides testing of the support of scattered packets by
Poll Mode Drivers using testpmd, verifying the ability to receive and
transmit scattered multi-segment packets made up of multiple
non-contiguous memory buffers. This is tested through 5 different cases
in which the lengths of the packets sent are less than the mbuf size,
equal to the mbuf size, and 1, 4, and 5 bytes greater than the mbuf size
in order to show both the CRC and the packet data are capable of
existing in the first, second, or both buffers.

Naturally, if the PMD is capable of forwarding scattered packets which
it receives as input, this shows it is capable of both receiving and
transmitting scattered packets.

Signed-off-by: Jeremy Spewock 
---
 dts/tests/TestSuite_pmd_buffer_scatter.py | 126 ++
 1 file changed, 126 insertions(+)
 create mode 100644 dts/tests/TestSuite_pmd_buffer_scatter.py

diff --git a/dts/tests/TestSuite_pmd_buffer_scatter.py 
b/dts/tests/TestSuite_pmd_buffer_scatter.py
new file mode 100644
index 00..8838c3404f
--- /dev/null
+++ b/dts/tests/TestSuite_pmd_buffer_scatter.py
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2023-2024 University of New Hampshire
+
+"""Multi-segment packet scattering testing suite.
+
+This testing suite tests the support of transmitting and receiving scattered 
packets. This is shown
+by the Poll Mode Driver being able to forward scattered multi-segment packets 
composed of multiple
+non-contiguous memory buffers. To ensure the receipt of scattered packets, the 
DMA rings of the
+port's RX queues must be configured with mbuf data buffers whose size is less 
than the maximum
+length.
+
+If it is the case that the Poll Mode Driver can forward scattered packets 
which it receives, then
+this suffices to show the Poll Mode Driver is capable of both receiving and 
transmitting scattered
+packets.
+"""
+import struct
+
+from scapy.layers.inet import IP  # type: ignore[import]
+from scapy.layers.l2 import Ether  # type: ignore[import]
+from scapy.packet import Raw  # type: ignore[import]
+from scapy.utils import hexstr  # type: ignore[import]
+
+from framework.remote_session.testpmd_shell import TestPmdForwardingModes, 
TestPmdShell
+from framework.test_suite import TestSuite
+
+
+class PmdBufferScatter(TestSuite):
+"""DPDK PMD packet scattering test suite.
+
+Configure the Rx queues to have mbuf data buffers whose sizes are smaller 
than the maximum
+packet size. Specifically, set mbuf data buffers to have a size of 2048 to 
fit a full 1512-byte
+(CRC included) ethernet frame in a mono-segment packet. The testing of 
scattered packets is
+done by sending a packet whose length is greater than the size of the 
configured size of mbuf
+data buffers. There are a total of 5 packets sent within test cases which 
have lengths less
+than, equal to, and greater than the mbuf size. There are multiple packets 
sent with lengths
+greater than the mbuf size in order to test cases such as:
+
+1. A single byte of the CRC being in a second buffer while the remaining 3 
bytes are stored in
+the first buffer alongside packet data.
+2. The entire CRC being stored in a second buffer while all of the packet 
data is stored in the
+first.
+3. Most of the packet data being stored in the first buffer and a single 
byte of packet data
+stored in a second buffer alongside the CRC.
+"""
+
+def set_up_suite(self) -> None:
+"""Set up the test suite.
+
+Setup:
+Verify that we have at least 2 port links in the current execution 
and increase the MTU
+of both ports on the tg_node to 9000 to support larger packet 
sizes.
+"""
+self.verify(
+len(self._port_links) > 1,
+"Must have at least two port links to run scatter",
+)
+
+self.tg_node.main_session.configure_port_mtu(9000, 
self._tg_port_egress)
+self.tg_node.main_session.configure_port_mtu(9000, 
self._tg_port_ingress)
+
+def scatter_pktgen_send_packet(self, pktsize: int) -> str:
+"""Generate and send packet to the SUT.
+
+Functional test for scatter packets.
+
+Args:
+pktsize: Size of the packet to generate and send.
+"""
+packet = Ether() / IP() / Raw()
+packet.getlayer(2).load = ""
+payload_len = pktsize - len(packet) - 4
+payload = ["58"] * payload_len
+# pack the payload
+for X_in_hex in payload:
+packet.load += struct.pack("=B", int("%s%s" % (X_in_hex[0], 
X_in_hex[1]), 16))
+received_packets = self.send_packet_and_capture(packet)
+self.verify(len(received_packets) > 0, "Did not receive any packets.")
+load = hexstr(received_packets[0].getlayer(2), onlyhex=1)
+
+return load
+
+def pmd_scatter(self, mbsize: int) -> None:
+"""Testpmd support of receiving 

[PATCH v6 6/7] dts: add scatter to the yaml schema

2024-01-03 Thread jspewock
From: Jeremy Spewock 

Allow for scatter to be specified in the configuration file.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/config/conf_yaml_schema.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json
index 84e45fe3c2..e6dc50ca7f 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -186,7 +186,8 @@
   "type": "string",
   "enum": [
 "hello_world",
-"os_udp"
+"os_udp",
+"pmd_buffer_scatter"
   ]
 },
 "test_target": {
-- 
2.43.0



RE: [PATCH v5 0/3] net/iavf: support Tx LLDP on scalar and AVX512

2024-01-03 Thread Zhang, Qi Z



> -Original Message-
> From: Zeng, ZhichaoX 
> Sent: Thursday, December 28, 2023 11:22 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z ; Zeng, ZhichaoX
> 
> Subject: [PATCH v5 0/3] net/iavf: support Tx LLDP on scalar and AVX512
> 
> This patch set adds an IAVF testpmd command "set tx lldp on|off" which will
> register an mbuf dynfield IAVF_TX_LLDP_DYNFIELD to indicate the need to
> send an LLDP packet. The Tx port needs to be closed first, then "set tx lldp on"
> issued, and the port reopened to select the correct Tx path; only turning it on
> is supported for now.
> 
> IAVF will fill the SWTCH_UPLINK bit in the Tx context descriptor based on the
> mbuf dynfield to send the LLDP packet.
> 
> ---
> v5: check dynfield at dev_start
> v4: fix compile error
> v3: non-lldp packet do not use the context descriptor
> v2: split into patch set, refine commit log
> 
> Zhichao Zeng (3):
>   net/iavf: support Tx LLDP on scalar
>   net/iavf: support Tx LLDP on AVX512
>   net/iavf: add Tx LLDP command
> 
>  doc/guides/rel_notes/release_24_03.rst  |  3 +
>  drivers/net/iavf/iavf_ethdev.c  |  5 ++
>  drivers/net/iavf/iavf_rxtx.c| 21 ++-
>  drivers/net/iavf/iavf_rxtx.h|  6 ++
>  drivers/net/iavf/iavf_rxtx_vec_avx512.c | 19 ++
> drivers/net/iavf/iavf_rxtx_vec_common.h |  5 ++
>  drivers/net/iavf/iavf_testpmd.c | 81 +
>  drivers/net/iavf/meson.build|  3 +
>  8 files changed, 141 insertions(+), 2 deletions(-)  create mode 100644
> drivers/net/iavf/iavf_testpmd.c
> 
> --
> 2.34.1

Acked-by: Qi Zhang 

Applied to dpdk-next-net-intel.

Thanks
Qi
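
As background on the mbuf dynfield mechanism described in the cover letter
above, the sketch below shows how a dynamic field is registered and set on a
packet. The field name and helper functions here are illustrative only, not
the IAVF implementation.

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int lldp_dynfield_offset = -1;

static int
register_lldp_dynfield(void)
{
        static const struct rte_mbuf_dynfield desc = {
                .name = "demo_tx_lldp_dynfield",  /* illustrative name */
                .size = sizeof(uint8_t),
                .align = __alignof__(uint8_t),
        };

        /* Returns the byte offset of the new per-mbuf field, or -1 on error. */
        lldp_dynfield_offset = rte_mbuf_dynfield_register(&desc);
        return lldp_dynfield_offset < 0 ? -1 : 0;
}

/* Mark a packet so the Tx path knows to request the LLDP/switch-uplink
 * treatment in its context descriptor. */
static inline void
mark_as_lldp(struct rte_mbuf *m)
{
        *RTE_MBUF_DYNFIELD(m, lldp_dynfield_offset, uint8_t *) = 1;
}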



Re: [RFC] ethdev: fast path async flow API

2024-01-03 Thread Stephen Hemminger
On Wed, 3 Jan 2024 19:14:49 +
Dariusz Sosnowski  wrote:

> In summary, in my opinion extending the async flow API with bulking 
> capabilities or exposing the queue directly to the application is not 
> desirable.
> This proposal aims to reduce the I-cache overhead in async flow API by 
> reusing the existing design pattern in DPDK - fast path functions are inlined 
> to the application code and they call cached PMD callbacks.

Inlining needs to be more discouraged in DPDK, because it only works if the application
ends up building with DPDK from source.
It doesn't work for the Linux distro packaging model, symbol versioning, etc.
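
To illustrate the design pattern under discussion, below is a minimal sketch in C;
the structure and function names are invented for illustration and are not the
proposed ethdev API. A static inline wrapper, compiled into the application,
indexes a per-port table of cached driver callbacks, so the hot-path call goes
straight from application code to the PMD.

#include <stdint.h>

struct demo_flow_fp_ops {
        int (*push)(void *dev_private, uint32_t queue_id);
};

/* One cached entry per port, filled in by the PMD at configure time. */
static struct demo_flow_fp_ops demo_fp_ops[32];
static void *demo_dev_private[32];

/* Inlined into the application: no lookup through a generic ethdev layer on
 * the hot path, which is what reduces the I-cache and call overhead. */
static inline int
demo_flow_push(uint16_t port_id, uint32_t queue_id)
{
        return demo_fp_ops[port_id].push(demo_dev_private[port_id], queue_id);
}

Because the wrapper body is compiled into the application, any later change to
the cached table's layout requires rebuilding the application, which is the
packaging and ABI concern raised above.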


Re: [PATCH v2] net/ice: fix link update

2024-01-03 Thread Shuang Han
ice_atomic_write_link_status in the ice_link_update function is not protected
by the spinlock, so the following situation is possible:
1. dev_start calls ice_link_update with wait_to_complete = 1 and
gets link_status = 0
2. The LSC interrupt handler calls ice_link_update and gets link_status = 1
3. The LSC interrupt handler calls ice_atomic_write_link_status and updates the
link status to 1
4. dev_start calls ice_atomic_write_link_status and updates the link status to 0
So I think ice_atomic_write_link_status must also be protected by the spinlock.
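
A minimal sketch of what that could look like is below, reusing the
pf->link_lock introduced by the patch quoted underneath. This is only an
illustration of the suggestion (the link-field handling is simplified), not a
submitted fix.

/* Hold pf->link_lock across both the adminq query and the write of the
 * cached link status, so a stale dev_start result cannot overwrite a newer
 * update made by the LSC interrupt handler. */
static int
ice_link_update_locked(struct rte_eth_dev *dev, bool enable_lse)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_link_status link_status;
        struct rte_eth_link link = {0};
        int status;

        rte_spinlock_lock(&pf->link_lock);
        status = ice_aq_get_link_info(hw->port_info, enable_lse,
                                      &link_status, NULL);
        if (status == ICE_SUCCESS) {
                link.link_status = (link_status.link_info & ICE_AQ_LINK_UP) ?
                                   RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
                /* Query and publish are now one atomic step with respect to
                 * the LSC handler. */
                ice_atomic_write_link_status(dev, &link);
        }
        rte_spinlock_unlock(&pf->link_lock);

        return status;
}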

Zhang, Qi Z  wrote on Thursday, December 21, 2023 at 10:44:

>
>
> > -Original Message-
> > From: Yang, Qiming 
> > Sent: Thursday, December 21, 2023 9:44 AM
> > To: Zhang, Qi Z 
> > Cc: dev@dpdk.org; sta...@dpdk.org
> > Subject: RE: [PATCH v2] net/ice: fix link update
> >
> > hi
> >
> > > -Original Message-
> > > From: Zhang, Qi Z 
> > > Sent: Thursday, December 14, 2023 4:41 PM
> > > To: Yang, Qiming 
> > > Cc: dev@dpdk.org; Zhang, Qi Z ; sta...@dpdk.org
> > > Subject: [PATCH v2] net/ice: fix link update
> > >
> > > The ice_aq_get_link_info function is not thread-safe. However, it is
> > > possible to simultaneous invocations during both the dev_start and the
> > > LSC interrupt handler, potentially leading to unexpected adminq
> > > errors. This patch addresses the issue by introducing a thread-safe
> > > wrapper that utilizes a spinlock.
> > >
> > > Fixes: cf911d90e366 ("net/ice: support link update")
> > > Cc: sta...@dpdk.org
> > >
> > > Signed-off-by: Qi Zhang 
> > > ---
> > > v2:
> > > - fix coding style warning.
> > >
> > >  drivers/net/ice/ice_ethdev.c | 26 --
> > > drivers/net/ice/ice_ethdev.h |  4 
> > >  2 files changed, 24 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/drivers/net/ice/ice_ethdev.c
> > > b/drivers/net/ice/ice_ethdev.c index 3ccba4db80..1f8ab5158a 100644
> > > --- a/drivers/net/ice/ice_ethdev.c
> > > +++ b/drivers/net/ice/ice_ethdev.c
> > > @@ -1804,6 +1804,7 @@ ice_pf_setup(struct ice_pf *pf)
> > > }
> > >
> > > pf->main_vsi = vsi;
> > > +   rte_spinlock_init(&pf->link_lock);
> > >
> > > return 0;
> > >  }
> > > @@ -3621,17 +3622,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
> > > return 0;
> > >  }
> > >
> > > +static enum ice_status
> > > +ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse,
> > > +  struct ice_link_status *link) {
> > > +   struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > > +   int ret;
> > > +
> > > +   rte_spinlock_lock(&pf->link_lock);
> > > +
> > > +   ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL);
> > > +
> > > +   rte_spinlock_unlock(&pf->link_lock);
> > > +
> > > +   return ret;
> > > +}
> > > +
> > >  static void
> > >  ice_get_init_link_status(struct rte_eth_dev *dev)  {
> > > -   struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> > > >dev_private);
> > > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data-
> > > >dev_private);
> > > bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
> > > struct ice_link_status link_status;
> > > int ret;
> > >
> > > -   ret = ice_aq_get_link_info(hw->port_info, enable_lse,
> > > -  &link_status, NULL);
> > > +   ret = ice_get_link_info_safe(pf, enable_lse, &link_status);
> > > if (ret != ICE_SUCCESS) {
> > > PMD_DRV_LOG(ERR, "Failed to get link info");
> > > pf->init_link_up = false;
> > > @@ -3996,7 +4011,7 @@ ice_link_update(struct rte_eth_dev *dev, int
> > > wait_to_complete)  {  #define CHECK_INTERVAL 50  /* 50ms */  #define
> > > MAX_REPEAT_TIME 40  /* 2s (40 * 50ms) in total */
> > > -   struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> > > >dev_private);
> > > +   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data-
> > > >dev_private);
> > > struct ice_link_status link_status;
> > > struct rte_eth_link link, old;
> > > int status;
> > > @@ -4010,8 +4025,7 @@ ice_link_update(struct rte_eth_dev *dev, int
> > > wait_to_complete)
> > >
> > > do {
> > > /* Get link status information from hardware */
> > > -   status = ice_aq_get_link_info(hw->port_info, enable_lse,
> > > - &link_status, NULL);
> > > +   status = ice_get_link_info_safe(pf, enable_lse,
> &link_status);
> > > if (status != ICE_SUCCESS) {
> > > link.link_speed = RTE_ETH_SPEED_NUM_100M;
> > > link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; diff -
> > -git
> > > a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> > > abe6dcdc23..d607f028e0 100644
> > > --- a/drivers/net/ice/ice_ethdev.h
> > > +++ b/drivers/net/ice/ice_ethdev.h
> > > @@ -548,6 +548,10 @@ struct ice_pf {
> > > uint64_t rss_hf;
> > > struct ice_tm_conf tm_conf;
> > > uint16_t outer_ethertype;
> > > +   /* lock prevent race condition between lsc interrupt handler
> > > +* and link status update during dev_start.
> > > +*/
> > > +   rte_spinlock_t link_lock;
> > >  

RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next operation

2024-01-03 Thread Suanming Mou



> -Original Message-
> From: Anoob Joseph 
> Sent: Wednesday, January 3, 2024 11:43 PM
> To: Suanming Mou 
> Cc: dev@dpdk.org; Ciara Power 
> Subject: RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next 
> operation
> 
> Hi Suanming,
> 
> Please see inline.
> 
> Thanks,
> Anoob
> 
> > -Original Message-
> > From: Suanming Mou 
> > Sent: Wednesday, January 3, 2024 6:06 PM
> > To: Anoob Joseph 
> > Cc: dev@dpdk.org; Ciara Power 
> > Subject: RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next
> > operation
> >
> > Hi,
> >
> > > -Original Message-
> > > From: Anoob Joseph 
> > > Sent: Wednesday, January 3, 2024 7:22 PM
> > > To: Suanming Mou 
> > > Cc: dev@dpdk.org; Ciara Power 
> > > Subject: RE: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf
> > > next operation
> > >
> > > Hi Suanming,
> > >
> > > Good catch. Please see inline.
> > >
> > > Thanks,
> > > Anoob
> > >
> > > > -Original Message-
> > > > From: Suanming Mou 
> > > > Sent: Wednesday, January 3, 2024 9:24 AM
> > > > To: Ciara Power 
> > > > Cc: dev@dpdk.org
> > > > Subject: [EXT] [PATCH] app/test-crypto-perf: fix invalid mbuf next
> > > > operation
> > > >
> > > > External Email
> > > >
> > > > --
> > > > --
> > > > -- In fill_multi_seg_mbuf(), when remaining_segments is 0,
> > > > rte_mbuf m's next should pointer to NULL instead of a new
> > > > rte_mbuf, that casues setting m->next as NULL out of the while
> > > > loop to the invalid mbuf.
> > > >
> > > > This commit fixes the invalid mbuf next operation.
> > > >
> > > > Fixes: bf9d6702eca9 ("app/crypto-perf: use single mempool")
> > > >
> > > > Signed-off-by: Suanming Mou 
> > > > ---
> > > >  app/test-crypto-perf/cperf_test_common.c | 12 +++-
> > > >  1 file changed, 7 insertions(+), 5 deletions(-)
> > > >
> > > > diff --git a/app/test-crypto-perf/cperf_test_common.c
> > > > b/app/test-crypto- perf/cperf_test_common.c index
> > > > 932aab16df..ad2076dd2e 100644
> > > > --- a/app/test-crypto-perf/cperf_test_common.c
> > > > +++ b/app/test-crypto-perf/cperf_test_common.c
> > > > @@ -72,13 +72,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct
> > > > rte_mempool *mp,
> > > > rte_mbuf_refcnt_set(m, 1);
> > > > next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
> > > > mbuf_hdr_size + segment_sz);
> > > > -   m->next = next_mbuf;
> > > > -   m = next_mbuf;
> > > > -   remaining_segments--;
> > > >
> > > > +   remaining_segments--;
> > > > +   if (remaining_segments > 0) {
> > >
> > > [Anoob] Would it make sense to move assignment of next_mbuf also to here?
> > > That way, the checks will become self explanatory.
> > >   next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
> > >   mbuf_hdr_size + segment_sz);
> > >
> >
> > Make sense. Maybe just like that:
> > m->next = (struct rte_mbuf *) ((uint8_t *) m +
> > mbuf_hdr_size + segment_sz);
> > m = m->next;
> >
> > What do you think?
> 
> [Anoob] Yes. That's even better.
> 
> I think we can have line lengths upto 100 characters now. In case you find it
> easier to put in single line.

OK, thanks for the suggestion.

> 
> >
> > > > +   m->next = next_mbuf;
> > > > +   m = next_mbuf;
> > > > +   } else {
> > > > +   m->next = NULL;
> > > > +   }
> > > > } while (remaining_segments > 0);
> > > > -
> > > > -   m->next = NULL;
> > > >  }
> > > >
> > > >  static void
> > > > --
> > > > 2.34.1



[PATCH v2] app/test-crypto-perf: fix invalid mbuf next operation

2024-01-03 Thread Suanming Mou
In fill_multi_seg_mbuf(), when remaining_segments is 0,
rte_mbuf m's next should point to NULL instead of a
new rte_mbuf; otherwise, setting m->next to NULL outside
the while loop writes to an invalid mbuf.

This commit fixes the invalid mbuf next operation.

Fixes: bf9d6702eca9 ("app/crypto-perf: use single mempool")

Signed-off-by: Suanming Mou 
---

v2: move next_mbuf inside remaining_segments check.

---
 app/test-crypto-perf/cperf_test_common.c | 15 +++
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/app/test-crypto-perf/cperf_test_common.c 
b/app/test-crypto-perf/cperf_test_common.c
index 932aab16df..b3bf9f67e8 100644
--- a/app/test-crypto-perf/cperf_test_common.c
+++ b/app/test-crypto-perf/cperf_test_common.c
@@ -49,7 +49,6 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool 
*mp,
 {
uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
uint16_t remaining_segments = segments_nb;
-   struct rte_mbuf *next_mbuf;
rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
 mbuf_offset + mbuf_hdr_size;
 
@@ -70,15 +69,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool 
*mp,
m->nb_segs = segments_nb;
m->port = 0xff;
rte_mbuf_refcnt_set(m, 1);
-   next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
-   mbuf_hdr_size + segment_sz);
-   m->next = next_mbuf;
-   m = next_mbuf;
-   remaining_segments--;
 
+   remaining_segments--;
+   if (remaining_segments > 0) {
+   m->next = (struct rte_mbuf *)((uint8_t *) m + 
mbuf_hdr_size + segment_sz);
+   m = m->next;
+   } else {
+   m->next = NULL;
+   }
} while (remaining_segments > 0);
-
-   m->next = NULL;
 }
 
 static void
-- 
2.34.1
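
For readers following the fix, a minimal sanity check of the resulting chain
(a sketch only; the helper name is made up and not part of the test app) could
look like this:

#include <rte_mbuf.h>

/* Hypothetical helper: walk the chain produced by fill_multi_seg_mbuf()
 * and confirm it has exactly segments_nb segments and that the last
 * segment's next pointer is NULL, which is what the fix guarantees. */
static int
check_mbuf_chain(const struct rte_mbuf *head, uint16_t segments_nb)
{
        uint16_t count = 0;
        const struct rte_mbuf *m;

        for (m = head; m != NULL; m = m->next)
                count++;

        return count == segments_nb ? 0 : -1;
}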



RE: [PATCH 0/6] net/ice improve qos

2024-01-03 Thread Wu, Wenjun1
> -Original Message-
> From: Zhang, Qi Z 
> Sent: Wednesday, January 3, 2024 3:42 AM
> To: Yang, Qiming ; Wu, Wenjun1
> 
> Cc: dev@dpdk.org; Zhang, Qi Z 
> Subject: [PATCH 0/6] net/ice improve qos
> 
> The patchset enhances the ice rte_tm implementation
> 
> Qi Zhang (6):
>   net/ice: remove redundent code
>   net/ice: support VSI level bandwidth config
>   net/ice: support queue group weight configure
>   net/ice: refactor hardware Tx sched node config
>   net/ice: reset Tx sched node during commit
>   net/ice: support Tx sched commit before dev_start
> 
>  drivers/net/ice/base/ice_sched.c |   4 +-
>  drivers/net/ice/base/ice_sched.h |   7 +-
>  drivers/net/ice/ice_ethdev.c |   9 +
>  drivers/net/ice/ice_ethdev.h |   5 +
>  drivers/net/ice/ice_tm.c | 361 +--
>  5 files changed, 269 insertions(+), 117 deletions(-)
> 
> --
> 2.31.1

Acked-by: Wenjun Wu 


RE: [PATCH v2] net/ice: fix link update

2024-01-03 Thread Zhang, Qi Z
Thanks for raising this.
I agree with your finding: the current implementation ensures data integrity but
does not guarantee 100% correctness, as updates to the link status from the
interrupt handler could be missed in certain cases, though this rarely happens.
Please raise a Bugzilla ticket if you think it is urgent for your usage, and you
are welcome to submit a patch directly.

Regards
Qi

From: Shuang Han 
Sent: Thursday, January 4, 2024 9:17 AM
To: Zhang, Qi Z 
Cc: Yang, Qiming ; dev@dpdk.org; sta...@dpdk.org
Subject: Re: [PATCH v2] net/ice: fix link update

ice_atomic_write_link_status in the ice_link_update function is not protected
by the spinlock, so the following situation may occur:
1. dev_start calls ice_link_update with wait_to_complete = 1 and gets
   link_status = 0
2. The LSC interrupt handler calls ice_link_update and gets link_status = 1
3. The LSC interrupt handler calls ice_atomic_write_link_status, updating the
   link status to 1
4. dev_start calls ice_atomic_write_link_status, updating the link status to 0
So I think ice_atomic_write_link_status must also be protected by the spinlock.
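
If that race needs to be closed, one possible direction, sketched here purely
for illustration (it assumes the pf->link_lock introduced by this patch and the
existing ice_atomic_write_link_status() helper in ice_ethdev.c; the function
name below is made up), would be to publish the link status under the same
lock used for the adminq query:

/* Sketch: query the link info and publish it while holding pf->link_lock,
 * so the LSC interrupt handler cannot interleave between the adminq call
 * and the status write. */
static int
ice_link_update_locked(struct rte_eth_dev *dev, bool enable_lse,
                       struct rte_eth_link *link)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_link_status link_status;
        int ret;

        rte_spinlock_lock(&pf->link_lock);
        ret = ice_aq_get_link_info(hw->port_info, enable_lse,
                                   &link_status, NULL);
        if (ret == ICE_SUCCESS) {
                /* fill *link from link_status here, then publish it
                 * while still holding the lock */
                ice_atomic_write_link_status(dev, link);
        }
        rte_spinlock_unlock(&pf->link_lock);

        return ret;
}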

Zhang, Qi Z <qi.z.zh...@intel.com> wrote on Thursday, December 21, 2023 at 10:44:


> -Original Message-
> From: Yang, Qiming <qiming.y...@intel.com>
> Sent: Thursday, December 21, 2023 9:44 AM
> To: Zhang, Qi Z <qi.z.zh...@intel.com>
> Cc: dev@dpdk.org; sta...@dpdk.org
> Subject: RE: [PATCH v2] net/ice: fix link update
>
> hi
>
> > -Original Message-
> > From: Zhang, Qi Z <qi.z.zh...@intel.com>
> > Sent: Thursday, December 14, 2023 4:41 PM
> > To: Yang, Qiming <qiming.y...@intel.com>
> > Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zh...@intel.com>; sta...@dpdk.org
> > Subject: [PATCH v2] net/ice: fix link update
> >
> > The ice_aq_get_link_info function is not thread-safe. However, it is
> > possible to simultaneous invocations during both the dev_start and the
> > LSC interrupt handler, potentially leading to unexpected adminq
> > errors. This patch addresses the issue by introducing a thread-safe
> > wrapper that utilizes a spinlock.
> >
> > Fixes: cf911d90e366 ("net/ice: support link update")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
> > ---
> > v2:
> > - fix coding style warning.
> >
> >  drivers/net/ice/ice_ethdev.c | 26 --
> > drivers/net/ice/ice_ethdev.h |  4 
> >  2 files changed, 24 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_ethdev.c
> > b/drivers/net/ice/ice_ethdev.c index 3ccba4db80..1f8ab5158a 100644
> > --- a/drivers/net/ice/ice_ethdev.c
> > +++ b/drivers/net/ice/ice_ethdev.c
> > @@ -1804,6 +1804,7 @@ ice_pf_setup(struct ice_pf *pf)
> > }
> >
> > pf->main_vsi = vsi;
> > +   rte_spinlock_init(&pf->link_lock);
> >
> > return 0;
> >  }
> > @@ -3621,17 +3622,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)
> > return 0;
> >  }
> >
> > +static enum ice_status
> > +ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse,
> > +  struct ice_link_status *link) {
> > +   struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +   int ret;
> > +
> > +   rte_spinlock_lock(&pf->link_lock);
> > +
> > +   ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL);
> > +
> > +   rte_spinlock_unlock(&pf->link_lock);
> > +
> > +   return ret;
> > +}
> > +
> >  static void
> >  ice_get_init_link_status(struct rte_eth_dev *dev)  {
> > -   struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
> > struct ice_link_status link_status;
> > int ret;
> >
> > -   ret = ice_aq_get_link_info(hw->port_info, enable_lse,
> > -  &link_status, NULL);
> > +   ret = ice_get_link_info_safe(pf, enable_lse, &link_status);
> > if (ret != ICE_SUCCESS) {
> > PMD_DRV_LOG(ERR, "Failed to get link info");
> > pf->init_link_up = false;
> > @@ -3996,7 +4011,7 @@ ice_link_update(struct rte_eth_dev *dev, int
> > wait_to_complete)  {  #define CHECK_INTERVAL 50  /* 50ms */  #define
> > MAX_REPEAT_TIME 40  /* 2s (40 * 50ms) in total */
> > -   struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> > >dev_private);
> > +   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > struct ice_link_status link_status;
> > struct rte_eth_link link, old;
> > int status;
> > @@ -4010,8 +4025,7 @@ ice_link_update(struct rte_eth_dev *dev, int
> > wait_to_complete)
> >
> > do {
> > /* Get link status information from hardware */
> > -   status = ice_aq_get_link_info(hw->port_info, enable_lse,
> > - &link_status, NULL);
> > +   status = ice_get_link_info_safe(pf, enable_lse, &link_status);
> > if (status != ICE_SUCCESS) {
> >

RE: [PATCH] net/e1000: support launchtime feature

2024-01-03 Thread Su, Simei
Hi Chuanyu,

> -Original Message-
> From: Chuanyu Xue 
> Sent: Thursday, January 4, 2024 5:52 AM
> To: Su, Simei 
> Cc: Xing, Beilei ; chuanyu@uconn.edu;
> dev@dpdk.org; Zhang, Qi Z ; Lu, Wenzhuo
> 
> Subject: RE: [PATCH] net/e1000: support launchtime feature
> 
> Hi, Simei
> 
> Thank you for your guidance on how to test this feature.
> 
> >> Following is how I try to test with testpmd. Please let me know if I
> >> did something wrong.
> >>
> >>   sudo ./dpdk-testpmd -- -i --forward-mode=txonly
> >>
> >>   testpmd> port stop 0
> >>   testpmd> set burst 1
> >>   testpmd> set txtimes 1,0
> >>   testpmd> port config 0 tx_offload send_on_timestamp on
> >>   testpmd> port start 0
> >>   testpmd> start
> >
> >When testing the launch time feature with the igc driver, firstly, a code
> >change is made in txonly.c:
> >pkt->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
> >(this flag should be added to forward PTP packets with a hardware Tx
> >timestamp)
> >
> ># ./build/app/dpdk-testpmd -a :81:00.0 -c f -n 4 -- -i
> >--tx-offloads=0x20
> >testpmd> set burst 1
> >testpmd> set fwd txonly
> >testpmd> set txtimes 100,0
> >testpmd> start
> >
> >On receiver side (with tcpdump):
> ># tcpdump -Q in -ttt -ni ens25f3 --time-stamp-precision=nano -j
> >adapter_unsynced -c 32
> 
> Now dpdk-testpmd works well with this patch after I added the flag in
> txonly.c as you mentioned.

OK, good.

> 
> It is worth noting that I also added `rte_eth_timesync_enable(pi);` in the
> function `tx_only_begin` in txonly.c to enable the PTP clock. Otherwise, all 
> Tx
> packets scheduled are dropped.

Yes, got it.
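
For anyone reproducing this test, the local txonly.c changes discussed in this
thread could look roughly like the sketch below (the helper names are
illustrative and not part of testpmd):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Called once per port before forwarding starts (cf. tx_only_begin()):
 * enable the PTP clock so that time-scheduled packets are not dropped. */
static int
enable_port_ptp_clock(uint16_t port_id)
{
        return rte_eth_timesync_enable(port_id);
}

/* Applied to every generated packet: request a hardware Tx timestamp so
 * the launch time path is exercised. */
static void
mark_pkt_for_hw_tx_timestamp(struct rte_mbuf *pkt)
{
        pkt->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
}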

> 
> Following are the measurement results on the listener. I use the same
> configuration as you mentioned for dpdk-testpmd on the talker.
> 
> ➜  ~ sudo tcpdump -Q in -ttt -ni enp1s0 --time-stamp-precision=nano -j
> adapter_unsynced -c 32
> 
> tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
> listening on enp1s0, link-type EN10MB (Ethernet), snapshot length
> 262144 bytes
> 
> 
>  00:00:00.0 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00108 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00108 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
>  00:00:00.00100 IP 198.18.0.1.9 > 198.18.0.2.9: UDP, length 22
> 32 packets captured
> 118 packets received by filter
> 0 packets dropped by kernel
> 
> Above test is based on the patch v2 with Intel i210 NIC.

OK. I will review v2 patch.

Thanks,
Simei 

> 
> - Chuanyu


RE: [PATCH 0/6] net/ice improve qos

2024-01-03 Thread Zhang, Qi Z



> -Original Message-
> From: Wu, Wenjun1 
> Sent: Thursday, January 4, 2024 10:29 AM
> To: Zhang, Qi Z ; Yang, Qiming
> 
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 0/6] net/ice improve qos
> 
> > -Original Message-
> > From: Zhang, Qi Z 
> > Sent: Wednesday, January 3, 2024 3:42 AM
> > To: Yang, Qiming ; Wu, Wenjun1
> > 
> > Cc: dev@dpdk.org; Zhang, Qi Z 
> > Subject: [PATCH 0/6] net/ice improve qos
> >
> > The patchset enhances the ice rte_tm implementation
> >
> > Qi Zhang (6):
> >   net/ice: remove redundent code
> >   net/ice: support VSI level bandwidth config
> >   net/ice: support queue group weight configure
> >   net/ice: refactor hardware Tx sched node config
> >   net/ice: reset Tx sched node during commit
> >   net/ice: support Tx sched commit before dev_start
> >
> >  drivers/net/ice/base/ice_sched.c |   4 +-
> >  drivers/net/ice/base/ice_sched.h |   7 +-
> >  drivers/net/ice/ice_ethdev.c |   9 +
> >  drivers/net/ice/ice_ethdev.h |   5 +
> >  drivers/net/ice/ice_tm.c | 361 +--
> >  5 files changed, 269 insertions(+), 117 deletions(-)
> >
> > --
> > 2.31.1
> 
> Acked-by: Wenjun Wu 

Applied to dpdk-next-net-intel after fix the CI typo warning in PATCH 1/6.

Thanks
Qi


RE: [PATCH v2] net/e1000: support launchtime feature

2024-01-03 Thread Su, Simei


> -Original Message-
> From: Chuanyu Xue 
> Sent: Sunday, December 31, 2023 12:35 AM
> To: Su, Simei ; Lu, Wenzhuo ;
> Zhang, Qi Z ; Xing, Beilei 
> Cc: dev@dpdk.org; Chuanyu Xue 
> Subject: [PATCH v2] net/e1000: support launchtime feature
> 
> Enable the time-based scheduled Tx of packets based on the
> RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP flag. The launchtime defines
> the packet transmission time based on PTP clock at MAC layer, which should
> be set to the advanced transmit descriptor.
> 
> Signed-off-by: Chuanyu Xue 
> ---
> change log:
> 
> v2:
> - Add delay compensation for i210 NIC by setting tx offset register.
> - Revise read_clock function.
> 
>  drivers/net/e1000/base/e1000_regs.h |  1 +
>  drivers/net/e1000/e1000_ethdev.h| 14 +++
>  drivers/net/e1000/igb_ethdev.c  | 63
> -
>  drivers/net/e1000/igb_rxtx.c| 42 +++
>  4 files changed, 112 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/e1000/base/e1000_regs.h
> b/drivers/net/e1000/base/e1000_regs.h
> index d44de59c29..092d9d71e6 100644
> --- a/drivers/net/e1000/base/e1000_regs.h
> +++ b/drivers/net/e1000/base/e1000_regs.h
> @@ -162,6 +162,7 @@
> 
>  /* QAV Tx mode control register */
>  #define E1000_I210_TQAVCTRL  0x3570
> +#define E1000_I210_LAUNCH_OS0 0x3578
> 
>  /* QAV Tx mode control register bitfields masks */
>  /* QAV enable */
> diff --git a/drivers/net/e1000/e1000_ethdev.h
> b/drivers/net/e1000/e1000_ethdev.h
> index 718a9746ed..339ae1f4b6 100644
> --- a/drivers/net/e1000/e1000_ethdev.h
> +++ b/drivers/net/e1000/e1000_ethdev.h
> @@ -382,6 +382,20 @@ extern struct igb_rss_filter_list igb_filter_rss_list;
> TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);  extern struct
> igb_flow_mem_list igb_flow_list;
> 
> +/*
> + * Macros to compensate the constant latency observed in i210 for
> +launch time
> + *
> + * launch time = (offset_speed - offset_base + txtime) * 32
> + * offset_speed is speed dependent, set in E1000_I210_LAUNCH_OS0  */
> +#define IGB_I210_TX_OFFSET_BASE  0xffe0
> +#define IGB_I210_TX_OFFSET_SPEED_10  0xc7a0
> +#define IGB_I210_TX_OFFSET_SPEED_100 0x86e0
> +#define IGB_I210_TX_OFFSET_SPEED_10000xbe00
> +
> +extern uint64_t igb_tx_timestamp_dynflag; extern int
> +igb_tx_timestamp_dynfield_offset;
> +
>  extern const struct rte_flow_ops igb_flow_ops;
> 
>  /*
> diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
> index 8858f975f8..2262035710 100644
> --- a/drivers/net/e1000/igb_ethdev.c
> +++ b/drivers/net/e1000/igb_ethdev.c
> @@ -223,6 +223,7 @@ static int igb_timesync_read_time(struct rte_eth_dev
> *dev,
> struct timespec *timestamp);
>  static int igb_timesync_write_time(struct rte_eth_dev *dev,
>  const struct timespec *timestamp);
> +static int eth_igb_read_clock(struct rte_eth_dev *dev, uint64_t
> +*clock);
>  static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
>   uint16_t queue_id);
>  static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, @@ -313,6
> +314,9 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {
>   { .vendor_id = 0, /* sentinel */ },
>  };
> 
> +uint64_t igb_tx_timestamp_dynflag;
> +int igb_tx_timestamp_dynfield_offset = -1;
> +
>  static const struct rte_eth_desc_lim rx_desc_lim = {
>   .nb_max = E1000_MAX_RING_DESC,
>   .nb_min = E1000_MIN_RING_DESC,
> @@ -389,6 +393,7 @@ static const struct eth_dev_ops eth_igb_ops = {
>   .timesync_adjust_time = igb_timesync_adjust_time,
>   .timesync_read_time   = igb_timesync_read_time,
>   .timesync_write_time  = igb_timesync_write_time,
> + .read_clock   = eth_igb_read_clock,
>  };
> 
>  /*
> @@ -1188,6 +1193,40 @@ eth_igb_rxtx_control(struct rte_eth_dev *dev,
>   E1000_WRITE_FLUSH(hw);
>  }
> 
> +
> +static uint32_t igb_tx_offset(struct rte_eth_dev *dev) {
> + struct e1000_hw *hw =
> + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + uint16_t duplex, speed;
> + hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
> +
> + uint32_t launch_os0 = E1000_READ_REG(hw, E1000_I210_LAUNCH_OS0);
> + if (hw->mac.type != e1000_i210) {
> + /* Set launch offset to base, no compensation */
> + launch_os0 |= IGB_I210_TX_OFFSET_BASE;
> + } else {
> + /* Set launch offset depend on link speeds */
> + switch (speed) {
> + case SPEED_10:
> + launch_os0 |= IGB_I210_TX_OFFSET_SPEED_10;
> + break;
> + case SPEED_100:
> + launch_os0 |= IGB_I210_TX_OFFSET_SPEED_100;
> + break;
> + case SPEED_1000:
> + launch_os0 |= IGB_I210_TX_OFFSET_SPEED_1000;
> + break;
> + default:
> +

[PATCH v2] net/i40e: updated 23.11 recommended matching list

2024-01-03 Thread Simei Su
Add suggested DPDK/kernel driver/firmware version matching list.

Signed-off-by: Simei Su 
---
v2:
* Add commit log.

 doc/guides/nics/i40e.rst | 4 
 1 file changed, 4 insertions(+)

diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 3432eab..15689ac 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -104,6 +104,8 @@ For X710/XL710/XXV710,
+--+---+--+
| DPDK version | Kernel driver version | Firmware version |
+==+===+==+
+   |23.11 | 2.23.17   |   9.30   |
+   +--+---+--+
|23.07 | 2.22.20   |   9.20   |
+--+---+--+
|23.03 | 2.22.18   |   9.20   |
@@ -167,6 +169,8 @@ For X722,
+--+---+--+
| DPDK version | Kernel driver version | Firmware version |
+==+===+==+
+   |23.11 | 2.23.17   |   6.20   |
+   +--+---+--+
|23.07 | 2.22.20   |   6.20   |
+--+---+--+
|23.03 | 2.22.18   |   6.20   |
-- 
2.9.5



RE: [PATCH v2] net/i40e: updated 23.11 recommended matching list

2024-01-03 Thread Xing, Beilei



> -Original Message-
> From: Su, Simei 
> Sent: Thursday, January 4, 2024 11:15 AM
> To: Xing, Beilei ; Zhang, Qi Z 
> Cc: dev@dpdk.org; Yang, Qiming ; Su, Simei
> 
> Subject: [PATCH v2] net/i40e: updated 23.11 recommended matching list
> 
> Add suggested DPDK/kernel driver/firmware version matching list.
> 
> Signed-off-by: Simei Su 
> ---
> v2:
> * Add commit log.
> 
>  doc/guides/nics/i40e.rst | 4 
>  1 file changed, 4 insertions(+)
> 
> diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst index
> 3432eab..15689ac 100644
> --- a/doc/guides/nics/i40e.rst
> +++ b/doc/guides/nics/i40e.rst
> @@ -104,6 +104,8 @@ For X710/XL710/XXV710,
> +--+---+--+
> | DPDK version | Kernel driver version | Firmware version |
> +==+===+==+
> +   |23.11 | 2.23.17   |   9.30   |
> +   +--+---+--+
> |23.07 | 2.22.20   |   9.20   |
> +--+---+--+
> |23.03 | 2.22.18   |   9.20   |
> @@ -167,6 +169,8 @@ For X722,
> +--+---+--+
> | DPDK version | Kernel driver version | Firmware version |
> +==+===+==+
> +   |23.11 | 2.23.17   |   6.20   |
> +   +--+---+--+
> |23.07 | 2.22.20   |   6.20   |
> +--+---+--+
> |23.03 | 2.22.18   |   6.20   |
> --
> 2.9.5


Acked-by: Beilei Xing 


RE: [EXT] [PATCH v2] app/test-crypto-perf: fix invalid mbuf next operation

2024-01-03 Thread Anoob Joseph
> In fill_multi_seg_mbuf(), when remaining_segments is 0, rte_mbuf m's next
> should point to NULL instead of a new rte_mbuf; otherwise, setting m->next
> to NULL outside the while loop writes to an invalid mbuf.
> 
> This commit fixes the invalid mbuf next operation.
> 
> Fixes: bf9d6702eca9 ("app/crypto-perf: use single mempool")
> 
> Signed-off-by: Suanming Mou 

Acked-by: Anoob Joseph 




RE: [EXT] [PATCH 2/2] app/test-crypto-perf: fix encrypt operation verify

2024-01-03 Thread Anoob Joseph
Hi Suanming,

Please see inline.

Thanks,
Anoob

> -Original Message-
> From: Suanming Mou 
> Sent: Wednesday, January 3, 2024 9:26 AM
> To: Ciara Power 
> Cc: dev@dpdk.org
> Subject: [EXT] [PATCH 2/2] app/test-crypto-perf: fix encrypt operation verify
> 
> External Email
> 
> --
> AEAD users RTE_CRYPTO_AEAD_OP_* with aead_op and CIPHER uses
[Anoob] users -> uses

> RTE_CRYPTO_CIPHER_OP_* with cipher_op in current code.
> 
> This commit aligns aead_op and cipher_op operation to fix incorrect AEAD
> verification.
> 
> Fixes: df52cb3b6e13 ("app/crypto-perf: move verify as single test type")
> 
> Signed-off-by: Suanming Mou 
> ---
>  app/test-crypto-perf/cperf_test_verify.c | 9 +++--
>  1 file changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-
> perf/cperf_test_verify.c
> index 8aa714b969..525a2b1373 100644
> --- a/app/test-crypto-perf/cperf_test_verify.c
> +++ b/app/test-crypto-perf/cperf_test_verify.c
> @@ -113,6 +113,7 @@ cperf_verify_op(struct rte_crypto_op *op,
>   uint8_t *data;
>   uint32_t cipher_offset, auth_offset;
>   uint8_t cipher, auth;
> + bool is_encrypt = false;
>   int res = 0;
> 
>   if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) @@ -154,12
> +155,14 @@ cperf_verify_op(struct rte_crypto_op *op,
>   cipher_offset = 0;
>   auth = 0;
>   auth_offset = 0;
> + is_encrypt = options->cipher_op ==
> RTE_CRYPTO_CIPHER_OP_ENCRYPT;
>   break;
>   case CPERF_CIPHER_THEN_AUTH:
>   cipher = 1;
>   cipher_offset = 0;
>   auth = 1;
>   auth_offset = options->test_buffer_size;
> + is_encrypt = options->cipher_op ==
> RTE_CRYPTO_CIPHER_OP_ENCRYPT;
>   break;
>   case CPERF_AUTH_ONLY:
>   cipher = 0;
> @@ -172,12 +175,14 @@ cperf_verify_op(struct rte_crypto_op *op,
>   cipher_offset = 0;
>   auth = 1;
>   auth_offset = options->test_buffer_size;
> + is_encrypt = options->cipher_op ==
> RTE_CRYPTO_CIPHER_OP_ENCRYPT;
>   break;
>   case CPERF_AEAD:
>   cipher = 1;
>   cipher_offset = 0;
> - auth = 1;
> + auth = options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT;
>   auth_offset = options->test_buffer_size;
> + is_encrypt = !!auth;
>   break;
>   default:
>   res = 1;
> @@ -185,7 +190,7 @@ cperf_verify_op(struct rte_crypto_op *op,
>   }
> 
>   if (cipher == 1) {
> - if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
> + if (is_encrypt)

[Anoob] A similar check is there under 'auth == 1' check, right? Won't that 
also need fixing?

if (auth == 1) {
if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)

I think some renaming of the local variables might make the code better.
bool cipher, digest_verify = false, is_encrypt = false;

case CPERF_CIPHER_THEN_AUTH:
cipher = true;
cipher_offset = 0;
if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
is_encrypt = true;
digest_verify = true; /* Assumption - options->auth_op 
== RTE_CRYPTO_AUTH_OP_GENERATE is verified elsewhere */
auth_offset = options->test_buffer_size;
}
break;
<...>
case CPERF_AEAD:
cipher = true;
cipher_offset = 0;
 if (options->aead_op == 
RTE_CRYPTO_AEAD_OP_ENCRYPT) {
is_encrypt = true;
digest_verify = true;
auth_offset = options->test_buffer_size;
}

What do you think?

>   res += !!memcmp(data + cipher_offset,
>   vector->ciphertext.data,
>   options->test_buffer_size);
> --
> 2.34.1
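
Spelled out, the rework sketched above might look roughly like the following
(illustrative only; it assumes the cperf_options fields already used in this
file, and in the real patch the logic would stay inline in cperf_verify_op()):

#include <stdbool.h>
#include <rte_crypto.h>
#include "cperf_options.h"

/* Sketch: compute the verification flags from the test options. */
static int
cperf_verify_flags(const struct cperf_options *options, bool *cipher,
                   bool *digest_verify, bool *is_encrypt,
                   uint32_t *cipher_offset, uint32_t *auth_offset)
{
        *cipher = false;
        *digest_verify = false;
        *is_encrypt = false;
        *cipher_offset = 0;
        *auth_offset = 0;

        switch (options->op_type) {
        case CPERF_CIPHER_THEN_AUTH:
                *cipher = true;
                if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        *is_encrypt = true;
                        /* auth_op == RTE_CRYPTO_AUTH_OP_GENERATE is assumed
                         * to be validated elsewhere, as noted above. */
                        *digest_verify = true;
                        *auth_offset = options->test_buffer_size;
                }
                break;
        case CPERF_AEAD:
                *cipher = true;
                if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                        *is_encrypt = true;
                        *digest_verify = true;
                        *auth_offset = options->test_buffer_size;
                }
                break;
        /* CPERF_CIPHER_ONLY, CPERF_AUTH_ONLY and CPERF_AUTH_THEN_CIPHER
         * would be handled along the same lines. */
        default:
                return 1;
        }

        return 0;
}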



RE: [PATCH v2] net/i40e: updated 23.11 recommended matching list

2024-01-03 Thread Zhang, Qi Z



> -Original Message-
> From: Xing, Beilei 
> Sent: Thursday, January 4, 2024 11:30 AM
> To: Su, Simei ; Zhang, Qi Z 
> Cc: dev@dpdk.org; Yang, Qiming 
> Subject: RE: [PATCH v2] net/i40e: updated 23.11 recommended matching list
> 
> 
> 
> > -Original Message-
> > From: Su, Simei 
> > Sent: Thursday, January 4, 2024 11:15 AM
> > To: Xing, Beilei ; Zhang, Qi Z
> > 
> > Cc: dev@dpdk.org; Yang, Qiming ; Su, Simei
> > 
> > Subject: [PATCH v2] net/i40e: updated 23.11 recommended matching list
> >
> > Add suggested DPDK/kernel driver/firmware version matching list.
> >
> > Signed-off-by: Simei Su 
> > ---
> > v2:
> > * Add commit log.
> >
> >  doc/guides/nics/i40e.rst | 4 
> >  1 file changed, 4 insertions(+)
> >
> > diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst index
> > 3432eab..15689ac 100644
> > --- a/doc/guides/nics/i40e.rst
> > +++ b/doc/guides/nics/i40e.rst
> > @@ -104,6 +104,8 @@ For X710/XL710/XXV710,
> > +--+---+--+
> > | DPDK version | Kernel driver version | Firmware version |
> > +==+===+==+
> > +   |23.11 | 2.23.17   |   9.30   |
> > +   +--+---+--+
> > |23.07 | 2.22.20   |   9.20   |
> > +--+---+--+
> > |23.03 | 2.22.18   |   9.20   |
> > @@ -167,6 +169,8 @@ For X722,
> > +--+---+--+
> > | DPDK version | Kernel driver version | Firmware version |
> > +==+===+==+
> > +   |23.11 | 2.23.17   |   6.20   |
> > +   +--+---+--+
> > |23.07 | 2.22.20   |   6.20   |
> > +--+---+--+
> > |23.03 | 2.22.18   |   6.20   |
> > --
> > 2.9.5
> 
> 
> Acked-by: Beilei Xing 

Applied to dpdk-next-net-intel.

Thanks
Qi


Re: [PATCH v1] app/testpmd: use Tx preparation in txonly engine

2024-01-03 Thread Jerin Jacob
On Wed, Jan 3, 2024 at 7:38 AM Kaiwen Deng  wrote:
>
> Txonly forwarding engine does not call the Tx preparation API
> before transmitting packets. This may cause some problems.
>
> TSO breaks when MSS spans more than 8 data fragments. Those
> packets will be dropped by Tx preparation API, but it will cause
> MDD event if txonly forwarding engine does not call the Tx preparation
> API before transmitting packets.
>
> We can reproduce this issue by these steps list blow on ICE and I40e.
>
> ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c 0xf -n 4 -- -i
> --tx-offloads=0x8000
>
> testpmd>set txpkts 64,128,256,512,64,128,256,512,512
> testpmd>set burst 1
> testpmd>start tx_first 1
>
> This commit will use Tx preparation API in txonly forwarding engine.
>
> Fixes: 655131ccf727 ("app/testpmd: factorize fwd engines Tx")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Kaiwen Deng 
> ---
>  app/test-pmd/txonly.c | 13 -
>  1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
> index c2b88764be..60d69be3f6 100644
> --- a/app/test-pmd/txonly.c
> +++ b/app/test-pmd/txonly.c
> @@ -339,6 +339,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
> struct rte_ether_hdr eth_hdr;
> uint16_t nb_tx;
> uint16_t nb_pkt;
> +   uint16_t nb_prep;
> uint16_t vlan_tci, vlan_tci_outer;
> uint64_t ol_flags = 0;
> uint64_t tx_offloads;
> @@ -396,7 +397,17 @@ pkt_burst_transmit(struct fwd_stream *fs)
> if (nb_pkt == 0)
> return false;
>
> -   nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
> +   nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,

A more performant version would be to have two versions of
fwd_engine.packet_fwd based on the offload, and to set the correct
tx_only_engine.packet_fwd at runtime based on the offload selected.
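
A sketch of that idea (illustrative only; it assumes the code lives in
app/test-pmd/txonly.c next to the existing pkt_burst_transmit(), that
pkt_burst_transmit_prep() is a hypothetical variant calling
rte_eth_tx_prepare(), and multi-seg is used purely as an example trigger):

#include "testpmd.h"

/* Hypothetical variant of pkt_burst_transmit() that runs
 * rte_eth_tx_prepare() before transmitting. */
static bool pkt_burst_transmit_prep(struct fwd_stream *fs);

/* Pick the Tx-only packet_fwd callback once, when the forwarding
 * configuration is applied, instead of testing the offload per burst. */
static void
tx_only_update_packet_fwd(uint64_t tx_offloads)
{
        if (tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                tx_only_engine.packet_fwd = pkt_burst_transmit_prep;
        else
                tx_only_engine.packet_fwd = pkt_burst_transmit;
}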


RE: 22.11.4 patches review and test

2024-01-03 Thread Xueming(Steven) Li
Hi Ali,

Thanks for the testing and report!

Regards,
Xueming

> -Original Message-
> From: Ali Alnubani 
> Sent: 1/3/2024 22:41
> To: Xueming(Steven) Li ; sta...@dpdk.org
> Cc: dev@dpdk.org; Abhishek Marathe ;
> benjamin.wal...@intel.com; David Christensen ;
> Hemant Agrawal ; Ian Stokes
> ; Jerin Jacob ; John McNamara
> ; Ju-Hyoung Lee ; Kevin
> Traynor ; Luca Boccassi ; Pei Zhang
> ; qian.q...@intel.com; Raslan Darawsheh
> ; NBU-Contact-Thomas Monjalon (EXTERNAL)
> ; Yanghang Liu ;
> yuan.p...@intel.com; zhaoyan.c...@intel.com
> Subject: RE: 22.11.4 patches review and test
> 
> > -Original Message-
> > From: Xueming(Steven) Li 
> > Sent: Wednesday, December 20, 2023 9:19 AM
> > To: sta...@dpdk.org
> > Cc: Xueming(Steven) Li ; dev@dpdk.org; Abhishek
> > Marathe ; Ali Alnubani
> > ; benjamin.wal...@intel.com; David Christensen
> > ; Hemant Agrawal ; Ian
> > Stokes ; Jerin Jacob ; John
> > McNamara ; Ju-Hyoung Lee
> > ; Kevin Traynor ; Luca
> > Boccassi ; Pei Zhang ;
> > qian.q...@intel.com; Raslan Darawsheh ; NBU-
> > Contact-Thomas Monjalon (EXTERNAL) ; Yanghang Liu
> > ; yuan.p...@intel.com; zhaoyan.c...@intel.com
> > Subject: 22.11.4 patches review and test
> >
> > Hi all,
> >
> > Here is a list of patches targeted for stable release 22.11.4.
> >
> > The planned date for the final release is 5th January.
> >
> > Please help with testing and validation of your use cases and report
> > any issues/results with reply-all to this mail. For the final release
> > the fixes and reported validations will be added to the release notes.
> >
> > A release candidate tarball can be found at:
> >
> > https://dpdk.org/browse/dpdk-stable/tag/?id=v22.11.4-rc3
> >
> > These patches are located at branch 22.11 of dpdk-stable repo:
> > https://dpdk.org/browse/dpdk-stable/
> >
> > Thanks.
> 
> Hello,
> 
> We ran the following functional tests with Nvidia hardware on v22.11.4-rc3:
> - Basic functionality:
>   Send and receive multiple types of traffic.
> - testpmd xstats counter test.
> - testpmd timestamp test.
> - Changing/checking link status through testpmd.
> - rte_flow tests (https://doc.dpdk.org/guides/nics/mlx5.html#supported-
> hardware-offloads)
> - RSS tests.
> - VLAN filtering, stripping, and insertion tests.
> - Checksum and TSO tests.
> - ptype tests.
> - link_status_interrupt example application tests.
> - l3fwd-power example application tests.
> - Multi-process example applications tests.
> - Hardware LRO tests.
> - Regex application tests.
> - Buffer Split tests.
> - Tx scheduling tests.
> 
> Functional tests ran on:
> - NIC: ConnectX-6 Dx / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-
> 1.1.9.0 / Firmware: 22.39.2048
> - NIC: ConnectX-7 / OS: Ubuntu 20.04 / Driver: MLNX_OFED_LINUX-23.10-
> 1.1.9.0 / Firmware: 28.39.2048
> - DPU: BlueField-2 / DOCA SW version: 2.5.0 / Firmware: 24.39.2048
> 
> Additionally, we ran build tests with multiple configurations on the following
> OS/driver combinations (all passed):
> - Ubuntu 20.04.6 with MLNX_OFED_LINUX-23.10-1.1.9.0.
> - Ubuntu 20.04.6 with rdma-core master (9016f34).
> - Ubuntu 20.04.6 with rdma-core v28.0.
> - Fedora 38 with rdma-core v44.0.
> - Fedora 40 (Rawhide) with rdma-core v48.0.
> - OpenSUSE Leap 15.5 with rdma-core v42.0.
> - Windows Server 2019 with Clang 16.0.6.
> 
> We don't see new issues caused by the changes in this release.
> 
> Thanks,
> Ali