Re: [PATCH 1/4] [v1,1/4] zsda: Introduce zsda device drivers

2024-08-21 Thread li.hanxiao
>This table should be const, and is the same CRC32 as implemented already in
>lib/hash/rte_crc ?

Hi Stephen,

The table and the CRC32 implemented in lib/hash/rte_crc are different. The
table is a CRC-8 table, calculated by build_crc8_table in app/test/test_atomic.c.
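
For background, a table like this is typically precomputed from the CRC
polynomial and then applied byte-by-byte, exactly as zsda_crc8() in the patch
does. A minimal Python sketch of that scheme follows; the polynomial is a
placeholder for illustration, not necessarily the one build_crc8_table uses:

    POLY = 0x07  # placeholder polynomial, for illustration only

    def build_table(poly):
        """Precompute the 256-entry table for an MSB-first CRC-8."""
        table = []
        for byte in range(256):
            crc = byte
            for _ in range(8):
                if crc & 0x80:
                    crc = ((crc << 1) ^ poly) & 0xFF
                else:
                    crc = (crc << 1) & 0xFF
            table.append(crc)
        return table

    def crc8(table, message):
        """Same folding loop as zsda_crc8(): crc = table[crc ^ byte]."""
        crc = 0
        for b in message:
            crc = table[crc ^ b]
        return crc

    print(hex(crc8(build_table(POLY), b"\x01\x02\x03")))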

Thanks




Li Hanxiao
IT Development Engineer
RCH Dept. VIII/Wireless and Computing Product R&D Institute/Wireless and 
Computing Product Operation Division
 
ZTE Corporation
Building 8, ZTE Industrial Base, No. 2 Dongqi Road, Airport Economic Zone, Tianjin, 300300
T: +86 15531662716  
E: li.hanx...@zte.com.cn
www.zte.com.cn




Original


From: Stephen Hemminger
To: Li Hanxiao 10332965;
Cc: dev@dpdk.org ;
Date: 2024-08-08 23:22
Subject: Re: [PATCH 1/4] [v1,1/4] zsda: Introduce zsda device drivers

On Thu,  8 Aug 2024 16:50:11 +0800
Hanxiao Li  wrote:
 
> diff --git a/drivers/common/zsda/zsda_common.c b/drivers/common/zsda/zsda_common.c
> new file mode 100644
> index 00..8f0849c660
> --- /dev/null
> +++ b/drivers/common/zsda/zsda_common.c
> @@ -0,0 +1,166 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2024 ZTE Corporation
> + */
> +
> +#include "zsda_common.h" 
> +#include "zsda_logs.h" 
> +
> +#include "bus_pci_driver.h" 
> +
> +#define MAGIC_SEND 0xab
> +#define MAGIC_RECV 0xcd
> +#define ADMIN_VER 1
> +
> +static uint8_t crc8_table[256] = {
> +0x00, 0x41, 0x13, 0x52, 0x26, 0x67, 0x35, 0x74, 0x4c, 0x0d, 0x5f, 0x1e,
> +0x6a, 0x2b, 0x79, 0x38, 0x09, 0x48, 0x1a, 0x5b, 0x2f, 0x6e, 0x3c, 0x7d,
> +0x45, 0x04, 0x56, 0x17, 0x63, 0x22, 0x70, 0x31, 0x12, 0x53, 0x01, 0x40,
> +0x34, 0x75, 0x27, 0x66, 0x5e, 0x1f, 0x4d, 0x0c, 0x78, 0x39, 0x6b, 0x2a,
> +0x1b, 0x5a, 0x08, 0x49, 0x3d, 0x7c, 0x2e, 0x6f, 0x57, 0x16, 0x44, 0x05,
> +0x71, 0x30, 0x62, 0x23, 0x24, 0x65, 0x37, 0x76, 0x02, 0x43, 0x11, 0x50,
> +0x68, 0x29, 0x7b, 0x3a, 0x4e, 0x0f, 0x5d, 0x1c, 0x2d, 0x6c, 0x3e, 0x7f,
> +0x0b, 0x4a, 0x18, 0x59, 0x61, 0x20, 0x72, 0x33, 0x47, 0x06, 0x54, 0x15,
> +0x36, 0x77, 0x25, 0x64, 0x10, 0x51, 0x03, 0x42, 0x7a, 0x3b, 0x69, 0x28,
> +0x5c, 0x1d, 0x4f, 0x0e, 0x3f, 0x7e, 0x2c, 0x6d, 0x19, 0x58, 0x0a, 0x4b,
> +0x73, 0x32, 0x60, 0x21, 0x55, 0x14, 0x46, 0x07, 0x48, 0x09, 0x5b, 0x1a,
> +0x6e, 0x2f, 0x7d, 0x3c, 0x04, 0x45, 0x17, 0x56, 0x22, 0x63, 0x31, 0x70,
> +0x41, 0x00, 0x52, 0x13, 0x67, 0x26, 0x74, 0x35, 0x0d, 0x4c, 0x1e, 0x5f,
> +0x2b, 0x6a, 0x38, 0x79, 0x5a, 0x1b, 0x49, 0x08, 0x7c, 0x3d, 0x6f, 0x2e,
> +0x16, 0x57, 0x05, 0x44, 0x30, 0x71, 0x23, 0x62, 0x53, 0x12, 0x40, 0x01,
> +0x75, 0x34, 0x66, 0x27, 0x1f, 0x5e, 0x0c, 0x4d, 0x39, 0x78, 0x2a, 0x6b,
> +0x6c, 0x2d, 0x7f, 0x3e, 0x4a, 0x0b, 0x59, 0x18, 0x20, 0x61, 0x33, 0x72,
> +0x06, 0x47, 0x15, 0x54, 0x65, 0x24, 0x76, 0x37, 0x43, 0x02, 0x50, 0x11,
> +0x29, 0x68, 0x3a, 0x7b, 0x0f, 0x4e, 0x1c, 0x5d, 0x7e, 0x3f, 0x6d, 0x2c,
> +0x58, 0x19, 0x4b, 0x0a, 0x32, 0x73, 0x21, 0x60, 0x14, 0x55, 0x07, 0x46,
> +0x77, 0x36, 0x64, 0x25, 0x51, 0x10, 0x42, 0x03, 0x3b, 0x7a, 0x28, 0x69,
> +0x1d, 0x5c, 0x0e, 0x4f};
> +
 
This table should be const, and is the same CRC32 as implemented already
in lib/hash/rte_crc ?

[PATCH v2 1/4] zsda: Introduce zsda device drivers

2024-08-21 Thread Hanxiao Li
Introduce driver support for the ZTE Storage Data Accelerator (ZSDA), which
can help accelerate storage data processing.

Signed-off-by: Hanxiao Li 
---
 MAINTAINERS   |   4 +
 config/rte_config.h   |   4 +
 drivers/common/zsda/zsda_common.c | 168 +++
 drivers/common/zsda/zsda_common.h | 328 ++
 drivers/common/zsda/zsda_logs.c   |  21 ++
 drivers/common/zsda/zsda_logs.h   |  32 +++
 usertools/dpdk-devbind.py |   5 +-
 7 files changed, 561 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/zsda/zsda_common.c
 create mode 100644 drivers/common/zsda/zsda_common.h
 create mode 100644 drivers/common/zsda/zsda_logs.c
 create mode 100644 drivers/common/zsda/zsda_logs.h

diff --git a/MAINTAINERS b/MAINTAINERS
index c5a703b5c0..ea245fc61b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1268,6 +1268,10 @@ F: drivers/compress/zlib/
 F: doc/guides/compressdevs/zlib.rst
 F: doc/guides/compressdevs/features/zlib.ini
 
+ZTE Storage Data Accelerator
+M: Hanxiao Li 
+F: drivers/compress/zsda/
+F: drivers/common/zsda/
 
 DMAdev Drivers
 --
diff --git a/config/rte_config.h b/config/rte_config.h
index dd7bb0d35b..acfbe5b0f7 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -108,6 +108,10 @@
 
 /****** driver defines ********/
 
+/* ZSDA device */
+/* Max. number of ZSDA devices which can be attached */
+#define RTE_PMD_ZSDA_MAX_PCI_DEVICES 256
+
 /* Packet prefetching in PMDs */
 #define RTE_PMD_PACKET_PREFETCH 1
 
diff --git a/drivers/common/zsda/zsda_common.c b/drivers/common/zsda/zsda_common.c
new file mode 100644
index 00..fa6b4411a5
--- /dev/null
+++ b/drivers/common/zsda/zsda_common.c
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "zsda_common.h"
+
+#include "bus_pci_driver.h"
+
+#define MAGIC_SEND 0xab
+#define MAGIC_RECV 0xcd
+#define ADMIN_VER 1
+
+static const uint8_t crc8_table[256] = {
+   0x00, 0x41, 0x13, 0x52, 0x26, 0x67, 0x35, 0x74, 0x4c, 0x0d, 0x5f, 0x1e,
+   0x6a, 0x2b, 0x79, 0x38, 0x09, 0x48, 0x1a, 0x5b, 0x2f, 0x6e, 0x3c, 0x7d,
+   0x45, 0x04, 0x56, 0x17, 0x63, 0x22, 0x70, 0x31, 0x12, 0x53, 0x01, 0x40,
+   0x34, 0x75, 0x27, 0x66, 0x5e, 0x1f, 0x4d, 0x0c, 0x78, 0x39, 0x6b, 0x2a,
+   0x1b, 0x5a, 0x08, 0x49, 0x3d, 0x7c, 0x2e, 0x6f, 0x57, 0x16, 0x44, 0x05,
+   0x71, 0x30, 0x62, 0x23, 0x24, 0x65, 0x37, 0x76, 0x02, 0x43, 0x11, 0x50,
+   0x68, 0x29, 0x7b, 0x3a, 0x4e, 0x0f, 0x5d, 0x1c, 0x2d, 0x6c, 0x3e, 0x7f,
+   0x0b, 0x4a, 0x18, 0x59, 0x61, 0x20, 0x72, 0x33, 0x47, 0x06, 0x54, 0x15,
+   0x36, 0x77, 0x25, 0x64, 0x10, 0x51, 0x03, 0x42, 0x7a, 0x3b, 0x69, 0x28,
+   0x5c, 0x1d, 0x4f, 0x0e, 0x3f, 0x7e, 0x2c, 0x6d, 0x19, 0x58, 0x0a, 0x4b,
+   0x73, 0x32, 0x60, 0x21, 0x55, 0x14, 0x46, 0x07, 0x48, 0x09, 0x5b, 0x1a,
+   0x6e, 0x2f, 0x7d, 0x3c, 0x04, 0x45, 0x17, 0x56, 0x22, 0x63, 0x31, 0x70,
+   0x41, 0x00, 0x52, 0x13, 0x67, 0x26, 0x74, 0x35, 0x0d, 0x4c, 0x1e, 0x5f,
+   0x2b, 0x6a, 0x38, 0x79, 0x5a, 0x1b, 0x49, 0x08, 0x7c, 0x3d, 0x6f, 0x2e,
+   0x16, 0x57, 0x05, 0x44, 0x30, 0x71, 0x23, 0x62, 0x53, 0x12, 0x40, 0x01,
+   0x75, 0x34, 0x66, 0x27, 0x1f, 0x5e, 0x0c, 0x4d, 0x39, 0x78, 0x2a, 0x6b,
+   0x6c, 0x2d, 0x7f, 0x3e, 0x4a, 0x0b, 0x59, 0x18, 0x20, 0x61, 0x33, 0x72,
+   0x06, 0x47, 0x15, 0x54, 0x65, 0x24, 0x76, 0x37, 0x43, 0x02, 0x50, 0x11,
+   0x29, 0x68, 0x3a, 0x7b, 0x0f, 0x4e, 0x1c, 0x5d, 0x7e, 0x3f, 0x6d, 0x2c,
+   0x58, 0x19, 0x4b, 0x0a, 0x32, 0x73, 0x21, 0x60, 0x14, 0x55, 0x07, 0x46,
+   0x77, 0x36, 0x64, 0x25, 0x51, 0x10, 0x42, 0x03, 0x3b, 0x7a, 0x28, 0x69,
+   0x1d, 0x5c, 0x0e, 0x4f};
+
+static uint8_t
+zsda_crc8(const uint8_t *message, const int length)
+{
+   uint8_t crc = 0;
+   int i;
+
+   for (i = 0; i < length; i++)
+   crc = crc8_table[crc ^ message[i]];
+   return crc;
+}
+
+uint32_t
+set_reg_8(void *addr, const uint8_t val0, const uint8_t val1,
+ const uint8_t val2, const uint8_t val3)
+{
+   uint8_t val[4];
+   val[0] = val0;
+   val[1] = val1;
+   val[2] = val2;
+   val[3] = val3;
+   ZSDA_CSR_WRITE32(addr, *(uint32_t *)val);
+   return *(uint32_t *)val;
+}
+
+uint8_t
+get_reg_8(void *addr, const int offset)
+{
+   uint32_t val = ZSDA_CSR_READ32(addr);
+
+   return *(((uint8_t *)&val) + offset);
+}
+
+int
+zsda_admin_msg_init(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+   set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 0, 0, MAGIC_RECV, 0);
+   set_reg_8(mmio_base + ZSDA_ADMIN_CQ_BASE7, 0, 0, MAGIC_RECV, 0);
+   return 0;
+}
+
+int
+zsda_send_admin_msg(const struct rte_pci_device *pci_dev, void *req,
+   const uint32_t len)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   uint8_t wq_flag;
+   uint8_t crc;
+   uint16_t admin_db;
+   uint32_t retry = ZSDA_TIME_NUM;
+   int i;
+   
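
(The excerpt is truncated here. One note on set_reg_8()/get_reg_8() above:
they type-pun four bytes into a single 32-bit CSR access, so the byte-to-lane
mapping follows host endianness. A Python model of that packing, illustrative
only and assuming a little-endian host:)

    import struct

    def set_reg_8(val0, val1, val2, val3):
        # four bytes laid out in memory order, read back as one 32-bit
        # word; "<I" hard-codes the little-endian assumption
        return struct.unpack("<I", bytes([val0, val1, val2, val3]))[0]

    def get_reg_8(reg32, offset):
        # pick the byte at 'offset' out of a 32-bit register value
        return struct.pack("<I", reg32)[offset]

    # MAGIC_RECV (0xcd) lands in byte lane 2, as in zsda_admin_msg_init()
    assert set_reg_8(0, 0, 0xCD, 0) == 0x00CD0000
    assert get_reg_8(0x00CD0000, 2) == 0xCD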

[PATCH v2 3/4] zsda: add support for queue operation

2024-08-21 Thread Hanxiao Li
Add queue initialization, release, enqueue, dequeue and other interfaces.

Signed-off-by: Hanxiao Li 
---
 drivers/common/zsda/zsda_qp.c | 720 ++
 drivers/common/zsda/zsda_qp.h | 163 
 2 files changed, 883 insertions(+)
 create mode 100644 drivers/common/zsda/zsda_qp.c
 create mode 100644 drivers/common/zsda/zsda_qp.h

diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
new file mode 100644
index 00..0f658a73f4
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.c
@@ -0,0 +1,720 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include 
+
+#include 
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+#define RING_DIR_TX 0
+#define RING_DIR_RX 1
+
+struct ring_size {
+   uint16_t tx_msg_size;
+   uint16_t rx_msg_size;
+};
+
+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+   [ZSDA_SERVICE_COMPRESSION] = {32, 16},
+   [ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+};
+
+static void
+zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
+const uint8_t qid)
+{
+   struct rte_pci_device *pci_dev =
+   zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+   ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),
+SET_HEAD_INTI);
+}
+
+int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
+{
+   uint8_t i;
+   uint32_t index;
+   enum zsda_service_type type;
+   struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
+   struct qinfo qcfg;
+   int ret = 0;
+
+   for (i = 0; i < zsda_num_used_qps; i++) {
+   zsda_set_queue_head_tail(zsda_pci_dev, i);
+   ret = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);
+   type = qcfg.q_type;
+   if (ret) {
+   ZSDA_LOG(ERR, "get queue cfg!");
+   return ret;
+   }
+   if (type >= ZSDA_SERVICE_INVALID)
+   continue;
+
+   index = zsda_pci_dev->zsda_qp_hw_num[type];
+   zsda_hw_qps[type].data[index].used = true;
+   zsda_hw_qps[type].data[index].tx_ring_num = i;
+   zsda_hw_qps[type].data[index].rx_ring_num = i;
+   zsda_hw_qps[type].data[index].tx_msg_size =
+   zsda_qp_hw_ring_size[type].tx_msg_size;
+   zsda_hw_qps[type].data[index].rx_msg_size =
+   zsda_qp_hw_ring_size[type].rx_msg_size;
+
+   zsda_pci_dev->zsda_qp_hw_num[type]++;
+   }
+
+   return ret;
+}
+
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+   const enum zsda_service_type service)
+{
+   struct zsda_qp_hw *qp_hw = NULL;
+
+   if (service < ZSDA_SERVICE_INVALID)
+   qp_hw = &(zsda_pci_dev->zsda_hw_qps[service]);
+
+   return qp_hw;
+}
+
+uint16_t
+zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,
+const enum zsda_service_type service)
+{
+   uint16_t qp_hw_num = 0;
+
+   if (service < ZSDA_SERVICE_INVALID)
+   qp_hw_num = zsda_pci_dev->zsda_qp_hw_num[service];
+
+   return qp_hw_num;
+}
+
+uint16_t
+zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
+{
+   uint16_t comp =
+   zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+   uint16_t decomp =
+   zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+   uint16_t min = 0;
+
+   if ((comp == MAX_QPS_ON_FUNCTION) ||
+   (decomp == MAX_QPS_ON_FUNCTION))
+   min = MAX_QPS_ON_FUNCTION;
+   else
+   min = (comp < decomp) ? comp : decomp;
+   if (min == 0)
+   return MAX_QPS_ON_FUNCTION;
+   return min;
+}
+
+
+void
+zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+ struct zsda_common_stat *stats)
+{
+   enum zsda_service_type type;
+   uint32_t i;
+   struct zsda_qp *qp;
+
+   if ((stats == NULL) || (queue_pairs == NULL)) {
+   ZSDA_LOG(ERR, E_NULL);
+   return;
+   }
+
+   for (i = 0; i < nb_queue_pairs; i++) {
+   qp = (struct zsda_qp *)queue_pairs[i];
+
+   if (qp == NULL) {
+   ZSDA_LOG(ERR, E_NULL);
+   break;
+   }
+
+   for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+   if (qp->srv[type].used) {
+   stats->enqueued_count +=
+   qp->srv[type].stats.enqueued_count;
+   stats->dequeued_count +=
+   qp->srv[type].stats.dequeued_count;
+

[PATCH v2 2/4] zsda: add support for zsdadev operations

2024-08-21 Thread Hanxiao Li
Add support for zsdadev operations such as dev_start and dev_stop.

Signed-off-by: Hanxiao Li 
---
 drivers/common/zsda/zsda_device.c | 476 ++
 drivers/common/zsda/zsda_device.h | 103 +++
 2 files changed, 579 insertions(+)
 create mode 100644 drivers/common/zsda/zsda_device.c
 create mode 100644 drivers/common/zsda/zsda_device.h

diff --git a/drivers/common/zsda/zsda_device.c b/drivers/common/zsda/zsda_device.c
new file mode 100644
index 00..74e6d30624
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.c
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+
+#include 
+#include 
+
+#include "zsda_device.h"
+
+/* per-process array of device data */
+struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
+static int zsda_nb_pci_devices;
+uint8_t zsda_num_used_qps;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_zsda_map[] = {
+   {
+   RTE_PCI_DEVICE(0x1cf2, 0x8050),
+   },
+   {
+   RTE_PCI_DEVICE(0x1cf2, 0x8051),
+   },
+   {.device_id = 0},
+};
+
+static int
+zsda_check_write(uint8_t *addr, const uint32_t dst_value)
+{
+   int times = ZSDA_TIME_NUM;
+   uint32_t val;
+
+   val = ZSDA_CSR_READ32(addr);
+
+   while ((val != dst_value) && times--) {
+   val = ZSDA_CSR_READ32(addr);
+   rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+   }
+   if (val == dst_value)
+   return ZSDA_SUCCESS;
+   else
+   return ZSDA_FAILED;
+}
+
+static uint8_t
+zsda_get_num_used_qps(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   uint8_t num_used_qps;
+
+   num_used_qps = ZSDA_CSR_READ8(mmio_base + 0);
+
+   return num_used_qps;
+}
+
+int
+zsda_admin_q_start(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   int ret;
+
+   ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, 0);
+
+   ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+   ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+
+   return ret;
+}
+
+int
+zsda_admin_q_stop(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   int ret;
+
+   ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP_RESP, ZSDA_RESP_INVALID);
+   ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP, ZSDA_Q_STOP);
+
+   ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_STOP_RESP,
+  ZSDA_RESP_VALID);
+
+   if (ret)
+   ZSDA_LOG(INFO, "Failed! zsda_admin q stop");
+
+   return ret;
+}
+
+int
+zsda_admin_q_clear(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   int ret;
+
+   ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR_RESP, ZSDA_RESP_INVALID);
+   ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR, ZSDA_RESP_VALID);
+
+   ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_CLR_RESP,
+  ZSDA_RESP_VALID);
+
+   if (ret)
+   ZSDA_LOG(INFO, "Failed! zsda_admin q clear");
+
+   return ret;
+}
+
+static int
+zsda_queue_stop_single(uint8_t *mmio_base, const uint8_t id)
+{
+   int ret;
+   uint8_t *addr_stop = mmio_base + ZSDA_IO_Q_STOP + (4 * id);
+   uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_STOP_RESP + (4 * id);
+
+   ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+   ZSDA_CSR_WRITE32(addr_stop, ZSDA_Q_STOP);
+
+   ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+   ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+
+   return ret;
+}
+
+int
+zsda_queue_stop(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   uint8_t id;
+   int ret = 0;
+
+   for (id = 0; id < zsda_num_used_qps; id++)
+   ret |= zsda_queue_stop_single(mmio_base, id);
+
+   return ret;
+}
+
+static int
+zsda_queue_start_single(uint8_t *mmio_base, const uint8_t id)
+{
+   uint8_t *addr_start = mmio_base + ZSDA_IO_Q_START + (4 * id);
+
+   ZSDA_CSR_WRITE32(addr_start, ZSDA_Q_START);
+   return zsda_check_write(addr_start, ZSDA_Q_START);
+}
+
+int
+zsda_queue_start(const struct rte_pci_device *pci_dev)
+{
+   uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+   uint8_t id;
+   int ret = 0;
+
+   for (id = 0; id < zsda_num_used_qps; id++)
+   ret |= zsda_queue_start_single(mmio_base, id);
+
+   return ret;
+}
+
+static int
+zsda_queue_clear_single(uint8_t *mmio_base, const uint8_t id)
+{
+   int ret;
+   uint8_t *addr_clear = mmio_base + ZSDA_IO_Q_CLR + (4 * id);
+   uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_CLR_RESP + (4 * id);
+
+   ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+   ZSDA_CSR_WRITE32(addr_clea

[PATCH v2 4/4] zsda: add zsda compressdev driver and interface

2024-08-21 Thread Hanxiao Li
Add the zsda compressdev driver and its enqueue/dequeue interface.

Signed-off-by: Hanxiao Li 
---
 drivers/common/zsda/meson.build   |  25 ++
 drivers/compress/zsda/zsda_comp.c | 320 ++
 drivers/compress/zsda/zsda_comp.h |  27 ++
 drivers/compress/zsda/zsda_comp_pmd.c | 453 ++
 drivers/compress/zsda/zsda_comp_pmd.h |  39 +++
 drivers/meson.build   |   1 +
 6 files changed, 865 insertions(+)
 create mode 100644 drivers/common/zsda/meson.build
 create mode 100644 drivers/compress/zsda/zsda_comp.c
 create mode 100644 drivers/compress/zsda/zsda_comp.h
 create mode 100644 drivers/compress/zsda/zsda_comp_pmd.c
 create mode 100644 drivers/compress/zsda/zsda_comp_pmd.h

diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
new file mode 100644
index 00..b12ef17476
--- /dev/null
+++ b/drivers/common/zsda/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+
+deps += ['bus_pci', 'compressdev']
+sources += files(
+   'zsda_common.c',
+   'zsda_logs.c',
+   'zsda_device.c',
+   'zsda_qp.c',
+   )
+
+zsda_compress = true
+zsda_compress_path = 'compress/zsda'
+zsda_compress_relpath = '../../' + zsda_compress_path
+includes += include_directories(zsda_compress_relpath)
+
+if zsda_compress
+zlib = dependency('zlib', required: false, method: 'pkg-config')
+   foreach f: ['zsda_comp_pmd.c', 'zsda_comp.c']
+   sources += files(join_paths(zsda_compress_relpath, f))
+   endforeach
+   ext_deps += zlib
+endif
diff --git a/drivers/compress/zsda/zsda_comp.c b/drivers/compress/zsda/zsda_comp.c
new file mode 100644
index 00..87ddd0e699
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.c
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "zsda_comp.h"
+
+#include 
+
+#define ZLIB_HEADER_SIZE 2
+#define ZLIB_TRAILER_SIZE 4
+#define GZIP_HEADER_SIZE 10
+#define GZIP_TRAILER_SIZE 8
+#define CHECKSUM_SIZE 4
+
+static uint32_t zsda_read_chksum(uint8_t *data_addr, uint8_t op_code,
+uint32_t produced);
+
+int
+comp_match(const void *op_in)
+{
+   const struct rte_comp_op *op = (const struct rte_comp_op *)op_in;
+   const struct zsda_comp_xform *xform =
+   (struct zsda_comp_xform *)op->private_xform;
+
+   if (op->op_type != RTE_COMP_OP_STATELESS)
+   return 0;
+
+   if (xform->type != RTE_COMP_COMPRESS)
+   return 0;
+
+   return 1;
+}
+
+static uint8_t
+get_opcode(const struct zsda_comp_xform *xform)
+{
+   if (xform->type == RTE_COMP_COMPRESS) {
+   if (xform->checksum_type == RTE_COMP_CHECKSUM_NONE ||
+   xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+   return ZSDA_OPC_COMP_GZIP;
+   else if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+   return ZSDA_OPC_COMP_ZLIB;
+   }
+   if (xform->type == RTE_COMP_DECOMPRESS) {
+   if (xform->checksum_type == RTE_COMP_CHECKSUM_CRC32 ||
+   xform->checksum_type == RTE_COMP_CHECKSUM_NONE)
+   return ZSDA_OPC_DECOMP_GZIP;
+   else if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+   return ZSDA_OPC_DECOMP_ZLIB;
+   }
+
+   return ZSDA_OPC_INVALID;
+}
+
+int
+build_comp_request(void *op_in, const struct zsda_queue *queue,
+  void **op_cookies, const uint16_t new_tail)
+{
+   struct rte_comp_op *op = op_in;
+   struct zsda_comp_xform *xform =
+   (struct zsda_comp_xform *)op->private_xform;
+   struct zsda_wqe_comp *wqe =
+   (struct zsda_wqe_comp *)(queue->base_addr +
+(new_tail * queue->msg_size));
+
+   struct zsda_op_cookie *cookie =
+   (struct zsda_op_cookie *)op_cookies[new_tail];
+   struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+   struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+   struct comp_head_info comp_head_info;
+
+   uint8_t opcode;
+   int ret;
+   uint32_t op_offset;
+   uint32_t op_src_len;
+   uint32_t op_dst_len;
+   uint32_t head_len;
+
+   if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+   ZSDA_LOG(ERR, "Failed! m_dst");
+   return -EINVAL;
+   }
+
+   opcode = get_opcode(xform);
+   if (opcode == ZSDA_OPC_INVALID) {
+   ZSDA_LOG(ERR, E_CONFIG);
+   return -EINVAL;
+   }
+
+   cookie->used = true;
+   cookie->sid = new_tail;
+   cookie->op = op;
+
+   if (opcode == ZSDA_OPC_COMP_GZIP)
+   head_len = GZIP_HEADER_SIZE;
+   else if (opcode == ZSDA_OPC_COMP_ZLIB)
+ 

Re: [PATCH v3 1/4] usertools/cpu_layout: update coding style

2024-08-21 Thread Burakov, Anatoly

On 8/20/2024 5:59 PM, Robin Jarry wrote:

Anatoly Burakov, Aug 20, 2024 at 17:35:

Update coding style:

- make it PEP-484 compliant
- address all flake8, mypy etc. warnings
- use f-strings in place of old-style string interpolation
- refactor printing to make the code more readable
- read valid CPU ID's from "online" sysfs node

Signed-off-by: Anatoly Burakov 
---

Notes:
    v1,v2 -> v3:
    - Import typing as T instead of individual types


Looks good to me. Same remark as for dpdk-hugepages.py: could you format
it using black or ruff?


Thanks!



Hi,

My IDE is already set up to auto-format with Ruff since our last 
conversation, so this is already formatted. I ran ruff format command 
just in case but it produced no changes.


--
Thanks,
Anatoly



Re: [PATCH v3 2/4] usertools/cpu_layout: print out NUMA nodes

2024-08-21 Thread Burakov, Anatoly

On 8/20/2024 9:22 PM, Robin Jarry wrote:

Anatoly Burakov, Aug 20, 2024 at 17:35:

In the traditional NUMA case, NUMA nodes and physical sockets were used
interchangeably, but there are cases where there can be multiple NUMA
nodes per socket, as well as all CPUs being assigned NUMA node 0 even in
cases of multiple sockets. Use sysfs to print out NUMA information.

Signed-off-by: Anatoly Burakov 
---

Notes:
    v2 -> v3:
    - Sort imports alphabetically


Looks good to me, can you format the code for that commit as well?




Hi,

(duplicating here for posterity)

My IDE is already set up to auto-format with Ruff since our last 
conversation, so this is already formatted. I ran ruff format command 
just in case but it produced no changes.


--
Thanks,
Anatoly



Re: [PATCH v3 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Burakov, Anatoly

On 8/20/2024 5:57 PM, Robin Jarry wrote:

Anatoly Burakov, Aug 20, 2024 at 17:35:

Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Use quotes consistently
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 
---

Notes:
    v1 -> v2:
  - Added commit that sorted output by NUMA node
    v2 -> v3:
  - Rewrite of the script as suggested by reviewers


Instead of debating about coding style, I'd like to enforce black/ruff
for new scripts and/or rewrites.


The code looks good to me, but could you pass through one of these tools 
and send a v4?


    black usertools/dpdk-hugepages.py

or

    ruff format usertools/dpdk-hugepages.py

I think they output the exact same code formatting but I could be wrong.



Hi,

My IDE is already set up to auto-format with Ruff since our last 
conversation, so this is already formatted. I ran ruff format command 
just in case but it produced no changes.


So, no v4 necessary unless you think there are any changes to be made 
about the code :)


--
Thanks,
Anatoly



Re: [PATCH v3 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Burakov, Anatoly

On 8/20/2024 5:52 PM, Stephen Hemminger wrote:

On Tue, 20 Aug 2024 16:35:16 +0100
Anatoly Burakov  wrote:


Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Use quotes consistently
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 


Looks good, but not sure if always using single quotes is really necessary.
Many python programs seem to use either one indiscriminately.


Well, that doesn't mean it's a good example to follow :) At least it 
annoys me visually so I went ahead and fixed it since I was doing a 
rewrite anyway.




Acked-by: Stephen Hemminger 


--
Thanks,
Anatoly



Re: [PATCH v3 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Burakov, Anatoly

On 8/21/2024 10:52 AM, Burakov, Anatoly wrote:

On 8/20/2024 5:57 PM, Robin Jarry wrote:

Anatoly Burakov, Aug 20, 2024 at 17:35:

Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Use quotes consistently
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 
---

Notes:
    v1 -> v2:
  - Added commit that sorted output by NUMA node
    v2 -> v3:
  - Rewrite of the script as suggested by reviewers


Instead of debating about coding style, I'd like to enforce black/ruff
for new scripts and/or rewrites.


The code looks good to me, but could you pass through one of these 
tools and send a v4?


    black usertools/dpdk-hugepages.py

or

    ruff format usertools/dpdk-hugepages.py

I think they output the exact same code formatting but I could be wrong.



Hi,

My IDE is already set up to auto-format with Ruff since our last 
conversation, so this is already formatted. I ran ruff format command 
just in case but it produced no changes.


So, no v4 necessary unless you think there are any changes to be made 
about the code :)




Actually, I take that back - I had a configuration mishap and didn't
notice that I wasn't using Ruff for formatting on the machine where I was
creating the commits.

Still, cpu_layout's formatting is not affected, but the hugepage script is.

However, after formatting with ruff, I can see that 1) most single
quotes became double quotes, 2) some lines I broke up for readability
are no longer broken up, and 3) some lines I broke up to avoid exceeding
the 80-character limit are no longer broken up.


I'll see if using Black yields different results.
--
Thanks,
Anatoly



Re: [PATCH v3 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Burakov, Anatoly

On 8/21/2024 11:06 AM, Burakov, Anatoly wrote:

On 8/21/2024 10:52 AM, Burakov, Anatoly wrote:

On 8/20/2024 5:57 PM, Robin Jarry wrote:

Anatoly Burakov, Aug 20, 2024 at 17:35:

Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Use quotes consistently
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 
---

Notes:
    v1 -> v2:
  - Added commit that sorted output by NUMA node
    v2 -> v3:
  - Rewrite of the script as suggested by reviewers


Instead of debating about coding style, I'd like to enforce
black/ruff for new scripts and/or rewrites.


The code looks good to me, but could you pass through one of these 
tools and send a v4?


    black usertools/dpdk-hugepages.py

or

    ruff format usertools/dpdk-hugepages.py

I think they output the exact same code formatting but I could be wrong.



Hi,

My IDE is already set up to auto-format with Ruff since our last 
conversation, so this is already formatted. I ran ruff format command 
just in case but it produced no changes.


So, no v4 necessary unless you think there are any changes to be made 
about the code :)




Actually, I take that back - I had a configuration mishap and didn't
notice that I wasn't using Ruff for formatting on the machine where I was
creating the commits.

Still, cpu_layout's formatting is not affected, but the hugepage script is.

However, after formatting with ruff, I can see that 1) most single
quotes became double quotes, 2) some lines I broke up for readability
are no longer broken up, and 3) some lines I broke up to avoid exceeding
the 80-character limit are no longer broken up.


I'll see if using Black yields different results.


Regarding line length, it seems that it's configurable. Perhaps we could
include a Ruff/Black configuration file with DPDK to solve this problem
once and for all? Adding --line-length=79 to the ruff config addresses the
last issue, but it wouldn't be necessary if there were a Ruff configuration
file in the repo. I can live with the first two things that I highlighted.
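
For reference, such a repository-wide setting could be a two-line
pyproject.toml fragment (hypothetical - DPDK does not ship one today):

    [tool.ruff]
    line-length = 79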

--
Thanks,
Anatoly



[PATCH v4 2/4] usertools/cpu_layout: print out NUMA nodes

2024-08-21 Thread Anatoly Burakov
In the traditional NUMA case, NUMA nodes and physical sockets were used
interchangeably, but there are cases where there can be multiple NUMA
nodes per socket, as well as all CPUs being assigned NUMA node 0 even in
cases of multiple sockets. Use sysfs to print out NUMA information.

Signed-off-by: Anatoly Burakov 
---

Notes:
v2 -> v3:
- Sort imports alphabetically

 usertools/cpu_layout.py | 36 +++-
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/usertools/cpu_layout.py b/usertools/cpu_layout.py
index 8812ea286b..e4720e27db 100755
--- a/usertools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -5,6 +5,7 @@
 
 """Display CPU topology information."""
 
+import glob
 import typing as T
 
 
@@ -29,12 +30,21 @@ def read_sysfs(path: str) -> str:
 return fd.read().strip()
 
 
+def read_numa_node(base: str) -> int:
+"""Read the NUMA node of a CPU."""
+node_glob = f"{base}/node*"
+node_dirs = glob.glob(node_glob)
+if not node_dirs:
+return 0  # default to node 0
+return int(node_dirs[0].split("node")[1])
+
+
 def print_row(row: T.Tuple[str, ...], col_widths: T.List[int]) -> None:
 """Print a row of a table with the given column widths."""
 first, *rest = row
 w_first, *w_rest = col_widths
 first_end = " " * 4
-rest_end = " " * 10
+rest_end = " " * 4
 
 print(first.ljust(w_first), end=first_end)
 for cell, width in zip(rest, w_rest):
@@ -56,6 +66,7 @@ def main() -> None:
 sockets_s: T.Set[int] = set()
 cores_s: T.Set[int] = set()
 core_map: T.Dict[T.Tuple[int, int], T.List[int]] = {}
+numa_map: T.Dict[int, int] = {}
 base_path = "/sys/devices/system/cpu"
 
 cpus = range_expand(read_sysfs(f"{base_path}/online"))
@@ -64,12 +75,14 @@ def main() -> None:
 lcore_base = f"{base_path}/cpu{cpu}"
 core = int(read_sysfs(f"{lcore_base}/topology/core_id"))
 socket = int(read_sysfs(f"{lcore_base}/topology/physical_package_id"))
+node = read_numa_node(lcore_base)
 
 cores_s.add(core)
 sockets_s.add(socket)
 key = (socket, core)
 core_map.setdefault(key, [])
 core_map[key].append(cpu)
+numa_map[cpu] = node
 
 cores = sorted(cores_s)
 sockets = sorted(sockets_s)
@@ -80,24 +93,37 @@ def main() -> None:
 
 print("cores = ", cores)
 print("sockets = ", sockets)
+print("numa = ", sorted(set(numa_map.values(
 print()
 
-# Core, [Socket, Socket, ...]
-heading_strs = "", *[f"Socket {s}" for s in sockets]
+# Core, [NUMA, Socket, NUMA, Socket, ...]
+heading_strs = "", *[v for s in sockets for v in ("", f"Socket {s}")]
 sep_strs = tuple("-" * len(hstr) for hstr in heading_strs)
 rows: T.List[T.Tuple[str, ...]] = []
 
+prev_numa = None
 for c in cores:
 # Core,
 row: T.Tuple[str, ...] = (f"Core {c}",)
 
-# [lcores, lcores, ...]
+# assume NUMA changes symmetrically
+first_lcore = core_map[(0, c)][0]
+cur_numa = numa_map[first_lcore]
+numa_changed = prev_numa != cur_numa
+prev_numa = cur_numa
+
+# [NUMA, lcores, NUMA, lcores, ...]
 for s in sockets:
 try:
 lcores = core_map[(s, c)]
+numa = numa_map[lcores[0]]
+if numa_changed:
+row += (f"NUMA {numa}",)
+else:
+row += ("",)
 row += (str(lcores),)
 except KeyError:
-row += ("",)
+row += ("", "")
 rows += [row]
 
 # find max widths for each column, including header and rows
-- 
2.43.5
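
(For orientation, the change above inserts a NUMA column before each socket's
lcore list, printed only on the core where the NUMA node changes. A
hypothetical dual-socket, dual-node machine might render like the sketch
below - all values invented:)

    cores =  [0, 1]
    sockets =  [0, 1]
    numa =  [0, 1]

                      Socket 0            Socket 1
                      --------            --------
    Core 0    NUMA 0    [0, 4]    NUMA 1    [2, 6]
    Core 1              [1, 5]              [3, 7]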



[PATCH v4 4/4] usertools/dpdk-devbind: print NUMA node

2024-08-21 Thread Anatoly Burakov
Currently, devbind does not print out any NUMA information, which makes
figuring out which NUMA node a device belongs to non-trivial. Add printouts
for NUMA information if NUMA support is enabled on the system.

Signed-off-by: Anatoly Burakov 
Acked-by: Robin Jarry 
---

Notes:
v1 -> v2:
- Added commit to print out NUMA information in devbind

 usertools/dpdk-devbind.py | 29 +
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index b276e8efc8..078e8c387b 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -110,6 +110,11 @@
 args = []
 
 
+# check if this system has NUMA support
+def is_numa():
+return os.path.exists('/sys/devices/system/node')
+
+
 # check if a specific kernel module is loaded
 def module_is_loaded(module):
 global loaded_modules
@@ -577,20 +582,28 @@ def show_device_status(devices_type, device_name, if_field=False):
 print("".join('=' * len(msg)))
 return
 
+print_numa = is_numa()
+
 # print each category separately, so we can clearly see what's used by DPDK
 if dpdk_drv:
+extra_param = "drv=%(Driver_str)s unused=%(Module_str)s"
+if print_numa:
+extra_param = "numa_node=%(NUMANode)s " + extra_param
 display_devices("%s devices using DPDK-compatible driver" % 
device_name,
-dpdk_drv, "drv=%(Driver_str)s unused=%(Module_str)s")
+dpdk_drv, extra_param)
 if kernel_drv:
-if_text = ""
+extra_param = "drv=%(Driver_str)s unused=%(Module_str)s"
 if if_field:
-if_text = "if=%(Interface)s "
-display_devices("%s devices using kernel driver" % device_name, 
kernel_drv,
-if_text + "drv=%(Driver_str)s "
-"unused=%(Module_str)s %(Active)s")
+extra_param = "if=%(Interface)s " + extra_param
+if print_numa:
+extra_param = "numa_node=%(NUMANode)s " + extra_param
+display_devices("%s devices using kernel driver" % device_name,
+kernel_drv, extra_param)
 if no_drv:
-display_devices("Other %s devices" % device_name, no_drv,
-"unused=%(Module_str)s")
+extra_param = "unused=%(Module_str)s"
+if print_numa:
+extra_param = "numa_node=%(NUMANode)s " + extra_param
+display_devices("Other %s devices" % device_name, no_drv, extra_param)
 
 
 def show_status():
-- 
2.43.5
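
(For illustration, with NUMA support detected each device line gains a
numa_node field; hypothetical output with invented devices:)

    Network devices using DPDK-compatible driver
    ============================================
    0000:01:00.0 'Hypothetical NIC' numa_node=0 drv=vfio-pci unused=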



[PATCH v4 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Anatoly Burakov
Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Format code with Ruff
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 
Acked-by: Stephen Hemminger 
---

Notes:
v3 -> v4:
  - Format code with Ruff, line width 79 to avoid flake8 warnings
(Flake8 is by default configured with line width 79 on my system)
v2 -> v3:
  - Rewrite of the script as suggested by reviewers
v1 -> v2:
  - Added commit that sorted output by NUMA node

 usertools/dpdk-hugepages.py | 524 ++--
 1 file changed, 315 insertions(+), 209 deletions(-)

diff --git a/usertools/dpdk-hugepages.py b/usertools/dpdk-hugepages.py
index bf2575ba36..4c99682848 100755
--- a/usertools/dpdk-hugepages.py
+++ b/usertools/dpdk-hugepages.py
@@ -1,13 +1,15 @@
 #! /usr/bin/env python3
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (c) 2020 Microsoft Corporation
+
 """Script to query and setup huge pages for DPDK applications."""
 
 import argparse
-import glob
 import os
 import re
+import subprocess
 import sys
+import typing as T
 from math import log2
 
 # Standard binary prefix
@@ -15,194 +17,268 @@
 
 # systemd mount point for huge pages
 HUGE_MOUNT = "/dev/hugepages"
+# default directory for non-NUMA huge pages
+NO_NUMA_HUGE_DIR = "/sys/kernel/mm/hugepages"
+# default base directory for NUMA nodes
+NUMA_NODE_BASE_DIR = "/sys/devices/system/node"
+# procfs paths
+MEMINFO_PATH = "/proc/meminfo"
+MOUNTS_PATH = "/proc/mounts"
 
 
-def fmt_memsize(kb):
-'''Format memory size in kB into conventional format'''
+class HugepageMount:
+"""Mount operations for huge page filesystem."""
+
+def __init__(self, path: str, mounted: bool):
+self.path = path
+# current mount status
+self.mounted = mounted
+
+def mount(
+self, pagesize_kb: int, user: T.Optional[str], group: T.Optional[str]
+) -> None:
+"""Mount the huge TLB file system"""
+if self.mounted:
+return
+cmd = ["mount", "-t", "hugetlbfs"]
+cmd += ["-o", f"pagesize={pagesize_kb * 1024}"]
+if user is not None:
+cmd += ["-o", f"uid={user}"]
+if group is not None:
+cmd += ["-o", f"gid={group}"]
+cmd += ["nodev", self.path]
+
+subprocess.run(cmd, check=True)
+self.mounted = True
+
+def unmount(self) -> None:
+"""Unmount the huge TLB file system (if mounted)"""
+if self.mounted:
+subprocess.run(["umount", self.path], check=True)
+self.mounted = False
+
+
+class HugepageRes:
+"""Huge page reserve operations. Can be NUMA-node-specific."""
+
+def __init__(self, path: str, node: T.Optional[int] = None):
+self.path = path
+# if this is a per-NUMA node huge page dir, store the node number
+self.node = node
+self.valid_page_sizes = self._get_valid_page_sizes()
+
+def _get_valid_page_sizes(self) -> T.List[int]:
+"""Extract valid huge page sizes"""
+return [get_memsize(d.split("-")[1]) for d in os.listdir(self.path)]
+
+def _nr_pages_path(self, sz: int) -> str:
+if sz not in self.valid_page_sizes:
+raise ValueError(
+f"Invalid page size {sz}. "
+f"Valid sizes: {self.valid_page_sizes}"
+)
+return os.path.join(self.path, f"hugepages-{sz}kB", "nr_hugepages")
+
+def __getitem__(self, sz: int) -> int:
+"""Get current number of reserved pages of specified size"""
+with open(self._nr_pages_path(sz), encoding="utf-8") as f:
+return int(f.read())
+
+def __setitem__(self, sz: int, nr_pages: int) -> None:
+"""Set number of reserved pages of specified size"""
+with open(self._nr_pages_path(sz), "w", encoding="utf-8") as f:
+f.write(f"{nr_pages}\n")
+
+
+def fmt_memsize(kb: int) -> str:
+"""Format memory size in kB into conventional format"""
 logk = int(log2(kb) / 10)
 suffix = BINARY_PREFIX[logk]
-unit = 2**(logk * 10)
-return '{}{}b'.format(int(kb / unit), suffix)
+unit = 2 ** (logk * 10)
+return f"{int(kb / unit)}{suffix}b"
 
 
-def get_memsize(arg):
-'''Convert memory size with suffix to kB'''
-match = re.match(r'(\d+)([' + BINARY_PREFIX + r']?)$', arg.upper())
+def get_memsize(arg: str) -> int:
+"""Convert memory size with suffix to kB"""
+# arg may have a 'b' at the end
+if arg[-1].lower() == "b":
+arg = arg[:-1]
+match = re.match(rf"(\d+)([{BINARY_PREFIX}]?)$", arg.upper())
 if match is None:
-sys.exit('{} is not a valid size'.format(arg))
+raise ValueError(f"{arg} is not a valid size")
 num = float(match.group(1))
 suffix = match.group(2)
-if suffix ==
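
(The excerpt is truncated here. For orientation, a minimal sketch of how the
two classes introduced above could be driven - paths and page counts are
illustrative, and writing to sysfs requires root:)

    # illustrative driver for HugepageMount/HugepageRes from the patch
    mount = HugepageMount("/dev/hugepages", mounted=False)
    mount.mount(pagesize_kb=2048, user=None, group=None)  # mount hugetlbfs

    res = HugepageRes("/sys/kernel/mm/hugepages")  # non-NUMA reservation dir
    res[2048] = 128       # reserve 128 x 2 MB pages via __setitem__
    print(res[2048])      # read the reservation back via __getitem__

    mount.unmount()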

Re: [PATCH v3 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Robin Jarry

Burakov, Anatoly, Aug 21, 2024 at 11:16:
> Actually, I take that back - I had a configuration mishap and didn't
> notice that I wasn't using Ruff for formatting on the machine where I was
> creating the commits.
> 
> Still, cpu_layout's formatting is not affected, but the hugepage script is.
> 
> However, after formatting with ruff, I can see that 1) most single
> quotes became double quotes, 2) some lines I broke up for readability
> are no longer broken up, and 3) some lines I broke up to avoid exceeding
> the 80-character limit are no longer broken up.


Using these tools allows developers to stop thinking about coding style
and focus on more important matters :)



> I'll see if using Black yields different results.

Regarding line length, it seems that it's configurable. Perhaps we could
include a Ruff/Black configuration file with DPDK to solve this problem
once and for all? Adding --line-length=79 to the ruff config addresses the
last issue, but it wouldn't be necessary if there were a Ruff configuration
file in the repo. I can live with the first two things that I highlighted.


Both black and ruff have the same formatting rules and use a default 88 
line length limit. Which yields good results in most cases:


https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#line-length

I would prefer if we kept the default settings without any 
customization. The DPDK code base is already allowed to go up to 100 
columns anyways.




[PATCH v4 1/4] usertools/cpu_layout: update coding style

2024-08-21 Thread Anatoly Burakov
Update coding style:

- make it PEP-484 compliant
- address all flake8, mypy etc. warnings
- use f-strings in place of old-style string interpolation
- refactor printing to make the code more readable
- read valid CPU ID's from "online" sysfs node

Signed-off-by: Anatoly Burakov 
---

Notes:
v3->v4:
- Format with Ruff, line width 79

v1,v2 -> v3:
- Import typing as T instead of individual types

 usertools/cpu_layout.py | 163 ++--
 1 file changed, 108 insertions(+), 55 deletions(-)

diff --git a/usertools/cpu_layout.py b/usertools/cpu_layout.py
index 891b9238fa..8812ea286b 100755
--- a/usertools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -3,62 +3,115 @@
 # Copyright(c) 2010-2014 Intel Corporation
 # Copyright(c) 2017 Cavium, Inc. All rights reserved.
 
-sockets = []
-cores = []
-core_map = {}
-base_path = "/sys/devices/system/cpu"
-fd = open("{}/kernel_max".format(base_path))
-max_cpus = int(fd.read())
-fd.close()
-for cpu in range(max_cpus + 1):
-try:
-fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu))
-except IOError:
-continue
-core = int(fd.read())
-fd.close()
-fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu))
-socket = int(fd.read())
-fd.close()
-if core not in cores:
-cores.append(core)
-if socket not in sockets:
-sockets.append(socket)
-key = (socket, core)
-if key not in core_map:
-core_map[key] = []
-core_map[key].append(cpu)
+"""Display CPU topology information."""
 
-print(format("=" * (47 + len(base_path
-print("Core and Socket Information (as reported by '{}')".format(base_path))
-print("{}\n".format("=" * (47 + len(base_path
-print("cores = ", cores)
-print("sockets = ", sockets)
-print("")
+import typing as T
 
-max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1))
-max_thread_count = len(list(core_map.values())[0])
-max_core_map_len = (max_processor_len * max_thread_count)  \
-  + len(", ") * (max_thread_count - 1) \
-  + len('[]') + len('Socket ')
-max_core_id_len = len(str(max(cores)))
 
-output = " ".ljust(max_core_id_len + len('Core '))
-for s in sockets:
-output += " Socket %s" % str(s).ljust(max_core_map_len - len('Socket '))
-print(output)
-
-output = " ".ljust(max_core_id_len + len('Core '))
-for s in sockets:
-output += " ".ljust(max_core_map_len)
-output += " "
-print(output)
-
-for c in cores:
-output = "Core %s" % str(c).ljust(max_core_id_len)
-for s in sockets:
-if (s, c) in core_map:
-output += " " + str(core_map[(s, c)]).ljust(max_core_map_len)
+def range_expand(rstr: str) -> T.List[int]:
+"""Expand a range string into a list of integers."""
+# 0,1-3 => [0, 1-3]
+ranges = rstr.split(",")
+valset: T.List[int] = []
+for r in ranges:
+# 1-3 => [1, 2, 3]
+if "-" in r:
+start, end = r.split("-")
+valset.extend(range(int(start), int(end) + 1))
 else:
-output += " " * (max_core_map_len + 1)
-print(output)
+valset.append(int(r))
+return valset
+
+
+def read_sysfs(path: str) -> str:
+"""Read a sysfs file and return its contents."""
+with open(path, encoding="utf-8") as fd:
+return fd.read().strip()
+
+
+def print_row(row: T.Tuple[str, ...], col_widths: T.List[int]) -> None:
+"""Print a row of a table with the given column widths."""
+first, *rest = row
+w_first, *w_rest = col_widths
+first_end = " " * 4
+rest_end = " " * 10
+
+print(first.ljust(w_first), end=first_end)
+for cell, width in zip(rest, w_rest):
+print(cell.rjust(width), end=rest_end)
+print()
+
+
+def print_section(heading: str) -> None:
+"""Print a section heading."""
+sep = "=" * len(heading)
+print(sep)
+print(heading)
+print(sep)
+print()
+
+
+def main() -> None:
+"""Print CPU topology information."""
+sockets_s: T.Set[int] = set()
+cores_s: T.Set[int] = set()
+core_map: T.Dict[T.Tuple[int, int], T.List[int]] = {}
+base_path = "/sys/devices/system/cpu"
+
+cpus = range_expand(read_sysfs(f"{base_path}/online"))
+
+for cpu in cpus:
+lcore_base = f"{base_path}/cpu{cpu}"
+core = int(read_sysfs(f"{lcore_base}/topology/core_id"))
+socket = int(read_sysfs(f"{lcore_base}/topology/physical_package_id"))
+
+cores_s.add(core)
+sockets_s.add(socket)
+key = (socket, core)
+core_map.setdefault(key, [])
+core_map[key].append(cpu)
+
+cores = sorted(cores_s)
+sockets = sorted(sockets_s)
+
+print_section(
+f"Core and Socket Information (as reported by '{base_path}')"
+)
+
+print("cores = ", cores)
+print("sockets = ", sockets)
+print()
+
+# Core, [Socket, Socket, ...]
+heading_strs = "", *[f"Socket {s}" for s in sockets]
+sep_strs = tuple(

Re: [PATCH v4 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Robin Jarry

Anatoly Burakov, Aug 21, 2024 at 11:22:

Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Format code with Ruff
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 
Acked-by: Stephen Hemminger 
---

Notes:
v3 -> v4:
  - Format code with Ruff, line width 79 to avoid flake8 warnings
(Flake8 is by default configured with line width 79 on my system)


Please keep the default ruff/black settings. And when formatting with 
these tools, flake8 is mostly useless.


If you want to check your code for defects, you are probably best served by
`ruff check`, which combines the features of multiple python linters and
runs much faster.




v2 -> v3:
  - Rewrite of the script as suggested by reviewers
v1 -> v2:
  - Added commit that sorted output by NUMA node




Re: [PATCH v4 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Burakov, Anatoly

On 8/21/2024 11:26 AM, Robin Jarry wrote:

Anatoly Burakov, Aug 21, 2024 at 11:22:

Update coding style:

- Make the code PEP-484 compliant
- Add more comments, improve readability, use f-strings everywhere
- Address all Python static analysis (e.g. mypy, pylint) warnings
- Format code with Ruff
- Improve error handling
- Refactor printing and sysfs/procfs access functions
- Sort output by NUMA node

Signed-off-by: Anatoly Burakov 
Acked-by: Stephen Hemminger 
---

Notes:
    v3 -> v4:
  - Format code with Ruff, line width 79 to avoid flake8 warnings
    (Flake8 is by default configured with line width 79 on my system)


Please keep the default ruff/black settings. And when formatting with 
these tools, flake8 is mostly useless.


If you want to check your code for defects, you are probably best with 
`ruff check` which combines the features of multiple python linters and 
runs much faster.


OK, I'll reformat with default settings then! v5 incoming





    v2 -> v3:
  - Rewrite of the script as suggested by reviewers
    v1 -> v2:
  - Added commit that sorted output by NUMA node




--
Thanks,
Anatoly



[PATCH v5 1/4] usertools/cpu_layout: update coding style

2024-08-21 Thread Anatoly Burakov
Update coding style:

- make it PEP-484 compliant
- format code with Ruff
- address all mypy etc. warnings
- use f-strings in place of old-style string interpolation
- refactor printing to make the code more readable
- read valid CPU ID's from "online" sysfs node

Signed-off-by: Anatoly Burakov 
---

Notes:
v4-v5:
- Format with Ruff on default settings

v3->v4:
- Format with Ruff, line width 79

v1,v2 -> v3:
- Import typing as T instead of individual types

 usertools/cpu_layout.py | 161 ++--
 1 file changed, 106 insertions(+), 55 deletions(-)

diff --git a/usertools/cpu_layout.py b/usertools/cpu_layout.py
index 891b9238fa..e133fb8ad3 100755
--- a/usertools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -3,62 +3,113 @@
 # Copyright(c) 2010-2014 Intel Corporation
 # Copyright(c) 2017 Cavium, Inc. All rights reserved.
 
-sockets = []
-cores = []
-core_map = {}
-base_path = "/sys/devices/system/cpu"
-fd = open("{}/kernel_max".format(base_path))
-max_cpus = int(fd.read())
-fd.close()
-for cpu in range(max_cpus + 1):
-try:
-fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu))
-except IOError:
-continue
-core = int(fd.read())
-fd.close()
-fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu))
-socket = int(fd.read())
-fd.close()
-if core not in cores:
-cores.append(core)
-if socket not in sockets:
-sockets.append(socket)
-key = (socket, core)
-if key not in core_map:
-core_map[key] = []
-core_map[key].append(cpu)
+"""Display CPU topology information."""
 
-print(format("=" * (47 + len(base_path
-print("Core and Socket Information (as reported by '{}')".format(base_path))
-print("{}\n".format("=" * (47 + len(base_path
-print("cores = ", cores)
-print("sockets = ", sockets)
-print("")
+import typing as T
 
-max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1))
-max_thread_count = len(list(core_map.values())[0])
-max_core_map_len = (max_processor_len * max_thread_count)  \
-  + len(", ") * (max_thread_count - 1) \
-  + len('[]') + len('Socket ')
-max_core_id_len = len(str(max(cores)))
 
-output = " ".ljust(max_core_id_len + len('Core '))
-for s in sockets:
-output += " Socket %s" % str(s).ljust(max_core_map_len - len('Socket '))
-print(output)
-
-output = " ".ljust(max_core_id_len + len('Core '))
-for s in sockets:
-output += " ".ljust(max_core_map_len)
-output += " "
-print(output)
-
-for c in cores:
-output = "Core %s" % str(c).ljust(max_core_id_len)
-for s in sockets:
-if (s, c) in core_map:
-output += " " + str(core_map[(s, c)]).ljust(max_core_map_len)
+def range_expand(rstr: str) -> T.List[int]:
+"""Expand a range string into a list of integers."""
+# 0,1-3 => [0, 1-3]
+ranges = rstr.split(",")
+valset: T.List[int] = []
+for r in ranges:
+# 1-3 => [1, 2, 3]
+if "-" in r:
+start, end = r.split("-")
+valset.extend(range(int(start), int(end) + 1))
 else:
-output += " " * (max_core_map_len + 1)
-print(output)
+valset.append(int(r))
+return valset
+
+
+def read_sysfs(path: str) -> str:
+"""Read a sysfs file and return its contents."""
+with open(path, encoding="utf-8") as fd:
+return fd.read().strip()
+
+
+def print_row(row: T.Tuple[str, ...], col_widths: T.List[int]) -> None:
+"""Print a row of a table with the given column widths."""
+first, *rest = row
+w_first, *w_rest = col_widths
+first_end = " " * 4
+rest_end = " " * 10
+
+print(first.ljust(w_first), end=first_end)
+for cell, width in zip(rest, w_rest):
+print(cell.rjust(width), end=rest_end)
+print()
+
+
+def print_section(heading: str) -> None:
+"""Print a section heading."""
+sep = "=" * len(heading)
+print(sep)
+print(heading)
+print(sep)
+print()
+
+
+def main() -> None:
+"""Print CPU topology information."""
+sockets_s: T.Set[int] = set()
+cores_s: T.Set[int] = set()
+core_map: T.Dict[T.Tuple[int, int], T.List[int]] = {}
+base_path = "/sys/devices/system/cpu"
+
+cpus = range_expand(read_sysfs(f"{base_path}/online"))
+
+for cpu in cpus:
+lcore_base = f"{base_path}/cpu{cpu}"
+core = int(read_sysfs(f"{lcore_base}/topology/core_id"))
+socket = int(read_sysfs(f"{lcore_base}/topology/physical_package_id"))
+
+cores_s.add(core)
+sockets_s.add(socket)
+key = (socket, core)
+core_map.setdefault(key, [])
+core_map[key].append(cpu)
+
+cores = sorted(cores_s)
+sockets = sorted(sockets_s)
+
+print_section(f"Core and Socket Information (as reported by 
'{base_path}')")
+
+print("cores = ", cores)
+print("sockets = ", sockets)
+print()
+
+# Core, [Socket, Socket, ...]
+heading_strs = 

[PATCH v5 2/4] usertools/cpu_layout: print out NUMA nodes

2024-08-21 Thread Anatoly Burakov
In the traditional NUMA case, NUMA nodes and physical sockets were used
interchangeably, but there are cases where there can be multiple NUMA
nodes per socket, as well as all CPUs being assigned NUMA node 0 even in
cases of multiple sockets. Use sysfs to print out NUMA information.

Signed-off-by: Anatoly Burakov 
---

Notes:
v2 -> v3:
- Sort imports alphabetically

 usertools/cpu_layout.py | 36 +++-
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/usertools/cpu_layout.py b/usertools/cpu_layout.py
index e133fb8ad3..976be1f8b2 100755
--- a/usertools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -5,6 +5,7 @@
 
 """Display CPU topology information."""
 
+import glob
 import typing as T
 
 
@@ -29,12 +30,21 @@ def read_sysfs(path: str) -> str:
 return fd.read().strip()
 
 
+def read_numa_node(base: str) -> int:
+"""Read the NUMA node of a CPU."""
+node_glob = f"{base}/node*"
+node_dirs = glob.glob(node_glob)
+if not node_dirs:
+return 0  # default to node 0
+return int(node_dirs[0].split("node")[1])
+
+
 def print_row(row: T.Tuple[str, ...], col_widths: T.List[int]) -> None:
 """Print a row of a table with the given column widths."""
 first, *rest = row
 w_first, *w_rest = col_widths
 first_end = " " * 4
-rest_end = " " * 10
+rest_end = " " * 4
 
 print(first.ljust(w_first), end=first_end)
 for cell, width in zip(rest, w_rest):
@@ -56,6 +66,7 @@ def main() -> None:
 sockets_s: T.Set[int] = set()
 cores_s: T.Set[int] = set()
 core_map: T.Dict[T.Tuple[int, int], T.List[int]] = {}
+numa_map: T.Dict[int, int] = {}
 base_path = "/sys/devices/system/cpu"
 
 cpus = range_expand(read_sysfs(f"{base_path}/online"))
@@ -64,12 +75,14 @@ def main() -> None:
 lcore_base = f"{base_path}/cpu{cpu}"
 core = int(read_sysfs(f"{lcore_base}/topology/core_id"))
 socket = int(read_sysfs(f"{lcore_base}/topology/physical_package_id"))
+node = read_numa_node(lcore_base)
 
 cores_s.add(core)
 sockets_s.add(socket)
 key = (socket, core)
 core_map.setdefault(key, [])
 core_map[key].append(cpu)
+numa_map[cpu] = node
 
 cores = sorted(cores_s)
 sockets = sorted(sockets_s)
@@ -78,24 +91,37 @@ def main() -> None:
 
 print("cores = ", cores)
 print("sockets = ", sockets)
+print("numa = ", sorted(set(numa_map.values(
 print()
 
-# Core, [Socket, Socket, ...]
-heading_strs = "", *[f"Socket {s}" for s in sockets]
+# Core, [NUMA, Socket, NUMA, Socket, ...]
+heading_strs = "", *[v for s in sockets for v in ("", f"Socket {s}")]
 sep_strs = tuple("-" * len(hstr) for hstr in heading_strs)
 rows: T.List[T.Tuple[str, ...]] = []
 
+prev_numa = None
 for c in cores:
 # Core,
 row: T.Tuple[str, ...] = (f"Core {c}",)
 
-# [lcores, lcores, ...]
+# assume NUMA changes symmetrically
+first_lcore = core_map[(0, c)][0]
+cur_numa = numa_map[first_lcore]
+numa_changed = prev_numa != cur_numa
+prev_numa = cur_numa
+
+# [NUMA, lcores, NUMA, lcores, ...]
 for s in sockets:
 try:
 lcores = core_map[(s, c)]
+numa = numa_map[lcores[0]]
+if numa_changed:
+row += (f"NUMA {numa}",)
+else:
+row += ("",)
 row += (str(lcores),)
 except KeyError:
-row += ("",)
+row += ("", "")
 rows += [row]
 
 # find max widths for each column, including header and rows
-- 
2.43.5



[PATCH v5 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Anatoly Burakov
Update coding style:

- make the code PEP-484 compliant
- add more comments, improve readability, use f-strings everywhere
- address all Python static analysis (e.g. mypy, pylint) warnings
- format code with Ruff
- improve error handling
- refactor printing and sysfs/procfs access functions
- sort huge page reservation status output by NUMA node

Signed-off-by: Anatoly Burakov 
Acked-by: Stephen Hemminger 
---

Notes:
v4 -> v5:
- Format with Ruff on default settings
- Replaced all instances of raw path strings with os.path.join
v3 -> v4:
- Format code with Ruff, line width 79 to avoid flake8 warnings
  (Flake8 is by default configured with line width 79 on my system)
v2 -> v3:
- Rewrite of the script as suggested by reviewers
v1 -> v2:
- Added commit that sorted output by NUMA node

 usertools/dpdk-hugepages.py | 518 +---
 1 file changed, 310 insertions(+), 208 deletions(-)

diff --git a/usertools/dpdk-hugepages.py b/usertools/dpdk-hugepages.py
index bf2575ba36..3fc3269c83 100755
--- a/usertools/dpdk-hugepages.py
+++ b/usertools/dpdk-hugepages.py
@@ -1,13 +1,15 @@
 #! /usr/bin/env python3
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright (c) 2020 Microsoft Corporation
+
 """Script to query and setup huge pages for DPDK applications."""
 
 import argparse
-import glob
 import os
 import re
+import subprocess
 import sys
+import typing as T
 from math import log2
 
 # Standard binary prefix
@@ -15,194 +17,266 @@
 
 # systemd mount point for huge pages
 HUGE_MOUNT = "/dev/hugepages"
+# default directory for non-NUMA huge pages
+NO_NUMA_HUGE_DIR = "/sys/kernel/mm/hugepages"
+# default base directory for NUMA nodes
+NUMA_NODE_BASE_DIR = "/sys/devices/system/node"
+# procfs paths
+MEMINFO_PATH = "/proc/meminfo"
+MOUNTS_PATH = "/proc/mounts"
 
 
-def fmt_memsize(kb):
-'''Format memory size in kB into conventional format'''
+class HugepageMount:
+"""Mount operations for huge page filesystem."""
+
+def __init__(self, path: str, mounted: bool):
+self.path = path
+# current mount status
+self.mounted = mounted
+
+def mount(
+self, pagesize_kb: int, user: T.Optional[str], group: T.Optional[str]
+) -> None:
+"""Mount the huge TLB file system"""
+if self.mounted:
+return
+cmd = ["mount", "-t", "hugetlbfs"]
+cmd += ["-o", f"pagesize={pagesize_kb * 1024}"]
+if user is not None:
+cmd += ["-o", f"uid={user}"]
+if group is not None:
+cmd += ["-o", f"gid={group}"]
+cmd += ["nodev", self.path]
+
+subprocess.run(cmd, check=True)
+self.mounted = True
+
+def unmount(self) -> None:
+"""Unmount the huge TLB file system (if mounted)"""
+if self.mounted:
+subprocess.run(["umount", self.path], check=True)
+self.mounted = False
+
+
+class HugepageRes:
+"""Huge page reserve operations. Can be NUMA-node-specific."""
+
+def __init__(self, path: str, node: T.Optional[int] = None):
+self.path = path
+# if this is a per-NUMA node huge page dir, store the node number
+self.node = node
+self.valid_page_sizes = self._get_valid_page_sizes()
+
+def _get_valid_page_sizes(self) -> T.List[int]:
+"""Extract valid huge page sizes"""
+return [get_memsize(d.split("-")[1]) for d in os.listdir(self.path)]
+
+def _nr_pages_path(self, sz: int) -> str:
+if sz not in self.valid_page_sizes:
+raise ValueError(
+f"Invalid page size {sz}. " f"Valid sizes: 
{self.valid_page_sizes}"
+)
+return os.path.join(self.path, f"hugepages-{sz}kB", "nr_hugepages")
+
+def __getitem__(self, sz: int) -> int:
+"""Get current number of reserved pages of specified size"""
+with open(self._nr_pages_path(sz), encoding="utf-8") as f:
+return int(f.read())
+
+def __setitem__(self, sz: int, nr_pages: int) -> None:
+"""Set number of reserved pages of specified size"""
+with open(self._nr_pages_path(sz), "w", encoding="utf-8") as f:
+f.write(f"{nr_pages}\n")
+
+
+def fmt_memsize(kb: int) -> str:
+"""Format memory size in kB into conventional format"""
 logk = int(log2(kb) / 10)
 suffix = BINARY_PREFIX[logk]
-unit = 2**(logk * 10)
-return '{}{}b'.format(int(kb / unit), suffix)
+unit = 2 ** (logk * 10)
+return f"{int(kb / unit)}{suffix}b"
 
 
-def get_memsize(arg):
-'''Convert memory size with suffix to kB'''
-match = re.match(r'(\d+)([' + BINARY_PREFIX + r']?)$', arg.upper())
+def get_memsize(arg: str) -> int:
+"""Convert memory size with suffix to kB"""
+# arg may have a 'b' at the end
+if arg[-1].lower() == "b":
+arg = arg[:-1]
+match = re.match(rf"(\d+)([{BINARY_PREFIX}]?)$", arg.upper())
 if match is None:
-sys.exit('{} is not a valid size'.format(arg))
+ 
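
A usage sketch of the HugepageRes class from the (archive-truncated) diff
above; the sysfs path and the 2048 kB page size are assumptions based on a
typical x86 NUMA layout, and writing nr_hugepages requires root:

# Hypothetical usage of HugepageRes; not part of the patch itself.
node0 = HugepageRes("/sys/devices/system/node/node0/hugepages", node=0)
print(f"valid page sizes on node 0: {node0.valid_page_sizes} kB")
if 2048 in node0.valid_page_sizes:
    node0[2048] = 512  # reserve 512 x 2 MiB pages via __setitem__
    print(f"reserved: {node0[2048]} pages of 2048 kB")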

[PATCH v5 4/4] usertools/dpdk-devbind: print NUMA node

2024-08-21 Thread Anatoly Burakov
Currently, devbind does not print out any NUMA information, which makes
figuring out which NUMA node a device belongs to non-trivial. Add
printouts for NUMA information if NUMA support is enabled on the system.
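
The printout reuses devbind's %-style dict formatting; a minimal
illustration with a hypothetical device dictionary:

device = {"NUMANode": "0", "Driver_str": "vfio-pci", "Module_str": "i40e"}
extra_param = "drv=%(Driver_str)s unused=%(Module_str)s"
extra_param = "numa_node=%(NUMANode)s " + extra_param
print(extra_param % device)  # numa_node=0 drv=vfio-pci unused=i40e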

Signed-off-by: Anatoly Burakov 
Acked-by: Robin Jarry 
---

Notes:
v1 -> v2:
- Added commit to print out NUMA information in devbind

 usertools/dpdk-devbind.py | 29 +
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index b276e8efc8..078e8c387b 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -110,6 +110,11 @@
 args = []
 
 
+# check if this system has NUMA support
+def is_numa():
+return os.path.exists('/sys/devices/system/node')
+
+
 # check if a specific kernel module is loaded
 def module_is_loaded(module):
 global loaded_modules
@@ -577,20 +582,28 @@ def show_device_status(devices_type, device_name, if_field=False):
 print("".join('=' * len(msg)))
 return
 
+print_numa = is_numa()
+
 # print each category separately, so we can clearly see what's used by DPDK
 if dpdk_drv:
+extra_param = "drv=%(Driver_str)s unused=%(Module_str)s"
+if print_numa:
+extra_param = "numa_node=%(NUMANode)s " + extra_param
 display_devices("%s devices using DPDK-compatible driver" % 
device_name,
-dpdk_drv, "drv=%(Driver_str)s unused=%(Module_str)s")
+dpdk_drv, extra_param)
 if kernel_drv:
-if_text = ""
+extra_param = "drv=%(Driver_str)s unused=%(Module_str)s"
 if if_field:
-if_text = "if=%(Interface)s "
-display_devices("%s devices using kernel driver" % device_name, 
kernel_drv,
-if_text + "drv=%(Driver_str)s "
-"unused=%(Module_str)s %(Active)s")
+extra_param = "if=%(Interface)s " + extra_param
+if print_numa:
+extra_param = "numa_node=%(NUMANode)s " + extra_param
+display_devices("%s devices using kernel driver" % device_name,
+kernel_drv, extra_param)
 if no_drv:
-display_devices("Other %s devices" % device_name, no_drv,
-"unused=%(Module_str)s")
+extra_param = "unused=%(Module_str)s"
+if print_numa:
+extra_param = "numa_node=%(NUMANode)s " + extra_param
+display_devices("Other %s devices" % device_name, no_drv, extra_param)
 
 
 def show_status():
-- 
2.43.5



Re: [PATCH v5 1/4] usertools/cpu_layout: update coding style

2024-08-21 Thread Robin Jarry

Anatoly Burakov, Aug 21, 2024 at 11:44:

Update coding style:

- make it PEP-484 compliant
- format code with Ruff
- address all mypy etc. warnings
- use f-strings in place of old-style string interpolation
- refactor printing to make the code more readable
- read valid CPU IDs from "online" sysfs node

Signed-off-by: Anatoly Burakov 
---

Notes:
v4-v5:
- Format with Ruff on default settings

v3->v4:

- Format with Ruff, line width 79

v1,v2 -> v3:

- Import typing as T instead of individual types

 usertools/cpu_layout.py | 161 ++--
 1 file changed, 106 insertions(+), 55 deletions(-)


Acked-by: Robin Jarry 

Thanks!



[DPDK/core Bug 1525] dpdk mempool creation is resulting into lot of memory getting wasted

2024-08-21 Thread bugzilla
https://bugs.dpdk.org/show_bug.cgi?id=1525

Bug ID: 1525
   Summary: dpdk mempool creation is resulting into lot of memory
getting wasted
   Product: DPDK
   Version: 20.11
  Hardware: x86
OS: Linux
Status: UNCONFIRMED
  Severity: major
  Priority: Normal
 Component: core
  Assignee: dev@dpdk.org
  Reporter: sandipchhik...@gmail.com
  Target Milestone: ---

We are using rte_mempool_op_populate_helper to create a DPDK mempool. Memory
is allocated from 1GB huge pages and passed to this function. It is observed
that rte_mempool_op_populate_helper calculates the page size as 4K, and since
MEMPOOL_F_NO_IOVA_CONTIG is not specified, it does not place objects across
pages, resulting in a lot of memory wastage and the creation of a pool that is
smaller than required.
rte_mempool_op_calc_mem_size_default is used to calculate how much memory is
required for the pool.

For a user application that works only on virtual memory (no driver
accessing physical memory directly), would it be fine to enable the
MEMPOOL_F_NO_IOVA_CONTIG flag?

We have objects of 712 bytes (the final memory for each object becomes 812
bytes after accounting for the header, trailer, etc.).
We are creating 250 objects in the pool, allocating 2 GB of memory (1 GB huge
pages) for it, but after placing 4 objects in one page it starts using the
next page, because it wrongly determines the page size to be 4K.
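
For illustration, a back-of-the-envelope sketch of the wastage under the
numbers above (the 4 objects per page figure is the observed value from
this report, not a derived one):

OBJ_SIZE = 812           # bytes per object incl. header/trailer
PAGE_4K = 4 * 1024

used = 4 * OBJ_SIZE      # 3248 bytes actually used per 4K page
wasted = PAGE_4K - used  # 848 bytes (~21%) wasted per page
print(f"waste per 4 KiB page: {wasted} B ({wasted / PAGE_4K:.0%})")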

-- 
You are receiving this mail because:
You are the assignee for the bug.

Re: [PATCH v5 3/4] usertools/dpdk-hugepages.py: update coding style

2024-08-21 Thread Robin Jarry

Anatoly Burakov, Aug 21, 2024 at 11:44:

Update coding style:

- make the code PEP-484 compliant
- add more comments, improve readability, use f-strings everywhere
- address all Python static analysis (e.g. mypy, pylint) warnings
- format code with Ruff
- improve error handling
- refactor printing and sysfs/procfs access functions
- sort huge page reservation status output by NUMA node

Signed-off-by: Anatoly Burakov 
Acked-by: Stephen Hemminger 
---

Notes:
v4 -> v5:
- Format with Ruff on default settings
- Replaced all instances of raw path strings with os.path.join
v3 -> v4:
- Format code with Ruff, line width 79 to avoid flake8 warnings
  (Flake8 is by default configured with line width 79 on my system)
v2 -> v3:
- Rewrite of the script as suggested by reviewers
v1 -> v2:
- Added commit that sorted output by NUMA node

 usertools/dpdk-hugepages.py | 518 +---
 1 file changed, 310 insertions(+), 208 deletions(-)


Acked-by: Robin Jarry 

Thanks!



Re: [PATCH v5 2/4] usertools/cpu_layout: print out NUMA nodes

2024-08-21 Thread Robin Jarry

Anatoly Burakov, Aug 21, 2024 at 11:44:

In the traditional NUMA case, NUMA nodes and physical sockets were used
interchangeably, but there are cases where there can be multiple NUMA
nodes per socket, as well as all CPUs being assigned NUMA node 0 even in
cases of multiple sockets. Use sysfs to print out NUMA information.

Signed-off-by: Anatoly Burakov 
---

Notes:
v2 -> v3:
- Sort imports alphabetically

 usertools/cpu_layout.py | 36 +++-
 1 file changed, 31 insertions(+), 5 deletions(-)


Acked-by: Robin Jarry 

Thanks!



Re: [PATCH v5 4/4] usertools/dpdk-devbind: print NUMA node

2024-08-21 Thread Robin Jarry

Anatoly Burakov, Aug 21, 2024 at 11:44:

Currently, devbind does not print out any NUMA information, which makes
figuring out which NUMA node a device belongs to non-trivial. Add
printouts for NUMA information if NUMA support is enabled on the system.

Signed-off-by: Anatoly Burakov 
Acked-by: Robin Jarry 
---

Notes:
v1 -> v2:
- Added commit to print out NUMA information in devbind

 usertools/dpdk-devbind.py | 29 +
 1 file changed, 21 insertions(+), 8 deletions(-)


Acked-by: Robin Jarry 

Thanks!



Re: [dpdk-stable] [PATCH v3 2/2] devtools: fix patches missing if range newer than HEAD

2024-08-21 Thread Xueming Li
Not really needed; checking out the main branch avoids this issue. Thanks for
looking into this.

From: Stephen Hemminger 
Sent: Friday, August 16, 2024 12:32 AM
To: NBU-Contact-Thomas Monjalon (EXTERNAL) 
Cc: Christian Ehrhardt ; bl...@debian.org 
; ktray...@redhat.com ; sta...@dpdk.org 
; dev@dpdk.org ; Xueming Li 
Subject: Re: [dpdk-stable] [PATCH v3 2/2] devtools: fix patches missing if 
range newer than HEAD

On Sat, 26 Nov 2022 22:44:26 +0100
Thomas Monjalon  wrote:

> Someone to help with review of this patch please?
> Is there a real need?
>
>

The patch looks ok, but no longer applies, needs to be rebased.
Xueming if you are still needing this resubmit please.
>



[PATCH v2 0/3] app/testpmd: improve sse based macswap

2024-08-21 Thread Vipin Varghese
The goal of this patch series is to improve SSE macswap on x86_64 by
reducing stalls in the backend engine. The original SSE mac-swap
implementation issues multiple loads, shuffles and stores in a loop.

By interleaving SIMD instructions, using register variables, and reducing
L1 and L2 cache evictions, we can reduce the stalls caused by
 - SSE load token exhaustion
 - shuffle-on-load dependencies

Build tested using the meson script, with the following targets:
build-gcc-static
buildtools
build-gcc-shared
build-mini
build-clang-static
build-clang-shared
build-x86-generic

Test Results:

Platform-1: AMD EPYC SIENA 8594P @2.3GHz, no boost
Platform-2: AMD EPYC 9554 @3.1GHz, no boost

NIC:
 1) mellanox CX-7 1*200Gbps
 2) intel E810 1*100Gbps
 3) intel E810 2*200Gbps (2CQ-DA2) - loopback
 4) broadcom P2100 2*100Gbps - loopback


TEST IO 64B: baseline 
 - NIC-1: 42.0
 - NIC-2: 82.0
 - NIC-3: 82.45
 - NIC-4: 47.03

TEST MACSWAP 64B: 
 - NIC-1: 31.533 : 31.90
 - NIC-2: 48.0   : 48.9 
 - NIC-3: 48.840 : 49.827
 - NIC-4: 44.3   : 45.5

TEST MACSWAP 128B: 
 - NIC-1: 30.946 : 31.770
 - NIC-2: 47.4   : 48.3
 - NIC-3: 47.979 : 48.503
 - NIC-4: 41.53  : 44.59

TEST MACSWAP 256B: 
 - NIC-1: 32.480 : 33.150
 - NIC-2: 45.29  : 45.571
 - NIC-3: 45.033 : 45.117
 - NIC-4: 36.49  : 37.5




TEST IO 64B: baseline 
 - intel E810 2*200Gbps (2CQ-DA2): 82.49


TEST MACSWAP: 1Q 1C1T
 64B: : 45.0 : 45.54
128B: : 44.48 : 44.43
256B: : 42.0 : 41.99
+
TEST MACSWAP: 2Q 2C2T
 64B: : 59.5 : 60.55
128B: : 56.78 : 58.1
256B: : 41.85 : 41.99


Signed-off-by: Vipin Varghese 

Vipin Varghese (3):
  app/testpmd: add register keyword
  app/testpmd: move offload update
  app/testpmd: interleave SSE SIMD

 app/test-pmd/macswap_sse.h | 27 ++-
 1 file changed, 14 insertions(+), 13 deletions(-)

-- 
2.34.1



[PATCH v2 1/3] app/testpmd: add register keyword

2024-08-21 Thread Vipin Varghese
Currently, SSE SIMD variables are declared as stack variables. Using the
register keyword for the shuffle mask and address variables improves
mac-swap throughput by 1 Mpps for a single queue.

Test Result:
 * Platform: AMD EPYC 9554 @3.1GHz, no boost
 * Test scenarios: TEST-PMD 64B IO vs MAC-SWAP
 * NIC: broadcom P2100: loopback 2*100Gbps



 - IO: 47.23 : 46.0
 - MAC-SWAP original: 45.75 : 43.8
 - MAC-SWAP register mod 45.73 : 44.83

Signed-off-by: Vipin Varghese 
---
 app/test-pmd/macswap_sse.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/macswap_sse.h b/app/test-pmd/macswap_sse.h
index 223f87a539..29088843b7 100644
--- a/app/test-pmd/macswap_sse.h
+++ b/app/test-pmd/macswap_sse.h
@@ -16,13 +16,13 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
uint64_t ol_flags;
int i;
int r;
-   __m128i addr0, addr1, addr2, addr3;
+   register __m128i addr0, addr1, addr2, addr3;
/**
 * shuffle mask be used to shuffle the 16 bytes.
 * byte 0-5 wills be swapped with byte 6-11.
 * byte 12-15 will keep unchanged.
 */
-   __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12,
+   register const __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12,
5, 4, 3, 2,
1, 0, 11, 10,
9, 8, 7, 6);
-- 
2.34.1



[PATCH v2 3/3] app/testpmd: interleave SSE SIMD

2024-08-21 Thread Vipin Varghese
Interleaving SSE SIMD loads, shuffles, and stores helps to
improve the overall mac-swap Mpps for both RX and TX.

Test Result:
 * Platform: AMD EPYC 9554 @3.1GHz, no boost
 * Test scenarios: TEST-PMD 64B IO vs MAC-SWAP
 * NIC: broadcom P2100: loopback 2*100Gbps

 
 
  - MAC-SWAP original: 45.75 : 43.8
  - MAC-SWAP register mod: 45.73 : 44.83
  - MAC-SWAP register+ofl mod: 46.36 : 44.79
  - MAC-SWAP register+ofl+interleave mod: 46.0 : 45.1

Signed-off-by: Vipin Varghese 
---
 app/test-pmd/macswap_sse.h | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/macswap_sse.h b/app/test-pmd/macswap_sse.h
index 67ff7fdfbb..1f547388b7 100644
--- a/app/test-pmd/macswap_sse.h
+++ b/app/test-pmd/macswap_sse.h
@@ -52,23 +52,25 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
addr1 = _mm_loadu_si128((__m128i *)eth_hdr[1]);
mbuf_field_set(mb[1], ol_flags);
 
+   addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
+
mb[2] = pkts[i++];
eth_hdr[2] = rte_pktmbuf_mtod(mb[2], struct rte_ether_hdr *);
addr2 = _mm_loadu_si128((__m128i *)eth_hdr[2]);
mbuf_field_set(mb[2], ol_flags);
 
+   addr1 = _mm_shuffle_epi8(addr1, shfl_msk);
+   _mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
+
mb[3] = pkts[i++];
eth_hdr[3] = rte_pktmbuf_mtod(mb[3], struct rte_ether_hdr *);
addr3 = _mm_loadu_si128((__m128i *)eth_hdr[3]);
mbuf_field_set(mb[3], ol_flags);
 
-   addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
-   addr1 = _mm_shuffle_epi8(addr1, shfl_msk);
addr2 = _mm_shuffle_epi8(addr2, shfl_msk);
-   addr3 = _mm_shuffle_epi8(addr3, shfl_msk);
-
-   _mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
_mm_storeu_si128((__m128i *)eth_hdr[1], addr1);
+
+   addr3 = _mm_shuffle_epi8(addr3, shfl_msk);
_mm_storeu_si128((__m128i *)eth_hdr[2], addr2);
_mm_storeu_si128((__m128i *)eth_hdr[3], addr3);
 
-- 
2.34.1



[PATCH v2 2/3] app/testpmd: move offload update

2024-08-21 Thread Vipin Varghese
Moving the offload flag update from the end to the start of the loop
helps to reduce L1 and L2 cache evictions and amortizes the shuffle cost.
This improves RX packet handling in mac-swap processing.

Test Result:
 * Platform: AMD EPYC 9554 @3.1GHz, no boost
 * Test scenarios: TEST-PMD 64B IO vs MAC-SWAP
 * NIC: broadcom P2100: loopback 2*100Gbps

 
 
  - MAC-SWAP original: 45.75 : 43.8
  - MAC-SWAP register mod: 45.73 : 44.83
  - MAC-SWAP register+ofl modified: 46.36 : 44.79

Signed-off-by: Vipin Varghese 
---
 app/test-pmd/macswap_sse.h | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/app/test-pmd/macswap_sse.h b/app/test-pmd/macswap_sse.h
index 29088843b7..67ff7fdfbb 100644
--- a/app/test-pmd/macswap_sse.h
+++ b/app/test-pmd/macswap_sse.h
@@ -45,19 +45,22 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
mb[0] = pkts[i++];
eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct rte_ether_hdr *);
addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]);
+   mbuf_field_set(mb[0], ol_flags);
 
mb[1] = pkts[i++];
eth_hdr[1] = rte_pktmbuf_mtod(mb[1], struct rte_ether_hdr *);
addr1 = _mm_loadu_si128((__m128i *)eth_hdr[1]);
-
+   mbuf_field_set(mb[1], ol_flags);
 
mb[2] = pkts[i++];
eth_hdr[2] = rte_pktmbuf_mtod(mb[2], struct rte_ether_hdr *);
addr2 = _mm_loadu_si128((__m128i *)eth_hdr[2]);
+   mbuf_field_set(mb[2], ol_flags);
 
mb[3] = pkts[i++];
eth_hdr[3] = rte_pktmbuf_mtod(mb[3], struct rte_ether_hdr *);
addr3 = _mm_loadu_si128((__m128i *)eth_hdr[3]);
+   mbuf_field_set(mb[3], ol_flags);
 
addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
addr1 = _mm_shuffle_epi8(addr1, shfl_msk);
@@ -69,10 +72,6 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
_mm_storeu_si128((__m128i *)eth_hdr[2], addr2);
_mm_storeu_si128((__m128i *)eth_hdr[3], addr3);
 
-   mbuf_field_set(mb[0], ol_flags);
-   mbuf_field_set(mb[1], ol_flags);
-   mbuf_field_set(mb[2], ol_flags);
-   mbuf_field_set(mb[3], ol_flags);
r -= 4;
}
 
@@ -84,10 +83,10 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
 
/* Swap dest and src mac addresses. */
addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]);
+   mbuf_field_set(mb[0], ol_flags);
+
addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
_mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
-
-   mbuf_field_set(mb[0], ol_flags);
}
 }
 
-- 
2.34.1



[PATCH v3 01/12] dts: fix default device error handling mode

2024-08-21 Thread Juraj Linkeš
The device_error_handling_mode field of a testpmd port may not be
present, e.g. on VM ports.

Fixes: 61d5bc9bf974 ("dts: add port info command to testpmd shell")

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/testpmd_shell.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index a5347b07dc..b4ad253020 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -465,8 +465,8 @@ class TestPmdPort(TextParser):
 metadata=DeviceCapabilitiesFlag.make_parser(),
 )
 #:
-device_error_handling_mode: DeviceErrorHandlingMode = field(
-metadata=DeviceErrorHandlingMode.make_parser()
+device_error_handling_mode: DeviceErrorHandlingMode | None = field(
+default=None, metadata=DeviceErrorHandlingMode.make_parser()
 )
 #:
 device_private_info: str | None = field(
-- 
2.34.1



[PATCH v3 00/12] dts: add test skipping based on capabilities

2024-08-21 Thread Juraj Linkeš
Add an automated way to gather available capabilities of the tested
hardware and skip test suites or cases which require capabilities that
are not available.

This is done through two decorators:
1. The first marks a test suite method as a test case. This populates the
   default attributes of each test case.
2. The second adds the required capabilities to a test suite or case,
   using the attributes from 1).

Two types of capabilities are added:
1. NIC capabilities. These are gathered once DPDK has been built because
   we use testpmd for this. It's possible to add a function that will
   add configuration before assessing capabilities associated with the
   function. This is because some capabilities return different status
   with different configuration present.
2. The topology capability. Each test case is marked as requiring a
   default topology. The required topology of a test case (or a whole
   test suite) may be changed with the second decorator.

This is how it all works:
1. The required capabilities are first all gathered from all test suites
   and test cases.
2. The list of required capabilities is divided into supported and
   unsupported capabilities. In this step, the probing of hardware
   and/or anything else that needs to happen to gauge whether a
   capability is supported is done.
3. Each test suite and test case is then marked to be skipped if any of
   their required capabilities are not supported.
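
As a usage sketch (the decorator and capability names below follow the
later patches in this series; treat the exact spellings as illustrative):

from framework.test_suite import TestSuite, func_test
from framework.testbed_model.capability import NicCapability, TopologyType, requires

@requires(topology_type=TopologyType.one_link)
class TestExample(TestSuite):
    @requires(NicCapability.RX_OFFLOAD_SCATTER)
    @func_test
    def test_scatter(self) -> None:
        ...  # skipped automatically when the NIC lacks scatter support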

Depends-on: patch-142276 ("dts: add methods for modifying MTU to testpmd
shell")

Juraj Linkeš (12):
  dts: fix default device error handling mode
  dts: add the aenum dependency
  dts: add test case decorators
  dts: add mechanism to skip test cases or suites
  dts: add support for simpler topologies
  dts: add basic capability support
  dts: add testpmd port information caching
  dts: add NIC capability support
  dts: add topology capability
  doc: add DTS capability doc sources
  dts: add Rx offload capabilities
  dts: add NIC capabilities from show port info

 .../framework.testbed_model.capability.rst|   6 +
 doc/api/dts/framework.testbed_model.rst   |   2 +
 .../dts/framework.testbed_model.topology.rst  |   6 +
 dts/framework/remote_session/testpmd_shell.py | 461 +++-
 dts/framework/runner.py   | 155 +++---
 dts/framework/test_result.py  | 120 +++--
 dts/framework/test_suite.py   | 161 +-
 dts/framework/testbed_model/capability.py | 491 ++
 dts/framework/testbed_model/node.py   |   2 +-
 dts/framework/testbed_model/port.py   |   4 +-
 dts/framework/testbed_model/topology.py   | 128 +
 dts/poetry.lock   |  14 +-
 dts/pyproject.toml|   1 +
 dts/tests/TestSuite_hello_world.py|  10 +-
 dts/tests/TestSuite_os_udp.py |   3 +-
 dts/tests/TestSuite_pmd_buffer_scatter.py |  14 +-
 dts/tests/TestSuite_smoke_tests.py|   8 +-
 17 files changed, 1429 insertions(+), 157 deletions(-)
 create mode 100644 doc/api/dts/framework.testbed_model.capability.rst
 create mode 100644 doc/api/dts/framework.testbed_model.topology.rst
 create mode 100644 dts/framework/testbed_model/capability.py
 create mode 100644 dts/framework/testbed_model/topology.py

-- 
2.34.1



[PATCH v3 02/12] dts: add the aenum dependency

2024-08-21 Thread Juraj Linkeš
Regular Python enumerations create only one instance for members with
the same value, such as:
class MyEnum(Enum):
    foo = 1
    bar = 1

MyEnum.foo and MyEnum.bar are aliases that return the same instance.

DTS needs to return different instances in the above scenario so that we
can map capabilities with different names to the same function that
retrieves the capabilities.
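
A minimal sketch of the difference (NoAliasEnum is the aenum class this
patch pulls in; the class names here are illustrative):

from enum import Enum
from aenum import NoAliasEnum

class StdEnum(Enum):
    foo = 1
    bar = 1  # alias: StdEnum.bar is StdEnum.foo

class DistinctEnum(NoAliasEnum):
    foo = 1
    bar = 1  # a separate member despite the equal value

assert StdEnum.bar is StdEnum.foo
assert DistinctEnum.bar is not DistinctEnum.foo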

Signed-off-by: Juraj Linkeš 
---
 dts/poetry.lock| 14 +-
 dts/pyproject.toml |  1 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/dts/poetry.lock b/dts/poetry.lock
index 2dd8bad498..cf5f6569c6 100644
--- a/dts/poetry.lock
+++ b/dts/poetry.lock
@@ -1,5 +1,17 @@
 # This file is automatically @generated by Poetry 1.8.2 and should not be 
changed by hand.
 
+[[package]]
+name = "aenum"
+version = "3.1.15"
+description = "Advanced Enumerations (compatible with Python's stdlib Enum), 
NamedTuples, and NamedConstants"
+optional = false
+python-versions = "*"
+files = [
+{file = "aenum-3.1.15-py2-none-any.whl", hash = 
"sha256:27b1710b9d084de6e2e695dab78fe9f269de924b51ae2850170ee7e1ca6288a5"},
+{file = "aenum-3.1.15-py3-none-any.whl", hash = 
"sha256:e0dfaeea4c2bd362144b87377e2c61d91958c5ed0b4daf89cb6f45ae23af6288"},
+{file = "aenum-3.1.15.tar.gz", hash = 
"sha256:8cbd76cd18c4f870ff39b24284d3ea028fbe8731a58df3aa581e434c575b9559"},
+]
+
 [[package]]
 name = "alabaster"
 version = "0.7.13"
@@ -1350,4 +1362,4 @@ jsonschema = ">=4,<5"
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "6db17f96cb31fb463b0b0a31dff9c02aa72641e0bffd8a610033fe2324006c43"
+content-hash = "6f20ce05310df93fed1d392160d1653ae5de5c6f260a5865eb3c6111a7c2b394"
diff --git a/dts/pyproject.toml b/dts/pyproject.toml
index 38281f0e39..6e347852cc 100644
--- a/dts/pyproject.toml
+++ b/dts/pyproject.toml
@@ -26,6 +26,7 @@ fabric = "^2.7.1"
 scapy = "^2.5.0"
 pydocstyle = "6.1.1"
 typing-extensions = "^4.11.0"
+aenum = "^3.1.15"
 
 [tool.poetry.group.dev.dependencies]
 mypy = "^1.10.0"
-- 
2.34.1



[PATCH v3 03/12] dts: add test case decorators

2024-08-21 Thread Juraj Linkeš
Add decorators for functional and performance test cases. These
decorators add attributes to the decorated test cases.

With the addition of decorators, we change the test case discovery
mechanism from looking at test case names according to a regex to simply
checking an attribute of the function added with one of the decorators.

The decorators allow us to add further variables to test cases.

Also move the test case filtering to TestSuite while changing the
mechanism to separate the logic in a more sensible manner.
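
A minimal sketch of the discovery change, with illustrative names (an
attribute set by the decorator replaces the name regex):

import inspect

def func_test(func):
    # mark the method so discovery can find it by attribute, not by name
    func.test_type = "functional"
    return func

class ExampleSuite:
    @func_test
    def check_link_up(self):
        ...

test_cases = [
    method
    for _, method in inspect.getmembers(ExampleSuite, inspect.isfunction)
    if getattr(method, "test_type", None) == "functional"
]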

Bugzilla ID: 1460

Signed-off-by: Juraj Linkeš 
---
 dts/framework/runner.py   |  93 
 dts/framework/test_result.py  |   5 +-
 dts/framework/test_suite.py   | 125 +-
 dts/tests/TestSuite_hello_world.py|   8 +-
 dts/tests/TestSuite_os_udp.py |   3 +-
 dts/tests/TestSuite_pmd_buffer_scatter.py |   3 +-
 dts/tests/TestSuite_smoke_tests.py|   6 +-
 7 files changed, 160 insertions(+), 83 deletions(-)

diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 6b6f6a05f5..525f119ab6 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py
@@ -20,11 +20,10 @@
 import importlib
 import inspect
 import os
-import re
 import sys
 from pathlib import Path
-from types import FunctionType
-from typing import Iterable, Sequence
+from types import MethodType
+from typing import Iterable
 
 from framework.testbed_model.sut_node import SutNode
 from framework.testbed_model.tg_node import TGNode
@@ -53,7 +52,7 @@
 TestSuiteResult,
 TestSuiteWithCases,
 )
-from .test_suite import TestSuite
+from .test_suite import TestCase, TestSuite
 
 
 class DTSRunner:
@@ -232,9 +231,9 @@ def _get_test_suites_with_cases(
 
 for test_suite_config in test_suite_configs:
test_suite_class = self._get_test_suite_class(test_suite_config.test_suite)
-test_cases = []
-func_test_cases, perf_test_cases = self._filter_test_cases(
-test_suite_class, test_suite_config.test_cases
+test_cases: list[type[TestCase]] = []
+func_test_cases, perf_test_cases = test_suite_class.get_test_cases(
+test_suite_config.test_cases
 )
 if func:
 test_cases.extend(func_test_cases)
@@ -309,57 +308,6 @@ def is_test_suite(object) -> bool:
 f"Couldn't find any valid test suites in 
{test_suite_module.__name__}."
 )
 
-def _filter_test_cases(
-self, test_suite_class: type[TestSuite], test_cases_to_run: Sequence[str]
-) -> tuple[list[FunctionType], list[FunctionType]]:
-"""Filter `test_cases_to_run` from `test_suite_class`.
-
-There are two rounds of filtering if `test_cases_to_run` is not empty.
-The first filters `test_cases_to_run` from all methods of 
`test_suite_class`.
-Then the methods are separated into functional and performance test 
cases.
-If a method matches neither the functional nor performance name 
prefix, it's an error.
-
-Args:
-test_suite_class: The class of the test suite.
-test_cases_to_run: Test case names to filter from 
`test_suite_class`.
-If empty, return all matching test cases.
-
-Returns:
-A list of test case methods that should be executed.
-
-Raises:
-ConfigurationError: If a test case from `test_cases_to_run` is not 
found
-or it doesn't match either the functional nor performance name 
prefix.
-"""
-func_test_cases = []
-perf_test_cases = []
-name_method_tuples = inspect.getmembers(test_suite_class, inspect.isfunction)
-if test_cases_to_run:
-name_method_tuples = [
-(name, method) for name, method in name_method_tuples if name in test_cases_to_run
-]
-if len(name_method_tuples) < len(test_cases_to_run):
-missing_test_cases = set(test_cases_to_run) - {
-name for name, _ in name_method_tuples
-}
-raise ConfigurationError(
-f"Test cases {missing_test_cases} not found among methods "
-f"of {test_suite_class.__name__}."
-)
-
-for test_case_name, test_case_method in name_method_tuples:
-if re.match(self._func_test_case_regex, test_case_name):
-func_test_cases.append(test_case_method)
-elif re.match(self._perf_test_case_regex, test_case_name):
-perf_test_cases.append(test_case_method)
-elif test_cases_to_run:
-raise ConfigurationError(
-f"Method '{test_case_name}' matches neither "
-f"a functional nor a performance test case name."
-)
-
-return func_test_cases, perf_test_cases
-
 def _connect_nodes_and_run_test_run(
  

[PATCH v3 04/12] dts: add mechanism to skip test cases or suites

2024-08-21 Thread Juraj Linkeš
If a test case is not relevant to the testing environment (such as when
a NIC doesn't support a tested feature), the framework should skip it.
The mechanism is a skeleton without actual logic that would set a test
case or suite to be skipped.

The mechanism uses a protocol to extend test suites and test cases with
additional attributes that track whether the test case or suite should
be skipped and the reason for skipping it.

Also update the results module with the new SKIP result.
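
A minimal sketch of the skeleton, assuming the attribute names as they
appear in the diff below:

from typing import ClassVar, Protocol

class TestProtocol(Protocol):
    #: Whether to skip the test case or suite.
    skip: ClassVar[bool] = False
    #: The reason for skipping.
    skip_reason: ClassVar[str] = ""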

Signed-off-by: Juraj Linkeš 
---
 dts/framework/runner.py   | 34 +++---
 dts/framework/test_result.py  | 77 ++-
 dts/framework/test_suite.py   |  7 ++-
 dts/framework/testbed_model/capability.py | 28 +
 4 files changed, 109 insertions(+), 37 deletions(-)
 create mode 100644 dts/framework/testbed_model/capability.py

diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 525f119ab6..55357ea1fe 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py
@@ -477,7 +477,20 @@ def _run_test_suites(
 for test_suite_with_cases in test_suites_with_cases:
test_suite_result = build_target_result.add_test_suite(test_suite_with_cases)
 try:
-self._run_test_suite(sut_node, tg_node, test_suite_result, 
test_suite_with_cases)
+if not test_suite_with_cases.skip:
+self._run_test_suite(
+sut_node,
+tg_node,
+test_suite_result,
+test_suite_with_cases,
+)
+else:
+self._logger.info(
+f"Test suite execution SKIPPED: "
+f"'{test_suite_with_cases.test_suite_class.__name__}'. 
Reason: "
+f"{test_suite_with_cases.test_suite_class.skip_reason}"
+)
+test_suite_result.update_setup(Result.SKIP)
 except BlockingTestSuiteError as e:
 self._logger.exception(
 f"An error occurred within 
{test_suite_with_cases.test_suite_class.__name__}. "
@@ -576,14 +589,21 @@ def _execute_test_suite(
 test_case_result = test_suite_result.add_test_case(test_case_name)
 all_attempts = SETTINGS.re_run + 1
 attempt_nr = 1
-self._run_test_case(test_suite, test_case, test_case_result)
-while not test_case_result and attempt_nr < all_attempts:
-attempt_nr += 1
+if not test_case.skip:
+self._run_test_case(test_suite, test_case, test_case_result)
+while not test_case_result and attempt_nr < all_attempts:
+attempt_nr += 1
+self._logger.info(
+f"Re-running FAILED test case '{test_case_name}'. "
+f"Attempt number {attempt_nr} out of {all_attempts}."
+)
+self._run_test_case(test_suite, test_case, test_case_result)
+else:
 self._logger.info(
-f"Re-running FAILED test case '{test_case_name}'. "
-f"Attempt number {attempt_nr} out of {all_attempts}."
+f"Test case execution SKIPPED: {test_case_name}. Reason: "
+f"{test_case.skip_reason}"
 )
-self._run_test_case(test_suite, test_case, test_case_result)
+test_case_result.update_setup(Result.SKIP)
 
 def _run_test_case(
 self,
diff --git a/dts/framework/test_result.py b/dts/framework/test_result.py
index b1ca584523..306b100bc6 100644
--- a/dts/framework/test_result.py
+++ b/dts/framework/test_result.py
@@ -75,6 +75,20 @@ def create_config(self) -> TestSuiteConfig:
 test_cases=[test_case.__name__ for test_case in self.test_cases],
 )
 
+@property
+def skip(self) -> bool:
+"""Skip the test suite if all test cases or the suite itself are to be 
skipped.
+
+Returns:
+:data:`True` if the test suite should be skipped, :data:`False` 
otherwise.
+"""
+all_test_cases_skipped = True
+for test_case in self.test_cases:
+if not test_case.skip:
+all_test_cases_skipped = False
+break
+return all_test_cases_skipped or self.test_suite_class.skip
+
 
 class Result(Enum):
 """The possible states that a setup, a teardown or a test case may end up 
in."""
@@ -86,12 +100,12 @@ class Result(Enum):
 #:
 ERROR = auto()
 #:
-SKIP = auto()
-#:
 BLOCK = auto()
+#:
+SKIP = auto()
 
 def __bool__(self) -> bool:
-"""Only PASS is True."""
+"""Only :attr:`PASS` is True."""
 return self is self.PASS
 
 
@@ -169,12 +183,13 @@ def update_setup(self, result: Result, error: Exception | 
None = None) -> N

[PATCH v3 05/12] dts: add support for simpler topologies

2024-08-21 Thread Juraj Linkeš
We currently assume there are two links between the SUT and TG nodes,
but that's too strict, even for some of the already existing test cases.
Add support for topologies with fewer than two links.

For topologies with no links, dummy ports are used. The expectation is
that test suites or cases that don't require any links won't be using
methods that use ports. Any test suites or cases requiring links will be
skipped in topologies with no links, but this feature is not implemented
in this commit.
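
A sketch of the idea, under the assumption that the topology type is
derived from the number of usable links (the real Topology class in this
patch carries more state):

from enum import IntEnum

class TopologyType(IntEnum):
    no_link = 0
    one_link = 1
    two_links = 2

def topology_type(num_links: int) -> TopologyType:
    # anything beyond two links still behaves as a two-link topology
    return TopologyType(min(num_links, 2))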

Signed-off-by: Juraj Linkeš 
---
 dts/framework/runner.py   |   6 +-
 dts/framework/test_suite.py   |  32 +++
 dts/framework/testbed_model/node.py   |   2 +-
 dts/framework/testbed_model/port.py   |   4 +-
 dts/framework/testbed_model/topology.py   | 101 ++
 dts/tests/TestSuite_pmd_buffer_scatter.py |   2 +-
 6 files changed, 120 insertions(+), 27 deletions(-)
 create mode 100644 dts/framework/testbed_model/topology.py

diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 55357ea1fe..48ae9cc215 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py
@@ -53,6 +53,7 @@
 TestSuiteWithCases,
 )
 from .test_suite import TestCase, TestSuite
+from .testbed_model.topology import Topology
 
 
 class DTSRunner:
@@ -474,6 +475,7 @@ def _run_test_suites(
 test_suites_with_cases: The test suites with test cases to run.
 """
 end_build_target = False
+topology = Topology(sut_node.ports, tg_node.ports)
 for test_suite_with_cases in test_suites_with_cases:
test_suite_result = build_target_result.add_test_suite(test_suite_with_cases)
 try:
@@ -481,6 +483,7 @@ def _run_test_suites(
 self._run_test_suite(
 sut_node,
 tg_node,
+topology,
 test_suite_result,
 test_suite_with_cases,
 )
@@ -506,6 +509,7 @@ def _run_test_suite(
 self,
 sut_node: SutNode,
 tg_node: TGNode,
+topology: Topology,
 test_suite_result: TestSuiteResult,
 test_suite_with_cases: TestSuiteWithCases,
 ) -> None:
@@ -533,7 +537,7 @@ def _run_test_suite(
 self._logger.set_stage(
 DtsStage.test_suite_setup, Path(SETTINGS.output_dir, 
test_suite_name)
 )
-test_suite = test_suite_with_cases.test_suite_class(sut_node, tg_node)
+test_suite = test_suite_with_cases.test_suite_class(sut_node, tg_node, 
topology)
 try:
 self._logger.info(f"Starting test suite setup: {test_suite_name}")
 test_suite.set_up_suite()
diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index c59fc9c6e6..56f153bda6 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -24,9 +24,10 @@
 from scapy.packet import Packet, Padding  # type: ignore[import-untyped]
 
 from framework.testbed_model.capability import TestProtocol
-from framework.testbed_model.port import Port, PortLink
+from framework.testbed_model.port import Port
 from framework.testbed_model.sut_node import SutNode
 from framework.testbed_model.tg_node import TGNode
+from framework.testbed_model.topology import Topology, TopologyType
from framework.testbed_model.traffic_generator.capturing_traffic_generator import (
 PacketFilteringConfig,
 )
@@ -72,7 +73,7 @@ class TestSuite(TestProtocol):
 #: will block the execution of all subsequent test suites in the current 
build target.
 is_blocking: ClassVar[bool] = False
 _logger: DTSLogger
-_port_links: list[PortLink]
+_topology_type: TopologyType
 _sut_port_ingress: Port
 _sut_port_egress: Port
 _sut_ip_address_ingress: Union[IPv4Interface, IPv6Interface]
@@ -86,6 +87,7 @@ def __init__(
 self,
 sut_node: SutNode,
 tg_node: TGNode,
+topology: Topology,
 ):
 """Initialize the test suite testbed information and basic 
configuration.
 
@@ -95,35 +97,21 @@ def __init__(
 Args:
 sut_node: The SUT node where the test suite will run.
 tg_node: The TG node where the test suite will run.
+topology: The topology where the test suite will run.
 """
 self.sut_node = sut_node
 self.tg_node = tg_node
 self._logger = get_dts_logger(self.__class__.__name__)
-self._port_links = []
-self._process_links()
-self._sut_port_ingress, self._tg_port_egress = (
-self._port_links[0].sut_port,
-self._port_links[0].tg_port,
-)
-self._sut_port_egress, self._tg_port_ingress = (
-self._port_links[1].sut_port,
-self._port_links[1].tg_port,
-)
+self._topology_type = topology.type
+self._tg_port_egress = topology.tg_port_egress
+self._sut_port_ingress = topology.sut_port_ingress

[PATCH v3 06/12] dst: add basic capability support

2024-08-21 Thread Juraj Linkeš
A test case or suite may require certain capabilities to be present in
the tested environment. Add the basic infrastructure for checking the
support status of capabilities:
* The Capability ABC defining the common capability API
* Extension of the TestProtocol with required capabilities (each test
  suite or case stores the capabilities it requires)
* Integration with the runner which calls the new APIs to get which
  capabilities are supported.

Signed-off-by: Juraj Linkeš 
---
 dts/framework/runner.py   |  26 +
 dts/framework/test_result.py  |  38 ++-
 dts/framework/test_suite.py   |   1 +
 dts/framework/testbed_model/capability.py | 117 +-
 4 files changed, 179 insertions(+), 3 deletions(-)

diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 48ae9cc215..43bb2bc830 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py
@@ -25,6 +25,7 @@
 from types import MethodType
 from typing import Iterable
 
+from framework.testbed_model.capability import Capability, get_supported_capabilities
 from framework.testbed_model.sut_node import SutNode
 from framework.testbed_model.tg_node import TGNode
 
@@ -452,6 +453,21 @@ def _run_build_target(
 self._logger.exception("Build target teardown failed.")
 build_target_result.update_teardown(Result.FAIL, e)
 
+def _get_supported_capabilities(
+self,
+sut_node: SutNode,
+topology_config: Topology,
+test_suites_with_cases: Iterable[TestSuiteWithCases],
+) -> set[Capability]:
+
+capabilities_to_check = set()
+for test_suite_with_cases in test_suites_with_cases:
+capabilities_to_check.update(test_suite_with_cases.required_capabilities)
+
+self._logger.debug(f"Found capabilities to check: {capabilities_to_check}")
+
+return get_supported_capabilities(sut_node, topology_config, capabilities_to_check)
+
 def _run_test_suites(
 self,
 sut_node: SutNode,
@@ -464,6 +480,12 @@ def _run_test_suites(
 The method assumes the build target we're testing has already been 
built on the SUT node.
 The current build target thus corresponds to the current DPDK build 
present on the SUT node.
 
+Before running any suites, the method determines whether they should be skipped
+by inspecting any required capabilities the test suite needs and comparing those
+to capabilities supported by the tested environment. If all capabilities are supported,
+the suite is run. If all test cases in a test suite would be skipped, the whole test suite
+is skipped (the setup and teardown is not run).
+
 If a blocking test suite (such as the smoke test suite) fails, the 
rest of the test suites
 in the current build target won't be executed.
 
@@ -476,7 +498,11 @@ def _run_test_suites(
 """
 end_build_target = False
 topology = Topology(sut_node.ports, tg_node.ports)
+supported_capabilities = self._get_supported_capabilities(
+sut_node, topology, test_suites_with_cases
+)
 for test_suite_with_cases in test_suites_with_cases:
+test_suite_with_cases.mark_skip_unsupported(supported_capabilities)
test_suite_result = build_target_result.add_test_suite(test_suite_with_cases)
 try:
 if not test_suite_with_cases.skip:
diff --git a/dts/framework/test_result.py b/dts/framework/test_result.py
index 306b100bc6..b4b58ef348 100644
--- a/dts/framework/test_result.py
+++ b/dts/framework/test_result.py
@@ -25,10 +25,12 @@
 
 import os.path
 from collections.abc import MutableSequence
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from enum import Enum, auto
 from typing import Union
 
+from framework.testbed_model.capability import Capability
+
 from .config import (
 OS,
 Architecture,
@@ -63,6 +65,12 @@ class is to hold a subset of test cases (which could be all test cases) because
 
 test_suite_class: type[TestSuite]
 test_cases: list[type[TestCase]]
+required_capabilities: set[Capability] = field(default_factory=set, init=False)
+
+def __post_init__(self):
+"""Gather the required capabilities of the test suite and all test 
cases."""
+for test_object in [self.test_suite_class] + self.test_cases:
+self.required_capabilities.update(test_object.required_capabilities)
 
 def create_config(self) -> TestSuiteConfig:
 """Generate a :class:`TestSuiteConfig` from the stored test suite with 
test cases.
@@ -75,6 +83,34 @@ def create_config(self) -> TestSuiteConfig:
 test_cases=[test_case.__name__ for test_case in self.test_cases],
 )
 
+def mark_skip_unsupported(self, supported_capabilities: set[Capability]) -> None:
+"""Mark the test suite and test cases to be skipped.
+
+The mark 

[PATCH v3 07/12] dts: add testpmd port information caching

2024-08-21 Thread Juraj Linkeš
When using port information multiple times during a testpmd shell
instance's lifespan, it's desirable not to fetch the information each
time, so caching is added. In case the information changes, there's a
way to force the update.
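
A usage sketch of the caching behavior (method names as in the diff below):

from framework.remote_session.testpmd_shell import TestPmdShell

def count_ports(testpmd: TestPmdShell) -> int:
    ports = testpmd.ports          # first access runs show_port_info_all()
    assert ports is testpmd.ports  # second access is served from the cache
    testpmd.show_port_info_all()   # forces a refresh of the cached list
    return len(testpmd.ports)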

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/testpmd_shell.py | 30 +--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index b4ad253020..f0bcc918e5 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -654,6 +654,7 @@ class TestPmdShell(DPDKShell):
 """
 
 _app_params: TestPmdParams
+_ports: list[TestPmdPort] | None
 
 #: The path to the testpmd executable.
 path: ClassVar[PurePath] = PurePath("app", "dpdk-testpmd")
@@ -686,6 +687,21 @@ def __init__(
 TestPmdParams(**app_params),
 name,
 )
+self._ports = None
+
+@property
+def ports(self) -> list[TestPmdPort]:
+"""The ports of the instance.
+
+This caches the ports returned by :meth:`show_port_info_all`.
+To force an update of port information, execute :meth:`show_port_info_all` or
+:meth:`show_port_info`.
+
+Returns: The list of known testpmd ports.
+"""
+if self._ports is None:
+return self.show_port_info_all()
+return self._ports
 
 def start(self, verify: bool = True) -> None:
 """Start packet forwarding with the current configuration.
@@ -872,7 +888,8 @@ def show_port_info_all(self) -> list[TestPmdPort]:
 # executed on a pseudo-terminal created by paramiko on the remote 
node, lines end with CRLF.
 # Therefore we also need to take the carriage return into account.
 iter = re.finditer(r"\*{21}.*?[\r\n]{4}", output + "\r\n", re.S)
-return [TestPmdPort.parse(block.group(0)) for block in iter]
+self._ports = [TestPmdPort.parse(block.group(0)) for block in iter]
+return self._ports
 
 def show_port_info(self, port_id: int) -> TestPmdPort:
 """Returns the given port information.
@@ -890,7 +907,16 @@ def show_port_info(self, port_id: int) -> TestPmdPort:
 if output.startswith("Invalid port"):
 raise InteractiveCommandExecutionError("invalid port given")
 
-return TestPmdPort.parse(output)
+port = TestPmdPort.parse(output)
+self._update_port(port)
+return port
+
+def _update_port(self, port: TestPmdPort) -> None:
+if self._ports:
+self._ports = [
+existing_port if port.id != existing_port.id else port
+for existing_port in self._ports
+]
 
 def show_port_stats_all(self) -> list[TestPmdPortStats]:
 """Returns the statistics of all the ports.
-- 
2.34.1



[PATCH v3 08/12] dts: add NIC capability support

2024-08-21 Thread Juraj Linkeš
Some test cases or suites may be testing a NIC feature that is not
supported on all NICs, so add support for marking test cases or suites
as requiring NIC capabilities.

The marking is done with a decorator, which populates the internal
required_capabilities attribute of TestProtocol. The NIC capability
itself is a wrapper around the NicCapability defined in testpmd_shell.
The reason is twofold:
1. Enums cannot be extended and the class implements the methods of its
   abstract base superclass,
2. The class also stores an optional decorator function which is used
   before/after capability retrieval. This is needed because some
   capabilities may be advertised differently under different
   configurations.

The decorator API is designed to be simple to use. The arguments passed
to it are all from the testpmd shell. Everything else (even the actual
capability object creation) is done internally.

Signed-off-by: Juraj Linkeš 
---
Depends-on: patch-142276 ("dts: add methods for modifying MTU to testpmd
shell")
---
 dts/framework/remote_session/testpmd_shell.py | 178 -
 dts/framework/testbed_model/capability.py | 180 +-
 dts/tests/TestSuite_pmd_buffer_scatter.py |   2 +
 3 files changed, 356 insertions(+), 4 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index f0bcc918e5..48c31124d1 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -16,11 +16,17 @@
 
 import re
 import time
-from collections.abc import Callable
+from collections.abc import Callable, MutableSet
 from dataclasses import dataclass, field
 from enum import Flag, auto
+from functools import partial
 from pathlib import PurePath
-from typing import ClassVar
+from typing import TYPE_CHECKING, Any, ClassVar, TypeAlias
+
+if TYPE_CHECKING:
+from enum import Enum as NoAliasEnum
+else:
+from aenum import NoAliasEnum
 
 from typing_extensions import Self, TypeVarTuple, Unpack
 
@@ -34,6 +40,16 @@
 from framework.testbed_model.sut_node import SutNode
 from framework.utils import StrEnum
 
+TestPmdShellCapabilityMethod: TypeAlias = Callable[
+["TestPmdShell", MutableSet["NicCapability"], 
MutableSet["NicCapability"]], None
+]
+
+TestPmdShellSimpleMethod: TypeAlias = Callable[["TestPmdShell"], Any]
+
+TestPmdShellDecoratedMethod: TypeAlias = Callable[["TestPmdShell"], None]
+
+TestPmdShellDecorator: TypeAlias = Callable[[TestPmdShellSimpleMethod], TestPmdShellDecoratedMethod]
+
 
 class TestPmdDevice:
 """The data of a device that testpmd can recognize.
@@ -377,6 +393,71 @@ def _validate(info: str):
 return TextParser.wrap(TextParser.find(r"Device private 
info:\s+([\s\S]+)"), _validate)
 
 
+class RxQueueState(StrEnum):
+"""RX queue states."""
+
+#:
+stopped = auto()
+#:
+started = auto()
+#:
+hairpin = auto()
+#:
+unknown = auto()
+
+@classmethod
+def make_parser(cls) -> ParserFn:
+"""Makes a parser function.
+
+Returns:
+ParserFn: A dictionary for the `dataclasses.field` metadata 
argument containing a
+parser function that makes an instance of this enum from text.
+"""
+return TextParser.wrap(TextParser.find(r"Rx queue state: ([^\r\n]+)"), 
cls)
+
+
+@dataclass
+class TestPmdRxqInfo(TextParser):
+"""Representation of testpmd's ``show rxq info  `` 
command."""
+
+#:
+port_id: int = field(metadata=TextParser.find_int(r"Infos for port (\d+)\b 
?, RX queue \d+\b"))
+#:
+queue_id: int = field(metadata=TextParser.find_int(r"Infos for port \d+\b 
?, RX queue (\d+)\b"))
+#: Mempool used by that queue
+mempool: str = field(metadata=TextParser.find(r"Mempool: ([^\r\n]+)"))
+#: Ring prefetch threshold
+rx_prefetch_threshold: int = field(
+metadata=TextParser.find_int(r"RX prefetch threshold: (\d+)\b")
+)
+#: Ring host threshold
+rx_host_threshold: int = field(metadata=TextParser.find_int(r"RX host 
threshold: (\d+)\b"))
+#: Ring writeback threshold
+rx_writeback_threshold: int = field(
+metadata=TextParser.find_int(r"RX writeback threshold: (\d+)\b")
+)
+#: Drives the freeing of Rx descriptors
+rx_free_threshold: int = field(metadata=TextParser.find_int(r"RX free 
threshold: (\d+)\b"))
+#: Drop packets if no descriptors are available
+rx_drop_packets: bool = field(metadata=TextParser.find(r"RX drop packets: 
on"))
+#: Do not start queue with rte_eth_dev_start()
+rx_deferred_start: bool = field(metadata=TextParser.find(r"RX deferred 
start: on"))
+#: Scattered packets Rx enabled
+rx_scattered_packets: bool = field(metadata=TextParser.find(r"RX scattered 
packets: on"))
+#: The state of the queue
+rx_queue_state: str = field(metadata=RxQueueState.make_parser())
+#: Configured number of RXDs
+number_of_rxds: int = field(metadata=TextParser.find_int

[PATCH v3 09/12] dts: add topology capability

2024-08-21 Thread Juraj Linkeš
Add support for marking test cases as requiring a certain topology. The
default topology is a two link topology and the other supported
topologies are one link and no link topologies.

The TestProtocol of test suites and cases is extended with the topology
type each test suite or case requires. Each test case starts out as
requiring a two-link topology and can be marked as requiring a
topology directly (by decorating the test case) or through its test
suite. If a test suite is decorated as requiring a certain topology, all
its test cases are marked as such. If both test suite and a test case
are decorated as requiring a topology, the test case cannot require a
more complex topology than the whole suite (but it can require a less
complex one). If a test suite is not decorated, this has no effect on
required test case topology.

Since the default topology is defined as a reference to one of the
actual topologies, the NoAliasEnum from the aenum package is utilized,
which removes the aliasing of Enums so that TopologyType.two_links and
TopologyType.default are distinct. This is needed to distinguish between
a user passed value and the default value being used (which is used when
a test suite is or isn't decorated).
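
A minimal sketch of the distinction (the values are illustrative; what
matters is that default and two_links share a value yet stay distinct):

from aenum import NoAliasEnum

class TopologyType(NoAliasEnum):
    no_link = 0
    one_link = 1
    two_links = 2
    default = 2  # would be an alias of two_links under enum.Enum

assert TopologyType.default is not TopologyType.two_links
assert TopologyType.default.value == TopologyType.two_links.value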

Signed-off-by: Juraj Linkeš 
---
 dts/framework/test_suite.py   |   6 +-
 dts/framework/testbed_model/capability.py | 182 +-
 dts/framework/testbed_model/topology.py   |  35 -
 dts/tests/TestSuite_hello_world.py|   2 +
 dts/tests/TestSuite_pmd_buffer_scatter.py |   8 +-
 dts/tests/TestSuite_smoke_tests.py|   2 +
 6 files changed, 217 insertions(+), 18 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 5c393ce8bf..51f49bd601 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -27,7 +27,7 @@
 from framework.testbed_model.port import Port
 from framework.testbed_model.sut_node import SutNode
 from framework.testbed_model.tg_node import TGNode
-from framework.testbed_model.topology import Topology, TopologyType
+from framework.testbed_model.topology import Topology
 from framework.testbed_model.traffic_generator.capturing_traffic_generator 
import (
 PacketFilteringConfig,
 )
@@ -73,7 +73,6 @@ class TestSuite(TestProtocol):
 #: will block the execution of all subsequent test suites in the current 
build target.
 is_blocking: ClassVar[bool] = False
 _logger: DTSLogger
-_topology_type: TopologyType
 _sut_port_ingress: Port
 _sut_port_egress: Port
 _sut_ip_address_ingress: Union[IPv4Interface, IPv6Interface]
@@ -102,7 +101,6 @@ def __init__(
 self.sut_node = sut_node
 self.tg_node = tg_node
 self._logger = get_dts_logger(self.__class__.__name__)
-self._topology_type = topology.type
 self._tg_port_egress = topology.tg_port_egress
 self._sut_port_ingress = topology.sut_port_ingress
 self._sut_port_egress = topology.sut_port_egress
@@ -468,6 +466,8 @@ def _decorator(func: TestSuiteMethodType) -> type[TestCase]:
 test_case.skip = cls.skip
 test_case.skip_reason = cls.skip_reason
 test_case.required_capabilities = set()
+test_case.topology_type = cls.topology_type
+test_case.topology_type.add_to_required(test_case)
 test_case.test_type = test_case_type
 return test_case
 
diff --git a/dts/framework/testbed_model/capability.py 
b/dts/framework/testbed_model/capability.py
index 9a79e6ebb3..998efa95d2 100644
--- a/dts/framework/testbed_model/capability.py
+++ b/dts/framework/testbed_model/capability.py
@@ -7,11 +7,29 @@
 and support for test environment capabilities.
 
 Many test cases are testing features not available on all hardware.
+On the other hand, some test cases or suites may not need the most complex 
topology available.
 
-The module also allows developers to mark test cases or suites a requiring 
certain
-hardware capabilities with the :func:`requires` decorator.
+The module allows developers to mark test cases or suites as requiring certain hardware capabilities
+or a particular topology with the :func:`requires` decorator.
+
+There are differences between hardware and topology capabilities:
+
+* Hardware capabilities are assumed to not be required when not specified.
+* However, some topology is always available, so each test case or suite 
is assigned
+  a default topology if no topology is specified in the decorator.
+
+Examples:
+.. code:: python
+
+from framework.test_suite import TestSuite, func_test
+from framework.testbed_model.capability import TopologyType, requires
+# The whole test suite (each test case within) doesn't require any links.
+@requires(topology_type=TopologyType.no_link)
+@func_test
+class TestHelloWorld(TestSuite):
+def hello_world_single_core(self):
+...
 
-Example:
 .. code:: python
 
 f

[PATCH v3 10/12] doc: add DTS capability doc sources

2024-08-21 Thread Juraj Linkeš
Add new files to generate DTS API documentation from.

Signed-off-by: Juraj Linkeš 
---
 doc/api/dts/framework.testbed_model.capability.rst | 6 ++
 doc/api/dts/framework.testbed_model.rst| 2 ++
 doc/api/dts/framework.testbed_model.topology.rst   | 6 ++
 3 files changed, 14 insertions(+)
 create mode 100644 doc/api/dts/framework.testbed_model.capability.rst
 create mode 100644 doc/api/dts/framework.testbed_model.topology.rst

diff --git a/doc/api/dts/framework.testbed_model.capability.rst 
b/doc/api/dts/framework.testbed_model.capability.rst
new file mode 100644
index 00..326fed9104
--- /dev/null
+++ b/doc/api/dts/framework.testbed_model.capability.rst
@@ -0,0 +1,6 @@
+capability - Testbed Capabilities
+=
+
+.. automodule:: framework.testbed_model.capability
+   :members:
+   :show-inheritance:
diff --git a/doc/api/dts/framework.testbed_model.rst 
b/doc/api/dts/framework.testbed_model.rst
index 4b024e47e6..e1f9543b28 100644
--- a/doc/api/dts/framework.testbed_model.rst
+++ b/doc/api/dts/framework.testbed_model.rst
@@ -21,6 +21,8 @@ testbed\_model - Testbed Modelling Package
framework.testbed_model.node
framework.testbed_model.sut_node
framework.testbed_model.tg_node
+   framework.testbed_model.capability
framework.testbed_model.cpu
framework.testbed_model.port
+   framework.testbed_model.topology
framework.testbed_model.virtual_device
diff --git a/doc/api/dts/framework.testbed_model.topology.rst 
b/doc/api/dts/framework.testbed_model.topology.rst
new file mode 100644
index 00..f3d9d1f418
--- /dev/null
+++ b/doc/api/dts/framework.testbed_model.topology.rst
@@ -0,0 +1,6 @@
+topology - Testbed Topology
+===
+
+.. automodule:: framework.testbed_model.topology
+   :members:
+   :show-inheritance:
-- 
2.34.1



[PATCH v3 11/12] dts: add Rx offload capabilities

2024-08-21 Thread Juraj Linkeš
The scatter Rx offload capability is needed for the pmd_buffer_scatter
test suite. The command that retrieves the capability is:
show port <port_id> rx_offload capabilities

The command also retrieves a lot of other capabilities (RX_OFFLOAD_*)
which are all added into a Flag. The Flag members correspond to NIC
capability names so a convenience function that looks for the supported
Flags in a testpmd output is also added.
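
For example, given a capabilities line as printed by testpmd, the helper
in the diff below maps each name onto a flag (a hedged sketch):

line = "VLAN_STRIP IPV4_CKSUM UDP_CKSUM TCP_CKSUM SCATTER"
caps = RxOffloadCapability.from_string(line)
assert RxOffloadCapability.RX_OFFLOAD_SCATTER in caps
assert RxOffloadCapability.RX_OFFLOAD_TIMESTAMP not in caps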

The NIC capability names (mentioned above) are copy-pasted from the
Flag. Dynamic addition of Enum members runs into problems with typing
(mypy doesn't know about the members) and documentation generation
(Sphinx doesn't know about the members).

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/testpmd_shell.py | 213 ++
 dts/tests/TestSuite_pmd_buffer_scatter.py |   1 +
 2 files changed, 214 insertions(+)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 48c31124d1..f83569669e 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -659,6 +659,103 @@ class TestPmdPortStats(TextParser):
 tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)"))
 
 
+class RxOffloadCapability(Flag):
+"""Rx offload capabilities of a device."""
+
+#:
+RX_OFFLOAD_VLAN_STRIP = auto()
+#: Device supports L3 checksum offload.
+RX_OFFLOAD_IPV4_CKSUM = auto()
+#: Device supports L4 checksum offload.
+RX_OFFLOAD_UDP_CKSUM = auto()
+#: Device supports L4 checksum offload.
+RX_OFFLOAD_TCP_CKSUM = auto()
+#: Device supports Large Receive Offload.
+RX_OFFLOAD_TCP_LRO = auto()
+#: Device supports QinQ (queue in queue) offload.
+RX_OFFLOAD_QINQ_STRIP = auto()
+#: Device supports inner packet L3 checksum.
+RX_OFFLOAD_OUTER_IPV4_CKSUM = auto()
+#: Device supports MACsec.
+RX_OFFLOAD_MACSEC_STRIP = auto()
+#: Device supports filtering of a VLAN Tag identifier.
+RX_OFFLOAD_VLAN_FILTER = 1 << 9
+#: Device supports VLAN offload.
+RX_OFFLOAD_VLAN_EXTEND = auto()
+#: Device supports receiving segmented mbufs.
+RX_OFFLOAD_SCATTER = 1 << 13
+#: Device supports Timestamp.
+RX_OFFLOAD_TIMESTAMP = auto()
+#: Device supports crypto processing while packet is received in NIC.
+RX_OFFLOAD_SECURITY = auto()
+#: Device supports CRC stripping.
+RX_OFFLOAD_KEEP_CRC = auto()
+#: Device supports L4 checksum offload.
+RX_OFFLOAD_SCTP_CKSUM = auto()
+#: Device supports inner packet L4 checksum.
+RX_OFFLOAD_OUTER_UDP_CKSUM = auto()
+#: Device supports RSS hashing.
+RX_OFFLOAD_RSS_HASH = auto()
+#: Device supports buffer split.
+RX_OFFLOAD_BUFFER_SPLIT = auto()
+#: Device supports all checksum capabilities.
+RX_OFFLOAD_CHECKSUM = RX_OFFLOAD_IPV4_CKSUM | RX_OFFLOAD_UDP_CKSUM | 
RX_OFFLOAD_TCP_CKSUM
+#: Device supports all VLAN capabilities.
+RX_OFFLOAD_VLAN = (
+RX_OFFLOAD_VLAN_STRIP
+| RX_OFFLOAD_VLAN_FILTER
+| RX_OFFLOAD_VLAN_EXTEND
+| RX_OFFLOAD_QINQ_STRIP
+)
+
+@classmethod
+def from_string(cls, line: str) -> Self:
+"""Make an instance from a string containing the flag names separated 
with a space.
+
+Args:
+line: The line to parse.
+
+Returns:
+A new instance containing all found flags.
+"""
+flag = cls(0)
+for flag_name in line.split():
+flag |= cls[f"RX_OFFLOAD_{flag_name}"]
+return flag
+
+@classmethod
+def make_parser(cls, per_port: bool) -> ParserFn:
+"""Make a parser function.
+
+Args:
+per_port: If :data:`True`, will return capabilities per port. If 
:data:`False`,
+will return capabilities per queue.
+
+Returns:
+ParserFn: A dictionary for the `dataclasses.field` metadata 
argument containing a
+parser function that makes an instance of this flag from text.
+"""
+granularity = "Port" if per_port else "Queue"
+return TextParser.wrap(
+TextParser.find(rf"Per {granularity}\s+:(.*)$", re.MULTILINE),
+cls.from_string,
+)
+
+
+@dataclass
+class RxOffloadCapabilities(TextParser):
+"""The result of testpmd's ``show port  rx_offload capabilities`` 
command."""
+
+#:
+port_id: int = field(
+metadata=TextParser.find_int(r"Rx Offloading Capabilities of port 
(\d+) :")
+)
+#: Per-queue Rx offload capabilities.
+per_queue: RxOffloadCapability = 
field(metadata=RxOffloadCapability.make_parser(False))
+#: Capabilities other than per-queue Rx offload capabilities.
+per_port: RxOffloadCapability = 
field(metadata=RxOffloadCapability.make_parser(True))
+
+
 T = TypeVarTuple("T")  # type: ignore[misc]
 
 
@@ -1048,6 +1145,42 @@ def _close(self) -> None:
 == Capability retrieval methods ==
 """
 
+def get_capabili

[PATCH v3 12/12] dts: add NIC capabilities from show port info

2024-08-21 Thread Juraj Linkeš
Add the capabilities advertised by the testpmd command "show port info"
so that test cases may be marked as requiring those capabilities:
RUNTIME_RX_QUEUE_SETUP
RUNTIME_TX_QUEUE_SETUP
RXQ_SHARE
FLOW_RULE_KEEP
FLOW_SHARED_OBJECT_KEEP

These names are copy-pasted from the existing DeviceCapabilitiesFlag
class. Dynamic addition of Enum members runs into problems with typing
(mypy doesn't know about the members) and documentation generation
(Sphinx doesn't know about the members).
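
For context, a hedged sketch of how a test case would consume one of these
members through the capability framework added earlier in this series (the
exact decorator signature is assumed from patch 09/12):

    from framework.test_suite import func_test
    from framework.testbed_model.capability import requires

    @requires(NicCapability.RUNTIME_RX_QUEUE_SETUP)
    @func_test
    def test_reconfigure_rx_queue(self):
        ...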

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/testpmd_shell.py | 36 +++
 1 file changed, 36 insertions(+)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index f83569669e..166ffc827e 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -1200,6 +1200,24 @@ def get_capabilities_rxq_info(
 else:
 unsupported_capabilities.add(NicCapability.SCATTERED_RX_ENABLED)
 
+def get_capabilities_show_port_info(
+self,
+supported_capabilities: MutableSet["NicCapability"],
+unsupported_capabilities: MutableSet["NicCapability"],
+) -> None:
+"""Get all capabilities from show port info and divide them into 
supported and unsupported.
+
+Args:
+supported_capabilities: Supported capabilities will be added to 
this set.
+unsupported_capabilities: Unsupported capabilities will be added 
to this set.
+"""
+self._update_capabilities_from_flag(
+supported_capabilities,
+unsupported_capabilities,
+DeviceCapabilitiesFlag,
+self.ports[0].device_capabilities,
+)
+
 """
 == Decorator methods ==
 """
@@ -1332,6 +1350,24 @@ class NicCapability(NoAliasEnum):
 RX_OFFLOAD_VLAN: TestPmdShellCapabilityMethod = partial(
 TestPmdShell.get_capabilities_rx_offload
 )
+#: Device supports Rx queue setup after device started.
+RUNTIME_RX_QUEUE_SETUP: TestPmdShellCapabilityMethod = partial(
+TestPmdShell.get_capabilities_show_port_info
+)
+#: Device supports Tx queue setup after device started.
+RUNTIME_TX_QUEUE_SETUP: TestPmdShellCapabilityMethod = partial(
+TestPmdShell.get_capabilities_show_port_info
+)
+#: Device supports shared Rx queue among ports within Rx domain and switch 
domain.
+RXQ_SHARE: TestPmdShellCapabilityMethod = 
partial(TestPmdShell.get_capabilities_show_port_info)
+#: Device supports keeping flow rules across restart.
+FLOW_RULE_KEEP: TestPmdShellCapabilityMethod = partial(
+TestPmdShell.get_capabilities_show_port_info
+)
+#: Device supports keeping shared flow objects across restart.
+FLOW_SHARED_OBJECT_KEEP: TestPmdShellCapabilityMethod = partial(
+TestPmdShell.get_capabilities_show_port_info
+)
 
 def __call__(
 self,
-- 
2.34.1



Re: [PATCH v2 3/4] zsda: add support for queue operation

2024-08-21 Thread Stephen Hemminger
On Thu, 15 Aug 2024 10:54:38 +0800
Hanxiao Li  wrote:

> +uint16_t zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, const 
> uint16_t nb_ops);
> +uint16_t zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const 
> uint16_t nb_ops);
> +
> +void tx_write_tail(struct zsda_queue *queue);
> +int zsda_queue_pair_setup(uint32_t dev_id, struct zsda_qp **qp_addr,
> +   const uint16_t queue_pair_id,
> +   const struct zsda_qp_config *zsda_qp_conf);
> +
> +int zsda_queue_pair_release(struct zsda_qp **qp_addr);
> +int zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset,
> +   struct zsda_sgl *sgl, const phys_addr_t sgl_phy_addr,
> +   uint32_t remain_len, struct comp_head_info *comp_head_info);
> +
> +int zsda_get_sgl_num(const struct zsda_sgl *sgl);
> +int zsda_sgl_opt_addr_lost(struct rte_mbuf *mbuf);
> +
> +int find_next_free_cookie(const struct zsda_queue *queue, void **op_cookie,
> +   uint16_t *idx);
> +int common_setup_qp(uint32_t dev_id, struct zsda_qp **qp_addr,
> + const uint16_t queue_pair_id,
> + const struct zsda_qp_config *conf);
> +

Since these are public but not exported functions, best to prefix
them with zsda_ to avoid any name conflicts.


Re: [PATCH v2 1/3] app/testpmd: add register keyword

2024-08-21 Thread Stephen Hemminger
On Wed, 21 Aug 2024 20:08:55 +0530
Vipin Varghese  wrote:

> diff --git a/app/test-pmd/macswap_sse.h b/app/test-pmd/macswap_sse.h
> index 223f87a539..29088843b7 100644
> --- a/app/test-pmd/macswap_sse.h
> +++ b/app/test-pmd/macswap_sse.h
> @@ -16,13 +16,13 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
>   uint64_t ol_flags;
>   int i;
>   int r;
> - __m128i addr0, addr1, addr2, addr3;
> + register __m128i addr0, addr1, addr2, addr3;

Some compilers treat register as a no-op. Are you sure? Did you check with 
godbolt?


[PATCH v19 1/5] dts: update params and parser docstrings

2024-08-21 Thread Juraj Linkeš
Address a few errors reported by Sphinx when generating documentation:
framework/params/__init__.py:docstring of framework.params.modify_str:3:
WARNING: Inline interpreted text or phrase reference start-string
without end-string.
framework/params/eal.py:docstring of framework.params.eal.EalParams:35:
WARNING: Definition list ends without a blank line; unexpected
unindent.
framework/params/types.py:docstring of framework.params.types:8:
WARNING: Inline strong start-string without end-string.
framework/params/types.py:docstring of framework.params.types:9:
WARNING: Inline strong start-string without end-string.
framework/parser.py:docstring of framework.parser.TextParser:33: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:43: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:49: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:8:
ERROR: Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:9:
WARNING: Block quote ends without a blank line; unexpected unindent.

Fixes: 87ba4cdc0dbb ("dts: use Unpack for type checking and hinting")
Fixes: d70159cb62f5 ("dts: add params manipulation module")
Fixes: 967fc62b0a43 ("dts: refactor EAL parameters class")
Fixes: 818fe14e3422 ("dts: add parsing utility module")
Cc: luca.vizza...@arm.com

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
---
 dts/framework/params/__init__.py | 4 ++--
 dts/framework/params/eal.py  | 7 +--
 dts/framework/params/types.py| 3 ++-
 dts/framework/parser.py  | 4 ++--
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/dts/framework/params/__init__.py b/dts/framework/params/__init__.py
index 5a6fd93053..1ae227d7b4 100644
--- a/dts/framework/params/__init__.py
+++ b/dts/framework/params/__init__.py
@@ -53,9 +53,9 @@ def reduced_fn(value):
 
 
 def modify_str(*funcs: FnPtr) -> Callable[[T], T]:
-"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
+r"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
 
-The :attr:`FnPtr`s fed to the decorator are executed from left to right in 
the arguments list
+The :attr:`FnPtr`\s fed to the decorator are executed from left to right 
in the arguments list
 order.
 
 Args:
diff --git a/dts/framework/params/eal.py b/dts/framework/params/eal.py
index 8d7766fefc..cf1594353a 100644
--- a/dts/framework/params/eal.py
+++ b/dts/framework/params/eal.py
@@ -26,13 +26,16 @@ class EalParams(Params):
 prefix: Set the file prefix string with which to start DPDK, e.g.: 
``prefix="vf"``.
 no_pci: Switch to disable PCI bus, e.g.: ``no_pci=True``.
 vdevs: Virtual devices, e.g.::
+
 vdevs=[
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+
 ports: The list of ports to allow.
-other_eal_param: user defined DPDK EAL parameters, e.g.:
-``other_eal_param='--single-file-segments'``
+other_eal_param: user defined DPDK EAL parameters, e.g.::
+
+``other_eal_param='--single-file-segments'``
 """
 
 lcore_list: LogicalCoreList | None = field(default=None, 
metadata=Params.short("l"))
diff --git a/dts/framework/params/types.py b/dts/framework/params/types.py
index e668f658d8..d77c4625fb 100644
--- a/dts/framework/params/types.py
+++ b/dts/framework/params/types.py
@@ -6,7 +6,8 @@
 TypedDicts can be used in conjunction with Unpack and kwargs for type hinting 
on function calls.
 
 Example:
-..code:: python
+.. code:: python
+
 def create_testpmd(**kwargs: Unpack[TestPmdParamsDict]):
 params = TestPmdParams(**kwargs)
 """
diff --git a/dts/framework/parser.py b/dts/framework/parser.py
index 741dfff821..7254c75b71 100644
--- a/dts/framework/parser.py
+++ b/dts/framework/parser.py
@@ -46,7 +46,7 @@ class TextParser(ABC):
 Example:
 The following example makes use of and demonstrates every parser 
function available:
 
-..code:: python
+.. code:: python
 
 from dataclasses import dataclass, field
 from enum import Enum
@@ -90,7 +90,7 @@ def wrap(parser_fn: ParserFn, wrapper_fn: Callable) -> 
ParserFn:
 """Makes a wrapped parser function.
 
 `parser_fn` is called and if a non-None value is returned, 
`wrapper_function` is called with
-it. Otherwise the function returns early with None. In pseudo-code:
+it. Otherwise the function returns early with None. In pseudo-code::
 
 intermediate_value := parser_fn(input)
 if intermediary_value is None then
-- 
2.34.1



[PATCH v19 0/5] DTS API docs generation

2024-08-21 Thread Juraj Linkeš
The generation is done with Sphinx, which DPDK already uses, with
slightly modified configuration of the sidebar present in an if block.

DTS dependencies do not need to be installed, but there is the option to
install doc build dependencies with Poetry:
poetry install --with docs

The build itself may be run with:
meson setup <meson build dir> -Denable_docs=true
ninja -C <meson build dir>

The above will do a full DPDK build with docs. To build just docs:
meson setup <meson build dir>
ninja -C <meson build dir> doc

Python3.10 is required to build the DTS API docs.

The patchset contains the .rst sources which Sphinx uses to generate the
html pages. These were first generated with the sphinx-apidoc utility
and modified to provide a better look. The documentation just doesn't
look that good without the modifications and there isn't enough
configuration options to achieve that without manual changes to the .rst
files. This introduces extra maintenance which involves adding new .rst
files when a new Python module is added or changing the .rst structure
if the Python directory/file structure is changed (moved, renamed
files). This small maintenance burden is outweighed by the flexibility
afforded by the ability to make manual changes to the .rst files.

v10:
Fix dts doc generation issue: Only copy the custom rss file if it exists.

v11:
Added the config option autodoc_mock_imports, which eliminates the need
for DTS dependencies. Added a script that finds out which imports need to
be added to autodoc_mock_imports. The script also checks the required
Python version for building DTS docs.
Removed tags from the two affected patches which will need to be
reviewed again.

v12:
Added paramiko to the required dependencies of get-dts-deps.py.

v13:
Fixed build error:
TypeError: unsupported operand type(s) for |: 'NoneType' and 'Transport'

v14:
Fixed install error:
ERROR: File 'dts/doc/html' could not be found
This required me to put the built docs into dts/doc which is outside the
DPDK API doc dir, resulting in linking between DPDK and DTS api docs not
working properly. I addressed this by adding a symlink to the build dir.
This way the link works after installing the docs and the symlink is
just one extra file in the build dir.

v15:
Moved DTS API sources to doc/api/dts. This simplifies a lot of things in
the build, but mainly makes a lot of sense. Now the source, build and
install paths are the same so there isn't any need for any symlinks or
other workarounds.
Also added a symlink to the custom.css file so that it works with
call-sphinx-build.py without any modifications.

v16:
Renamed the dependency python file to get-dts-runtime-deps.py and modified
it to only get runtime dependencies. We don't need to check docs
dependencies (Sphinx) as we don't need to mock those.
Also moved all new Sphinx configuration into the DTS if branch to make
sure it won't ever affect the DPDK doc build.

v17:
Removed the dts-doc build target to mirror the functionality of using
-Denable_docs=true.
Moved DTS-specific meson build code to doc/api/dts/meson.build.
Added comments to get_missing_imports() and the top level docstring of
get-dts-runtime-deps.py to explain the function is there to be imported.

v18:
Added PyYAML to get-dts-runtime-deps.py.

v19:
Fixed a regression in get-dts-runtime-deps.py introduced in v18:
AttributeError: 'dict' object has no attribute 'strip'

Juraj Linkeš (5):
  dts: update params and parser docstrings
  dts: replace the or operator in third party types
  dts: add doc generation dependencies
  dts: add API doc sources
  dts: add API doc generation

 buildtools/call-sphinx-build.py   |   2 +
 buildtools/get-dts-runtime-deps.py|  95 
 buildtools/meson.build|   1 +
 doc/api/doxy-api-index.md |   3 +
 doc/api/doxy-api.conf.in  |   2 +
 doc/api/dts/conf_yaml_schema.json |   1 +
 doc/api/dts/custom.css|   1 +
 doc/api/dts/framework.config.rst  |  12 +
 doc/api/dts/framework.config.types.rst|   6 +
 doc/api/dts/framework.exception.rst   |   6 +
 doc/api/dts/framework.logger.rst  |   6 +
 doc/api/dts/framework.params.eal.rst  |   6 +
 doc/api/dts/framework.params.rst  |  14 +
 doc/api/dts/framework.params.testpmd.rst  |   6 +
 doc/api/dts/framework.params.types.rst|   6 +
 doc/api/dts/framework.parser.rst  |   6 +
 .../framework.remote_session.dpdk_shell.rst   |   6 +
 ...ote_session.interactive_remote_session.rst |   6 +
 ...ework.remote_session.interactive_shell.rst |   6 +
 .../framework.remote_session.python_shell.rst |   6 +
 ...ramework.remote_session.remote_session.rst |   6 +
 doc/api/dts/framework.remote_session.rst  |  18 +
 .../framework.remote_session.ssh_session.rst  |   6 +
 ...framework.remote_session.testpmd_shell.rst |   6 +
 doc/api/dts/framework.runner.rst  |   6 +
 doc/api/dts/framework.settings.rst|   6 +
 doc/api/dts/framework

[PATCH v19 3/5] dts: add doc generation dependencies

2024-08-21 Thread Juraj Linkeš
Sphinx imports every Python module (through the autodoc extension)
when generating documentation from docstrings, meaning all DTS
dependencies, including Python version, should be satisfied. This is not
a hard requirement, as imports from dependencies may be mocked in the
autodoc_mock_imports autodoc option.
In case DTS developers want to use a Sphinx installation from their
virtualenv, we provide an optional Poetry group for doc generation. The
pyelftools package is there so that meson picks up the correct Python
installation, as pyelftools is required by the build system.
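
For reference, a minimal sketch of the mocking mechanism mentioned above;
the module names listed here are placeholder examples, the real list is
derived from dts/pyproject.toml at build time:

    # conf.py (Sphinx)
    extensions = ["sphinx.ext.autodoc"]
    autodoc_mock_imports = ["paramiko", "yaml", "scapy"]  # unimportable deps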

Signed-off-by: Juraj Linkeš 
Reviewed-by: Jeremy Spewock 
---
 dts/poetry.lock| 521 +++--
 dts/pyproject.toml |   8 +
 2 files changed, 517 insertions(+), 12 deletions(-)

diff --git a/dts/poetry.lock b/dts/poetry.lock
index 5f8fa03933..2dd8bad498 100644
--- a/dts/poetry.lock
+++ b/dts/poetry.lock
@@ -1,5 +1,16 @@
 # This file is automatically @generated by Poetry 1.8.2 and should not be 
changed by hand.
 
+[[package]]
+name = "alabaster"
+version = "0.7.13"
+description = "A configurable sidebar-enabled Sphinx theme"
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "alabaster-0.7.13-py3-none-any.whl", hash = 
"sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
+{file = "alabaster-0.7.13.tar.gz", hash = 
"sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+]
+
 [[package]]
 name = "attrs"
 version = "23.1.0"
@@ -18,6 +29,23 @@ docs = ["furo", "myst-parser", "sphinx", 
"sphinx-notfound-page", "sphinxcontrib-
 tests = ["attrs[tests-no-zope]", "zope-interface"]
 tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", 
"pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
 
+[[package]]
+name = "babel"
+version = "2.13.1"
+description = "Internationalization utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+{file = "Babel-2.13.1-py3-none-any.whl", hash = 
"sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
+{file = "Babel-2.13.1.tar.gz", hash = 
"sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+]
+
+[package.dependencies]
+setuptools = {version = "*", markers = "python_version >= \"3.12\""}
+
+[package.extras]
+dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
+
 [[package]]
 name = "bcrypt"
 version = "4.0.1"
@@ -86,6 +114,17 @@ d = ["aiohttp (>=3.7.4)"]
 jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
 uvloop = ["uvloop (>=0.15.2)"]
 
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "certifi-2023.7.22-py3-none-any.whl", hash = 
"sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+{file = "certifi-2023.7.22.tar.gz", hash = 
"sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
 [[package]]
 name = "cffi"
 version = "1.15.1"
@@ -162,6 +201,105 @@ files = [
 [package.dependencies]
 pycparser = "*"
 
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and 
actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+{file = "charset-normalizer-3.3.2.tar.gz", hash = 
"sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", 
hash = 
"sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = 
"sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash 
= "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl",
 hash = 
"sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl",
 hash = 
"sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl",
 hash = 
"sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl",
 hash = 
"sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl",
 hash = 
"sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+{file = "char

[PATCH v19 2/5] dts: replace the or operator in third party types

2024-08-21 Thread Juraj Linkeš
When the DTS dependencies are not installed when building DTS API
documentation, the or operator produces errors when used with types from
those libraries:
autodoc: failed to import module 'remote_session' from module
'framework'; the following exception was raised:
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for |: 'Transport' and 'NoneType'

The third-party type here is Transport from the paramiko library.
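
A self-contained sketch of the failure mode, using a simplified stand-in
for autodoc's mock object (the real one lives in sphinx.ext.autodoc.mock):

    from typing import Union

    class _FakeMock:
        """Callable, attribute-permissive stand-in for a mocked import."""
        def __call__(self, *args, **kwargs):
            return self

    Transport = _FakeMock()  # what a mocked "from paramiko import Transport" yields
    try:
        _ = Transport | None  # PEP 604 union calls Transport.__or__ at import time
    except TypeError as exc:
        print(exc)  # unsupported operand type(s) for |: '_FakeMock' and 'NoneType'
    _ = Union[Transport, None]  # typing.Union accepts any callable; no __or__ needed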

Signed-off-by: Juraj Linkeš 
Reviewed-by: Jeremy Spewock 
---
 dts/framework/remote_session/interactive_remote_session.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/remote_session/interactive_remote_session.py 
b/dts/framework/remote_session/interactive_remote_session.py
index 97194e6af8..4605ee14b4 100644
--- a/dts/framework/remote_session/interactive_remote_session.py
+++ b/dts/framework/remote_session/interactive_remote_session.py
@@ -5,6 +5,7 @@
 
 import socket
 import traceback
+from typing import Union
 
 from paramiko import AutoAddPolicy, SSHClient, Transport  # type: 
ignore[import-untyped]
 from paramiko.ssh_exception import (  # type: ignore[import-untyped]
@@ -52,7 +53,7 @@ class InteractiveRemoteSession:
 session: SSHClient
 _logger: DTSLogger
 _node_config: NodeConfiguration
-_transport: Transport | None
+_transport: Union[Transport, None]
 
 def __init__(self, node_config: NodeConfiguration, logger: DTSLogger) -> 
None:
 """Connect to the node during initialization.
-- 
2.34.1



[PATCH v19 4/5] dts: add API doc sources

2024-08-21 Thread Juraj Linkeš
These sources could be generated with the sphinx-apidoc utility, but
that doesn't give us enough flexibility, such as sorting the order of
modules or changing the headers of the modules.

The sources included in this patch were in fact generated by said
utility, but modified to improve the look of the documentation. The
improvements are mainly in toctree definitions and the titles of the
modules/packages. These were made with specific Sphinx config options in
mind.

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
Tested-by: Luca Vizzarro 
---
 doc/api/dts/conf_yaml_schema.json |  1 +
 doc/api/dts/framework.config.rst  | 12 ++
 doc/api/dts/framework.config.types.rst|  6 +++
 doc/api/dts/framework.exception.rst   |  6 +++
 doc/api/dts/framework.logger.rst  |  6 +++
 doc/api/dts/framework.params.eal.rst  |  6 +++
 doc/api/dts/framework.params.rst  | 14 ++
 doc/api/dts/framework.params.testpmd.rst  |  6 +++
 doc/api/dts/framework.params.types.rst|  6 +++
 doc/api/dts/framework.parser.rst  |  6 +++
 .../framework.remote_session.dpdk_shell.rst   |  6 +++
 ...ote_session.interactive_remote_session.rst |  6 +++
 ...ework.remote_session.interactive_shell.rst |  6 +++
 .../framework.remote_session.python_shell.rst |  6 +++
 ...ramework.remote_session.remote_session.rst |  6 +++
 doc/api/dts/framework.remote_session.rst  | 18 
 .../framework.remote_session.ssh_session.rst  |  6 +++
 ...framework.remote_session.testpmd_shell.rst |  6 +++
 doc/api/dts/framework.runner.rst  |  6 +++
 doc/api/dts/framework.settings.rst|  6 +++
 doc/api/dts/framework.test_result.rst |  6 +++
 doc/api/dts/framework.test_suite.rst  |  6 +++
 doc/api/dts/framework.testbed_model.cpu.rst   |  6 +++
 .../framework.testbed_model.linux_session.rst |  6 +++
 doc/api/dts/framework.testbed_model.node.rst  |  6 +++
 .../framework.testbed_model.os_session.rst|  6 +++
 doc/api/dts/framework.testbed_model.port.rst  |  6 +++
 .../framework.testbed_model.posix_session.rst |  6 +++
 doc/api/dts/framework.testbed_model.rst   | 26 +++
 .../dts/framework.testbed_model.sut_node.rst  |  6 +++
 .../dts/framework.testbed_model.tg_node.rst   |  6 +++
 ..._generator.capturing_traffic_generator.rst |  6 +++
 ...mework.testbed_model.traffic_generator.rst | 14 ++
 testbed_model.traffic_generator.scapy.rst |  6 +++
 ...el.traffic_generator.traffic_generator.rst |  6 +++
 ...framework.testbed_model.virtual_device.rst |  6 +++
 doc/api/dts/framework.utils.rst   |  6 +++
 doc/api/dts/index.rst | 43 +++
 38 files changed, 314 insertions(+)
 create mode 120000 doc/api/dts/conf_yaml_schema.json
 create mode 100644 doc/api/dts/framework.config.rst
 create mode 100644 doc/api/dts/framework.config.types.rst
 create mode 100644 doc/api/dts/framework.exception.rst
 create mode 100644 doc/api/dts/framework.logger.rst
 create mode 100644 doc/api/dts/framework.params.eal.rst
 create mode 100644 doc/api/dts/framework.params.rst
 create mode 100644 doc/api/dts/framework.params.testpmd.rst
 create mode 100644 doc/api/dts/framework.params.types.rst
 create mode 100644 doc/api/dts/framework.parser.rst
 create mode 100644 doc/api/dts/framework.remote_session.dpdk_shell.rst
 create mode 100644 
doc/api/dts/framework.remote_session.interactive_remote_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.interactive_shell.rst
 create mode 100644 doc/api/dts/framework.remote_session.python_shell.rst
 create mode 100644 doc/api/dts/framework.remote_session.remote_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.ssh_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.testpmd_shell.rst
 create mode 100644 doc/api/dts/framework.runner.rst
 create mode 100644 doc/api/dts/framework.settings.rst
 create mode 100644 doc/api/dts/framework.test_result.rst
 create mode 100644 doc/api/dts/framework.test_suite.rst
 create mode 100644 doc/api/dts/framework.testbed_model.cpu.rst
 create mode 100644 doc/api/dts/framework.testbed_model.linux_session.rst
 create mode 100644 doc/api/dts/framework.testbed_model.node.rst
 create mode 100644 doc/api/dts/framework.testbed_model.os_session.rst
 create mode 100644 doc/api/dts/framework.testbed_model.port.rst
 create mode 100644 doc/api/dts/framework.testbed_model.posix_session.rst
 create mode 100644 doc/api/dts/framework.testbed_model.rst
 create mode 100644 doc/api/dts/framework.testbed_model.sut_node.rst
 create mode 100644 doc/api/dts/framework.testbed_model.tg_node.rst
 create mode 100644 
doc/api/dts/framework.testbed_model.traffic_generator.capturing_traffic_generator.rst
 create mode 100644 doc/api/dts/framework.testbed_model.traffic_generator.rst
 create mode 100644 
doc/api/dts/fra

[PATCH v19 5/5] dts: add API doc generation

2024-08-21 Thread Juraj Linkeš
The tool used to generate DTS API docs is Sphinx, which is already in
use in DPDK. The same configuration is used to preserve style with one
DTS-specific configuration (so that the DPDK docs are unchanged) that
modifies how the sidebar displays the content. There's other Sphinx
configuration related to Python docstrings which doesn't affect DPDK doc
build. All new configuration is in a conditional block, applied only
when DTS API docs are built to not interfere with DPDK doc build.

Sphinx generates the documentation from Python docstrings. The docstring
format is the Google format [0] which requires the sphinx.ext.napoleon
extension. The other extension, sphinx.ext.intersphinx, enables linking
to objects in external documentations, such as the Python documentation.

There is one requirement for building DTS docs - the same Python version
as DTS or higher, because Sphinx's autodoc extension imports the code.

The dependencies needed to import the code don't have to be satisfied,
as the autodoc extension allows us to mock the imports. The missing
packages are taken from the DTS pyproject.toml file.

And finally, the DTS API docs can be accessed from the DPDK API doxygen
page.

[0] https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings

Cc: Bruce Richardson 

Signed-off-by: Juraj Linkeš 
Reviewed-by: Jeremy Spewock 
Reviewed-by: Dean Marx 
---
 buildtools/call-sphinx-build.py   |  2 +
 buildtools/get-dts-runtime-deps.py| 95 +++
 buildtools/meson.build|  1 +
 doc/api/doxy-api-index.md |  3 +
 doc/api/doxy-api.conf.in  |  2 +
 doc/api/dts/custom.css|  1 +
 doc/api/dts/meson.build   | 31 
 doc/api/meson.build   |  6 +-
 doc/guides/conf.py| 44 ++-
 doc/guides/contributing/documentation.rst |  2 +
 doc/guides/contributing/patches.rst   |  4 +
 doc/guides/tools/dts.rst  | 39 +-
 doc/meson.build   |  1 +
 13 files changed, 228 insertions(+), 3 deletions(-)
 create mode 100755 buildtools/get-dts-runtime-deps.py
 create mode 120000 doc/api/dts/custom.css
 create mode 100644 doc/api/dts/meson.build

diff --git a/buildtools/call-sphinx-build.py b/buildtools/call-sphinx-build.py
index 623e7363ee..154e9f907b 100755
--- a/buildtools/call-sphinx-build.py
+++ b/buildtools/call-sphinx-build.py
@@ -15,6 +15,8 @@
 
 # set the version in environment for sphinx to pick up
 os.environ['DPDK_VERSION'] = version
+if 'dts' in src:
+os.environ['DTS_BUILD'] = "y"
 
 sphinx_cmd = [sphinx] + extra_args
 
diff --git a/buildtools/get-dts-runtime-deps.py 
b/buildtools/get-dts-runtime-deps.py
new file mode 100755
index 00..1636a6dbf5
--- /dev/null
+++ b/buildtools/get-dts-runtime-deps.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 PANTHEON.tech s.r.o.
+#
+
+"""Utilities for DTS dependencies.
+
+The module can be used as an executable script,
+which verifies that the running Python version meets the version requirement 
of DTS.
+The script exits with the standard exit codes in this mode (0 is success, 1 is 
failure).
+
+The module also contains a function, get_missing_imports,
+which looks for runtime dependencies in the DTS pyproject.toml file
+and returns a list of module names used in an import statement (import 
packages) that are missing.
+This function is not used when the module is run as a script and is available 
to be imported.
+"""
+
+import configparser
+import importlib.metadata
+import importlib.util
+import os.path
+import platform
+
+from packaging.version import Version
+
+_VERSION_COMPARISON_CHARS = '^<>='
+_EXTRA_DEPS = {
+'invoke': {'version': '>=1.3'},
+'paramiko': {'version': '>=2.4'},
+'PyYAML': {'version': '^6.0', 'import_package': 'yaml'}
+}
+_DPDK_ROOT = os.path.dirname(os.path.dirname(__file__))
+_DTS_DEP_FILE_PATH = os.path.join(_DPDK_ROOT, 'dts', 'pyproject.toml')
+
+
+def _get_dependencies(cfg_file_path):
+cfg = configparser.ConfigParser()
+with open(cfg_file_path) as f:
+dts_deps_file_str = f.read()
+dts_deps_file_str = dts_deps_file_str.replace("\n]", "]")
+cfg.read_string(dts_deps_file_str)
+
+deps_section = cfg['tool.poetry.dependencies']
+return {dep: {'version': deps_section[dep].strip('"\'')} for dep in 
deps_section}
+
+
+def get_missing_imports():
+"""Get missing DTS import packages from third party libraries.
+
+Scan the DTS pyproject.toml file for dependencies and find those that are 
not installed.
+The dependencies in pyproject.toml are listed by their distribution 
package names,
+but the function finds the associated import packages - those used in 
import statements.
+
+The function is not used when the module is run as a script. It should be 
imported.
+
+Returns:
+A list of missing import p

Re: [PATCH v19 5/5] dts: add API doc generation

2024-08-21 Thread Dean Marx
On Wed, Aug 21, 2024 at 11:03 AM Juraj Linkeš 
wrote:

> The tool used to generate DTS API docs is Sphinx, which is already in
> use in DPDK. The same configuration is used to preserve style with one
> DTS-specific configuration (so that the DPDK docs are unchanged) that
> modifies how the sidebar displays the content. There's other Sphinx
> configuration related to Python docstrings which doesn't affect DPDK doc
> build. All new configuration is in a conditional block, applied only
> when DTS API docs are built to not interfere with DPDK doc build.
>
> Sphinx generates the documentation from Python docstrings. The docstring
> format is the Google format [0] which requires the sphinx.ext.napoleon
> extension. The other extension, sphinx.ext.intersphinx, enables linking
> to objects in external documentations, such as the Python documentation.
>
> There is one requirement for building DTS docs - the same Python version
> as DTS or higher, because Sphinx's autodoc extension imports the code.
>
> The dependencies needed to import the code don't have to be satisfied,
> as the autodoc extension allows us to mock the imports. The missing
> packages are taken from the DTS pyproject.toml file.
>
> And finally, the DTS API docs can be accessed from the DPDK API doxygen
> page.
>

Tested-by: Dean Marx 


[PATCH] dts: add RSS functions to testpmd

2024-08-21 Thread Alex Chapman
This patch adds the required functionality for the RSS key_update,
RETA, and hash test suites. This includes:
The setting of custom RETA values for routing packets to specific
queues.
The setting of the RSS mode on all ports, to specify how to hash
the packets.
The updating of the RSS hash key used during the hashing process.

Alongside this, there is the addition of a __str__ method to the
RSSOffloadTypesFlags class, so that when flag names are cast to
a string they will use '-' as separators, instead of '_'.
This allows them to be directly used within testpmd RSS commands
without any further changes.
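
A hedged illustration of the __str__ behaviour described above; the member
name used here is hypothetical, only the underscore-to-dash mapping is what
the patch adds:

    flag = RSSOffloadTypesFlag["ipv4_udp"]  # hypothetical member lookup
    cmd = f"port config all rss {flag}"     # -> "port config all rss ipv4-udp"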

Signed-off-by: Alex Chapman 
Reviewed-by: Luca Vizzarro 
---
 dts/framework/remote_session/testpmd_shell.py | 84 ++-
 1 file changed, 83 insertions(+), 1 deletion(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 18b0533658..6a2d8be6fa 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -23,7 +23,7 @@
 
 from typing_extensions import Self, Unpack
 
-from framework.exception import InteractiveCommandExecutionError
+from framework.exception import InteractiveCommandExecutionError, InternalError
 from framework.params.testpmd import SimpleForwardingModes, TestPmdParams
 from framework.params.types import TestPmdParamsDict
 from framework.parser import ParserFn, TextParser
@@ -305,6 +305,12 @@ def make_parser(cls) -> ParserFn:
 RSSOffloadTypesFlag.from_list_string,
 )
 
+def __str__(self):
+"""Stringifies the flag name."""
+if self.name is None:
+return ""
+return self.name.replace("_", "-")
+
 
 class DeviceCapabilitiesFlag(Flag):
 """Flag representing the device capabilities."""
@@ -1178,6 +1184,82 @@ def set_forward_mode(self, mode: SimpleForwardingModes, 
verify: bool = True):
 f"Test pmd failed to set fwd mode to {mode.value}"
 )
 
+def port_config_rss_reta(
+self, port_id: int, hash_index: int, queue_id: int, verify: bool = True
+) -> None:
+"""Configures a port's RSS redirection table.
+
+Args:
+port_id: Port where redirection table will be configured.
+hash_index: The index into the redirection table associated with 
the destination queue.
+queue_id: The destination queue of the packet.
+verify: If :data:`True`, it verifies if a port's redirection table
+was correctly configured.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and
+testpmd failed to config RSS reta.
+"""
+out = self.send_command(f"port config {port_id} rss reta 
({hash_index},{queue_id})")
+if verify:
+if f"The reta size of port {port_id} is" not in out:
+self._logger.debug(f"Failed to config RSS reta: \n{out}")
+raise InteractiveCommandExecutionError("Testpmd failed to 
config RSS reta.")
+
+def port_config_all_rss_offload_type(
+self, flag: RSSOffloadTypesFlag, verify: bool = True
+) -> None:
+"""Set the RSS mode on all ports.
+
+Args:
+flag: The RSS iptype all ports will be configured to.
+verify: If :data:`True`, it verifies if all ports RSS offload type
+was correctly configured.
+
+Raises:
+InternalError: Offload Flag has contradictory values set.
+InteractiveCommandExecutionError: If `verify` is :data:`True` and
+testpmd failed to config the RSS mode on all ports.
+"""
+if not flag.name:
+raise InternalError("Offload Flag has contradictory values set")
+
+out = self.send_command(f"port config all rss {flag.name}")
+
+if verify:
+if "error" in out:
+self._logger.debug(f"Failed to config the RSS mode on all 
ports: \n{out}")
+raise InteractiveCommandExecutionError(
+"Testpmd failed to config the RSS mode on all ports"
+)
+
+def port_config_rss_hash_key(
+self, port_id: int, offload_type: RSSOffloadTypesFlag, hex_str: str, 
verify: bool = True
+) -> str:
+"""Set the RSS hash key for the specified port.
+
+Args:
+port_id: Port the hash key will be set.
+offload_type: The offload type the hash key will be applied to.
+hex_str: The new hash key.
+verify: If :data:`True`, verifies that the RSS hash key has been correctly set.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and
+testpmd failed to set the RSS hash key.
+"""
+output = self.send_command(f"port config {port_id} rss-hash-key 
{offload_type} {hex_str}")
+
+if verify:
+if (
+"invalid - key must be a str

[PATCH v2 0/2] dts: port over checksum offload suite

2024-08-21 Thread Dean Marx
---
v2:
* added filter for verbose output using dst mac address

Dean Marx (2):
  dts: add csum HW offload to testpmd shell
  dts: checksum offload test suite

 dts/framework/config/conf_yaml_schema.json|   3 +-
 dts/framework/remote_session/testpmd_shell.py | 124 
 dts/tests/TestSuite_checksum_offload.py   | 264 ++
 3 files changed, 390 insertions(+), 1 deletion(-)
 create mode 100644 dts/tests/TestSuite_checksum_offload.py

-- 
2.44.0



[PATCH v2 1/2] dts: add csum HW offload to testpmd shell

2024-08-21 Thread Dean Marx
add csum_set_hw method to testpmd shell class. Port over
set_verbose and port start/stop from queue start/stop suite.
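
A hedged usage sketch of the methods this patch adds, assuming a live
TestPmdShell session named testpmd and port 0:

    testpmd.port_stop(0)  # csum configuration requires a stopped port
    testpmd.csum_set_hw(ChecksumOffloadOptions.ip, port_id=0)
    testpmd.csum_set_hw(ChecksumOffloadOptions.udp, port_id=0)
    testpmd.port_start(0)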

Signed-off-by: Dean Marx 
---
 dts/framework/remote_session/testpmd_shell.py | 124 ++
 1 file changed, 124 insertions(+)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 43e9f56517..be7cd16b96 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -334,6 +334,32 @@ def make_parser(cls) -> ParserFn:
 )
 
 
+class ChecksumOffloadOptions(Flag):
+"""Flag representing checksum hardware offload layer options."""
+
+#:
+ip = auto()
+#:
+udp = auto()
+#:
+tcp = auto()
+#:
+sctp = auto()
+#:
+outerip = auto()
+#:
+outerudp = auto()
+
+def __str__(self):
+"""String method for use in csum_set_hw."""
+if self == ChecksumOffloadOptions.outerip:
+return "outer-ip"
+elif self == ChecksumOffloadOptions.outerudp:
+return "outer-udp"
+else:
+return f"{self.name}"
+
+
 class DeviceErrorHandlingMode(StrEnum):
 """Enum representing the device error handling mode."""
 
@@ -806,6 +832,104 @@ def show_port_stats(self, port_id: int) -> 
TestPmdPortStats:
 
 return TestPmdPortStats.parse(output)
 
+def port_stop(self, port: int, verify: bool = True):
+"""Stop specified port.
+
+Args:
+port: Specifies the port number to use, must be between 0-32.
+verify: If :data:`True`, the output of the command is scanned
+to ensure specified port is stopped. If not, it is considered
+an error.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
the port
+is not stopped.
+"""
+port_output = self.send_command(f"port stop {port}")
+if verify:
+if "Done" not in port_output:
+self._logger.debug(f"Failed to stop port {port}: 
\n{port_output}")
+raise InteractiveCommandExecutionError(f"Testpmd failed to 
stop port {port}.")
+
+def port_start(self, port: int, verify: bool = True):
+"""Start specified port.
+
+Args:
+port: Specifies the port number to use, must be between 0-32.
+verify: If :data:`True`, the output of the command is scanned
+to ensure specified port is started. If not, it is considered
+an error.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
the port
+is not started.
+"""
+port_output = self.send_command(f"port start {port}")
+if verify:
+if "Done" not in port_output:
+self._logger.debug(f"Failed to start port {port}: 
\n{port_output}")
+raise InteractiveCommandExecutionError(f"Testpmd failed to 
start port {port}.")
+
+def csum_set_hw(self, layer: ChecksumOffloadOptions, port_id: int, verify: 
bool = True) -> None:
+"""Enables hardware checksum offloading on the specified layer.
+
+Args:
+layer: The layer that checksum offloading should be enabled on;
+one of: tcp, ip, udp, sctp, outer-ip, outer-udp.
+port_id: The port number to enable checksum offloading on, should 
be within 0-32.
+verify: If :data:`True` the output of the command will be scanned 
in an attempt to
+verify that checksum offloading was enabled on the port.
+
+Raises:
+InteractiveCommandExecutionError: If checksum offload is not 
enabled successfully.
+"""
+csum_output = self.send_command(f"csum set {str(layer)} hw {port_id}")
+if verify:
+if "Bad arguments" in csum_output or f"Please stop port {port_id} 
first" in csum_output:
+self._logger.debug(f"Failed to set csum hw mode on port 
{port_id}:\n{csum_output}")
+raise InteractiveCommandExecutionError(
+f"Failed to set csum hw mode on port {port_id}"
+)
+if verify and f"checksum offload is not supported by port {port_id}" 
in csum_output:
+self._logger.debug(f"Checksum {layer} offload is not 
supported:\n{csum_output}")
+
+success = False
+if layer == ChecksumOffloadOptions.outerip:
+if "Outer-Ip checksum offload is hw" in csum_output:
+success = True
+elif layer == ChecksumOffloadOptions.outerudp:
+if "Outer-Udp checksum offload is hw" in csum_output:
+success = True
+else:
+if f"{str(layer).upper} checksum offload is hw" in csum_output:
+success = True
+if not success and verify:
+self._logger.debug(f"Failed to set csum hw mode on port 
{port_id}:\n{

[PATCH v2 2/2] dts: checksum offload test suite

2024-08-21 Thread Dean Marx
Test suite for verifying layer 3/4 checksum offload
features of the poll mode driver.

Depends-on: patch-143033
("dts: add text parser for testpmd verbose output")
Depends-on: patch-142691
("dts: add send_packets to test suites and rework packet addressing")

Signed-off-by: Dean Marx 
---
 dts/framework/config/conf_yaml_schema.json |   3 +-
 dts/tests/TestSuite_checksum_offload.py| 264 +
 2 files changed, 266 insertions(+), 1 deletion(-)
 create mode 100644 dts/tests/TestSuite_checksum_offload.py

diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json
index f02a310bb5..a83a6786df 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -187,7 +187,8 @@
   "enum": [
 "hello_world",
 "os_udp",
-"pmd_buffer_scatter"
+"pmd_buffer_scatter",
+"checksum_offload"
   ]
 },
 "test_target": {
diff --git a/dts/tests/TestSuite_checksum_offload.py 
b/dts/tests/TestSuite_checksum_offload.py
new file mode 100644
index 00..406c9077dc
--- /dev/null
+++ b/dts/tests/TestSuite_checksum_offload.py
@@ -0,0 +1,264 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 University of New Hampshire
+
+"""DPDK checksum offload testing suite.
+
+This suite verifies L3/L4 checksum offload features of the Poll Mode Driver.
+On the Rx side, IPv4 and UDP/TCP checksum by hardware is checked to ensure
+checksum flags match expected flags. On the Tx side, IPv4/UDP, IPv4/TCP,
+IPv6/UDP, and IPv6/TCP insertion by hardware is checked to checksum flags
+match expected flags.
+
+"""
+
+from typing import List
+
+from scapy.all import Packet  # type: ignore[import-untyped]
+from scapy.layers.inet import IP, TCP, UDP  # type: ignore[import-untyped]
+from scapy.layers.inet6 import IPv6  # type: ignore[import-untyped]
+from scapy.layers.sctp import SCTP  # type: ignore[import-untyped]
+from scapy.layers.l2 import Dot1Q  # type: ignore[import-untyped]
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw  # type: ignore[import-untyped]
+
+from framework.remote_session.testpmd_shell import (
+SimpleForwardingModes,
+TestPmdShell,
+OLFlag,
+ChecksumOffloadOptions
+)
+from framework.test_suite import TestSuite
+
+
+class TestChecksumOffload(TestSuite):
+"""Checksum offload test suite.
+
+This suite consists of 6 test cases:
+1. Insert checksum on transmit packet
+2. Do not insert checksum on transmit packet
+3. Validate Rx checksum valid flags
+4. Hardware checksum check L4 Rx
+5. Hardware checksum check L3 Rx
+6. Checksum offload with vlan
+
+"""
+
+def set_up_suite(self) -> None:
+"""Set up the test suite.
+
+Setup:
+Verify that at least two port links are created when the
+test run is initialized.
+"""
+self.verify(len(self._port_links) > 1, "Not enough port links.")
+
+def send_packets_and_verify(
+self, packet_list: List[Packet], load: str, should_receive: bool
+) -> None:
+"""Send and verify packet is received on the traffic generator.
+
+Args:
+packet_list: list of Scapy packets to send and verify.
+load: Raw layer load attribute in the sent packet.
+should_receive: Indicates whether the packet should be received
+by the traffic generator.
+"""
+for i in range(0, len(packet_list)):
+received_packets = 
self.send_packet_and_capture(packet=packet_list[i])
+received = any(
+packet.haslayer(Raw) and load in str(packet.load) for packet 
in received_packets
+)
+self.verify(
+received == should_receive,
+f"Packet was {'dropped' if should_receive else 'received'}",
+)
+
+def send_packet_and_verify_checksum(
+self, packet: Packet, goodL4: bool, goodIP: bool, testpmd: TestPmdShell
+) -> None:
+"""Send packet and verify verbose output matches expected output.
+
+Args:
+packet: Scapy packet to send to DUT.
+goodL4: Verifies RTE_MBUF_F_RX_L4_CKSUM_GOOD in verbose output
+if :data:`True`, or RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN if 
:data:`False`.
+goodIP: Verifies RTE_MBUF_F_RX_IP_CKSUM_GOOD in verbose output
+if :data:`True`, or RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN if 
:data:`False`.
+testpmd: Testpmd shell session to analyze verbose output of.
+"""
+testpmd.start()
+self.send_packet_and_capture(packet=packet)
+verbose_output = testpmd.extract_verbose_output(testpmd.stop())
+for packet in verbose_output:
+if packet.dst_mac == "00:00:00:00:00:01":
+if OLFlag.RTE_MBUF_F_RX_L4_CKSUM_GOOD in packet.ol_flags:
+isIP = True
+else:
+is

[PATCH dpdk v1 00/15] IPv6 APIs overhaul

2024-08-21 Thread Robin Jarry
Hi everyone,

As discussed recently [1], here is a first draft of the IPv6 APIs rework. The
API change was announced before the 24.07 release [2]. This series is intended
for 24.11.

[1] http://inbox.dpdk.org/dev/d2sr8t1h39cj.jrqfi6jeh...@redhat.com/
[2] 
https://git.dpdk.org/dpdk/commit/?id=835d4c41e0ab58a115c2170c886ba6d3cc1b5764

I tried to keep the patches as small as possible; unfortunately some of them
are quite big and cannot be broken down if we want to preserve a bisectable
tree.

Let me know what you think.

Thanks!

Cc: Morten Brørup 
Cc: Stephen Hemminger 
Cc: Vladimir Medvedkin 
Cc: Konstantin Ananyev 
Cc: Bruce Richardson 

Robin Jarry (15):
  net: split raw checksum functions in separate header
  net: split ipv6 symbols in separate header
  net: add structure for ipv6 addresses
  net: use ipv6 structure for header addresses
  fib6,rib6,lpm6: use ipv6 addr struct
  net: add ipv6 address utilities
  fib6,rib6,lpm6: use ipv6 utils
  graph,node: use ipv6 addr struct and utils
  pipeline: use ipv6 addr struct
  ipsec: use ipv6 addr struct
  thash: use ipv6 addr struct
  gro: use ipv6 addr struct
  rte_flow: use ipv6 addr struct
  rib6,fib6,lpm6: remove duplicate constants
  net: add utilities for well known ipv6 address types

 app/graph/ethdev.c  |   40 +-
 app/graph/ethdev.h  |9 +-
 app/graph/ip6_route.c   |   47 +-
 app/graph/meson.build   |2 +-
 app/graph/neigh.c   |   22 +-
 app/graph/neigh_priv.h  |4 +-
 app/graph/route.h   |8 +-
 app/test-fib/main.c |   51 +-
 app/test-flow-perf/actions_gen.c|4 +-
 app/test-flow-perf/items_gen.c  |4 +-
 app/test-pipeline/pipeline_hash.c   |4 +-
 app/test-pipeline/pipeline_lpm_ipv6.c   |   10 +-
 app/test-sad/main.c |   24 +-
 app/test/meson.build|1 +
 app/test/packet_burst_generator.c   |5 +-
 app/test/test_cryptodev_security_ipsec.c|1 +
 app/test/test_fib6.c|   93 +-
 app/test/test_fib6_perf.c   |8 +-
 app/test/test_ipfrag.c  |4 +-
 app/test/test_ipsec_sad.c   |   44 +-
 app/test/test_lpm6.c|  523 ++---
 app/test/test_lpm6_data.h   | 2025 ++-
 app/test/test_lpm6_perf.c   |   10 +-
 app/test/test_net_ipv6.c|  203 ++
 app/test/test_reassembly_perf.c |   20 +-
 app/test/test_rib6.c|   65 +-
 app/test/test_table_combined.c  |2 +-
 app/test/test_table_tables.c|8 +-
 app/test/test_thash.c   |   46 +-
 drivers/common/cnxk/cnxk_security.c |1 +
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c|1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |1 +
 drivers/net/bnxt/bnxt_flow.c|   12 +-
 drivers/net/bonding/rte_eth_bond_pmd.c  |4 +-
 drivers/net/cxgbe/cxgbe_flow.c  |   18 +-
 drivers/net/dpaa2/dpaa2_flow.c  |   22 +-
 drivers/net/hinic/hinic_pmd_flow.c  |6 +-
 drivers/net/hinic/hinic_pmd_tx.c|2 +-
 drivers/net/hns3/hns3_flow.c|8 +-
 drivers/net/i40e/i40e_flow.c|   12 +-
 drivers/net/iavf/iavf_fdir.c|8 +-
 drivers/net/iavf/iavf_fsub.c|8 +-
 drivers/net/iavf/iavf_ipsec_crypto.c|6 +-
 drivers/net/ice/ice_fdir_filter.c   |   12 +-
 drivers/net/ice/ice_switch_filter.c |   16 +-
 drivers/net/igc/igc_flow.c  |4 +-
 drivers/net/ixgbe/ixgbe_flow.c  |   12 +-
 drivers/net/ixgbe/ixgbe_ipsec.c |4 +-
 drivers/net/mlx5/hws/mlx5dr_definer.c   |   36 +-
 drivers/net/mlx5/mlx5_flow.c|6 +-
 drivers/net/mlx5/mlx5_flow_dv.c |   16 +-
 drivers/net/mlx5/mlx5_flow_hw.c |   10 +-
 drivers/net/mlx5/mlx5_flow_verbs.c  |8 +-
 drivers/net/nfp/flower/nfp_flower_flow.c|   36 +-
 drivers/net/nfp/nfp_net_flow.c  |   44 +-
 drivers/net/qede/qede_filter.c  |4 +-
 drivers/net/sfc/sfc_flow.c  |   28 +-
 drivers/net/tap/tap_flow.c  |8 +-
 drivers/net/txgbe/txgbe_flow.c  |   12 +-
 drivers/net/txgbe/txgbe_ipsec.c |4 +-
 examples/ip_fragmentation/main.c|   24 +-
 examples/ip_pipeline/cli.c  |   12 +-
 examples/ip_pipeline/pipeline.c |   17 +-
 examples/ip_pipeline/thread.c   |2 +-
 examples/ip_reassembly/main.c   |   24 +-
 examples/ipsec-secgw/flow.c |   12 +-
 examples/ipsec-secgw/ipsec.c|8 +-
 examples/ipsec-secgw/ipsec_lpm_neon.h

[PATCH dpdk v1 01/15] net: split raw checksum functions in separate header

2024-08-21 Thread Robin Jarry
The checksum functions are used by both ipv4 and ipv6 functions. In
preparation of moving ipv6 symbols to a new header, move the checksum
related symbols to another dedicated header.

Signed-off-by: Robin Jarry 
---
 lib/net/meson.build |   1 +
 lib/net/rte_cksum.h | 189 
 lib/net/rte_ip.h| 148 +-
 3 files changed, 191 insertions(+), 147 deletions(-)
 create mode 100644 lib/net/rte_cksum.h

diff --git a/lib/net/meson.build b/lib/net/meson.build
index 0b691389495a..2e65bd19b7d4 100644
--- a/lib/net/meson.build
+++ b/lib/net/meson.build
@@ -3,6 +3,7 @@
 
 headers = files(
 'rte_ip.h',
+'rte_cksum.h',
 'rte_tcp.h',
 'rte_udp.h',
 'rte_tls.h',
diff --git a/lib/net/rte_cksum.h b/lib/net/rte_cksum.h
new file mode 100644
index ..8deceab51508
--- /dev/null
+++ b/lib/net/rte_cksum.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 1982, 1986, 1990, 1993
+ *  The Regents of the University of California.
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_CKSUM_H_
+#define _RTE_CKSUM_H_
+
+/**
+ * @file
+ *
+ * Protocol independent checksum utilities.
+ */
+
+#include 
+
+#ifdef RTE_EXEC_ENV_WINDOWS
+#include 
+#else
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#endif
+
+#include 
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * @internal Calculate a sum of all words in the buffer.
+ * Helper routine for the rte_raw_cksum().
+ *
+ * @param buf
+ *   Pointer to the buffer.
+ * @param len
+ *   Length of the buffer.
+ * @param sum
+ *   Initial value of the sum.
+ * @return
+ *   sum += Sum of all words in the buffer.
+ */
+static inline uint32_t
+__rte_raw_cksum(const void *buf, size_t len, uint32_t sum)
+{
+   const void *end;
+
+   for (end = RTE_PTR_ADD(buf, RTE_ALIGN_FLOOR(len, sizeof(uint16_t)));
+buf != end; buf = RTE_PTR_ADD(buf, sizeof(uint16_t))) {
+   uint16_t v;
+
+   memcpy(&v, buf, sizeof(uint16_t));
+   sum += v;
+   }
+
+   /* if length is odd, keeping it byte order independent */
+   if (unlikely(len % 2)) {
+   uint16_t left = 0;
+
+   memcpy(&left, end, 1);
+   sum += left;
+   }
+
+   return sum;
+}
+
+/**
+ * @internal Reduce a sum to the non-complemented checksum.
+ * Helper routine for the rte_raw_cksum().
+ *
+ * @param sum
+ *   Value of the sum.
+ * @return
+ *   The non-complemented checksum.
+ */
+static inline uint16_t
+__rte_raw_cksum_reduce(uint32_t sum)
+{
+   sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
+   sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
+   return (uint16_t)sum;
+}
+
+/**
+ * Process the non-complemented checksum of a buffer.
+ *
+ * @param buf
+ *   Pointer to the buffer.
+ * @param len
+ *   Length of the buffer.
+ * @return
+ *   The non-complemented checksum.
+ */
+static inline uint16_t
+rte_raw_cksum(const void *buf, size_t len)
+{
+   uint32_t sum;
+
+   sum = __rte_raw_cksum(buf, len, 0);
+   return __rte_raw_cksum_reduce(sum);
+}
+
+/**
+ * Compute the raw (non complemented) checksum of a packet.
+ *
+ * @param m
+ *   The pointer to the mbuf.
+ * @param off
+ *   The offset in bytes to start the checksum.
+ * @param len
+ *   The length in bytes of the data to checksum.
+ * @param cksum
+ *   A pointer to the checksum, filled on success.
+ * @return
+ *   0 on success, -1 on error (bad length or offset).
+ */
+static inline int
+rte_raw_cksum_mbuf(const struct rte_mbuf *m, uint32_t off, uint32_t len,
+   uint16_t *cksum)
+{
+   const struct rte_mbuf *seg;
+   const char *buf;
+   uint32_t sum, tmp;
+   uint32_t seglen, done;
+
+   /* easy case: all data in the first segment */
+   if (off + len <= rte_pktmbuf_data_len(m)) {
+   *cksum = rte_raw_cksum(rte_pktmbuf_mtod_offset(m,
+   const char *, off), len);
+   return 0;
+   }
+
+   if (unlikely(off + len > rte_pktmbuf_pkt_len(m)))
+   return -1; /* invalid params, return a dummy value */
+
+   /* else browse the segment to find offset */
+   seglen = 0;
+   for (seg = m; seg != NULL; seg = seg->next) {
+   seglen = rte_pktmbuf_data_len(seg);
+   if (off < seglen)
+   break;
+   off -= seglen;
+   }
+   RTE_ASSERT(seg != NULL);
+   if (seg == NULL)
+   return -1;
+   seglen -= off;
+   buf = rte_pktmbuf_mtod_offset(seg, const char *, off);
+   if (seglen >= len) {
+   /* all in one segment */
+   *cksum = rte_raw_cksum(buf, len);
+   return 0;
+   }
+
+   /* hard case: process checksum of several segments */
+   sum = 0;
+   done = 0;
+   for (;;) {
+

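
For reference, the folding performed by __rte_raw_cksum_reduce() can be
demonstrated standalone; a minimal sketch in plain C, not part of the
patch:

/* One's-complement folding of a 32-bit accumulator into 16 bits.
 * Two rounds are enough: after the first, the carry is at most 1. */
#include <stdint.h>
#include <stdio.h>

static uint16_t
fold16(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);
	sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}

int
main(void)
{
	/* 0xffff + 0x0001 overflows to 0x10000, which folds back to 1 */
	printf("0x%04x\n", fold16(0xffffU + 0x0001U));
	return 0;
}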
[PATCH dpdk v1 03/15] net: add structure for ipv6 addresses

2024-08-21 Thread Robin Jarry
There is currently no structure defined for IPv6 addresses. Introduce
one that is simply a uint8_t array of 16 elements, without any union. The
idea is to ensure the structure's alignment is 1 so that it can be mapped
directly onto unaligned packet memory.
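
As a hypothetical illustration (not part of the patch), alignment 1 lets
the structure overlay raw header bytes without an intermediate copy:

/* Sketch: overlay the address structure on packet bytes. Offset 8 is
 * where the source address starts in the fixed IPv6 header. */
#include <stdint.h>

struct rte_ipv6_addr {
	unsigned char a[16]; /* alignment 1, no padding */
};

static const struct rte_ipv6_addr *
ipv6_src(const uint8_t *l3_hdr)
{
	/* safe even if l3_hdr is unaligned, since alignof == 1 */
	return (const struct rte_ipv6_addr *)(l3_hdr + 8);
}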

Signed-off-by: Robin Jarry 
---
 lib/net/rte_ip6.h | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
index 9ed737d5eb81..f948f95db7ca 100644
--- a/lib/net/rte_ip6.h
+++ b/lib/net/rte_ip6.h
@@ -35,6 +35,16 @@
 extern "C" {
 #endif
 
+#define RTE_IPV6_ADDR_SIZE 16
+#define RTE_IPV6_MAX_DEPTH 128
+
+/**
+ * IPv6 Address
+ */
+struct rte_ipv6_addr {
+   unsigned char a[RTE_IPV6_ADDR_SIZE];
+};
+
 /**
  * IPv6 Header
  */
-- 
2.46.0



[PATCH dpdk v1 02/15] net: split ipv6 symbols in separate header

2024-08-21 Thread Robin Jarry
Move all ipv6 related symbols to a dedicated header. Update all code
accordingly.

Signed-off-by: Robin Jarry 
---
 app/test/packet_burst_generator.c   |   1 +
 app/test/test_cryptodev_security_ipsec.c|   1 +
 drivers/common/cnxk/cnxk_security.c |   1 +
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c|   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |   1 +
 examples/ip_pipeline/pipeline.c |   1 +
 lib/ethdev/rte_flow.h   |   1 +
 lib/hash/rte_thash.h|   1 +
 lib/ip_frag/rte_ip_frag.h   |   1 +
 lib/ipsec/iph.h |   1 +
 lib/net/meson.build |   1 +
 lib/net/rte_ip.h| 319 --
 lib/net/{rte_ip.h => rte_ip6.h} | 352 +---
 lib/net/rte_net.c   |   1 +
 lib/net/rte_net.h   |   1 +
 lib/pipeline/rte_swx_ipsec.c|   1 +
 lib/pipeline/rte_table_action.c |   1 +
 lib/vhost/virtio_net.c  |   1 +
 18 files changed, 22 insertions(+), 665 deletions(-)
 copy lib/net/{rte_ip.h => rte_ip6.h} (50%)

diff --git a/app/test/packet_burst_generator.c 
b/app/test/packet_burst_generator.c
index 867a88da0055..2cd34abc1a65 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -5,6 +5,7 @@
 #include 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 
 #include "packet_burst_generator.h"
diff --git a/app/test/test_cryptodev_security_ipsec.c 
b/app/test/test_cryptodev_security_ipsec.c
index 1aba1ad9934b..9ac4a6e599b0 100644
--- a/app/test/test_cryptodev_security_ipsec.c
+++ b/app/test/test_cryptodev_security_ipsec.c
@@ -6,6 +6,7 @@
 #include 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 #include 
 #include 
diff --git a/drivers/common/cnxk/cnxk_security.c 
b/drivers/common/cnxk/cnxk_security.c
index 15b0bedf43e2..e275d6cad3ea 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include <rte_ip6.h>
 #include 
 
 #include "cnxk_security.h"
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c 
b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index f443cb9563ec..a3c737ef40f7 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -6,6 +6,7 @@
 #include 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 
 #include "roc_cpt.h"
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c1f7181d5587..5bf690e3d82a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -10,6 +10,7 @@
 #include 
 
 #include 
+#include <rte_ip6.h>
 #include 
 #include 
 #include 
diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c
index 63352257c6e9..301c52d061be 100644
--- a/examples/ip_pipeline/pipeline.c
+++ b/examples/ip_pipeline/pipeline.c
@@ -7,6 +7,7 @@
 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 
 #include 
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index f864578f806b..3a3ab781c494 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -23,6 +23,7 @@
 #include 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 #include 
 #include 
diff --git a/lib/hash/rte_thash.h b/lib/hash/rte_thash.h
index 30b657e67a7d..af40afd2d475 100644
--- a/lib/hash/rte_thash.h
+++ b/lib/hash/rte_thash.h
@@ -23,6 +23,7 @@ extern "C" {
 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 #include 
 
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 2ad318096b7b..cb06d5f5977a 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -23,6 +23,7 @@ extern "C" {
 #include 
 #include 
 #include 
+#include <rte_ip6.h>
 #include 
 
 struct rte_mbuf;
diff --git a/lib/ipsec/iph.h b/lib/ipsec/iph.h
index 861f16905ad0..815a3c90d76b 100644
--- a/lib/ipsec/iph.h
+++ b/lib/ipsec/iph.h
@@ -6,6 +6,7 @@
 #define _IPH_H_
 
 #include 
+#include <rte_ip6.h>
 
 /**
  * @file iph.h
diff --git a/lib/net/meson.build b/lib/net/meson.build
index 2e65bd19b7d4..35ac334a18cf 100644
--- a/lib/net/meson.build
+++ b/lib/net/meson.build
@@ -3,6 +3,7 @@
 
 headers = files(
 'rte_ip.h',
+'rte_ip6.h',
 'rte_cksum.h',
 'rte_tcp.h',
 'rte_udp.h',
diff --git a/lib/net/rte_ip.h b/lib/net/rte_ip.h
index 0ae7c0565047..e3c8d0163f64 100644
--- a/lib/net/rte_ip.h
+++ b/lib/net/rte_ip.h
@@ -374,325 +374,6 @@ rte_ipv4_udptcp_cksum_mbuf_verify(const struct rte_mbuf 
*m,
return 0;
 }
 
-/**
- * IPv6 Header
- */
-struct rte_ipv6_hdr {
-   rte_be32_t vtc_flow;/**< IP version, traffic class & flow label. */
-   rte_be16_t payload_len; /**< IP payload size, including ext. headers */
-   uint8_t  proto; /**< Protocol, next header. */
-   uint8_t  hop_limits;/**< Hop limits. */
-   uint8_t  src_addr[16];  /**< IP address of source host. */
- 

[PATCH dpdk v1 04/15] net: use ipv6 structure for header addresses

2024-08-21 Thread Robin Jarry
The rte_ipv6_hdr uses ad-hoc uint8_t[16] arrays to represent addresses.
Replace these arrays with the previously introduced rte_ipv6_addr
structure. Adapt all code accordingly.
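
The caller-visible change is a single extra member access; a
hypothetical before/after sketch:

/* Hypothetical helper, not taken from the patch. */
static uint8_t
first_src_byte(const struct rte_ipv6_hdr *ip6)
{
	/* before: uint8_t src_addr[16]  ->  ip6->src_addr[0]   */
	/* after:  struct rte_ipv6_addr  ->  ip6->src_addr.a[0] */
	return ip6->src_addr.a[0];
}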

Signed-off-by: Robin Jarry 
---
 app/test-flow-perf/items_gen.c   |  4 +--
 app/test-pipeline/pipeline_hash.c|  4 +--
 app/test/packet_burst_generator.c|  4 +--
 app/test/test_ipfrag.c   |  4 +--
 app/test/test_reassembly_perf.c  | 20 +--
 app/test/test_thash.c|  8 ++---
 drivers/net/bnxt/bnxt_flow.c | 12 +++
 drivers/net/bonding/rte_eth_bond_pmd.c   |  4 +--
 drivers/net/cxgbe/cxgbe_flow.c   | 14 
 drivers/net/dpaa2/dpaa2_flow.c   | 22 ++--
 drivers/net/hinic/hinic_pmd_flow.c   |  6 ++--
 drivers/net/hinic/hinic_pmd_tx.c |  2 +-
 drivers/net/hns3/hns3_flow.c |  8 ++---
 drivers/net/i40e/i40e_flow.c | 12 +++
 drivers/net/iavf/iavf_fdir.c |  8 ++---
 drivers/net/iavf/iavf_fsub.c |  8 ++---
 drivers/net/iavf/iavf_ipsec_crypto.c |  6 ++--
 drivers/net/ice/ice_fdir_filter.c| 12 +++
 drivers/net/ice/ice_switch_filter.c  | 16 -
 drivers/net/igc/igc_flow.c   |  4 +--
 drivers/net/ixgbe/ixgbe_flow.c   | 12 +++
 drivers/net/ixgbe/ixgbe_ipsec.c  |  4 +--
 drivers/net/mlx5/hws/mlx5dr_definer.c| 36 +--
 drivers/net/mlx5/mlx5_flow.c |  6 ++--
 drivers/net/mlx5/mlx5_flow_dv.c  | 16 +
 drivers/net/mlx5/mlx5_flow_hw.c  | 10 +++---
 drivers/net/mlx5/mlx5_flow_verbs.c   |  8 ++---
 drivers/net/nfp/flower/nfp_flower_flow.c | 34 ++
 drivers/net/nfp/nfp_net_flow.c   | 44 +---
 drivers/net/qede/qede_filter.c   |  4 +--
 drivers/net/sfc/sfc_flow.c   | 28 +++
 drivers/net/tap/tap_flow.c   |  8 ++---
 drivers/net/txgbe/txgbe_flow.c   | 12 +++
 drivers/net/txgbe/txgbe_ipsec.c  |  4 +--
 examples/ip_fragmentation/main.c |  2 +-
 examples/ip_pipeline/pipeline.c  | 16 -
 examples/ip_reassembly/main.c|  2 +-
 examples/ipsec-secgw/flow.c  | 12 +++
 examples/ipsec-secgw/ipsec.c |  8 ++---
 examples/ipsec-secgw/sa.c|  4 +--
 examples/ipsec-secgw/sad.h   | 10 +++---
 examples/l3fwd/l3fwd_fib.c   |  2 +-
 examples/l3fwd/l3fwd_lpm.c   |  4 +--
 lib/ethdev/rte_flow.h|  6 ++--
 lib/hash/rte_thash.h | 12 +++
 lib/ip_frag/rte_ipv6_reassembly.c|  4 +--
 lib/net/rte_ip6.h|  6 ++--
 lib/node/ip6_lookup.c| 10 +++---
 lib/pipeline/rte_swx_ipsec.c |  6 ++--
 lib/pipeline/rte_table_action.c  | 24 ++---
 50 files changed, 276 insertions(+), 256 deletions(-)

diff --git a/app/test-flow-perf/items_gen.c b/app/test-flow-perf/items_gen.c
index 4ae72509d445..c740e1838ffb 100644
--- a/app/test-flow-perf/items_gen.c
+++ b/app/test-flow-perf/items_gen.c
@@ -78,8 +78,8 @@ add_ipv6(struct rte_flow_item *items,
for (i = 0; i < 16; i++) {
/* Currently src_ip is limited to 32 bit */
if (i < 4)
-   ipv6_specs[ti].hdr.src_addr[15 - i] = para.src_ip >> (i 
* 8);
-   ipv6_masks[ti].hdr.src_addr[15 - i] = 0xff;
+   ipv6_specs[ti].hdr.src_addr.a[15 - i] = para.src_ip >> 
(i * 8);
+   ipv6_masks[ti].hdr.src_addr.a[15 - i] = 0xff;
}
 
items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
diff --git a/app/test-pipeline/pipeline_hash.c 
b/app/test-pipeline/pipeline_hash.c
index cab9c2098014..194e5c5dcc53 100644
--- a/app/test-pipeline/pipeline_hash.c
+++ b/app/test-pipeline/pipeline_hash.c
@@ -432,7 +432,6 @@ app_main_loop_rx_metadata(void) {
struct rte_ipv4_hdr *ip_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
uint32_t ip_dst;
-   uint8_t *ipv6_dst;
uint32_t *signature, *k32;
 
m = app.mbuf_rx.array[j];
@@ -452,9 +451,8 @@ app_main_loop_rx_metadata(void) {
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) 
{
ipv6_hdr = (struct rte_ipv6_hdr *)
&m_data[sizeof(struct 
rte_ether_hdr)];
-   ipv6_dst = ipv6_hdr->dst_addr;
 
-   memcpy(key, ipv6_dst, 16);
+   memcpy(key, &ipv6_hdr->dst_addr, 16);
} else
continue;
 
diff --git a/app/test/packet_burst_generator.c 
b/app/test/packet_

[PATCH dpdk v1 06/15] net: add ipv6 address utilities

2024-08-21 Thread Robin Jarry
Add utility functions that use the previously introduced IPv6 address
structure. Add basic unit tests to ensure everything works as expected.

These functions will be used in the next commits to replace private
and/or duplicated functions.
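
A short usage sketch (not from the patch) combining the helpers added
here:

/* Does address b fall in the /64 network of address a? */
static int
same_net64(const struct rte_ipv6_addr *a, const struct rte_ipv6_addr *b)
{
	struct rte_ipv6_addr net = *a;

	rte_ipv6_addr_mask(&net, 64); /* zero everything past bit 64 */
	return rte_ipv6_addr_eq_prefix(&net, b, 64);
}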

Signed-off-by: Robin Jarry 
---
 app/test/meson.build |   1 +
 app/test/test_net_ipv6.c | 129 +++
 lib/net/rte_ip6.h| 112 +
 3 files changed, 242 insertions(+)
 create mode 100644 app/test/test_net_ipv6.c

diff --git a/app/test/meson.build b/app/test/meson.build
index e29258e6ec05..f5276e28c3b9 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -130,6 +130,7 @@ source_file_deps = {
 'test_metrics.c': ['metrics'],
 'test_mp_secondary.c': ['hash'],
 'test_net_ether.c': ['net'],
+'test_net_ipv6.c': ['net'],
 'test_pcapng.c': ['ethdev', 'net', 'pcapng', 'bus_vdev'],
 'test_pdcp.c': ['eventdev', 'pdcp', 'net', 'timer', 'security'],
 'test_pdump.c': ['pdump'] + sample_packet_forward_deps,
diff --git a/app/test/test_net_ipv6.c b/app/test/test_net_ipv6.c
new file mode 100644
index ..c2b42d67285e
--- /dev/null
+++ b/app/test/test_net_ipv6.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Robin Jarry
+ */
+
+#include <rte_ip6.h>
+
+#include "test.h"
+
+static const struct rte_ipv6_addr bcast_addr = {
+   "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+};
+static const struct rte_ipv6_addr zero_addr = { 0 };
+
+static int
+test_ipv6_addr_mask(void)
+{
+   const struct rte_ipv6_addr masked_3 = {
+   
"\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+   };
+   const struct rte_ipv6_addr masked_42 = {
+   
"\xff\xff\xff\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+   };
+   const struct rte_ipv6_addr masked_85 = {
+   
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\x00\x00\x00\x00\x00"
+   };
+   const struct rte_ipv6_addr masked_127 = {
+   
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe"
+   };
+   struct rte_ipv6_addr ip;
+
+   ip = bcast_addr;
+   rte_ipv6_addr_mask(&ip, 0);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &zero_addr), "");
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&zero_addr), 0, "");
+
+   ip = bcast_addr;
+   rte_ipv6_addr_mask(&ip, 3);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &masked_3), "");
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&masked_3), 3, "");
+
+   ip = bcast_addr;
+   rte_ipv6_addr_mask(&ip, 42);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &masked_42), "");
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&masked_42), 42, "");
+
+   ip = bcast_addr;
+   rte_ipv6_addr_mask(&ip, 85);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &masked_85), "");
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&masked_85), 85, "");
+
+   ip = bcast_addr;
+   rte_ipv6_addr_mask(&ip, 127);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &masked_127), "");
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&masked_127), 127, "");
+
+   ip = bcast_addr;
+   rte_ipv6_addr_mask(&ip, 128);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &bcast_addr), "");
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&bcast_addr), 128, "");
+
+   const struct rte_ipv6_addr holed_mask = {
+   
"\xff\xff\xff\xff\xff\xff\xef\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+   };
+   TEST_ASSERT_EQUAL(rte_ipv6_mask_depth(&holed_mask), 51, "");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_ipv6_addr_eq_prefix(void)
+{
+   struct rte_ipv6_addr ip1 = {
+   
"\x2a\x01\xcb\x00\x02\x54\x33\x00\x1b\x9f\x80\x71\x67\xcd\xbf\x20"
+   };
+   struct rte_ipv6_addr ip2 = {
+   
"\x2a\x01\xcb\x00\x02\x54\x33\x00\x62\x39\xe1\xf4\x7a\x0b\x23\x71"
+   };
+   struct rte_ipv6_addr ip3 = {
+   
"\xfd\x10\x00\x39\x02\x08\x00\x01\x00\x00\x00\x00\x00\x00\x10\x08"
+   };
+
+   TEST_ASSERT(rte_ipv6_addr_eq_prefix(&ip1, &ip2, 1), "");
+   TEST_ASSERT(rte_ipv6_addr_eq_prefix(&ip1, &ip2, 37), "");
+   TEST_ASSERT(rte_ipv6_addr_eq_prefix(&ip1, &ip2, 64), "");
+   TEST_ASSERT(!rte_ipv6_addr_eq_prefix(&ip1, &ip2, 112), "");
+   TEST_ASSERT(rte_ipv6_addr_eq_prefix(&ip1, &ip3, 0), "");
+   TEST_ASSERT(!rte_ipv6_addr_eq_prefix(&ip1, &ip3, 13), "");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_ipv6_addr_kind(void)
+{
+   TEST_ASSERT(rte_ipv6_addr_is_unspec(&zero_addr), "");
+
+   struct rte_ipv6_addr ucast = {
+   
"\x2a\x01\xcb\x00\x02\x54\x33\x00\x62\x39\xe1\xf4\x7a\x0b\x23\x71"
+   };
+   TEST_ASSERT(!rte_ipv6_addr_is_unspec(&ucast), "");
+
+   struct rte_ipv6_addr mcast = {
+   
"\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
+   };
+   TEST_ASSERT(!rte_ipv6_addr_is_unspec(&mcast), "");
+
+   struct rte_ipv6_addr 

[PATCH dpdk v1 07/15] fib6,rib6,lpm6: use ipv6 utils

2024-08-21 Thread Robin Jarry
Replace duplicated and/or private functions with some of the previously
introduced utility functions.

Mark rib6 functions that deal with ipv6 addresses as deprecated.
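
The replacement pattern, shown as a hypothetical helper (names as
introduced in the previous commit):

/* rte_rib6_copy_addr(dst->a, src->a) -> rte_ipv6_addr_cpy(dst, src)
 * rte_rib6_is_equal(a->a, b->a)      -> rte_ipv6_addr_eq(a, b)      */
static int
copy_if_changed(struct rte_ipv6_addr *dst, const struct rte_ipv6_addr *src)
{
	if (rte_ipv6_addr_eq(dst, src))
		return 0;
	rte_ipv6_addr_cpy(dst, src);
	return 1;
}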

Signed-off-by: Robin Jarry 
---
 app/test/test_fib6.c  |  9 +++--
 app/test/test_rib6.c  |  2 +-
 examples/ipsec-secgw/ipsec_lpm_neon.h |  2 +-
 lib/fib/trie.c| 30 ++
 lib/lpm/rte_lpm6.c| 51 +---
 lib/rib/rte_rib6.c| 57 +--
 lib/rib/rte_rib6.h|  8 
 7 files changed, 56 insertions(+), 103 deletions(-)

diff --git a/app/test/test_fib6.c b/app/test/test_fib6.c
index c3b947d789bb..7134c4d335cd 100644
--- a/app/test/test_fib6.c
+++ b/app/test/test_fib6.c
@@ -279,9 +279,12 @@ check_fib(struct rte_fib6 *fib)
int ret;
 
for (i = 0; i < RTE_FIB6_MAXDEPTH; i++) {
-   for (j = 0; j < RTE_FIB6_IPV6_ADDR_SIZE; j++) {
-   ip_arr[i].a[j] = ip_add.a[j] |
-   ~get_msk_part(RTE_FIB6_MAXDEPTH - i, j);
+   rte_ipv6_addr_cpy(&ip_arr[i], &ip_add);
+   j = (RTE_FIB6_MAXDEPTH - i) / CHAR_BIT;
+   if (j < RTE_FIB6_IPV6_ADDR_SIZE) {
+   ip_arr[i].a[j] |= UINT8_MAX >> ((RTE_FIB6_MAXDEPTH - i) 
% CHAR_BIT);
+   for (j++; j < RTE_FIB6_IPV6_ADDR_SIZE; j++)
+   ip_arr[i].a[j] = 0xff;
}
}
 
diff --git a/app/test/test_rib6.c b/app/test/test_rib6.c
index f3b6f1474348..33288f9c26d6 100644
--- a/app/test/test_rib6.c
+++ b/app/test/test_rib6.c
@@ -217,7 +217,7 @@ test_get_fn(void)
 
/* check the return values */
ret = rte_rib6_get_ip(node, &ip_ret);
-   RTE_TEST_ASSERT((ret == 0) && (rte_rib6_is_equal(ip_ret.a, ip.a)),
+   RTE_TEST_ASSERT((ret == 0) && (rte_ipv6_addr_eq(&ip_ret, &ip)),
"Failed to get proper node ip\n");
ret = rte_rib6_get_depth(node, &depth_ret);
RTE_TEST_ASSERT((ret == 0) && (depth_ret == depth),
diff --git a/examples/ipsec-secgw/ipsec_lpm_neon.h 
b/examples/ipsec-secgw/ipsec_lpm_neon.h
index 865b9624a86e..62b4260843a3 100644
--- a/examples/ipsec-secgw/ipsec_lpm_neon.h
+++ b/examples/ipsec-secgw/ipsec_lpm_neon.h
@@ -144,7 +144,7 @@ route6_pkts_neon(struct rt_ctx *rt_ctx, struct rte_mbuf 
**pkts, int nb_rx)
 * required to get the hop
 */
ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
-   memcpy(&dst_ip6[lpm_pkts], &ipv6_hdr->dst_addr, 16);
+   rte_ipv6_addr_cpy(&dst_ip6[lpm_pkts], 
&ipv6_hdr->dst_addr);
lpm_pkts++;
}
}
diff --git a/lib/fib/trie.c b/lib/fib/trie.c
index bd0c7ec63b7f..8a69702eabb2 100644
--- a/lib/fib/trie.c
+++ b/lib/fib/trie.c
@@ -367,7 +367,7 @@ install_to_dp(struct rte_trie_tbl *dp, const struct 
rte_ipv6_addr *ledge,
struct rte_ipv6_addr redge;
 
/* decrement redge by 1*/
-   rte_rib6_copy_addr(redge.a, r->a);
+   rte_ipv6_addr_cpy(&redge, r);
for (i = 15; i >= 0; i--) {
redge.a[i]--;
if (redge.a[i] != 0xff)
@@ -451,14 +451,6 @@ get_nxt_net(struct rte_ipv6_addr *ip, uint8_t depth)
}
 }
 
-static int
-v6_addr_is_zero(const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE])
-{
-   uint8_t ip_addr[RTE_FIB6_IPV6_ADDR_SIZE] = {0};
-
-   return rte_rib6_is_equal(ip, ip_addr);
-}
-
 static int
 modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
const struct rte_ipv6_addr *ip,
@@ -472,7 +464,7 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
if (next_hop > get_max_nh(dp->nh_sz))
return -EINVAL;
 
-   rte_rib6_copy_addr(ledge.a, ip->a);
+   rte_ipv6_addr_cpy(&ledge, ip);
do {
tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
RTE_RIB6_GET_NXT_COVER);
@@ -481,7 +473,7 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
if (tmp_depth == depth)
continue;
rte_rib6_get_ip(tmp, &redge);
-   if (rte_rib6_is_equal(ledge.a, redge.a)) {
+   if (rte_ipv6_addr_eq(&ledge, &redge)) {
get_nxt_net(&ledge, tmp_depth);
continue;
}
@@ -489,18 +481,18 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
if (ret != 0)
return ret;
get_nxt_net(&redge, tmp_depth);
-   rte_rib6_copy_addr(ledge.a, redge.a);
+   rte_ipv6_addr_cpy(&ledge, &redge);
/*
 * we got to the end of address space
 * and wrapped around
   

[PATCH dpdk v1 08/15] graph,node: use ipv6 addr struct and utils

2024-08-21 Thread Robin Jarry
Replace ad-hoc uint8_t[16] arrays with the previously introduced IPv6
address structure.
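
The port lookup below also replaces a byte-wise netmask comparison with
a prefix length comparison; the idea in isolation, as a sketch that is
not part of the patch:

/* A contiguous netmask is equivalent to a prefix length, so two
 * addresses match the mask iff their leading bits are equal. */
static int
same_subnet(const struct rte_ipv6_addr *a, const struct rte_ipv6_addr *b,
	    const struct rte_ipv6_addr *mask)
{
	uint8_t depth = rte_ipv6_mask_depth(mask);
	return rte_ipv6_addr_eq_prefix(a, b, depth);
}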

Signed-off-by: Robin Jarry 
---
 app/graph/ethdev.c  | 40 ++-
 app/graph/ethdev.h  |  9 ---
 app/graph/ip6_route.c   | 47 +
 app/graph/meson.build   |  2 +-
 app/graph/neigh.c   | 22 +++--
 app/graph/neigh_priv.h  |  4 +++-
 app/graph/route.h   |  8 ---
 examples/l3fwd-graph/main.c | 30 +--
 lib/node/ip6_lookup.c   | 17 ++
 lib/node/rte_node_ip6_api.h |  3 ++-
 10 files changed, 65 insertions(+), 117 deletions(-)

diff --git a/app/graph/ethdev.c b/app/graph/ethdev.c
index cfc1b1856910..19c5ab685464 100644
--- a/app/graph/ethdev.c
+++ b/app/graph/ethdev.c
@@ -124,30 +124,19 @@ ethdev_portid_by_ip4(uint32_t ip, uint32_t mask)
 }
 
 int16_t
-ethdev_portid_by_ip6(uint8_t *ip, uint8_t *mask)
+ethdev_portid_by_ip6(struct rte_ipv6_addr *ip, struct rte_ipv6_addr *mask)
 {
-   int portid = -EINVAL;
struct ethdev *port;
-   int j;
 
TAILQ_FOREACH(port, ð_node, next) {
-   for (j = 0; j < ETHDEV_IPV6_ADDR_LEN; j++) {
-   if (mask == NULL) {
-   if ((port->ip6_addr.ip[j] & 
port->ip6_addr.mask[j]) !=
-   (ip[j] & port->ip6_addr.mask[j]))
-   break;
-
-   } else {
-   if ((port->ip6_addr.ip[j] & 
port->ip6_addr.mask[j]) !=
-   (ip[j] & mask[j]))
-   break;
-   }
-   }
-   if (j == ETHDEV_IPV6_ADDR_LEN)
+   uint8_t depth = rte_ipv6_mask_depth(&port->ip6_addr.mask);
+   if (mask != NULL)
+   depth = RTE_MAX(depth, rte_ipv6_mask_depth(mask));
+   if (rte_ipv6_addr_eq_prefix(&port->ip6_addr.ip, ip, depth))
return port->config.port_id;
}
 
-   return portid;
+   return -EINVAL;
 }
 
 void
@@ -285,7 +274,7 @@ ethdev_ip6_addr_add(const char *name, struct 
ipv6_addr_config *config)
 {
struct ethdev *eth_hdl;
uint16_t portid = 0;
-   int rc, i;
+   int rc;
 
rc = rte_eth_dev_get_port_by_name(name, &portid);
if (rc < 0)
@@ -294,10 +283,8 @@ ethdev_ip6_addr_add(const char *name, struct 
ipv6_addr_config *config)
eth_hdl = ethdev_port_by_id(portid);
 
if (eth_hdl) {
-   for (i = 0; i < ETHDEV_IPV6_ADDR_LEN; i++) {
-   eth_hdl->ip6_addr.ip[i] = config->ip[i];
-   eth_hdl->ip6_addr.mask[i] = config->mask[i];
-   }
+   rte_ipv6_addr_cpy(ð_hdl->ip6_addr.ip, &config->ip);
+   rte_ipv6_addr_cpy(ð_hdl->ip6_addr.mask, &config->mask);
return 0;
}
rc = -EINVAL;
@@ -624,13 +611,10 @@ cmd_ethdev_dev_ip6_addr_add_parsed(void *parsed_result, 
__rte_unused struct cmdl
 {
struct cmd_ethdev_dev_ip6_addr_add_result *res = parsed_result;
struct ipv6_addr_config config;
-   int rc = -EINVAL, i;
+   int rc = -EINVAL;
 
-   for (i = 0; i < ETHDEV_IPV6_ADDR_LEN; i++)
-   config.ip[i] = res->ip.addr.ipv6.s6_addr[i];
-
-   for (i = 0; i < ETHDEV_IPV6_ADDR_LEN; i++)
-   config.mask[i] = res->mask.addr.ipv6.s6_addr[i];
+   rte_memcpy(&config.ip, &res->ip.addr.ipv6, sizeof(config.ip));
+   rte_memcpy(&config.mask, &res->mask.addr.ipv6, sizeof(config.mask));
 
rc = ethdev_ip6_addr_add(res->dev, &config);
if (rc < 0)
diff --git a/app/graph/ethdev.h b/app/graph/ethdev.h
index d0de593fc743..046689ee5fc7 100644
--- a/app/graph/ethdev.h
+++ b/app/graph/ethdev.h
@@ -6,8 +6,7 @@
 #define APP_GRAPH_ETHDEV_H
 
 #include 
-
-#define ETHDEV_IPV6_ADDR_LEN   16
+#include 
 
 struct ipv4_addr_config {
uint32_t ip;
@@ -15,8 +14,8 @@ struct ipv4_addr_config {
 };
 
 struct ipv6_addr_config {
-   uint8_t ip[ETHDEV_IPV6_ADDR_LEN];
-   uint8_t mask[ETHDEV_IPV6_ADDR_LEN];
+   struct rte_ipv6_addr ip;
+   struct rte_ipv6_addr mask;
 };
 
 extern uint32_t enabled_port_mask;
@@ -25,7 +24,7 @@ void ethdev_start(void);
 void ethdev_stop(void);
 void *ethdev_mempool_list_by_portid(uint16_t portid);
 int16_t ethdev_portid_by_ip4(uint32_t ip, uint32_t mask);
-int16_t ethdev_portid_by_ip6(uint8_t *ip, uint8_t *mask);
+int16_t ethdev_portid_by_ip6(struct rte_ipv6_addr *ip, struct rte_ipv6_addr 
*mask);
 int16_t ethdev_txport_by_rxport_get(uint16_t portid_rx);
 void ethdev_list_clean(void);
 
diff --git a/app/graph/ip6_route.c b/app/graph/ip6_route.c
index 834719ecaeb4..5460e5070b42 100644
--- a/app/graph/ip6_route.c
+++ b/app/graph/ip6_route.c
@@ -11,6 +11,7 @@
 #include 
 
 #include 
+#include <rte_ip6.h>
 
 #include "module_api.h"
 #include "route_priv.h"
@@ -43,38 +44,20 @

[PATCH dpdk v1 12/15] gro: use ipv6 addr struct

2024-08-21 Thread Robin Jarry
Update tcp6_flow_key to use the recently introduced IPv6 address
structure instead of uint8_t[16] arrays.

Signed-off-by: Robin Jarry 
---
 lib/gro/gro_tcp6.c | 8 
 lib/gro/gro_tcp6.h | 6 --
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/lib/gro/gro_tcp6.c b/lib/gro/gro_tcp6.c
index 6edfb6045cf6..0c6adbf2ecdb 100644
--- a/lib/gro/gro_tcp6.c
+++ b/lib/gro/gro_tcp6.c
@@ -99,8 +99,8 @@ insert_new_flow(struct gro_tcp6_tbl *tbl,
dst = &(tbl->flows[flow_idx].key);
 
ASSIGN_COMMON_TCP_KEY((&src->cmn_key), (&dst->cmn_key));
-   memcpy(&dst->src_addr[0], &src->src_addr[0], sizeof(dst->src_addr));
-   memcpy(&dst->dst_addr[0], &src->dst_addr[0], sizeof(dst->dst_addr));
+   rte_ipv6_addr_cpy(&dst->src_addr, &src->src_addr);
+   rte_ipv6_addr_cpy(&dst->dst_addr, &src->dst_addr);
dst->vtc_flow = src->vtc_flow;
 
tbl->flows[flow_idx].start_index = item_idx;
@@ -168,8 +168,8 @@ gro_tcp6_reassemble(struct rte_mbuf *pkt,
 
rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.cmn_key.eth_saddr));
rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.cmn_key.eth_daddr));
-   memcpy(&key.src_addr[0], &ipv6_hdr->src_addr, sizeof(key.src_addr));
-   memcpy(&key.dst_addr[0], &ipv6_hdr->dst_addr, sizeof(key.dst_addr));
+   rte_ipv6_addr_cpy(&key.src_addr, &ipv6_hdr->src_addr);
+   rte_ipv6_addr_cpy(&key.dst_addr, &ipv6_hdr->dst_addr);
key.cmn_key.src_port = tcp_hdr->src_port;
key.cmn_key.dst_port = tcp_hdr->dst_port;
key.cmn_key.recv_ack = tcp_hdr->recv_ack;
diff --git a/lib/gro/gro_tcp6.h b/lib/gro/gro_tcp6.h
index 073122f0ec84..acf3971bb6e9 100644
--- a/lib/gro/gro_tcp6.h
+++ b/lib/gro/gro_tcp6.h
@@ -5,6 +5,8 @@
 #ifndef _GRO_TCP6_H_
 #define _GRO_TCP6_H_
 
+#include <rte_ip6.h>
+
 #include "gro_tcp.h"
 
 #define GRO_TCP6_TBL_MAX_ITEM_NUM (1024UL * 1024UL)
@@ -12,8 +14,8 @@
 /* Header fields representing a TCP/IPv6 flow */
 struct tcp6_flow_key {
struct cmn_tcp_key cmn_key;
-   uint8_t  src_addr[16];
-   uint8_t  dst_addr[16];
+   struct rte_ipv6_addr src_addr;
+   struct rte_ipv6_addr dst_addr;
rte_be32_t vtc_flow;
 };
 
-- 
2.46.0



[PATCH dpdk v1 11/15] thash: use ipv6 addr struct

2024-08-21 Thread Robin Jarry
Update rte_ipv6_tuple to use the recently added IPv6 address structure
instead of uint8_t[16] arrays.
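
Filling the reworked tuple then amounts to two address copies; a
hypothetical sketch (the byte-order handling of the ports follows the
GFNI test below and may differ in other call sites):

static void
fill_v6_tuple(union rte_thash_tuple *t, const struct rte_ipv6_hdr *h,
	      uint16_t sport, uint16_t dport)
{
	rte_ipv6_addr_cpy(&t->v6.src_addr, &h->src_addr);
	rte_ipv6_addr_cpy(&t->v6.dst_addr, &h->dst_addr);
	t->v6.sport = rte_cpu_to_be_16(sport);
	t->v6.dport = rte_cpu_to_be_16(dport);
}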

Signed-off-by: Robin Jarry 
---
 app/test/test_thash.c | 46 ---
 lib/hash/rte_thash.h  | 20 +--
 2 files changed, 27 insertions(+), 39 deletions(-)

diff --git a/app/test/test_thash.c b/app/test/test_thash.c
index 952da6a52954..262f84433461 100644
--- a/app/test/test_thash.c
+++ b/app/test/test_thash.c
@@ -25,8 +25,8 @@ struct test_thash_v4 {
 };
 
 struct test_thash_v6 {
-   uint8_t dst_ip[16];
-   uint8_t src_ip[16];
+   struct rte_ipv6_addr dst_ip;
+   struct rte_ipv6_addr src_ip;
uint16_tdst_port;
uint16_tsrc_port;
uint32_thash_l3;
@@ -49,25 +49,19 @@ struct test_thash_v4 v4_tbl[] = {
 
 struct test_thash_v6 v6_tbl[] = {
 /*3ffe:2501:200:3::1*/
-{{0x3f, 0xfe, 0x25, 0x01, 0x02, 0x00, 0x00, 0x03,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,},
+{{.a = "\x3f\xfe\x25\x01\x02\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x01"},
 /*3ffe:2501:200:1fff::7*/
-{0x3f, 0xfe, 0x25, 0x01, 0x02, 0x00, 0x1f, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,},
+{.a = "\x3f\xfe\x25\x01\x02\x00\x1f\xff\x00\x00\x00\x00\x00\x00\x00\x07"},
 1766, 2794, 0x2cc18cd5, 0x40207d3d},
 /*ff02::1*/
-{{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,},
+{{.a = "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"},
 /*3ffe:501:8::260:97ff:fe40:efab*/
-{0x3f, 0xfe, 0x05, 0x01, 0x00, 0x08, 0x00, 0x00,
-0x02, 0x60, 0x97, 0xff, 0xfe, 0x40, 0xef, 0xab,},
+{.a = "\x3f\xfe\x05\x01\x00\x08\x00\x00\x02\x60\x97\xff\xfe\x40\xef\xab"},
 4739, 14230, 0x0f0c461c, 0xdde51bbf},
 /*fe80::200:f8ff:fe21:67cf*/
-{{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x02, 0x00, 0xf8, 0xff, 0xfe, 0x21, 0x67, 0xcf,},
+{{.a = "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x00\xf8\xff\xfe\x21\x67\xcf"},
 /*3ffe:1900:4545:3:200:f8ff:fe21:67cf*/
-{0x3f, 0xfe, 0x19, 0x00, 0x45, 0x45, 0x00, 0x03,
-0x02, 0x00, 0xf8, 0xff, 0xfe, 0x21, 0x67, 0xcf,},
+{.a = "\x3f\xfe\x19\x00\x45\x45\x00\x03\x02\x00\xf8\xff\xfe\x21\x67\xcf"},
 38024, 44251, 0x4b61e985, 0x02d1feef},
 };
 
@@ -110,7 +104,7 @@ static const uint8_t big_rss_key[] = {
 static int
 test_toeplitz_hash_calc(void)
 {
-   uint32_t i, j;
+   uint32_t i;
union rte_thash_tuple tuple;
uint32_t rss_l3, rss_l3l4;
uint8_t rss_key_be[RTE_DIM(default_rss_key)];
@@ -145,10 +139,8 @@ test_toeplitz_hash_calc(void)
}
for (i = 0; i < RTE_DIM(v6_tbl); i++) {
/*Fill ipv6 hdr*/
-   for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr.a); j++)
-   ipv6_hdr.src_addr.a[j] = v6_tbl[i].src_ip[j];
-   for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr.a); j++)
-   ipv6_hdr.dst_addr.a[j] = v6_tbl[i].dst_ip[j];
+   rte_ipv6_addr_cpy(&ipv6_hdr.src_addr, &v6_tbl[i].src_ip);
+   rte_ipv6_addr_cpy(&ipv6_hdr.dst_addr, &v6_tbl[i].dst_ip);
/*Load and convert ipv6 address into tuple*/
rte_thash_load_v6_addrs(&ipv6_hdr, &tuple);
tuple.v6.sport = v6_tbl[i].src_port;
@@ -176,7 +168,7 @@ test_toeplitz_hash_calc(void)
 static int
 test_toeplitz_hash_gfni(void)
 {
-   uint32_t i, j;
+   uint32_t i;
union rte_thash_tuple tuple;
uint32_t rss_l3, rss_l3l4;
uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];
@@ -204,10 +196,8 @@ test_toeplitz_hash_gfni(void)
}
 
for (i = 0; i < RTE_DIM(v6_tbl); i++) {
-   for (j = 0; j < RTE_DIM(tuple.v6.src_addr); j++)
-   tuple.v6.src_addr[j] = v6_tbl[i].src_ip[j];
-   for (j = 0; j < RTE_DIM(tuple.v6.dst_addr); j++)
-   tuple.v6.dst_addr[j] = v6_tbl[i].dst_ip[j];
+   rte_ipv6_addr_cpy(&tuple.v6.src_addr, &v6_tbl[i].src_ip);
+   rte_ipv6_addr_cpy(&tuple.v6.dst_addr, &v6_tbl[i].dst_ip);
tuple.v6.sport = rte_cpu_to_be_16(v6_tbl[i].dst_port);
tuple.v6.dport = rte_cpu_to_be_16(v6_tbl[i].src_port);
rss_l3 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
@@ -299,7 +289,7 @@ enum {
 static int
 test_toeplitz_hash_gfni_bulk(void)
 {
-   uint32_t i, j;
+   uint32_t i;
union rte_thash_tuple tuple[2];
uint8_t *tuples[2];
uint32_t rss[2] = { 0 };
@@ -328,10 +318,8 @@ test_toeplitz_hash_gfni_bulk(void)
rte_memcpy(tuples[0], &tuple[0], RTE_THASH_V4_L4_LEN * 4);
 
/*Load IPv6 headers and copy it into the corresponding tuple*/
-   for (j = 0; j < RTE_DIM(tuple[1].v6.src_addr); j++)
-   tuple[1].v6.src_addr[j] = v6_tbl[i].src_ip[j];
-   for (j = 0; j < RTE_DIM(tuple[1].v6.dst_addr); j++)
-   tuple[1].v6.dst_addr[j] = v6_tbl[i].dst_ip[j];
+   rte_ipv6

[PATCH dpdk v1 10/15] ipsec: use ipv6 addr struct

2024-08-21 Thread Robin Jarry
Update rte_ipsec_sadv6_key to use the recently introduced IPv6 address
structure instead of ad-hoc uint8_t[16] arrays.

Signed-off-by: Robin Jarry 
---
 app/test-sad/main.c| 24 ++---
 app/test/test_ipsec_sad.c  | 44 +++---
 examples/ipsec-secgw/sad.h | 10 +
 lib/ipsec/rte_ipsec_sad.h  |  7 +++---
 4 files changed, 44 insertions(+), 41 deletions(-)

diff --git a/app/test-sad/main.c b/app/test-sad/main.c
index addfc0714521..7253f6488dce 100644
--- a/app/test-sad/main.c
+++ b/app/test-sad/main.c
@@ -248,8 +248,8 @@ get_random_rules(struct rule *tbl, uint32_t nb_rules, int 
rule_tbl)
(uint64_t)(edge + step));
if (config.ipv6) {
for (j = 0; j < 16; j++) {
-   tbl[i].tuple.v6.dip[j] = rte_rand();
-   tbl[i].tuple.v6.sip[j] = rte_rand();
+   tbl[i].tuple.v6.dip.a[j] = rte_rand();
+   tbl[i].tuple.v6.sip.a[j] = rte_rand();
}
} else {
tbl[i].tuple.v4.dip = rte_rand();
@@ -274,9 +274,9 @@ get_random_rules(struct rule *tbl, uint32_t nb_rules, int 
rule_tbl)
(uint64_t)(edge + step));
if (config.ipv6) {
for (j = 0; j < 16; j++) {
-   tbl[i].tuple.v6.dip[j] =
+   tbl[i].tuple.v6.dip.a[j] =
rte_rand();
-   tbl[i].tuple.v6.sip[j] =
+   tbl[i].tuple.v6.sip.a[j] =
rte_rand();
}
} else {
@@ -289,11 +289,11 @@ get_random_rules(struct rule *tbl, uint32_t nb_rules, int 
rule_tbl)
config.nb_rules].tuple.v4.spi;
if (config.ipv6) {
int r_idx = i % config.nb_rules;
-   memcpy(tbl[i].tuple.v6.dip,
-   rules_tbl[r_idx].tuple.v6.dip,
+   memcpy(&tbl[i].tuple.v6.dip,
+   &rules_tbl[r_idx].tuple.v6.dip,
sizeof(tbl[i].tuple.v6.dip));
-   memcpy(tbl[i].tuple.v6.sip,
-   rules_tbl[r_idx].tuple.v6.sip,
+   memcpy(&tbl[i].tuple.v6.sip,
+   &rules_tbl[r_idx].tuple.v6.sip,
sizeof(tbl[i].tuple.v6.sip));
} else {
tbl[i].tuple.v4.dip = rules_tbl[i %
@@ -472,8 +472,8 @@ print_result(const union rte_ipsec_sad_key *key, void *res)
v4 = &key->v4;
v6 = &key->v6;
spi = (config.ipv6 == 0) ? v4->spi : v6->spi;
-   dip = (config.ipv6 == 0) ? &v4->dip : (const void *)v6->dip;
-   sip = (config.ipv6 == 0) ? &v4->sip : (const void *)v6->sip;
+   dip = (config.ipv6 == 0) ? &v4->dip : (const void *)&v6->dip;
+   sip = (config.ipv6 == 0) ? &v4->sip : (const void *)&v6->sip;
 
if (res == NULL) {
printf("TUPLE: ");
@@ -500,8 +500,8 @@ print_result(const union rte_ipsec_sad_key *key, void *res)
v4 = &rule->tuple.v4;
v6 = &rule->tuple.v6;
spi = (config.ipv6 == 0) ? v4->spi : v6->spi;
-   dip = (config.ipv6 == 0) ? &v4->dip : (const void *)v6->dip;
-   sip = (config.ipv6 == 0) ? &v4->sip : (const void *)v6->sip;
+   dip = (config.ipv6 == 0) ? &v4->dip : (const void *)&v6->dip;
+   sip = (config.ipv6 == 0) ? &v4->sip : (const void *)&v6->sip;
printf("\n\tpoints to RULE ID %zu ",
RTE_PTR_DIFF(res, rules_tbl)/sizeof(struct rule));
print_tuple(af, spi, dip, sip);
diff --git a/app/test/test_ipsec_sad.c b/app/test/test_ipsec_sad.c
index 7534f16f89d0..79ea8ee915dd 100644
--- a/app/test/test_ipsec_sad.c
+++ b/app/test/test_ipsec_sad.c
@@ -212,7 +212,7 @@ test_add_invalid(void)
 {
int status;
struct rte_ipsec_sadv4_key tuple_v4 = {10, 20, 30};
-   struct rte_ipsec_sadv6_key tuple_v6 = {10, {20, }, {30, } };
+   struct rte_ipsec_sadv6_key tuple_v6 = {10, {.a = {20, }}, {.a = {30, }} 
};
 
status = __test_add_invalid(0, (union rte_ipsec_sad_key *)&tuple_v4);
if (status != TEST_SUCCESS)
@@ -271,8 +271,8 @@ test_delete_invalid(void)
 {
int

[PATCH dpdk v1 14/15] rib6,fib6,lpm6: remove duplicate constants

2024-08-21 Thread Robin Jarry
Replace all address size and max depth macros with common ones from
rte_ip6.h.

Signed-off-by: Robin Jarry 
---
 app/test/test_fib6.c | 50 ++--
 lib/fib/rte_fib6.c   |  6 +++---
 lib/fib/rte_fib6.h   |  4 
 lib/fib/trie.c   |  6 +++---
 lib/fib/trie.h   |  2 --
 lib/lpm/rte_lpm6.c   | 10 -
 lib/lpm/rte_lpm6.h   |  3 ---
 lib/rib/rte_rib6.c   | 11 +-
 lib/rib/rte_rib6.h   |  6 ++
 9 files changed, 43 insertions(+), 55 deletions(-)

diff --git a/app/test/test_fib6.c b/app/test/test_fib6.c
index 7134c4d335cd..812da01d6e33 100644
--- a/app/test/test_fib6.c
+++ b/app/test/test_fib6.c
@@ -170,13 +170,13 @@ test_add_del_invalid(void)
fib = rte_fib6_create(__func__, SOCKET_ID_ANY, &config);
RTE_TEST_ASSERT(fib != NULL, "Failed to create FIB\n");
 
-   /* rte_fib6_add: depth > RTE_FIB6_MAXDEPTH */
-   ret = rte_fib6_add(fib, &ip, RTE_FIB6_MAXDEPTH + 1, nh);
+   /* rte_fib6_add: depth > RTE_IPV6_MAX_DEPTH */
+   ret = rte_fib6_add(fib, &ip, RTE_IPV6_MAX_DEPTH + 1, nh);
RTE_TEST_ASSERT(ret < 0,
"Call succeeded with invalid parameters\n");
 
-   /* rte_fib6_delete: depth > RTE_FIB6_MAXDEPTH */
-   ret = rte_fib6_delete(fib, &ip, RTE_FIB6_MAXDEPTH + 1);
+   /* rte_fib6_delete: depth > RTE_IPV6_MAX_DEPTH */
+   ret = rte_fib6_delete(fib, &ip, RTE_IPV6_MAX_DEPTH + 1);
RTE_TEST_ASSERT(ret < 0,
"Call succeeded with invalid parameters\n");
 
@@ -216,18 +216,18 @@ lookup_and_check_asc(struct rte_fib6 *fib,
struct rte_ipv6_addr *ip_missing, uint64_t def_nh,
uint32_t n)
 {
-   uint64_t nh_arr[RTE_FIB6_MAXDEPTH];
+   uint64_t nh_arr[RTE_IPV6_MAX_DEPTH];
int ret;
uint32_t i = 0;
 
-   ret = rte_fib6_lookup_bulk(fib, ip_arr, nh_arr, RTE_FIB6_MAXDEPTH);
+   ret = rte_fib6_lookup_bulk(fib, ip_arr, nh_arr, RTE_IPV6_MAX_DEPTH);
RTE_TEST_ASSERT(ret == 0, "Failed to lookup\n");
 
-   for (; i <= RTE_FIB6_MAXDEPTH - n; i++)
+   for (; i <= RTE_IPV6_MAX_DEPTH - n; i++)
RTE_TEST_ASSERT(nh_arr[i] == n,
"Failed to get proper nexthop\n");
 
-   for (; i < RTE_FIB6_MAXDEPTH; i++)
+   for (; i < RTE_IPV6_MAX_DEPTH; i++)
RTE_TEST_ASSERT(nh_arr[i] == --n,
"Failed to get proper nexthop\n");
 
@@ -244,18 +244,18 @@ lookup_and_check_desc(struct rte_fib6 *fib,
struct rte_ipv6_addr *ip_missing, uint64_t def_nh,
uint32_t n)
 {
-   uint64_t nh_arr[RTE_FIB6_MAXDEPTH];
+   uint64_t nh_arr[RTE_IPV6_MAX_DEPTH];
int ret;
uint32_t i = 0;
 
-   ret = rte_fib6_lookup_bulk(fib, ip_arr, nh_arr, RTE_FIB6_MAXDEPTH);
+   ret = rte_fib6_lookup_bulk(fib, ip_arr, nh_arr, RTE_IPV6_MAX_DEPTH);
RTE_TEST_ASSERT(ret == 0, "Failed to lookup\n");
 
for (; i < n; i++)
-   RTE_TEST_ASSERT(nh_arr[i] == RTE_FIB6_MAXDEPTH - i,
+   RTE_TEST_ASSERT(nh_arr[i] == RTE_IPV6_MAX_DEPTH - i,
"Failed to get proper nexthop\n");
 
-   for (; i < RTE_FIB6_MAXDEPTH; i++)
+   for (; i < RTE_IPV6_MAX_DEPTH; i++)
RTE_TEST_ASSERT(nh_arr[i] == def_nh,
"Failed to get proper nexthop\n");
 
@@ -270,7 +270,7 @@ static int
 check_fib(struct rte_fib6 *fib)
 {
uint64_t def_nh = 100;
-   struct rte_ipv6_addr ip_arr[RTE_FIB6_MAXDEPTH];
+   struct rte_ipv6_addr ip_arr[RTE_IPV6_MAX_DEPTH];
struct rte_ipv6_addr ip_add = {.a = {[0] = 128}};
struct rte_ipv6_addr ip_missing = {
.a = {[0] = 127, [1 ... 15] = 255},
@@ -278,12 +278,12 @@ check_fib(struct rte_fib6 *fib)
uint32_t i, j;
int ret;
 
-   for (i = 0; i < RTE_FIB6_MAXDEPTH; i++) {
+   for (i = 0; i < RTE_IPV6_MAX_DEPTH; i++) {
rte_ipv6_addr_cpy(&ip_arr[i], &ip_add);
-   j = (RTE_FIB6_MAXDEPTH - i) / CHAR_BIT;
-   if (j < RTE_FIB6_IPV6_ADDR_SIZE) {
-   ip_arr[i].a[j] |= UINT8_MAX >> ((RTE_FIB6_MAXDEPTH - i) 
% CHAR_BIT);
-   for (j++; j < RTE_FIB6_IPV6_ADDR_SIZE; j++)
+   j = (RTE_IPV6_MAX_DEPTH - i) / CHAR_BIT;
+   if (j < RTE_IPV6_ADDR_SIZE) {
+   ip_arr[i].a[j] |= UINT8_MAX >> ((RTE_IPV6_MAX_DEPTH - 
i) % CHAR_BIT);
+   for (j++; j < RTE_IPV6_ADDR_SIZE; j++)
ip_arr[i].a[j] = 0xff;
}
}
@@ -291,7 +291,7 @@ check_fib(struct rte_fib6 *fib)
ret = lookup_and_check_desc(fib, ip_arr, &ip_missing, def_nh, 0);
RTE_TEST_ASSERT(ret == TEST_SUCCESS, "Lookup and check fails\n");
 
-   for (i = 1; i <= RTE_FIB6_MAXDEPTH; i++) {
+   for (i = 1; i <= RTE_IPV6_MAX_DEPTH; i++) {
ret = rte_fib6_add(fib, &ip_add, i, i);
RTE_TEST_ASSERT(ret == 0, "Failed to add a route\n");
  

[PATCH dpdk v1 15/15] net: add utilities for well known ipv6 address types

2024-08-21 Thread Robin Jarry
Add more utilities to work with IPv6 addresses. These functions will be
required to help build IPv6 routing applications.
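
A usage sketch (not from the patch) chaining the new helpers the way a
neighbour discovery implementation would:

/* From a port MAC, derive its link-local address, the matching
 * solicited-node multicast group, and the Ethernet destination. */
static void
nd_addresses(const struct rte_ether_addr *mac)
{
	struct rte_ipv6_addr ll, sol;
	struct rte_ether_addr dst;

	rte_ipv6_llocal_from_ethernet(&ll, mac); /* fe80::... (EUI-64) */
	rte_ipv6_solnode_from_addr(&sol, &ll);   /* ff02::1:ffxx:xxxx  */
	rte_ether_mcast_from_ipv6(&dst, &sol);   /* 33:33:xx:xx:xx:xx  */
}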

Signed-off-by: Robin Jarry 
---
 app/test/test_net_ipv6.c | 74 ++
 lib/net/rte_ip6.h| 98 
 2 files changed, 172 insertions(+)

diff --git a/app/test/test_net_ipv6.c b/app/test/test_net_ipv6.c
index c2b42d67285e..b087b5c60d73 100644
--- a/app/test/test_net_ipv6.c
+++ b/app/test/test_net_ipv6.c
@@ -93,26 +93,97 @@ static int
 test_ipv6_addr_kind(void)
 {
TEST_ASSERT(rte_ipv6_addr_is_unspec(&zero_addr), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_linklocal(&zero_addr), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_loopback(&zero_addr), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_mcast(&zero_addr), "");
 
struct rte_ipv6_addr ucast = {

"\x2a\x01\xcb\x00\x02\x54\x33\x00\x62\x39\xe1\xf4\x7a\x0b\x23\x71"
};
TEST_ASSERT(!rte_ipv6_addr_is_unspec(&ucast), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_linklocal(&ucast), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_loopback(&ucast), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_mcast(&ucast), "");
 
struct rte_ipv6_addr mcast = {

"\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
};
TEST_ASSERT(!rte_ipv6_addr_is_unspec(&mcast), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_linklocal(&mcast), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_loopback(&mcast), "");
+   TEST_ASSERT(rte_ipv6_addr_is_mcast(&mcast), "");
 
struct rte_ipv6_addr lo = {

"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
};
TEST_ASSERT(!rte_ipv6_addr_is_unspec(&lo), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_linklocal(&lo), "");
+   TEST_ASSERT(rte_ipv6_addr_is_loopback(&lo), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_mcast(&lo), "");
 
struct rte_ipv6_addr local = {

"\xfe\x80\x00\x00\x00\x00\x00\x00\x5a\x84\xc5\x2c\x6a\xef\x46\x39"
};
TEST_ASSERT(!rte_ipv6_addr_is_unspec(&local), "");
+   TEST_ASSERT(rte_ipv6_addr_is_linklocal(&local), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_loopback(&local), "");
+   TEST_ASSERT(!rte_ipv6_addr_is_mcast(&local), "");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_ipv6_llocal_from_ethernet(void)
+{
+   const struct rte_ether_addr local_mac = { "\x04\x7b\xcb\x5c\x08\x44" };
+   const struct rte_ipv6_addr local_ip = {
+   
"\xfe\x80\x00\x00\x00\x00\x00\x00\x04\x7b\xcb\xff\xfe\x5c\x08\x44"
+   };
+   struct rte_ipv6_addr ip;
+
+   rte_ipv6_llocal_from_ethernet(&ip, &local_mac);
+   TEST_ASSERT(rte_ipv6_addr_eq(&ip, &local_ip), "");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_ipv6_solnode_from_addr(void)
+{
+   struct rte_ipv6_addr sol;
+
+   const struct rte_ipv6_addr llocal = {
+   
"\xfe\x80\x00\x00\x00\x00\x00\x00\x04\x7b\xcb\xff\xfe\x5c\x08\x44"
+   };
+   const struct rte_ipv6_addr llocal_sol = {
+   
"\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x5c\x08\x44"
+   };
+   rte_ipv6_solnode_from_addr(&sol, &llocal);
+   TEST_ASSERT(rte_ipv6_addr_eq(&sol, &llocal_sol), "");
+
+   const struct rte_ipv6_addr ucast = {
+   
"\x2a\x01\xcb\x00\x02\x54\x33\x00\x1b\x9f\x80\x71\x67\xcd\xbf\x20"
+   };
+   const struct rte_ipv6_addr ucast_sol = {
+   
"\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xcd\xbf\x20"
+   };
+   rte_ipv6_solnode_from_addr(&sol, &ucast);
+   TEST_ASSERT(rte_ipv6_addr_eq(&sol, &ucast_sol), "");
+
+   return TEST_SUCCESS;
+}
+
+static int
+test_ether_mcast_from_ipv6(void)
+{
+   const struct rte_ether_addr mcast_mac = { "\x33\x33\xd3\x00\x02\x01" };
+   const struct rte_ipv6_addr mcast_ip = {
+   
"\xff\x02\x00\x00\x00\x00\x02\x01\x00\x00\x00\x00\xd3\x00\x02\x01"
+   };
+   struct rte_ether_addr mac;
+
+   rte_ether_mcast_from_ipv6(&mac, &mcast_ip);
+   TEST_ASSERT(rte_is_same_ether_addr(&mac, &mcast_mac), "");
 
return TEST_SUCCESS;
 }
@@ -123,6 +194,9 @@ test_net_ipv6(void)
TEST_ASSERT_SUCCESS(test_ipv6_addr_mask(), "");
TEST_ASSERT_SUCCESS(test_ipv6_addr_eq_prefix(), "");
TEST_ASSERT_SUCCESS(test_ipv6_addr_kind(), "");
+   TEST_ASSERT_SUCCESS(test_ipv6_llocal_from_ethernet(), "");
+   TEST_ASSERT_SUCCESS(test_ipv6_solnode_from_addr(), "");
+   TEST_ASSERT_SUCCESS(test_ether_mcast_from_ipv6(), "");
return TEST_SUCCESS;
 }
 
diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h
index 6bc18a1c8dd6..d7eba63fe111 100644
--- a/lib/net/rte_ip6.h
+++ b/lib/net/rte_ip6.h
@@ -28,6 +28,7 @@
 #include 
 #endif
 
+#include <rte_ether.h>
 #include 
 #include 
 #include 
@@ -157,6 +158,103 @@ rte_ipv6_addr_is_unspec(const struct rte_ipv6_addr *ip)
return rte_ipv6_addr_eq(ip, &unspec);
 }
 
+/**
+ * Check if an I

[PATCH dpdk v1 09/15] pipeline: use ipv6 addr struct

2024-08-21 Thread Robin Jarry
Update rte_table_action_ipv6_header and rte_table_action_nat_params to
use the recently added IPv6 address structure instead of uint8_t[16]
arrays.

Signed-off-by: Robin Jarry 
---
 examples/ip_pipeline/cli.c  | 12 +--
 lib/pipeline/rte_table_action.c | 36 +
 lib/pipeline/rte_table_action.h |  7 ---
 3 files changed, 24 insertions(+), 31 deletions(-)

diff --git a/examples/ip_pipeline/cli.c b/examples/ip_pipeline/cli.c
index e8269ea90c11..66fdeac8f501 100644
--- a/examples/ip_pipeline/cli.c
+++ b/examples/ip_pipeline/cli.c
@@ -3540,8 +3540,8 @@ parse_table_action_encap(char **tokens,
parser_read_uint8(&hop_limit, tokens[5]))
return 0;
 
-   memcpy(a->encap.vxlan.ipv6.sa, sa.s6_addr, 16);
-   memcpy(a->encap.vxlan.ipv6.da, da.s6_addr, 16);
+   memcpy(&a->encap.vxlan.ipv6.sa, sa.s6_addr, 16);
+   memcpy(&a->encap.vxlan.ipv6.da, da.s6_addr, 16);
a->encap.vxlan.ipv6.flow_label = flow_label;
a->encap.vxlan.ipv6.dscp = dscp;
a->encap.vxlan.ipv6.hop_limit = hop_limit;
@@ -3615,7 +3615,7 @@ parse_table_action_nat(char **tokens,
return 0;
 
a->nat.ip_version = 0;
-   memcpy(a->nat.addr.ipv6, addr.s6_addr, 16);
+   memcpy(&a->nat.addr.ipv6, addr.s6_addr, 16);
a->nat.port = port;
a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
return 4;
@@ -4956,9 +4956,9 @@ table_rule_show(const char *pipeline_name,

(uint32_t)a->encap.vxlan.ipv4.ttl);
} else {
fprintf(f, " ipv6 ");
-   ipv6_addr_show(f, 
a->encap.vxlan.ipv6.sa);
+   ipv6_addr_show(f, 
a->encap.vxlan.ipv6.sa.a);
fprintf(f, " ");
-   ipv6_addr_show(f, 
a->encap.vxlan.ipv6.da);
+   ipv6_addr_show(f, 
a->encap.vxlan.ipv6.da.a);
fprintf(f, " %u %u %u ",
a->encap.vxlan.ipv6.flow_label,

(uint32_t)a->encap.vxlan.ipv6.dscp,
@@ -4980,7 +4980,7 @@ table_rule_show(const char *pipeline_name,
if (a->nat.ip_version)
ipv4_addr_show(f, a->nat.addr.ipv4);
else
-   ipv6_addr_show(f, a->nat.addr.ipv6);
+   ipv6_addr_show(f, a->nat.addr.ipv6.a);
fprintf(f, " %u ", (uint32_t)(a->nat.port));
}
 
diff --git a/lib/pipeline/rte_table_action.c b/lib/pipeline/rte_table_action.c
index a04d5121e1f0..8479f984e4ea 100644
--- a/lib/pipeline/rte_table_action.c
+++ b/lib/pipeline/rte_table_action.c
@@ -872,12 +872,8 @@ encap_vxlan_apply(void *data,
d->ipv6.payload_len = 0; /* not pre-computed */
d->ipv6.proto = IP_PROTO_UDP;
d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
-   memcpy(&d->ipv6.src_addr,
-   p->vxlan.ipv6.sa,
-   sizeof(p->vxlan.ipv6.sa));
-   memcpy(&d->ipv6.dst_addr,
-   p->vxlan.ipv6.da,
-   sizeof(p->vxlan.ipv6.da));
+   rte_ipv6_addr_cpy(&d->ipv6.src_addr, &p->vxlan.ipv6.sa);
+   rte_ipv6_addr_cpy(&d->ipv6.dst_addr, &p->vxlan.ipv6.da);
 
/* UDP */
d->udp.src_port = rte_htons(p->vxlan.udp.sp);
@@ -907,12 +903,8 @@ encap_vxlan_apply(void *data,
d->ipv6.payload_len = 0; /* not pre-computed */
d->ipv6.proto = IP_PROTO_UDP;
d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
-   memcpy(&d->ipv6.src_addr,
-   p->vxlan.ipv6.sa,
-   sizeof(p->vxlan.ipv6.sa));
-   memcpy(&d->ipv6.dst_addr,
-   p->vxlan.ipv6.da,
-   sizeof(p->vxlan.ipv6.da));
+   rte_ipv6_addr_cpy(&d->ipv6.src_addr, &p->vxlan.ipv6.sa);
+   rte_ipv6_addr_cpy(&d->ipv6.dst_addr, &p->vxlan.ipv6.da);
 
/* UDP */
d->udp.src_port = rte_htons(p->vxlan.udp.sp);
@@ -1211,7 +1203,7 @@ struct nat_ipv4_data {
 } __rte_packed;
 
 struct nat_ipv6_data {
-   uint8_t addr[16];
+   struct rte_ipv6_addr addr;
uint16_t port;
 } __rte_packed;
 
@@ -1258,

[PATCH dpdk v1 13/15] rte_flow: use ipv6 addr struct

2024-08-21 Thread Robin Jarry
Update rte_flow_tunnel, rte_flow_action_set_ipv6,
rte_flow_item_icmp6_nd_na and rte_flow_item_icmp6_nd_ns to use the
recently introduced IPv6 address structure instead of uint8_t[16]
arrays.

Update all code accordingly.
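
Initializing the updated action then takes the address as a nested
struct; a hypothetical example (the string literal form mirrors the
test vectors used elsewhere in this series):

static const struct rte_flow_action_set_ipv6 set_src = {
	.ipv6_addr = {
		"\x2a\x01\xcb\x00\x02\x54\x33\x00"
		"\x62\x39\xe1\xf4\x7a\x0b\x23\x71"
	},
};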

Signed-off-by: Robin Jarry 
---
 app/test-flow-perf/actions_gen.c |  4 ++--
 drivers/net/cxgbe/cxgbe_flow.c   |  4 ++--
 drivers/net/nfp/flower/nfp_flower_flow.c |  2 +-
 lib/ethdev/rte_flow.h| 16 +---
 4 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/app/test-flow-perf/actions_gen.c b/app/test-flow-perf/actions_gen.c
index b5336e83ff9f..54fcbacb98ce 100644
--- a/app/test-flow-perf/actions_gen.c
+++ b/app/test-flow-perf/actions_gen.c
@@ -304,7 +304,7 @@ add_set_src_ipv6(struct rte_flow_action *actions,
 
/* IPv6 value to set is random each time */
for (i = 0; i < 16; i++) {
-   set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
+   set_ipv6[para.core_idx].ipv6_addr.a[i] = ipv6 & 0xff;
ipv6 = ipv6 >> 8;
}
 
@@ -327,7 +327,7 @@ add_set_dst_ipv6(struct rte_flow_action *actions,
 
/* IPv6 value to set is random each time */
for (i = 0; i < 16; i++) {
-   set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
+   set_ipv6[para.core_idx].ipv6_addr.a[i] = ipv6 & 0xff;
ipv6 = ipv6 >> 8;
}
 
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 37c566b131e9..00c55af4bcc6 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -680,7 +680,7 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,
  "found.");
 
ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
-   memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+   memcpy(fs->nat_fip, &ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
*nmode |= 1 << 0;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
@@ -693,7 +693,7 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,
  "found.");
 
ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
-   memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+   memcpy(fs->nat_lip, &ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
*nmode |= 1 << 1;
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c 
b/drivers/net/nfp/flower/nfp_flower_flow.c
index 3f597d394b24..6db1fb141d3e 100644
--- a/drivers/net/nfp/flower/nfp_flower_flow.c
+++ b/drivers/net/nfp/flower/nfp_flower_flow.c
@@ -2978,7 +2978,7 @@ nfp_flow_action_set_ipv6(char *act_data,
set_ip->reserved = 0;
 
for (i = 0; i < 4; i++) {
-   rte_memcpy(&tmp, &set_ipv6->ipv6_addr[i * 4], 4);
+   rte_memcpy(&tmp, &set_ipv6->ipv6_addr.a[i * 4], 4);
set_ip->ipv6[i].exact = tmp;
set_ip->ipv6[i].mask = RTE_BE32(0x);
}
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index b3ede7ccdaef..628292c1df07 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -1505,16 +1505,17 @@ struct rte_flow_item_icmp6_nd_ns {
uint8_t code; /**< ICMPv6 code, normally 0. */
rte_be16_t checksum; /**< ICMPv6 checksum. */
rte_be32_t reserved; /**< Reserved, normally 0. */
-   uint8_t target_addr[16]; /**< Target address. */
+   struct rte_ipv6_addr target_addr; /**< Target address. */
 };
 
 /** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS. */
 #ifndef __cplusplus
 static const
 struct rte_flow_item_icmp6_nd_ns rte_flow_item_icmp6_nd_ns_mask = {
-   .target_addr =
+   .target_addr = {
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+   }
 };
 #endif
 
@@ -1532,16 +1533,17 @@ struct rte_flow_item_icmp6_nd_na {
 * reserved (29b).
 */
rte_be32_t rso_reserved;
-   uint8_t target_addr[16]; /**< Target address. */
+   struct rte_ipv6_addr target_addr; /**< Target address. */
 };
 
 /** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA. */
 #ifndef __cplusplus
 static const
 struct rte_flow_item_icmp6_nd_na rte_flow_item_icmp6_nd_na_mask = {
-   .target_addr =
+   .target_addr = {
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
+   }
 };
 #endif
 
@@ -3812,7 +3814,7 @@ struct rte_flow_action_set_ipv4 {
  * specified outermost IPv6 header.
  */
 struct rte_flow_action_set_ipv6 {
-   uint8_t ipv6_addr[16];
+   struct rte_ipv6_addr ipv6_addr;
 };
 
 /**
@@ -5196,8 +5198,8 @@ struct rte_flow_tunnel {
rte_be32_t dst_addr; /**< IPv4 destination address. */
} ipv4;
struct 

[PATCH v1 0/3] dts: rework topology definition in dts config

2024-08-21 Thread Nicholas Pratte
Series to rework how testbed topologies are defined within the DTS
configuration. Bugzilla ID 1360 originally asked for a rework to
define physical NIC devices on nodes. However, defining testbeds
purely by device rules out creating specific, multi-device testers,
e.g. for running performance tests. We could keep the device-based
design and bolt this flexibility on top, but that would make
configuration more convoluted than necessary. Instead, it makes more
sense to give ports/interfaces unique identifiers and have DTS users
select ports by these identifiers within the test run configuration;
that is what this series achieves.

Nicholas Pratte (3):
  dts: rework port attributes in config module
  dts: rework testbed_model Port objects to contain unique identifiers
  dts: rework test suite and dts runner to include test_run configs

 dts/conf.yaml  | 32 ++---
 dts/framework/config/__init__.py   | 12 +++--
 dts/framework/config/conf_yaml_schema.json | 52 +-
 dts/framework/config/types.py  | 19 +---
 dts/framework/runner.py| 16 +--
 dts/framework/test_suite.py| 33 +-
 dts/framework/testbed_model/port.py| 45 +--
 7 files changed, 113 insertions(+), 96 deletions(-)

-- 
2.44.0



[PATCH v1 1/3] dts: rework port attributes in config module

2024-08-21 Thread Nicholas Pratte
The current design requires that a peer PCI port is identified so that
test suites can create the correct port links. While this can work, it
also creates a lot of room for user error. Instead, devices should be
given a unique identifier which is referenced in defined test runs.

The testbeds defined for the SUT and TG must have an equal number of
specified ports. In each given array of ports, SUT port 0 is connected
to TG port 0, SUT port 1 is connected to TG port 1, etc.

Bugzilla ID: 1478

Signed-off-by: Nicholas Pratte 
---
 dts/conf.yaml  | 32 ++---
 dts/framework/config/__init__.py   | 12 +++--
 dts/framework/config/conf_yaml_schema.json | 52 +-
 dts/framework/config/types.py  | 19 +---
 4 files changed, 69 insertions(+), 46 deletions(-)

diff --git a/dts/conf.yaml b/dts/conf.yaml
index 7d95016e68..16214ee267 100644
--- a/dts/conf.yaml
+++ b/dts/conf.yaml
@@ -20,10 +20,17 @@ test_runs:
 # The machine running the DPDK test executable
 system_under_test_node:
   node_name: "SUT 1"
+  test_bed:
+- "Intel SUT Port 1"
+- "Intel SUT Port 2"
   vdevs: # optional; if removed, vdevs won't be used in the test run
 - "crypto_openssl"
 # Traffic generator node to use for this test run
-traffic_generator_node: "TG 1"
+traffic_generator_node:
+  node_name: "TG 1"
+  test_bed:
+- "Mellanox TG Port 1"
+- "Broadcom TG Port 1"
 nodes:
   # Define a system under test node, having two network ports physically
   # connected to the corresponding ports in TG 1 (the peer node)
@@ -40,17 +47,14 @@ nodes:
 force_first_numa: false
 ports:
   # sets up the physical link between "SUT 1"@000:00:08.0 and "TG 
1"@:00:08.0
-  - pci: ":00:08.0"
+  - name: "Intel SUT Port 1"
+pci: ":00:08.0"
 os_driver_for_dpdk: vfio-pci # OS driver that DPDK will use
 os_driver: i40e  # OS driver to bind when the tests are 
not running
-peer_node: "TG 1"
-peer_pci: ":00:08.0"
-  # sets up the physical link between "SUT 1"@000:00:08.1 and "TG 
1"@:00:08.1
-  - pci: ":00:08.1"
+  - name: "Intel SUT Port 2"
+pci: ":00:08.1"
 os_driver_for_dpdk: vfio-pci
 os_driver: i40e
-peer_node: "TG 1"
-peer_pci: ":00:08.1"
   # Define a Scapy traffic generator node, having two network ports
   # physically connected to the corresponding ports in SUT 1 (the peer node).
   - name: "TG 1"
@@ -59,18 +63,14 @@ nodes:
 arch: x86_64
 os: linux
 ports:
-  # sets up the physical link between "TG 1"@000:00:08.0 and "SUT 
1"@:00:08.0
-  - pci: ":00:08.0"
+  - name: "Mellanox TG Port 1"
+pci: ":00:08.0"
 os_driver_for_dpdk: rdma
 os_driver: rdma
-peer_node: "SUT 1"
-peer_pci: ":00:08.0"
-  # sets up the physical link between "SUT 1"@000:00:08.0 and "TG 
1"@:00:08.0
-  - pci: ":00:08.1"
+  - name: "Broadcom TG Port 1"
+pci: ":00:08.1"
 os_driver_for_dpdk: rdma
 os_driver: rdma
-peer_node: "SUT 1"
-peer_pci: ":00:08.1"
 hugepages_2mb: # optional; if removed, will use system hugepage 
configuration
 number_of: 256
 force_first_numa: false
diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py
index df60a5030e..534821ed22 100644
--- a/dts/framework/config/__init__.py
+++ b/dts/framework/config/__init__.py
@@ -151,11 +151,10 @@ class PortConfig:
 """
 
 node: str
+name: str
 pci: str
 os_driver_for_dpdk: str
 os_driver: str
-peer_node: str
-peer_pci: str
 
 @classmethod
 def from_dict(cls, node: str, d: PortConfigDict) -> Self:
@@ -487,12 +486,19 @@ def from_dict(
 system_under_test_node, SutNodeConfiguration
 ), f"Invalid SUT configuration {system_under_test_node}"
 
-tg_name = d["traffic_generator_node"]
+tg_name = d["traffic_generator_node"]["node_name"]
 assert tg_name in node_map, f"Unknown TG {tg_name} in test run {d}"
 traffic_generator_node = node_map[tg_name]
 assert isinstance(
 traffic_generator_node, TGNodeConfiguration
 ), f"Invalid TG configuration {traffic_generator_node}"
+assert len(traffic_generator_node.ports) == len(
+system_under_test_node.ports
+), "Insufficient ports defined on nodes."
+for port_name in d["system_under_test_node"]["test_bed"]:
+assert port_name in {port.name: port for port in 
system_under_test_node.ports}
+for port_name in d["traffic_generator_node"]["test_bed"]:
+assert port_name in {port.name: port for port in 
traffic_generator_node.ports}
 
 vdevs = (
 d["system_under_test_node"]["vdevs"] if "vdevs" in 
d["system_under_test_node

[PATCH v1 2/3] dts: rework testbed_model Port objects to contain unique identifiers

2024-08-21 Thread Nicholas Pratte
To take advantage of the unique identifiers now assigned to ports, the
testbed_model Port object needs some refactoring, and its obsolete or
needless attributes must be trimmed.

Bugzilla ID: 1478

Signed-off-by: Nicholas Pratte 
---
 dts/framework/testbed_model/port.py | 45 +++--
 1 file changed, 10 insertions(+), 35 deletions(-)

diff --git a/dts/framework/testbed_model/port.py 
b/dts/framework/testbed_model/port.py
index 817405bea4..75bc38f16e 100644
--- a/dts/framework/testbed_model/port.py
+++ b/dts/framework/testbed_model/port.py
@@ -14,43 +14,30 @@
 from framework.config import PortConfig
 
 
-@dataclass(slots=True, frozen=True)
-class PortIdentifier:
-"""The port identifier.
-
-Attributes:
-node: The node where the port resides.
-pci: The PCI address of the port on `node`.
-"""
-
-node: str
-pci: str
-
-
 @dataclass(slots=True)
 class Port:
 """Physical port on a node.
 
-The ports are identified by the node they're on and their PCI addresses. 
The port on the other
-side of the connection is also captured here.
+The ports are identified using a unique, user-defined name/identifier.
 Each port is serviced by a driver, which may be different for the 
operating system (`os_driver`)
 and for DPDK (`os_driver_for_dpdk`). For some devices, they are the same, 
e.g.: ``mlx5_core``.
 
 Attributes:
-identifier: The PCI address of the port on a node.
+node_name: Node the port exists on.
+name: User-defined unique identifier of the port.
+pci: The pci address assigned to the port.
 os_driver: The operating system driver name when the operating system 
controls the port,
 e.g.: ``i40e``.
 os_driver_for_dpdk: The operating system driver name for use with 
DPDK, e.g.: ``vfio-pci``.
-peer: The identifier of a port this port is connected with.
-The `peer` is on a different node.
 mac_address: The MAC address of the port.
 logical_name: The logical name of the port. Must be discovered.
 """
 
-identifier: PortIdentifier
+node: str
+name: str
+pci: str
 os_driver: str
 os_driver_for_dpdk: str
-peer: PortIdentifier
 mac_address: str = ""
 logical_name: str = ""
 
@@ -61,23 +48,11 @@ def __init__(self, node_name: str, config: PortConfig):
 node_name: The name of the port's node.
 config: The test run configuration of the port.
 """
-self.identifier = PortIdentifier(
-node=node_name,
-pci=config.pci,
-)
+self.node = node_name
+self.name = config.name
+self.pci = config.pci
 self.os_driver = config.os_driver
 self.os_driver_for_dpdk = config.os_driver_for_dpdk
-self.peer = PortIdentifier(node=config.peer_node, pci=config.peer_pci)
-
-@property
-def node(self) -> str:
-"""The node where the port resides."""
-return self.identifier.node
-
-@property
-def pci(self) -> str:
-"""The PCI address of the port."""
-return self.identifier.pci
 
 
 @dataclass(slots=True, frozen=True)
-- 
2.44.0
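
A minimal sketch of how the slimmed-down Port is now built, reusing the
hypothetical `port_cfg` from the configuration example above:

    # Port now stores node/name/pci directly instead of a PortIdentifier,
    # and no longer tracks a peer port.
    port = Port(node_name="sut1", config=port_cfg)
    assert (port.node, port.name, port.pci) == ("sut1", "port0", "0000:00:08.0")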



[PATCH v1 3/3] dts: rework test suite and dts runner to include test_run configs

2024-08-21 Thread Nicholas Pratte
The TestSuite object needs access to the running test run config in
order to identify the ports that need to be linked. To do this,
DTSRunner needs to be tweaked to pass test run configs down to test
suites, and the TestSuite constructor needs to be reworked to accept
them.

Bugzilla ID: 1478

Signed-off-by: Nicholas Pratte 
---
 dts/framework/runner.py | 16 +---
 dts/framework/test_suite.py | 33 +
 2 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 6b6f6a05f5..a5629c2072 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py
@@ -444,6 +444,7 @@ def _run_test_run(
 tg_node,
 build_target_config,
 build_target_result,
+test_run_config,
 test_suites_with_cases,
 )
 
@@ -463,6 +464,7 @@ def _run_build_target(
 tg_node: TGNode,
 build_target_config: BuildTargetConfiguration,
 build_target_result: BuildTargetResult,
+test_run_config: TestRunConfiguration,
 test_suites_with_cases: Iterable[TestSuiteWithCases],
 ) -> None:
 """Run the given build target.
@@ -477,6 +479,7 @@ def _run_build_target(
 build_target_config: A build target's test run configuration.
 build_target_result: The build target level result object 
associated
 with the current build target.
+test_run_config: The current test run configuration to be used by 
test suites.
 test_suites_with_cases: The test suites with test cases to run.
 """
 self._logger.set_stage(DtsStage.build_target_setup)
@@ -492,7 +495,9 @@ def _run_build_target(
 build_target_result.update_setup(Result.FAIL, e)
 
 else:
-self._run_test_suites(sut_node, tg_node, build_target_result, 
test_suites_with_cases)
+self._run_test_suites(
+sut_node, tg_node, build_target_result, 
test_suites_with_cases, test_run_config
+)
 
 finally:
 try:
@@ -509,6 +514,7 @@ def _run_test_suites(
 tg_node: TGNode,
 build_target_result: BuildTargetResult,
 test_suites_with_cases: Iterable[TestSuiteWithCases],
+test_run_config: TestRunConfiguration,
 ) -> None:
 """Run `test_suites_with_cases` with the current build target.
 
@@ -524,12 +530,15 @@ def _run_test_suites(
 build_target_result: The build target level result object 
associated
 with the current build target.
 test_suites_with_cases: The test suites with test cases to run.
+test_run_config: The current test run config running the test 
suites.
 """
 end_build_target = False
 for test_suite_with_cases in test_suites_with_cases:
 test_suite_result = 
build_target_result.add_test_suite(test_suite_with_cases)
 try:
-self._run_test_suite(sut_node, tg_node, test_suite_result, 
test_suite_with_cases)
+self._run_test_suite(
+sut_node, tg_node, test_suite_result, test_run_config, 
test_suite_with_cases
+)
 except BlockingTestSuiteError as e:
 self._logger.exception(
 f"An error occurred within 
{test_suite_with_cases.test_suite_class.__name__}. "
@@ -546,6 +555,7 @@ def _run_test_suite(
 sut_node: SutNode,
 tg_node: TGNode,
 test_suite_result: TestSuiteResult,
+test_run_config: TestRunConfiguration,
 test_suite_with_cases: TestSuiteWithCases,
 ) -> None:
 """Set up, execute and tear down `test_suite_with_cases`.
@@ -572,7 +582,7 @@ def _run_test_suite(
 self._logger.set_stage(
 DtsStage.test_suite_setup, Path(SETTINGS.output_dir, 
test_suite_name)
 )
-test_suite = test_suite_with_cases.test_suite_class(sut_node, tg_node)
+test_suite = test_suite_with_cases.test_suite_class(sut_node, tg_node, 
test_run_config)
 try:
 self._logger.info(f"Starting test suite setup: {test_suite_name}")
 test_suite.set_up_suite()
diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 694b2eba65..fd51796a06 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -20,6 +20,7 @@
 from scapy.layers.l2 import Ether  # type: ignore[import-untyped]
 from scapy.packet import Packet, Padding  # type: ignore[import-untyped]
 
+from framework.config import TestRunConfiguration
 from framework.testbed_model.port import Port, PortLink
 from framework.testbed_model.sut_node import SutNode
 from framework.testbed_model.tg_node import TGNode
@@ -64,6 +65,7 @@ class TestSuite:
 
 sut_node: SutNode
 tg_node: TGNode
+test_run_config: TestRunConfiguration
 #: Whether the test suit
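
To illustrate the plumbing above, a hedged sketch of the call chain
after this patch; `MyTestSuite` is a hypothetical suite class:

    # The runner threads the test run configuration down to the suite...
    suite = MyTestSuite(sut_node, tg_node, test_run_config)
    # ...which can then consult it, e.g. to identify the ports to link.
    assert suite.test_run_config is test_run_config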

[RFC PATCH v1 0/5] dts: add VFs to the framework

2024-08-21 Thread jspewock
From: Jeremy Spewock 

There is currently no method of creating or managing virtual functions
(VFs) in the new DTS framework, but multiple test suites in the old DTS
framework provide testing coverage using them. This series adds the
functionality the framework needs to create and use VFs in test suites
in the future.

The series is marked as an RFC primarily because it is a new feature
that has been a recent talking point on the DTS Bugzilla. The code,
however, is functional.

Jeremy Spewock (5):
  dts: allow binding only a single port to a different driver
  dts: parameterize what ports the TG sends packets to
  dts: add class for virtual functions
  dts: add OS abstractions for creating virtual functions
  dts: add functions for managing VFs to Node

 dts/framework/test_suite.py  |  38 --
 dts/framework/testbed_model/linux_session.py |  36 +-
 dts/framework/testbed_model/node.py  | 115 +--
 dts/framework/testbed_model/os_session.py|  40 +++
 dts/framework/testbed_model/port.py  |  37 +-
 5 files changed, 247 insertions(+), 19 deletions(-)

-- 
2.46.0



[RFC PATCH v1 1/5] dts: allow binding only a single port to a different driver

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously the DTS framework only included methods that bind all ports
the test run was aware of to either the DPDK driver or the OS driver.
There are, however, some cases, like creating virtual functions, where
you would want some ports bound to the OS driver and others bound to
their DPDK driver. This patch solves the problem by adding the ability
to bind individual ports to their respective drivers.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 21 -
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/dts/framework/testbed_model/node.py 
b/dts/framework/testbed_model/node.py
index 8e6181e424..85d4eb1f7c 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: 
BuildTargetConfiguration) ->
 the setup steps will be taken.
 """
 self._copy_dpdk_tarball()
-self.bind_ports_to_driver()
+self.bind_all_ports_to_driver()
 
 def tear_down_build_target(self) -> None:
 """Reset DPDK variables and bind port driver to the OS driver."""
 self.__remote_dpdk_dir = None
-self.bind_ports_to_driver(for_dpdk=False)
+self.bind_all_ports_to_driver(for_dpdk=False)
 
 def create_session(self, name: str) -> OSSession:
 """Create and return a new OS-aware remote session.
@@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None:
 # then extract to remote path
 self.main_session.extract_remote_tarball(remote_tarball_path, 
self._remote_dpdk_dir)
 
-def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
+def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None:
 """Bind all ports on the node to a driver.
 
 Args:
@@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> 
None:
 If :data:`False`, binds to os_driver.
 """
 for port in self.ports:
-driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
-self.main_session.send_command(
-f"{self.path_to_devbind_script} -b {driver} --force 
{port.pci}",
-privileged=True,
-verify=True,
-)
+self._bind_port_to_driver(port, for_dpdk)
+
+def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None:
+driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
+self.main_session.send_command(
+f"{self.path_to_devbind_script} -b {driver} --force {port.pci}",
+privileged=True,
+verify=True,
+)
 
 
 def create_session(node_config: NodeConfiguration, name: str, logger: 
DTSLogger) -> OSSession:
-- 
2.46.0
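
A hedged usage sketch of the new single-port helper; the comment mirrors
the devbind command built in the diff, with an illustrative PCI address:

    # Bind one port to its DPDK driver while leaving the others untouched;
    # effectively runs: dpdk-devbind.py -b vfio-pci --force 0000:00:08.0
    node._bind_port_to_driver(port, for_dpdk=True)
    # Rebind the same port to its kernel (OS) driver:
    node._bind_port_to_driver(port, for_dpdk=False)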



[RFC PATCH v1 2/5] dts: parameterize what ports the TG sends packets to

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously in the DTS framework the helper methods in the TestSuite
class designated ports as either ingress or egress ports and wrapped
the traffic generator's methods so that packets could only flow to
those designated ports. This is undesirable in some cases, such as when
virtual functions sit on top of a port and the TG ports need to reach
more than one SUT port, because the framework restricts where the TG is
allowed to send packets. This patch solves the problem by adding
optional parameters that let the user specify which ports to gather the
MAC addresses from when sending and receiving packets.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/test_suite.py | 38 ++---
 1 file changed, 31 insertions(+), 7 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 694b2eba65..d5c0021503 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -185,6 +185,8 @@ def send_packet_and_capture(
 packet: Packet,
 filter_config: PacketFilteringConfig = PacketFilteringConfig(),
 duration: float = 1,
+sut_ingress: Port | None = None,
+sut_egress: Port | None = None,
 ) -> list[Packet]:
 """Send and receive `packet` using the associated TG.
 
@@ -195,11 +197,19 @@ def send_packet_and_capture(
 packet: The packet to send.
 filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
+sut_ingress: Optional port to use as the SUT ingress port. 
Defaults to
+`self._sut_port_ingress`.
+sut_egress: Optional port to use as the SUT egress port. Defaults 
to
+`self._sut_port_egress`
 
 Returns:
 A list of received packets.
 """
-packet = self._adjust_addresses(packet)
+if sut_ingress is None:
+sut_ingress = self._sut_port_ingress
+if sut_egress is None:
+sut_egress = self._sut_port_egress
+packet = self._adjust_addresses(packet, sut_ingress, sut_egress)
 return self.tg_node.send_packet_and_capture(
 packet,
 self._tg_port_egress,
@@ -208,18 +218,30 @@ def send_packet_and_capture(
 duration,
 )
 
-def get_expected_packet(self, packet: Packet) -> Packet:
+def get_expected_packet(
+self, packet: Packet, sut_ingress: Port | None = None, sut_egress: 
Port | None = None
+) -> Packet:
 """Inject the proper L2/L3 addresses into `packet`.
 
 Args:
 packet: The packet to modify.
+sut_ingress: Optional port to use as the SUT ingress port. 
Defaults to
+`self._sut_port_ingress`.
+sut_egress: Optional port to use as the SUT egress port. Defaults 
to
+`self._sut_port_egress`.
 
 Returns:
 `packet` with injected L2/L3 addresses.
 """
-return self._adjust_addresses(packet, expected=True)
-
-def _adjust_addresses(self, packet: Packet, expected: bool = False) -> 
Packet:
+if sut_ingress is None:
+sut_ingress = self._sut_port_ingress
+if sut_egress is None:
+sut_egress = self._sut_port_egress
+return self._adjust_addresses(packet, sut_ingress, sut_egress, 
expected=True)
+
+def _adjust_addresses(
+self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, 
expected: bool = False
+) -> Packet:
 """L2 and L3 address additions in both directions.
 
 Assumptions:
@@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: 
bool = False) -> Packet:
 packet: The packet to modify.
 expected: If :data:`True`, the direction is SUT -> TG,
 otherwise the direction is TG -> SUT.
+sut_ingress_port: The port to use as the Rx port on the SUT.
+sut_egress_port: The port to use as the Tx port on the SUT.
 """
 if expected:
 # The packet enters the TG from SUT
 # update l2 addresses
-packet.src = self._sut_port_egress.mac_address
+packet.src = sut_egress_port.mac_address
 packet.dst = self._tg_port_ingress.mac_address
 
 # The packet is routed from TG egress to TG ingress
@@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool 
= False) -> Packet:
 # The packet leaves TG towards SUT
 # update l2 addresses
 packet.src = self._tg_port_egress.mac_address
-packet.dst = self._sut_port_ingress.mac_address
+packet.dst = sut_ingress_port.mac_address
 
 # The packet is routed from TG egress to TG ingress
 # update l3 addresses
-- 
2.46.0
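
A hedged sketch of the new override parameters from inside a
hypothetical test case; `vf_port` stands in for any SUT port other than
the suite defaults, and the scapy imports are assumed:

    # Address the frame to a specific SUT port instead of the suite-wide
    # default ingress/egress pair; omitted parameters keep the old defaults.
    received = self.send_packet_and_capture(
        Ether() / IP(),
        sut_ingress=vf_port,
        sut_egress=self._sut_port_egress,
    )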



[RFC PATCH v1 4/5] dts: add OS abstractions for creating virtual functions

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Virtual functions in the framework are created using SR-IOV. The
process for doing this can vary between operating systems, so the
commands to create VFs have to be abstracted into different classes
based on the operating system. This patch adds stubs to the abstract
class for methods that create VFs and get the PCI addresses of all VFs
on a port, as well as a Linux implementation of those methods.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/linux_session.py | 36 +-
 dts/framework/testbed_model/os_session.py| 40 
 2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/dts/framework/testbed_model/linux_session.py 
b/dts/framework/testbed_model/linux_session.py
index 99abc21353..48bf212f6a 100644
--- a/dts/framework/testbed_model/linux_session.py
+++ b/dts/framework/testbed_model/linux_session.py
@@ -15,7 +15,7 @@
 
 from typing_extensions import NotRequired
 
-from framework.exception import ConfigurationError, RemoteCommandExecutionError
+from framework.exception import ConfigurationError, 
RemoteCommandExecutionError, InternalError
 from framework.utils import expand_range
 
 from .cpu import LogicalCore
@@ -210,3 +210,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Overrides 
:meth:`~.os_session.OSSession.configure_ipv4_forwarding`."""
 state = 1 if enable else 0
 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", 
privileged=True)
+
+def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool:
+"""Overrides 
:meth:`~.os_session.OSSession.set_num_virtual_functions`."""
+sys_bus_path = 
f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:")
+curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout)
+if num > 0 and curr_num_vfs >= num:
+self._logger.info(
+f"{curr_num_vfs} VFs already configured on port 
{pf_port.identifier.pci} on node "
+f"{pf_port.identifier.node}."
+)
+return False
+elif num > 0 and curr_num_vfs > 0:
+self._logger.error(
+f"Not enough VFs configured on port {pf_port.identifier.pci} 
on node "
+f"{pf_port.identifier.node}. Need {num} but only 
{curr_num_vfs} are configured. "
+"DTS is unable to modify number of preexisting VFs."
+)
+raise InternalError("Failed to create VFs on port.")
+self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, 
verify=True)
+return True
+
+def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]:
+"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`."""
+sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", 
"\\:")
+curr_num_vfs = int(self.send_command(f"cat 
{sys_bus_path}/sriov_numvfs").stdout)
+if curr_num_vfs > 0:
+pci_addrs = self.send_command(
+'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" '
++ f"{sys_bus_path}/virtfn*/uevent",
+privileged=True,
+)
+return pci_addrs.stdout.splitlines()
+else:
+return []
diff --git a/dts/framework/testbed_model/os_session.py 
b/dts/framework/testbed_model/os_session.py
index 79f56b289b..191fc3c0c8 100644
--- a/dts/framework/testbed_model/os_session.py
+++ b/dts/framework/testbed_model/os_session.py
@@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None:
 Args:
 enable: If :data:`True`, enable the forwarding, otherwise disable 
it.
 """
+
+@abstractmethod
+def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool:
+"""Update the number of virtual functions (VFs) on a port.
+
+It should be noted that, due to the nature of VFs, if there are 
already VFs that exist on
+the physical function (PF) prior to calling this function, additional 
ones cannot be added.
+The only way to add more VFs is to remove the existing and then set 
the desired amount. For
+this reason, this method will handle creation in the following order:
+
+1. Use existing VFs on the PF if the number of existing VFs is greater 
than or equal to
+`num`
+2. Throw an exception noting that VFs cannot be created if the PF has 
some VFs already set
+on it, but the total VFs that it has are less than `num`.
+3. Create `num` VFs on the PF if there are none on it already
+
+Args:
+num: The number of VFs to set on the port.
+pf_port: The port to add the VFs to.
+
+Raises:
+InternalError: If `pf_port` has less than `num` VFs configured on 
it
+already.
+
+Returns:
+:data:`True` if this method successfully created VFs, 
:data:`False` i
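
A hedged sketch of how a caller would drive the two new session methods
together; `session` (a LinuxSession) and `pf` (a Port) are illustrative
names:

    # Ask the OS to expose 4 VFs on the PF (writes sriov_numvfs under the
    # hood), then collect their PCI addresses from the virtfn*/uevent files.
    created_here = session.set_num_virtual_functions(4, pf)
    vf_pcis = session.get_pci_addr_of_vfs(pf)  # e.g. ["0000:17:02.0", ...]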

[RFC PATCH v1 3/5] dts: add class for virtual functions

2024-08-21 Thread jspewock
From: Jeremy Spewock 

In DPDK applications virtual functions are treated the same as ports,
but within the framework there are benefits to differentiating the two
so that VFs can carry more metadata about where they originate from.
For this reason, this patch adds a new class for handling virtual
functions that extends the Port class with some additional information
about the VF.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/port.py | 37 -
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/dts/framework/testbed_model/port.py 
b/dts/framework/testbed_model/port.py
index 817405bea4..c1d85fec2b 100644
--- a/dts/framework/testbed_model/port.py
+++ b/dts/framework/testbed_model/port.py
@@ -27,7 +27,7 @@ class PortIdentifier:
 pci: str
 
 
-@dataclass(slots=True)
+@dataclass
 class Port:
 """Physical port on a node.
 
@@ -80,6 +80,41 @@ def pci(self) -> str:
 return self.identifier.pci
 
 
+@dataclass
+class VirtualFunction(Port):
+"""Virtual Function (VF) on a port.
+
+DPDK applications often treat VFs the same as they do the physical ports 
(PFs) on the host.
+For this reason VFs are represented in the framework as a type of port 
with some additional
+metadata that allows the framework to more easily identify which device 
the VF belongs to as
+well as where the VF originated from.
+
+Attributes:
+created_by_framework: :data:`True` if this VF represents one that the 
DTS framework created
+on the node, :data:`False` otherwise.
+pf_port: The PF that this VF was created on/gathered from.
+"""
+
+created_by_framework: bool = False
+pf_port: Port | None = None
+
+def __init__(
+self, node_name: str, config: PortConfig, created_by_framework: bool, 
pf_port: Port
+) -> None:
+"""Extends :meth:`Port.__init__` with VF specific metadata.
+
+Args:
+node_name: The name of the node the VF resides on.
+config: Configuration information about the VF.
+created_by_framework: :data:`True` if DTS created this VF, 
otherwise :data:`False` if
+this class represents a VF that was preexisting on the node.
+pf_port: The PF that this VF was created on/gathered from.
+"""
+super().__init__(node_name, config)
+self.created_by_framework = created_by_framework
+self.pf_port = pf_port
+
+
 @dataclass(slots=True, frozen=True)
 class PortLink:
 """The physical, cabled connection between the ports.
-- 
2.46.0
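
A hedged construction sketch for the new class; `pf` and `vf_cfg` are
illustrative stand-ins for a physical Port and a PortConfig describing
the VF:

    # A VF is a Port plus provenance metadata: which PF it came from and
    # whether DTS itself created it.
    vf = VirtualFunction("sut1", vf_cfg, created_by_framework=True, pf_port=pf)
    assert isinstance(vf, Port) and vf.pf_port is pf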



[RFC PATCH v1 5/5] dts: add functions for managing VFs to Node

2024-08-21 Thread jspewock
From: Jeremy Spewock 

In order for test suites to create virtual functions, there have to be
functions in the API that developers can use. This patch adds the
ability to create virtual functions to the Node API so that it is
available within test suites.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 96 -
 1 file changed, 94 insertions(+), 2 deletions(-)

diff --git a/dts/framework/testbed_model/node.py 
b/dts/framework/testbed_model/node.py
index 85d4eb1f7c..101a8edfbc 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -14,6 +14,7 @@
 """
 
 import os
+import re
 import tarfile
 from abc import ABC
 from ipaddress import IPv4Interface, IPv6Interface
@@ -24,9 +25,10 @@
 OS,
 BuildTargetConfiguration,
 NodeConfiguration,
+PortConfig,
 TestRunConfiguration,
 )
-from framework.exception import ConfigurationError
+from framework.exception import ConfigurationError, InternalError
 from framework.logger import DTSLogger, get_dts_logger
 from framework.settings import SETTINGS
 
@@ -39,7 +41,7 @@
 )
 from .linux_session import LinuxSession
 from .os_session import OSSession
-from .port import Port
+from .port import Port, VirtualFunction
 
 
 class Node(ABC):
@@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool 
= True) -> None:
 verify=True,
 )
 
+def create_virtual_functions(
+self, num: int, pf_port: Port, dpdk_driver: str | None = None
+) -> list[VirtualFunction]:
+"""Create virtual functions (VFs) from a given physical function (PF) 
on the node.
+
+Virtual functions will be created if there are not any currently 
configured on `pf_port`.
+If there are greater than or equal to `num` VFs already configured on 
`pf_port`, those will
+be used instead of creating more. In order to create VFs, the PF must 
be bound to its
+kernel driver. This method will handle binding `pf_port` and any other 
ports in the test
+run that reside on the same device back to their OS drivers if this 
was not done already.
+VFs gathered in this method will be bound to `dpdk_driver` if one is 
provided, or to the DPDK
+driver for `pf_port`, and then added to `self.ports`.
+
+Args:
+num: The number of VFs to create. Must be greater than 0.
+pf_port: The PF to create the VFs on.
+dpdk_driver: Optional driver to bind the VFs to after they are 
created. Defaults to the
+DPDK driver of `pf_port`.
+
+Raises:
+InternalError: If `num` is less than or equal to 0.
+"""
+if num <= 0:
+raise InternalError(
+"Method for creating virtual functions received a non-positive 
value."
+)
+if not dpdk_driver:
+dpdk_driver = pf_port.os_driver_for_dpdk
+# Get any other port that is on the same device which DTS is aware of
+all_device_ports = [
+p for p in self.ports if p.pci.split(".")[0] == 
pf_port.pci.split(".")[0]
+]
+# Ports must be bound to the kernel driver in order to create VFs from 
them
+for port in all_device_ports:
+self._bind_port_to_driver(port, False)
+# Some PMDs require the interface being up in order to make VFs
+self.configure_port_state(port)
+created_vfs = self.main_session.set_num_virtual_functions(num, pf_port)
+# We don't need more than `num` VFs from the list
+vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num]
+devbind_info = self.main_session.send_command(
+f"{self.path_to_devbind_script} -s", privileged=True
+).stdout
+
+ret = []
+
+for pci in vf_pcis:
+original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", 
devbind_info)
+os_driver = original_driver[1] if original_driver else 
pf_port.os_driver
+vf_config = PortConfig(
+self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, 
pf_port.peer.pci
+)
+vf_port = VirtualFunction(self.name, vf_config, created_vfs, 
pf_port)
+self.main_session.update_ports([vf_port])
+self._bind_port_to_driver(vf_port)
+self.ports.append(vf_port)
+ret.append(vf_port)
+return ret
+
+def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]:
+"""Get all virtual functions (VFs) that DTS is aware of on `pf_port`.
+
+Args:
+pf_port: The port to search for the VFs on.
+
+Returns:
+A list of VFs in the framework that were created/gathered from 
`pf_port`.
+"""
+return [p for p in self.ports if isinstance(p, VirtualFunction) and 
p.pf_port == pf_port]
+
+def remove_virtual_functions(self, pf_port: Port) -> None:
+"""Remove

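Putting the series together, a hedged end-to-end sketch from inside a
test suite; `pf_port` is an illustrative physical port from the test
run:

    # Create (or adopt) two VFs on the PF, bound to the PF's DPDK driver,
    # then look the same VFs up again through the Node API.
    vfs = self.sut_node.create_virtual_functions(2, pf_port)
    assert all(vf in self.sut_node.get_vfs_on_port(pf_port) for vf in vfs)
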
[RFC PATCH v2 0/5] dts: add VFs to the framework

2024-08-21 Thread jspewock
From: Jeremy Spewock 

v2:
  * Accidentally left out a formatting fix in v1.

Jeremy Spewock (5):
  dts: allow binding only a single port to a different driver
  dts: parameterize what ports the TG sends packets to
  dts: add class for virtual functions
  dts: add OS abstractions for creating virtual functions
  dts: add functions for managing VFs to Node

 dts/framework/test_suite.py  |  38 --
 dts/framework/testbed_model/linux_session.py |  40 ++-
 dts/framework/testbed_model/node.py  | 115 +--
 dts/framework/testbed_model/os_session.py|  40 +++
 dts/framework/testbed_model/port.py  |  37 +-
 5 files changed, 251 insertions(+), 19 deletions(-)

-- 
2.46.0



[RFC PATCH v2 1/5] dts: allow binding only a single port to a different driver

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously the DTS framework only included methods that bind all ports
the test run was aware of to either the DPDK driver or the OS driver.
There are, however, some cases, like creating virtual functions, where
you would want some ports bound to the OS driver and others bound to
their DPDK driver. This patch solves the problem by adding the ability
to bind individual ports to their respective drivers.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 21 -
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/dts/framework/testbed_model/node.py 
b/dts/framework/testbed_model/node.py
index 8e6181e424..85d4eb1f7c 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: 
BuildTargetConfiguration) ->
 the setup steps will be taken.
 """
 self._copy_dpdk_tarball()
-self.bind_ports_to_driver()
+self.bind_all_ports_to_driver()
 
 def tear_down_build_target(self) -> None:
 """Reset DPDK variables and bind port driver to the OS driver."""
 self.__remote_dpdk_dir = None
-self.bind_ports_to_driver(for_dpdk=False)
+self.bind_all_ports_to_driver(for_dpdk=False)
 
 def create_session(self, name: str) -> OSSession:
 """Create and return a new OS-aware remote session.
@@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None:
 # then extract to remote path
 self.main_session.extract_remote_tarball(remote_tarball_path, 
self._remote_dpdk_dir)
 
-def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
+def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None:
 """Bind all ports on the node to a driver.
 
 Args:
@@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> 
None:
 If :data:`False`, binds to os_driver.
 """
 for port in self.ports:
-driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
-self.main_session.send_command(
-f"{self.path_to_devbind_script} -b {driver} --force 
{port.pci}",
-privileged=True,
-verify=True,
-)
+self._bind_port_to_driver(port, for_dpdk)
+
+def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None:
+driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
+self.main_session.send_command(
+f"{self.path_to_devbind_script} -b {driver} --force {port.pci}",
+privileged=True,
+verify=True,
+)
 
 
 def create_session(node_config: NodeConfiguration, name: str, logger: 
DTSLogger) -> OSSession:
-- 
2.46.0



[RFC PATCH v2 2/5] dts: parameterize what ports the TG sends packets to

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously in the DTS framework the helper methods in the TestSuite
class designated ports as either ingress or egress ports and wrapped
the traffic generator's methods so that packets could only flow to
those designated ports. This is undesirable in some cases, such as when
virtual functions sit on top of a port and the TG ports need to reach
more than one SUT port, because the framework restricts where the TG is
allowed to send packets. This patch solves the problem by adding
optional parameters that let the user specify which ports to gather the
MAC addresses from when sending and receiving packets.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/test_suite.py | 38 ++---
 1 file changed, 31 insertions(+), 7 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 694b2eba65..d5c0021503 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -185,6 +185,8 @@ def send_packet_and_capture(
 packet: Packet,
 filter_config: PacketFilteringConfig = PacketFilteringConfig(),
 duration: float = 1,
+sut_ingress: Port | None = None,
+sut_egress: Port | None = None,
 ) -> list[Packet]:
 """Send and receive `packet` using the associated TG.
 
@@ -195,11 +197,19 @@ def send_packet_and_capture(
 packet: The packet to send.
 filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
+sut_ingress: Optional port to use as the SUT ingress port. 
Defaults to
+`self._sut_port_ingress`.
+sut_egress: Optional port to use as the SUT egress port. Defaults 
to
+`self._sut_port_egress`
 
 Returns:
 A list of received packets.
 """
-packet = self._adjust_addresses(packet)
+if sut_ingress is None:
+sut_ingress = self._sut_port_ingress
+if sut_egress is None:
+sut_egress = self._sut_port_egress
+packet = self._adjust_addresses(packet, sut_ingress, sut_egress)
 return self.tg_node.send_packet_and_capture(
 packet,
 self._tg_port_egress,
@@ -208,18 +218,30 @@ def send_packet_and_capture(
 duration,
 )
 
-def get_expected_packet(self, packet: Packet) -> Packet:
+def get_expected_packet(
+self, packet: Packet, sut_ingress: Port | None = None, sut_egress: 
Port | None = None
+) -> Packet:
 """Inject the proper L2/L3 addresses into `packet`.
 
 Args:
 packet: The packet to modify.
+sut_ingress: Optional port to use as the SUT ingress port. 
Defaults to
+`self._sut_port_ingress`.
+sut_egress: Optional port to use as the SUT egress port. Defaults 
to
+`self._sut_port_egress`.
 
 Returns:
 `packet` with injected L2/L3 addresses.
 """
-return self._adjust_addresses(packet, expected=True)
-
-def _adjust_addresses(self, packet: Packet, expected: bool = False) -> 
Packet:
+if sut_ingress is None:
+sut_ingress = self._sut_port_ingress
+if sut_egress is None:
+sut_egress = self._sut_port_egress
+return self._adjust_addresses(packet, sut_ingress, sut_egress, 
expected=True)
+
+def _adjust_addresses(
+self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, 
expected: bool = False
+) -> Packet:
 """L2 and L3 address additions in both directions.
 
 Assumptions:
@@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: 
bool = False) -> Packet:
 packet: The packet to modify.
 expected: If :data:`True`, the direction is SUT -> TG,
 otherwise the direction is TG -> SUT.
+sut_ingress_port: The port to use as the Rx port on the SUT.
+sut_egress_port: The port to use as the Tx port on the SUT.
 """
 if expected:
 # The packet enters the TG from SUT
 # update l2 addresses
-packet.src = self._sut_port_egress.mac_address
+packet.src = sut_egress_port.mac_address
 packet.dst = self._tg_port_ingress.mac_address
 
 # The packet is routed from TG egress to TG ingress
@@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool 
= False) -> Packet:
 # The packet leaves TG towards SUT
 # update l2 addresses
 packet.src = self._tg_port_egress.mac_address
-packet.dst = self._sut_port_ingress.mac_address
+packet.dst = sut_ingress_port.mac_address
 
 # The packet is routed from TG egress to TG ingress
 # update l3 addresses
-- 
2.46.0



[RFC PATCH v2 3/5] dts: add class for virtual functions

2024-08-21 Thread jspewock
From: Jeremy Spewock 

In DPDK applications virtual functions are treated the same as ports,
but within the framework there are benefits to differentiating the two
so that VFs can carry more metadata about where they originate from.
For this reason, this patch adds a new class for handling virtual
functions that extends the Port class with some additional information
about the VF.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/port.py | 37 -
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/dts/framework/testbed_model/port.py 
b/dts/framework/testbed_model/port.py
index 817405bea4..c1d85fec2b 100644
--- a/dts/framework/testbed_model/port.py
+++ b/dts/framework/testbed_model/port.py
@@ -27,7 +27,7 @@ class PortIdentifier:
 pci: str
 
 
-@dataclass(slots=True)
+@dataclass
 class Port:
 """Physical port on a node.
 
@@ -80,6 +80,41 @@ def pci(self) -> str:
 return self.identifier.pci
 
 
+@dataclass
+class VirtualFunction(Port):
+"""Virtual Function (VF) on a port.
+
+DPDK applications often treat VFs the same as they do the physical ports 
(PFs) on the host.
+For this reason VFs are represented in the framework as a type of port 
with some additional
+metadata that allows the framework to more easily identify which device 
the VF belongs to as
+well as where the VF originated from.
+
+Attributes:
+created_by_framework: :data:`True` if this VF represents one that the 
DTS framework created
+on the node, :data:`False` otherwise.
+pf_port: The PF that this VF was created on/gathered from.
+"""
+
+created_by_framework: bool = False
+pf_port: Port | None = None
+
+def __init__(
+self, node_name: str, config: PortConfig, created_by_framework: bool, 
pf_port: Port
+) -> None:
+"""Extends :meth:`Port.__init__` with VF specific metadata.
+
+Args:
+node_name: The name of the node the VF resides on.
+config: Configuration information about the VF.
+created_by_framework: :data:`True` if DTS created this VF, 
otherwise :data:`False` if
+this class represents a VF that was preexisting on the node.
+pf_port: The PF that this VF was created on/gathered from.
+"""
+super().__init__(node_name, config)
+self.created_by_framework = created_by_framework
+self.pf_port = pf_port
+
+
 @dataclass(slots=True, frozen=True)
 class PortLink:
 """The physical, cabled connection between the ports.
-- 
2.46.0



[RFC PATCH v2 4/5] dts: add OS abstractions for creating virtual functions

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Virtual functions in the framework are created using SR-IOV. The
process for doing this can vary between operating systems, so the
commands to create VFs have to be abstracted into different classes
based on the operating system. This patch adds stubs to the abstract
class for methods that create VFs and get the PCI addresses of all VFs
on a port, as well as a Linux implementation of those methods.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/linux_session.py | 40 +++-
 dts/framework/testbed_model/os_session.py| 40 
 2 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/dts/framework/testbed_model/linux_session.py 
b/dts/framework/testbed_model/linux_session.py
index 99abc21353..738ddd7600 100644
--- a/dts/framework/testbed_model/linux_session.py
+++ b/dts/framework/testbed_model/linux_session.py
@@ -15,7 +15,11 @@
 
 from typing_extensions import NotRequired
 
-from framework.exception import ConfigurationError, RemoteCommandExecutionError
+from framework.exception import (
+ConfigurationError,
+InternalError,
+RemoteCommandExecutionError,
+)
 from framework.utils import expand_range
 
 from .cpu import LogicalCore
@@ -210,3 +214,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Overrides 
:meth:`~.os_session.OSSession.configure_ipv4_forwarding`."""
 state = 1 if enable else 0
 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", 
privileged=True)
+
+def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool:
+"""Overrides 
:meth:`~.os_session.OSSession.set_num_virtual_functions`."""
+sys_bus_path = 
f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:")
+curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout)
+if num > 0 and curr_num_vfs >= num:
+self._logger.info(
+f"{curr_num_vfs} VFs already configured on port 
{pf_port.identifier.pci} on node "
+f"{pf_port.identifier.node}."
+)
+return False
+elif num > 0 and curr_num_vfs > 0:
+self._logger.error(
+f"Not enough VFs configured on port {pf_port.identifier.pci} 
on node "
+f"{pf_port.identifier.node}. Need {num} but only 
{curr_num_vfs} are configured. "
+"DTS is unable to modify number of preexisting VFs."
+)
+raise InternalError("Failed to create VFs on port.")
+self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, 
verify=True)
+return True
+
+def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]:
+"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`."""
+sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", 
"\\:")
+curr_num_vfs = int(self.send_command(f"cat 
{sys_bus_path}/sriov_numvfs").stdout)
+if curr_num_vfs > 0:
+pci_addrs = self.send_command(
+'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" '
++ f"{sys_bus_path}/virtfn*/uevent",
+privileged=True,
+)
+return pci_addrs.stdout.splitlines()
+else:
+return []
diff --git a/dts/framework/testbed_model/os_session.py 
b/dts/framework/testbed_model/os_session.py
index 79f56b289b..191fc3c0c8 100644
--- a/dts/framework/testbed_model/os_session.py
+++ b/dts/framework/testbed_model/os_session.py
@@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None:
 Args:
 enable: If :data:`True`, enable the forwarding, otherwise disable 
it.
 """
+
+@abstractmethod
+def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool:
+"""Update the number of virtual functions (VFs) on a port.
+
+It should be noted that, due to the nature of VFs, if there are 
already VFs that exist on
+the physical function (PF) prior to calling this function, additional 
ones cannot be added.
+The only way to add more VFs is to remove the existing and then set 
the desired amount. For
+this reason, this method will handle creation in the following order:
+
+1. Use existing VFs on the PF if the number of existing VFs is greater 
than or equal to
+`num`
+2. Throw an exception noting that VFs cannot be created if the PF has 
some VFs already set
+on it, but the total VFs that it has are less than `num`.
+3. Create `num` VFs on the PF if there are none on it already
+
+Args:
+num: The number of VFs to set on the port.
+pf_port: The port to add the VFs to.
+
+Raises:
+InternalError: If `pf_port` has less than `num` VFs configured on 
it
+already.
+
+Returns:
+:data:`True` if this method successfully create

[RFC PATCH v2 5/5] dts: add functions for managing VFs to Node

2024-08-21 Thread jspewock
From: Jeremy Spewock 

In order for test suites to create virtual functions, there have to be
functions in the API that developers can use. This patch adds the
ability to create virtual functions to the Node API so that it is
available within test suites.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 96 -
 1 file changed, 94 insertions(+), 2 deletions(-)

diff --git a/dts/framework/testbed_model/node.py 
b/dts/framework/testbed_model/node.py
index 85d4eb1f7c..101a8edfbc 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -14,6 +14,7 @@
 """
 
 import os
+import re
 import tarfile
 from abc import ABC
 from ipaddress import IPv4Interface, IPv6Interface
@@ -24,9 +25,10 @@
 OS,
 BuildTargetConfiguration,
 NodeConfiguration,
+PortConfig,
 TestRunConfiguration,
 )
-from framework.exception import ConfigurationError
+from framework.exception import ConfigurationError, InternalError
 from framework.logger import DTSLogger, get_dts_logger
 from framework.settings import SETTINGS
 
@@ -39,7 +41,7 @@
 )
 from .linux_session import LinuxSession
 from .os_session import OSSession
-from .port import Port
+from .port import Port, VirtualFunction
 
 
 class Node(ABC):
@@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool 
= True) -> None:
 verify=True,
 )
 
+def create_virtual_functions(
+self, num: int, pf_port: Port, dpdk_driver: str | None = None
+) -> list[VirtualFunction]:
+"""Create virtual functions (VFs) from a given physical function (PF) 
on the node.
+
+Virtual functions will be created if there are not any currently 
configured on `pf_port`.
+If there are greater than or equal to `num` VFs already configured on 
`pf_port`, those will
+be used instead of creating more. In order to create VFs, the PF must 
be bound to its
+kernel driver. This method will handle binding `pf_port` and any other 
ports in the test
+run that reside on the same device back to their OS drivers if this 
was not done already.
+VFs gathered in this method will be bound to `dpdk_driver` if one is 
provided, or to the DPDK
+driver for `pf_port`, and then added to `self.ports`.
+
+Args:
+num: The number of VFs to create. Must be greater than 0.
+pf_port: The PF to create the VFs on.
+dpdk_driver: Optional driver to bind the VFs to after they are 
created. Defaults to the
+DPDK driver of `pf_port`.
+
+Raises:
+InternalError: If `num` is less than or equal to 0.
+"""
+if num <= 0:
+raise InternalError(
+"Method for creating virtual functions received a non-positive 
value."
+)
+if not dpdk_driver:
+dpdk_driver = pf_port.os_driver_for_dpdk
+# Get any other port that is on the same device which DTS is aware of
+all_device_ports = [
+p for p in self.ports if p.pci.split(".")[0] == 
pf_port.pci.split(".")[0]
+]
+# Ports must be bound to the kernel driver in order to create VFs from 
them
+for port in all_device_ports:
+self._bind_port_to_driver(port, False)
+# Some PMDs require the interface being up in order to make VFs
+self.configure_port_state(port)
+created_vfs = self.main_session.set_num_virtual_functions(num, pf_port)
+# We don't need more than `num` VFs from the list
+vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num]
+devbind_info = self.main_session.send_command(
+f"{self.path_to_devbind_script} -s", privileged=True
+).stdout
+
+ret = []
+
+for pci in vf_pcis:
+original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", 
devbind_info)
+os_driver = original_driver[1] if original_driver else 
pf_port.os_driver
+vf_config = PortConfig(
+self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, 
pf_port.peer.pci
+)
+vf_port = VirtualFunction(self.name, vf_config, created_vfs, 
pf_port)
+self.main_session.update_ports([vf_port])
+self._bind_port_to_driver(vf_port)
+self.ports.append(vf_port)
+ret.append(vf_port)
+return ret
+
+def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]:
+"""Get all virtual functions (VFs) that DTS is aware of on `pf_port`.
+
+Args:
+pf_port: The port to search for the VFs on.
+
+Returns:
+A list of VFs in the framework that were created/gathered from 
`pf_port`.
+"""
+return [p for p in self.ports if isinstance(p, VirtualFunction) and 
p.pf_port == pf_port]
+
+def remove_virtual_functions(self, pf_port: Port) -> None:
+"""Remove

[RFC PATCH v2 0/5] dts: add VFs to the framework

2024-08-21 Thread jspewock
From: Jeremy Spewock 

v2:
  * Accidentally left out a formatting fix in v1.

Jeremy Spewock (5):
  dts: allow binding only a single port to a different driver
  dts: parameterize what ports the TG sends packets to
  dts: add class for virtual functions
  dts: add OS abstractions for creating virtual functions
  dts: add functions for managing VFs to Node

 dts/framework/test_suite.py  |  38 --
 dts/framework/testbed_model/linux_session.py |  40 ++-
 dts/framework/testbed_model/node.py  | 115 +--
 dts/framework/testbed_model/os_session.py|  40 +++
 dts/framework/testbed_model/port.py  |  37 +-
 5 files changed, 251 insertions(+), 19 deletions(-)

-- 
2.46.0



[RFC PATCH v2 1/5] dts: allow binding only a single port to a different driver

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously the DTS framework only included methods that bind all ports
that the test run was aware of to either the DPDK driver or the OS
driver. There are however some cases, like creating virtual functions,
where you would want some ports bound to the OS driver and others bound
to their DPDK driver. This patch adds the ability to bind individual
drivers to their respective ports to solve this problem.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 21 -
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/dts/framework/testbed_model/node.py 
b/dts/framework/testbed_model/node.py
index 8e6181e424..85d4eb1f7c 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: 
BuildTargetConfiguration) ->
 the setup steps will be taken.
 """
 self._copy_dpdk_tarball()
-self.bind_ports_to_driver()
+self.bind_all_ports_to_driver()
 
 def tear_down_build_target(self) -> None:
 """Reset DPDK variables and bind port driver to the OS driver."""
 self.__remote_dpdk_dir = None
-self.bind_ports_to_driver(for_dpdk=False)
+self.bind_all_ports_to_driver(for_dpdk=False)
 
 def create_session(self, name: str) -> OSSession:
 """Create and return a new OS-aware remote session.
@@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None:
 # then extract to remote path
 self.main_session.extract_remote_tarball(remote_tarball_path, 
self._remote_dpdk_dir)
 
-def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
+def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None:
 """Bind all ports on the node to a driver.
 
 Args:
@@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> 
None:
 If :data:`False`, binds to os_driver.
 """
 for port in self.ports:
-driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
-self.main_session.send_command(
-f"{self.path_to_devbind_script} -b {driver} --force 
{port.pci}",
-privileged=True,
-verify=True,
-)
+self._bind_port_to_driver(port, for_dpdk)
+
+def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None:
+driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
+self.main_session.send_command(
+f"{self.path_to_devbind_script} -b {driver} --force {port.pci}",
+privileged=True,
+verify=True,
+)
 
 
 def create_session(node_config: NodeConfiguration, name: str, logger: 
DTSLogger) -> OSSession:
-- 
2.46.0



[RFC PATCH v2 2/5] dts: parameterize what ports the TG sends packets to

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously in the DTS framework the helper methods in the TestSutie
class designated ports as either ingress or egress ports and would wrap
the methods of the traffic generator to allow packets to only flow to
those designated ingress or egress ports. This is undesirable in some
cases, such as when you have virtual functions on top of your port,
where the TG ports can send to more than one SUT port since the
framework limits where the TG is allowed to send packets. This patch
solves this problem by creating optional parameters that allow the user
to specify which port to gather the MAC addresses from when sending and
receiving packets.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/test_suite.py | 38 ++---
 1 file changed, 31 insertions(+), 7 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 694b2eba65..d5c0021503 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -185,6 +185,8 @@ def send_packet_and_capture(
 packet: Packet,
 filter_config: PacketFilteringConfig = PacketFilteringConfig(),
 duration: float = 1,
+sut_ingress: Port | None = None,
+sut_egress: Port | None = None,
 ) -> list[Packet]:
 """Send and receive `packet` using the associated TG.
 
@@ -195,11 +197,19 @@ def send_packet_and_capture(
 packet: The packet to send.
 filter_config: The filter to use when capturing packets.
 duration: Capture traffic for this amount of time after sending 
`packet`.
+sut_ingress: Optional port to use as the SUT ingress port. 
Defaults to
+`self._sut_port_ingress`.
+sut_egress: Optional port to use as the SUT egress port. Defaults 
to
+`self._sut_port_egress`
 
 Returns:
 A list of received packets.
 """
-packet = self._adjust_addresses(packet)
+if sut_ingress is None:
+sut_ingress = self._sut_port_ingress
+if sut_egress is None:
+sut_egress = self._sut_port_egress
+packet = self._adjust_addresses(packet, sut_ingress, sut_egress)
 return self.tg_node.send_packet_and_capture(
 packet,
 self._tg_port_egress,
@@ -208,18 +218,30 @@ def send_packet_and_capture(
 duration,
 )
 
-def get_expected_packet(self, packet: Packet) -> Packet:
+def get_expected_packet(
+self, packet: Packet, sut_ingress: Port | None = None, sut_egress: 
Port | None = None
+) -> Packet:
 """Inject the proper L2/L3 addresses into `packet`.
 
 Args:
 packet: The packet to modify.
+sut_ingress: Optional port to use as the SUT ingress port. 
Defaults to
+`self._sut_port_ingress`.
+sut_egress: Optional port to use as the SUT egress port. Defaults 
to
+`self._sut_port_egress`.
 
 Returns:
 `packet` with injected L2/L3 addresses.
 """
-return self._adjust_addresses(packet, expected=True)
-
-def _adjust_addresses(self, packet: Packet, expected: bool = False) -> 
Packet:
+if sut_ingress is None:
+sut_ingress = self._sut_port_ingress
+if sut_egress is None:
+sut_egress = self._sut_port_egress
+return self._adjust_addresses(packet, sut_ingress, sut_egress, 
expected=True)
+
+def _adjust_addresses(
+self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, 
expected: bool = False
+) -> Packet:
 """L2 and L3 address additions in both directions.
 
 Assumptions:
@@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: 
bool = False) -> Packet:
 packet: The packet to modify.
 expected: If :data:`True`, the direction is SUT -> TG,
 otherwise the direction is TG -> SUT.
+sut_ingress_port: The port to use as the Rx port on the SUT.
+sut_egress_port: The port to use as the Tx port on the SUT.
 """
 if expected:
 # The packet enters the TG from SUT
 # update l2 addresses
-packet.src = self._sut_port_egress.mac_address
+packet.src = sut_egress_port.mac_address
 packet.dst = self._tg_port_ingress.mac_address
 
 # The packet is routed from TG egress to TG ingress
@@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool 
= False) -> Packet:
 # The packet leaves TG towards SUT
 # update l2 addresses
 packet.src = self._tg_port_egress.mac_address
-packet.dst = self._sut_port_ingress.mac_address
+packet.dst = sut_ingress_port.mac_address
 
 # The packet is routed from TG egress to TG ingress
 # update l3 addresses
-- 
2.46.0



[RFC PATCH v2 3/5] dts: add class for virtual functions

2024-08-21 Thread jspewock
From: Jeremy Spewock 

In DPDK applications virtual functions are treated the same as ports,
but within the framework there are benefits to differentiating the two
in order to add more metadata to VFs about where they originate from.
For this reason this patch adds a new class for handling virtual
functions that extends the Port class with some additional information
about the VF.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/port.py | 37 -
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/dts/framework/testbed_model/port.py 
b/dts/framework/testbed_model/port.py
index 817405bea4..c1d85fec2b 100644
--- a/dts/framework/testbed_model/port.py
+++ b/dts/framework/testbed_model/port.py
@@ -27,7 +27,7 @@ class PortIdentifier:
 pci: str
 
 
-@dataclass(slots=True)
+@dataclass
 class Port:
 """Physical port on a node.
 
@@ -80,6 +80,41 @@ def pci(self) -> str:
 return self.identifier.pci
 
 
+@dataclass
+class VirtualFunction(Port):
+"""Virtual Function (VF) on a port.
+
+DPDK applications often treat VFs the same as they do the physical ports 
(PFs) on the host.
+For this reason VFs are represented in the framework as a type of port 
with some additional
+metadata that allows the framework to more easily identify which device 
the VF belongs to as
+well as where the VF originated from.
+
+Attributes:
+created_by_framework: :data:`True` if this VF represents one that the 
DTS framework created
+on the node, :data:`False` otherwise.
+pf_port: The PF that this VF was created on/gathered from.
+"""
+
+created_by_framework: bool = False
+pf_port: Port | None = None
+
+def __init__(
+self, node_name: str, config: PortConfig, created_by_framework: bool, 
pf_port: Port
+) -> None:
+"""Extends :meth:`Port.__init__` with VF specific metadata.
+
+Args:
+node_name: The name of the node the VF resides on.
+config: Configuration information about the VF.
+created_by_framework: :data:`True` if DTS created this VF, 
otherwise :data:`False` if
+this class represents a VF that was preexisting on the node.
+pf_port: The PF that this VF was created on/gathered from.
+"""
+super().__init__(node_name, config)
+self.created_by_framework = created_by_framework
+self.pf_port = pf_port
+
+
 @dataclass(slots=True, frozen=True)
 class PortLink:
 """The physical, cabled connection between the ports.
-- 
2.46.0



[RFC PATCH v2 4/5] dts: add OS abstractions for creating virtual functions

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Virtual functions in the framework are created using SR-IOV. The process
for doing this can vary depending on the operating system, so the
commands to create VFs have to be abstracted into different classes
based on the operating system. This patch adds the stubs for methods
that create VFs and get the PCI addresses of all VFs on a port to the
abstract class as well as a linux implementation for the methods.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/linux_session.py | 40 +++-
 dts/framework/testbed_model/os_session.py| 40 
 2 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/dts/framework/testbed_model/linux_session.py 
b/dts/framework/testbed_model/linux_session.py
index 99abc21353..738ddd7600 100644
--- a/dts/framework/testbed_model/linux_session.py
+++ b/dts/framework/testbed_model/linux_session.py
@@ -15,7 +15,11 @@
 
 from typing_extensions import NotRequired
 
-from framework.exception import ConfigurationError, RemoteCommandExecutionError
+from framework.exception import (
+ConfigurationError,
+InternalError,
+RemoteCommandExecutionError,
+)
 from framework.utils import expand_range
 
 from .cpu import LogicalCore
@@ -210,3 +214,37 @@ def configure_ipv4_forwarding(self, enable: bool) -> None:
 """Overrides 
:meth:`~.os_session.OSSession.configure_ipv4_forwarding`."""
 state = 1 if enable else 0
 self.send_command(f"sysctl -w net.ipv4.ip_forward={state}", 
privileged=True)
+
+def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool:
+"""Overrides 
:meth:`~.os_session.OSSession.set_num_virtual_functions`."""
+sys_bus_path = 
f"/sys/bus/pci/devices/{pf_port.pci}/sriov_numvfs".replace(":", "\\:")
+curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}").stdout)
+if num > 0 and curr_num_vfs >= num:
+self._logger.info(
+f"{curr_num_vfs} VFs already configured on port 
{pf_port.identifier.pci} on node "
+f"{pf_port.identifier.node}."
+)
+return False
+elif num > 0 and curr_num_vfs > 0:
+self._logger.error(
+f"Not enough VFs configured on port {pf_port.identifier.pci} 
on node "
+f"{pf_port.identifier.node}. Need {num} but only 
{curr_num_vfs} are configured. "
+"DTS is unable to modify number of preexisting VFs."
+)
+raise InternalError("Failed to create VFs on port.")
+self.send_command(f"echo {num} > {sys_bus_path}", privileged=True, 
verify=True)
+return True
+
+def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]:
+"""Overrides :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`."""
+sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", 
"\\:")
+curr_num_vfs = int(self.send_command(f"cat 
{sys_bus_path}/sriov_numvfs").stdout)
+if curr_num_vfs > 0:
+pci_addrs = self.send_command(
+'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" '
++ f"{sys_bus_path}/virtfn*/uevent",
+privileged=True,
+)
+return pci_addrs.stdout.splitlines()
+else:
+return []
diff --git a/dts/framework/testbed_model/os_session.py 
b/dts/framework/testbed_model/os_session.py
index 79f56b289b..191fc3c0c8 100644
--- a/dts/framework/testbed_model/os_session.py
+++ b/dts/framework/testbed_model/os_session.py
@@ -395,3 +395,43 @@ def configure_ipv4_forwarding(self, enable: bool) -> None:
         Args:
             enable: If :data:`True`, enable the forwarding, otherwise disable it.
         """
+
+    @abstractmethod
+    def set_num_virtual_functions(self, num: int, pf_port: Port) -> bool:
+        """Update the number of virtual functions (VFs) on a port.
+
+        It should be noted that, due to the nature of VFs, if there are already VFs that exist on
+        the physical function (PF) prior to calling this function, additional ones cannot be added.
+        The only way to add more VFs is to remove the existing ones and then set the desired
+        amount. For this reason, this method will handle creation in the following order:
+
+        1. Use existing VFs on the PF if the number of existing VFs is greater than or equal to
+           `num`
+        2. Throw an exception noting that VFs cannot be created if the PF has some VFs already set
+           on it, but the total VFs that it has are less than `num`.
+        3. Create `num` VFs on the PF if there are none on it already
+
+        Args:
+            num: The number of VFs to set on the port.
+            pf_port: The port to add the VFs to.
+
+        Raises:
+            InternalError: If `pf_port` has less than `num` VFs configured on it already.
+
+        Returns:
+            :data:`True` if this method successfully created VFs, :data:`False` if existing
+            VFs were used to fulfill the request instead.
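
For reference, the sysfs flow the Linux implementation wraps can be exercised
directly. Below is a minimal standalone sketch, assuming root privileges and a
hypothetical PF address; unlike set_num_virtual_functions above, it resets a
non-zero VF count instead of raising, since the kernel only accepts a new
sriov_numvfs value once the count has been reset to zero:

    # Minimal sketch of the SR-IOV sysfs flow used above; not part of the patch.
    from pathlib import Path

    def set_vfs(pf_pci: str, num: int) -> None:
        numvfs = Path(f"/sys/bus/pci/devices/{pf_pci}/sriov_numvfs")
        if int(numvfs.read_text()) not in (0, num):
            # A non-zero count must be written back to 0 before a new value is accepted.
            numvfs.write_text("0")
        numvfs.write_text(str(num))

    def vf_addresses(pf_pci: str) -> list[str]:
        # Each virtfn* symlink points at a VF; its uevent file carries a
        # PCI_SLOT_NAME=<address> line, which is what the awk invocation extracts.
        addrs = []
        for virtfn in sorted(Path(f"/sys/bus/pci/devices/{pf_pci}").glob("virtfn*")):
            for line in (virtfn / "uevent").read_text().splitlines():
                if line.startswith("PCI_SLOT_NAME="):
                    addrs.append(line.split("=", 1)[1])
        return addrs

    # Example with a hypothetical PF address:
    # set_vfs("0000:17:00.0", 2); print(vf_addresses("0000:17:00.0"))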

[RFC PATCH v2 5/5] dts: add functions for managing VFs to Node

2024-08-21 Thread jspewock
From: Jeremy Spewock 

In order for test suites to create virtual functions, there have to be
functions in the API that developers can use. This patch adds the
ability to create virtual functions to the Node API so that it is
reachable within test suites.

Bugzilla ID: 1500

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 96 -
 1 file changed, 94 insertions(+), 2 deletions(-)

diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py
index 85d4eb1f7c..101a8edfbc 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -14,6 +14,7 @@
 """
 
 import os
+import re
 import tarfile
 from abc import ABC
 from ipaddress import IPv4Interface, IPv6Interface
@@ -24,9 +25,10 @@
 OS,
 BuildTargetConfiguration,
 NodeConfiguration,
+PortConfig,
 TestRunConfiguration,
 )
-from framework.exception import ConfigurationError
+from framework.exception import ConfigurationError, InternalError
 from framework.logger import DTSLogger, get_dts_logger
 from framework.settings import SETTINGS
 
@@ -39,7 +41,7 @@
 )
 from .linux_session import LinuxSession
 from .os_session import OSSession
-from .port import Port
+from .port import Port, VirtualFunction
 
 
 class Node(ABC):
@@ -335,6 +337,96 @@ def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None:
 verify=True,
 )
 
+    def create_virtual_functions(
+        self, num: int, pf_port: Port, dpdk_driver: str | None = None
+    ) -> list[VirtualFunction]:
+        """Create virtual functions (VFs) from a given physical function (PF) on the node.
+
+        Virtual functions will be created if there are not any currently configured on `pf_port`.
+        If there are greater than or equal to `num` VFs already configured on `pf_port`, those will
+        be used instead of creating more. In order to create VFs, the PF must be bound to its
+        kernel driver. This method will handle binding `pf_port` and any other ports in the test
+        run that reside on the same device back to their OS drivers if this was not done already.
+        VFs gathered in this method will be bound to `driver` if one is provided, or the DPDK
+        driver for `pf_port`, and then added to `self.ports`.
+
+        Args:
+            num: The number of VFs to create. Must be greater than 0.
+            pf_port: The PF to create the VFs on.
+            dpdk_driver: Optional driver to bind the VFs to after they are created. Defaults to the
+                DPDK driver of `pf_port`.
+
+        Raises:
+            InternalError: If `num` is less than or equal to 0.
+        """
+        if num <= 0:
+            raise InternalError(
+                "Method for creating virtual functions received a non-positive value."
+            )
+        if not dpdk_driver:
+            dpdk_driver = pf_port.os_driver_for_dpdk
+        # Get any other port that is on the same device which DTS is aware of
+        all_device_ports = [
+            p for p in self.ports if p.pci.split(".")[0] == pf_port.pci.split(".")[0]
+        ]
+        # Ports must be bound to the kernel driver in order to create VFs from them
+        for port in all_device_ports:
+            self._bind_port_to_driver(port, False)
+            # Some PMDs require the interface being up in order to make VFs
+            self.configure_port_state(port)
+        created_vfs = self.main_session.set_num_virtual_functions(num, pf_port)
+        # We don't need more than `num` VFs from the list
+        vf_pcis = self.main_session.get_pci_addr_of_vfs(pf_port)[:num]
+        devbind_info = self.main_session.send_command(
+            f"{self.path_to_devbind_script} -s", privileged=True
+        ).stdout
+
+        ret = []
+
+        for pci in vf_pcis:
+            original_driver = re.search(f"{pci}.*drv=([\\d\\w-]*)", devbind_info)
+            os_driver = original_driver[1] if original_driver else pf_port.os_driver
+            vf_config = PortConfig(
+                self.name, pci, dpdk_driver, os_driver, pf_port.peer.node, pf_port.peer.pci
+            )
+            vf_port = VirtualFunction(self.name, vf_config, created_vfs, pf_port)
+            self.main_session.update_ports([vf_port])
+            self._bind_port_to_driver(vf_port)
+            self.ports.append(vf_port)
+            ret.append(vf_port)
+        return ret
+
+    def get_vfs_on_port(self, pf_port: Port) -> list[VirtualFunction]:
+        """Get all virtual functions (VFs) that DTS is aware of on `pf_port`.
+
+        Args:
+            pf_port: The port to search for the VFs on.
+
+        Returns:
+            A list of VFs in the framework that were created/gathered from `pf_port`.
+        """
+        return [p for p in self.ports if isinstance(p, VirtualFunction) and p.pf_port == pf_port]
+
+def remove_virtual_functions(self, pf_port: Port) -> None:
+"""Remove

Re: [RFC PATCH v2 0/5] dts: add VFs to the framework

2024-08-21 Thread Jeremy Spewock
Apologies for sending out the v2 twice for this series; when I
attempted it the first time, the reply chain somehow broke and
patchwork didn't consider all the patches as one series. This should be
fixed now.


[PATCH] test/event: fix incorrect target event queue

2024-08-21 Thread pbhagavatula
From: Pavan Nikhilesh 

In OP_FWD mode, if an internal port is supported, the target event
queue should be TEST_APP_EV_QUEUE_ID.

Fixes: a276e7c8fbb3 ("test/event: add DMA adapter auto-test")

Signed-off-by: Pavan Nikhilesh 
---
 app/test/test_event_dma_adapter.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c
index 3b39521153bb..9988d4fc7bc5 100644
--- a/app/test/test_event_dma_adapter.c
+++ b/app/test/test_event_dma_adapter.c
@@ -271,7 +271,10 @@ test_op_forward_mode(void)
ev[i].event = 0;
ev[i].op = RTE_EVENT_OP_NEW;
ev[i].event_type = RTE_EVENT_TYPE_DMADEV;
-   ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
+   if (params.internal_port_op_fwd)
+   ev[i].queue_id = TEST_APP_EV_QUEUE_ID;
+   else
+   ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
ev[i].flow_id = 0xAABB;
ev[i].event_ptr = op;
-- 
2.45.2



[RFC PATCH v3 0/5] dts: add VFs to the framework

2024-08-21 Thread jspewock
From: Jeremy Spewock 

rfc-v3:
 * add missing Depends-on tags to the appropriate commits.
 * fix some other small issues in commit bodies
 * fix the name of a function in os_udp

Jeremy Spewock (5):
  dts: allow binding only a single port to a different driver
  dts: parameterize what ports the TG sends packets to
  dts: add class for virtual functions
  dts: add OS abstractions for creating virtual functions
  dts: add functions for managing VFs to Node

 dts/framework/test_suite.py  |  38 --
 dts/framework/testbed_model/linux_session.py |  40 ++-
 dts/framework/testbed_model/node.py  | 115 +--
 dts/framework/testbed_model/os_session.py|  40 +++
 dts/framework/testbed_model/port.py  |  37 +-
 dts/tests/TestSuite_os_udp.py|   4 +-
 6 files changed, 253 insertions(+), 21 deletions(-)

-- 
2.46.0



[RFC PATCH v3 1/5] dts: allow binding only a single port to a different driver

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously the DTS framework only included methods that bind all ports
that the test run was aware of to either the DPDK driver or the OS
driver. There are, however, some cases, like creating virtual
functions, where you would want some ports bound to the OS driver and
others bound to their DPDK driver. This patch adds the ability to bind
individual ports to their respective drivers to solve this problem.

Depends-on: patch-143101 ("dts: add binding to different drivers to TG
node")

Signed-off-by: Jeremy Spewock 
---
 dts/framework/testbed_model/node.py | 21 -
 dts/tests/TestSuite_os_udp.py   |  4 ++--
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py
index 8e6181e424..85d4eb1f7c 100644
--- a/dts/framework/testbed_model/node.py
+++ b/dts/framework/testbed_model/node.py
@@ -167,12 +167,12 @@ def set_up_build_target(self, build_target_config: BuildTargetConfiguration) ->
 the setup steps will be taken.
 """
 self._copy_dpdk_tarball()
-self.bind_ports_to_driver()
+self.bind_all_ports_to_driver()
 
 def tear_down_build_target(self) -> None:
 """Reset DPDK variables and bind port driver to the OS driver."""
 self.__remote_dpdk_dir = None
-self.bind_ports_to_driver(for_dpdk=False)
+self.bind_all_ports_to_driver(for_dpdk=False)
 
 def create_session(self, name: str) -> OSSession:
 """Create and return a new OS-aware remote session.
@@ -317,7 +317,7 @@ def _copy_dpdk_tarball(self) -> None:
 # then extract to remote path
         self.main_session.extract_remote_tarball(remote_tarball_path, self._remote_dpdk_dir)
 
-def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
+def bind_all_ports_to_driver(self, for_dpdk: bool = True) -> None:
 """Bind all ports on the node to a driver.
 
 Args:
@@ -325,12 +325,15 @@ def bind_ports_to_driver(self, for_dpdk: bool = True) -> None:
 If :data:`False`, binds to os_driver.
 """
         for port in self.ports:
-            driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
-            self.main_session.send_command(
-                f"{self.path_to_devbind_script} -b {driver} --force {port.pci}",
-                privileged=True,
-                verify=True,
-            )
+            self._bind_port_to_driver(port, for_dpdk)
+
+    def _bind_port_to_driver(self, port: Port, for_dpdk: bool = True) -> None:
+        driver = port.os_driver_for_dpdk if for_dpdk else port.os_driver
+        self.main_session.send_command(
+            f"{self.path_to_devbind_script} -b {driver} --force {port.pci}",
+            privileged=True,
+            verify=True,
+        )
 
 
def create_session(node_config: NodeConfiguration, name: str, logger: DTSLogger) -> OSSession:
diff --git a/dts/tests/TestSuite_os_udp.py b/dts/tests/TestSuite_os_udp.py
index a78bd74139..5e9469bbac 100644
--- a/dts/tests/TestSuite_os_udp.py
+++ b/dts/tests/TestSuite_os_udp.py
@@ -23,7 +23,7 @@ def set_up_suite(self) -> None:
         Bind the SUT ports to the OS driver, configure the ports and configure the SUT
         to route traffic from if1 to if2.
 """
-self.sut_node.bind_ports_to_driver(for_dpdk=False)
+self.sut_node.bind_all_ports_to_driver(for_dpdk=False)
 self.configure_testbed_ipv4()
 
 def test_os_udp(self) -> None:
@@ -50,4 +50,4 @@ def tear_down_suite(self) -> None:
 """
 self.configure_testbed_ipv4(restore=True)
 # Assume other suites will likely need dpdk driver
-self.sut_node.bind_ports_to_driver(for_dpdk=True)
+self.sut_node.bind_all_ports_to_driver(for_dpdk=True)
-- 
2.46.0
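
With the refactor in place, callers get per-port granularity. A hypothetical
sketch of the pattern the later VF patches rely on, moving one PF back to its
OS driver while the node's other ports stay on their DPDK driver:

    # Hypothetical sketch; pf_port is the PF chosen for VF creation.
    pf_port = node.ports[0]
    node._bind_port_to_driver(pf_port, for_dpdk=False)
    # The old all-ports behaviour remains available as
    # node.bind_all_ports_to_driver(for_dpdk=True)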



[RFC PATCH v3 2/5] dts: parameterize what ports the TG sends packets to

2024-08-21 Thread jspewock
From: Jeremy Spewock 

Previously, the helper methods in the DTS framework's TestSuite class
designated ports as either ingress or egress ports and wrapped the
traffic generator's methods so that packets could only flow between
those designated ports. This is undesirable in some cases, such as when
virtual functions sit on top of a port, where the TG ports can send to
more than one SUT port. This patch solves the problem by adding
optional parameters that allow the user to specify which ports to
gather the MAC addresses from when sending and receiving packets.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/test_suite.py | 38 ++---
 1 file changed, 31 insertions(+), 7 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -185,6 +185,8 @@ def send_packet_and_capture(
         packet: Packet,
         filter_config: PacketFilteringConfig = PacketFilteringConfig(),
         duration: float = 1,
+        sut_ingress: Port | None = None,
+        sut_egress: Port | None = None,
     ) -> list[Packet]:
         """Send and receive `packet` using the associated TG.
 
@@ -195,11 +197,19 @@ def send_packet_and_capture(
             packet: The packet to send.
             filter_config: The filter to use when capturing packets.
             duration: Capture traffic for this amount of time after sending `packet`.
+            sut_ingress: Optional port to use as the SUT ingress port. Defaults to
+                `self._sut_port_ingress`.
+            sut_egress: Optional port to use as the SUT egress port. Defaults to
+                `self._sut_port_egress`.
 
         Returns:
             A list of received packets.
         """
-        packet = self._adjust_addresses(packet)
+        if sut_ingress is None:
+            sut_ingress = self._sut_port_ingress
+        if sut_egress is None:
+            sut_egress = self._sut_port_egress
+        packet = self._adjust_addresses(packet, sut_ingress, sut_egress)
         return self.tg_node.send_packet_and_capture(
             packet,
             self._tg_port_egress,
@@ -208,18 +218,30 @@ def send_packet_and_capture(
             duration,
         )
 
-    def get_expected_packet(self, packet: Packet) -> Packet:
+    def get_expected_packet(
+        self, packet: Packet, sut_ingress: Port | None = None, sut_egress: Port | None = None
+    ) -> Packet:
         """Inject the proper L2/L3 addresses into `packet`.
 
         Args:
             packet: The packet to modify.
+            sut_ingress: Optional port to use as the SUT ingress port. Defaults to
+                `self._sut_port_ingress`.
+            sut_egress: Optional port to use as the SUT egress port. Defaults to
+                `self._sut_port_egress`.
 
         Returns:
             `packet` with injected L2/L3 addresses.
         """
-        return self._adjust_addresses(packet, expected=True)
-
-    def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet:
+        if sut_ingress is None:
+            sut_ingress = self._sut_port_ingress
+        if sut_egress is None:
+            sut_egress = self._sut_port_egress
+        return self._adjust_addresses(packet, sut_ingress, sut_egress, expected=True)
+
+    def _adjust_addresses(
+        self, packet: Packet, sut_ingress_port: Port, sut_egress_port: Port, expected: bool = False
+    ) -> Packet:
 """L2 and L3 address additions in both directions.
 
 Assumptions:
@@ -229,11 +251,13 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet:
             packet: The packet to modify.
             expected: If :data:`True`, the direction is SUT -> TG,
                 otherwise the direction is TG -> SUT.
+            sut_ingress_port: The port to use as the Rx port on the SUT.
+            sut_egress_port: The port to use as the Tx port on the SUT.
         """
         if expected:
             # The packet enters the TG from SUT
             # update l2 addresses
-            packet.src = self._sut_port_egress.mac_address
+            packet.src = sut_egress_port.mac_address
             packet.dst = self._tg_port_ingress.mac_address
 
             # The packet is routed from TG egress to TG ingress
@@ -244,7 +268,7 @@ def _adjust_addresses(self, packet: Packet, expected: bool = False) -> Packet:
             # The packet leaves TG towards SUT
             # update l2 addresses
             packet.src = self._tg_port_egress.mac_address
-            packet.dst = self._sut_port_ingress.mac_address
+            packet.dst = sut_ingress_port.mac_address
 
             # The packet is routed from TG egress to TG ingress
             # update l3 addresses
-- 
2.46.0
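
For context, _adjust_addresses operates on scapy packets. A minimal
illustration of the TG -> SUT direction, with hypothetical MAC addresses
standing in for the port attributes:

    from scapy.layers.inet import IP, UDP
    from scapy.layers.l2 import Ether

    tg_egress_mac = "52:54:00:00:00:01"    # stands in for self._tg_port_egress.mac_address
    sut_ingress_mac = "52:54:00:00:00:02"  # stands in for sut_ingress_port.mac_address

    packet = Ether() / IP() / UDP()
    # TG -> SUT: the frame leaves the TG egress port toward the SUT ingress port.
    packet.src = tg_egress_mac
    packet.dst = sut_ingress_mac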


