Re: [PATCH v5] virtio: optimize stats counters performance

2024-08-06 Thread Chenbo Xia


> On Aug 2, 2024, at 00:03, Morten Brørup  wrote:
> 
> Optimized the performance of updating the virtio statistics counters by
> reducing the number of branches.
> 
> Ordered the packet size comparisons according to their probability with a
> typical internet traffic mix.
> 
> Signed-off-by: Morten Brørup 
> ---
> v5:
> * Do not inline the function. (Stephen)
> v4:
> * Consider multicast/broadcast packets unlikely.
> v3:
> * Eliminated a local variable.
> * Note: Substituted sizeof(uint32_t)*4 by 32UL, using unsigned long type
>  to keep optimal offsetting in generated assembler output.
> * Removed unnecessary curly braces.
> v2:
> * Fixed checkpatch warning about line length.
> ---
> drivers/net/virtio/virtio_rxtx.c | 39 
> drivers/net/virtio/virtio_rxtx.h |  4 ++--
> 2 files changed, 16 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx.c 
> b/drivers/net/virtio/virtio_rxtx.c
> index f69b9453a2..b67f063b31 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -82,37 +82,26 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t 
> desc_idx)
> }
> 
> void
> -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf 
> *mbuf)
> +virtio_update_packet_stats(struct virtnet_stats *const stats,
> +   const struct rte_mbuf *const mbuf)
> {
>uint32_t s = mbuf->pkt_len;
> -   struct rte_ether_addr *ea;
> +   const struct rte_ether_addr *const ea =
> +   rte_pktmbuf_mtod(mbuf, const struct rte_ether_addr *);
> 
>stats->bytes += s;
> 
> -   if (s == 64) {
> -   stats->size_bins[1]++;
> -   } else if (s > 64 && s < 1024) {
> -   uint32_t bin;
> -
> -   /* count zeros, and offset into correct bin */
> -   bin = (sizeof(s) * 8) - rte_clz32(s) - 5;
> -   stats->size_bins[bin]++;
> -   } else {
> -   if (s < 64)
> -   stats->size_bins[0]++;
> -   else if (s < 1519)
> -   stats->size_bins[6]++;
> -   else
> -   stats->size_bins[7]++;
> -   }
> +   if (s >= 1024)
> +   stats->size_bins[6 + (s > 1518)]++;
> +   else if (s <= 64)
> +   stats->size_bins[s >> 6]++;
> +   else
> +   stats->size_bins[32UL - rte_clz32(s) - 5]++;
> 
> -   ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> -   if (rte_is_multicast_ether_addr(ea)) {
> -   if (rte_is_broadcast_ether_addr(ea))
> -   stats->broadcast++;
> -   else
> -   stats->multicast++;
> -   }
> +   RTE_BUILD_BUG_ON(offsetof(struct virtnet_stats, broadcast) !=
> +   offsetof(struct virtnet_stats, multicast) + 
> sizeof(uint64_t));
> +   if (unlikely(rte_is_multicast_ether_addr(ea)))
> +   (&stats->multicast)[rte_is_broadcast_ether_addr(ea)]++;
> }
> 
> static inline void
> diff --git a/drivers/net/virtio/virtio_rxtx.h 
> b/drivers/net/virtio/virtio_rxtx.h
> index afc4b74534..68034c914b 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -35,7 +35,7 @@ struct virtnet_tx {
> };
> 
> int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
> -void virtio_update_packet_stats(struct virtnet_stats *stats,
> -   struct rte_mbuf *mbuf);
> +void virtio_update_packet_stats(struct virtnet_stats *const stats,
> +   const struct rte_mbuf *const mbuf);
> 
> #endif /* _VIRTIO_RXTX_H_ */
> --
> 2.43.0
> 

Reviewed-by: Chenbo Xia 
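
For reference, the rewritten branch structure maps packet lengths to the same
size bins as the original code. The following is a minimal standalone sketch of
that mapping, not part of the patch: __builtin_clz stands in for rte_clz32(),
and the bin layout is taken from the diff above.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for rte_clz32(); assumes s != 0, which holds for pkt_len > 0. */
    static inline uint32_t clz32(uint32_t s) { return (uint32_t)__builtin_clz(s); }

    static unsigned int size_bin(uint32_t s)
    {
        if (s >= 1024)
            return 6 + (s > 1518);       /* bin 6: 1024-1518, bin 7: 1519+ */
        else if (s <= 64)
            return s >> 6;               /* bin 0: < 64, bin 1: == 64 */
        else
            return 32UL - clz32(s) - 5;  /* bins 2..5: 65-127 ... 512-1023 */
    }

    int main(void)
    {
        const uint32_t samples[] = { 60, 64, 65, 127, 128, 512, 1023, 1024, 1518, 1519 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("pkt_len %4" PRIu32 " -> size_bins[%u]\n",
                   samples[i], size_bin(samples[i]));
        return 0;
    }

Checking the >= 1024 range first reflects the ordering by probability mentioned
in the commit message.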

Re: [PATCH] vhost-user: optimize stats counters performance

2024-08-06 Thread Chenbo Xia


> On Aug 2, 2024, at 22:32, Morten Brørup  wrote:
> 
> Optimized the performance of updating the statistics counters by reducing
> the number of branches.
> 
> Ordered the packet size comparisons according to their probability with a
> typical internet traffic mix.
> 
> Signed-off-by: Morten Brørup 
> ---
> lib/vhost/virtio_net.c | 40 ++--
> 1 file changed, 14 insertions(+), 26 deletions(-)
> 
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 370402d849..25a495df56 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -53,7 +53,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t 
> nr_vring)
> }
> 
> static inline void
> -vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +vhost_queue_stats_update(const struct virtio_net *dev, struct 
> vhost_virtqueue *vq,
>struct rte_mbuf **pkts, uint16_t count)
>__rte_shared_locks_required(&vq->access_lock)
> {
> @@ -64,37 +64,25 @@ vhost_queue_stats_update(struct virtio_net *dev, struct 
> vhost_virtqueue *vq,
>return;
> 
>for (i = 0; i < count; i++) {
> -   struct rte_ether_addr *ea;
> -   struct rte_mbuf *pkt = pkts[i];
> +   const struct rte_ether_addr *ea;
> +   const struct rte_mbuf *pkt = pkts[i];
>uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt);
> 
>stats->packets++;
>stats->bytes += pkt_len;
> 
> -   if (pkt_len == 64) {
> -   stats->size_bins[1]++;
> -   } else if (pkt_len > 64 && pkt_len < 1024) {
> -   uint32_t bin;
> -
> -   /* count zeros, and offset into correct bin */
> -   bin = (sizeof(pkt_len) * 8) - rte_clz32(pkt_len) - 5;
> -   stats->size_bins[bin]++;
> -   } else {
> -   if (pkt_len < 64)
> -   stats->size_bins[0]++;
> -   else if (pkt_len < 1519)
> -   stats->size_bins[6]++;
> -   else
> -   stats->size_bins[7]++;
> -   }
> +   if (pkt_len >= 1024)
> +   stats->size_bins[6 + (pkt_len > 1518)]++;
> +   else if (pkt_len <= 64)
> +   stats->size_bins[pkt_len >> 6]++;
> +   else
> +   stats->size_bins[32UL - rte_clz32(pkt_len) - 5]++;
> 
> -   ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
> -   if (rte_is_multicast_ether_addr(ea)) {
> -   if (rte_is_broadcast_ether_addr(ea))
> -   stats->broadcast++;
> -   else
> -   stats->multicast++;
> -   }
> +   ea = rte_pktmbuf_mtod(pkt, const struct rte_ether_addr *);
> +   RTE_BUILD_BUG_ON(offsetof(struct virtqueue_stats, broadcast) 
> !=
> +   offsetof(struct virtqueue_stats, multicast) + 
> sizeof(uint64_t));
> +   if (unlikely(rte_is_multicast_ether_addr(ea)))
> +   
> (&stats->multicast)[rte_is_broadcast_ether_addr(ea)]++;
>}
> }
> 
> --
> 2.43.0
> 

Reviewed-by: Chenbo Xia 
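
Both this patch and the virtio one rely on the broadcast counter sitting
immediately after the multicast counter in the stats structure, which the
RTE_BUILD_BUG_ON asserts at compile time; the boolean result of
rte_is_broadcast_ether_addr() then selects which of the two adjacent counters
to increment, leaving a single branch for the unlikely multicast case. Below is
a minimal sketch of the idea, with a stand-in struct that only mirrors the two
relevant fields of the DPDK stats structures.

    #include <assert.h>
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stats {
        uint64_t multicast;
        uint64_t broadcast;  /* must directly follow multicast */
    };

    static void count(struct stats *st, bool is_mcast, bool is_bcast)
    {
        /* Compile-time guarantee that the array-style indexing below is valid. */
        static_assert(offsetof(struct stats, broadcast) ==
                      offsetof(struct stats, multicast) + sizeof(uint64_t),
                      "broadcast must directly follow multicast");
        if (is_mcast)
            (&st->multicast)[is_bcast]++;  /* index 0: multicast, index 1: broadcast */
    }

    int main(void)
    {
        struct stats st = { 0, 0 };

        count(&st, true, false);   /* multicast frame */
        count(&st, true, true);    /* broadcast frame */
        count(&st, false, false);  /* unicast frame: neither counter moves */
        printf("multicast=%" PRIu64 " broadcast=%" PRIu64 "\n",
               st.multicast, st.broadcast);
        return 0;
    }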



Re: [PATCH v2 0/7] record and rework component dependencies

2024-08-06 Thread Bruce Richardson
On Fri, Aug 02, 2024 at 06:18:26PM +0100, Ferruh Yigit wrote:
> On 8/2/2024 1:44 PM, Bruce Richardson wrote:
> > As part of the meson build, we can record the dependencies for each
> > component as we process it, logging them to a file. This file can be
> > used as input to a number of other scripts and tools, for example, to
> > graph the dependencies, or to allow higher-level build-config tools to
> > automatically enable component requirements, etc.
> > 
> > The first patch of this set generates the basic dependency tree. The
> > second patch does some processing of that dependency tree to identify
> > cases where dependencies are being unnecessarily specified. Reducing
> > these makes it easier to have readable dependency graphs in future,
> > without affecting the build.
> > 
> > The following 4 patches are based on the output of the second patch, and
> > greatly cut down the number of direct dependency links between
> > components. Even with the cut-down dependencies, the full dependency
> > graph is nigh-unreadable, so the final patch adds a new script to
> > generate dependency tree subgraphs, creating dot files for e.g. the
> > dependencies of a particular component, or a component class such as
> > mempool drivers.
> > 
> > Bruce Richardson (7):
> >   build: output a dependency log in build directory
> >   devtools: add script to flag unneeded dependencies
> >   build: remove kvargs from driver class dependencies
> >   build: reduce library dependencies
> >   build: reduce driver dependencies
> >   build: reduce app dependencies
> >   devtools: add script to generate DPDK dependency graphs
> >
> 
> Tested-by: Ferruh Yigit 
> 
> Thanks for the update, output is now easier to consume with the help of
> the 'devtools/draw-dependency-graphs.py' script.
> A detail, but the script is not actually drawing a graph, it is parsing the
> .dot file, so the name is a little misleading.
> 
> 
> Also, for the patches that convert explicit dependencies to implicit
> dependencies, I can see the benefit for meson and the graph. But there is
> also value in keeping explicit dependencies, as they were an easy way to
> document a component's dependencies for developers.
> So, I am not really sure which one is better.
> 

Up till now we did not keep a full dependency list for each component
because the configuration time with meson completely exploded due to really
slow deduplication of dependencies there. I haven't checked with recent
versions of meson, though, so the problem may no longer be there now. Right
now, we have "partial dependency lists" - neither the full list of dependencies
of a component, nor a minimal set. These patches don't bring us fully to a
minimal set, but bring us closer. I honestly don't think we need to enforce
either case - so long as we have all dependencies either explicitly or via
recursive dependency, things are fine.

/Bruce


Re: [PATCH v5 4/4] test: rearrange test_cfgfiles cases

2024-08-06 Thread Bruce Richardson
On Fri, Aug 02, 2024 at 10:06:35AM -0700, Stephen Hemminger wrote:
> On Fri, 2 Aug 2024 17:51:01 +0100
> Bruce Richardson  wrote:
> 
> > On Fri, Aug 02, 2024 at 09:45:03AM -0700, Stephen Hemminger wrote:
> > > The input files don't need to be in a separate subdirectory.
> > > 
> > > Signed-off-by: Stephen Hemminger   
> > 
> > Small suggestion - I'd move this up to be patch 3, rather than patch 4,
> > which would save editing the list in the meson.build file to remove the
> > "etc/" prefix
> > 
> > Acked-by: Bruce Richardson 
> 
> Sure, it would be logical to merge the two, I just wanted the patches to
> be smaller and easier to review.
> 
I actually think they are better separate, just switch the order.

/Bruce


[v1 0/7] DPAA2 crypto changes

2024-08-06 Thread Gagandeep Singh
Changes related to crypto driver

Gagandeep Singh (1):
  crypto/dpaa2_sec: fix memory leak

Jun Yang (5):
  crypto/dpaa2_sec: enhance IPsec RFLC handling
  crypto/dpaa2_sec: enhance pdcp FLC handling
  net/dpaa2: support FLC stashing API
  crypto/dpaa2_sec: remove prefetch code in event mode
  crypto/dpaa2_sec: rework debug code

Varun Sethi (1):
  common/dpaax: caamflib: fix PDCP SNOW-ZUC wdog DECO err

 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h |  44 ++
 drivers/common/dpaax/caamflib/desc/pdcp.h   |  10 ++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 144 
 drivers/net/dpaa2/dpaa2_ethdev.c|  25 ++--
 4 files changed, 153 insertions(+), 70 deletions(-)

-- 
2.25.1



[v1 1/7] crypto/dpaa2_sec: fix memory leak

2024-08-06 Thread Gagandeep Singh
Fix a memory leak when creating a PDCP session
with invalid data.

Fixes: bef594ec5cc8 ("crypto/dpaa2_sec: support PDCP offload")
Cc: akhil.go...@nxp.com
Cc: sta...@dpdk.org

Signed-off-by: Gagandeep Singh 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c1f7181d55..e0b8bacdb8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3420,6 +3420,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
}
} else {
DPAA2_SEC_ERR("Invalid crypto type");
+   rte_free(priv);
return -EINVAL;
}
 
-- 
2.25.1



[v1 2/7] common/dpaax: caamflib: fix PDCP SNOW-ZUC wdog DECO err

2024-08-06 Thread Gagandeep Singh
From: Varun Sethi 

Add a Jump instruction with the CALM flag to ensure
that previous processing has completed.

Fixes: 8827d94398f1 ("crypto/dpaa2_sec/hw: support AES-AES 18-bit PDCP")
Cc: vakul.g...@nxp.com
Cc: sta...@dpdk.org

Signed-off-by: Gagandeep Singh 
Signed-off-by: Varun Sethi 
Acked-by: Hemant Agrawal 
---
 drivers/common/dpaax/caamflib/desc/pdcp.h | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h 
b/drivers/common/dpaax/caamflib/desc/pdcp.h
index bc35114cf4..9ada3905c5 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -1220,6 +1220,11 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
 
+   /* conditional jump with calm added to ensure that the
+* previous processing has been completed
+*/
+   JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
LOAD(p, CLRW_RESET_CLS1_CHA |
 CLRW_CLR_C1KEY |
 CLRW_CLR_C1CTX |
@@ -1921,6 +1926,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p,
 
MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED);
 
+   /* conditional jump with calm added to ensure that the
+* previous processing has been completed
+*/
+   JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
LOAD(p, CLRW_RESET_CLS1_CHA |
 CLRW_CLR_C1KEY |
 CLRW_CLR_C1CTX |
-- 
2.25.1



[v1 3/7] crypto/dpaa2_sec: enhance IPsec RFLC handling

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Point the response FLC to the FLC (SEC descriptor context).
The response FLC can be used for debugging in the dequeue process.
Enable data stashing only; annotation stashing is disabled because it
is not suitable for SEC and impacts performance.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 44 -
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index e0b8bacdb8..0e30192b30 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2005,7 +2005,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, 
uint16_t qp_id,
struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
struct dpseci_rx_queue_cfg cfg;
int32_t retcode;
-   char str[30];
+   char str[RTE_MEMZONE_NAMESIZE];
 
PMD_INIT_FUNC_TRACE();
 
@@ -2065,8 +2065,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, 
uint16_t qp_id,
return -ENOMEM;
}
 
-   cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
-   cfg.user_ctx = (size_t)(&qp->rx_vq);
+   cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
  qp_id, &cfg);
return retcode;
@@ -3060,14 +3059,19 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
struct alginfo authdata, cipherdata;
int bufsize;
struct sec_flow_context *flc;
+   uint64_t flc_iova;
int ret = -1;
 
PMD_INIT_FUNC_TRACE();
 
-   priv = (struct ctxt_priv *)rte_zmalloc(NULL,
-   sizeof(struct ctxt_priv) +
-   sizeof(struct sec_flc_desc),
-   RTE_CACHE_LINE_SIZE);
+   RTE_SET_USED(dev);
+
+   /** Make FLC address to align with stashing, low 6 bits are used
+* control stashing.
+*/
+   priv = rte_zmalloc(NULL, sizeof(struct ctxt_priv) +
+   sizeof(struct sec_flc_desc),
+   DPAA2_STASHING_ALIGN_SIZE);
 
if (priv == NULL) {
DPAA2_SEC_ERR("No memory for priv CTXT");
@@ -3077,10 +3081,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
flc = &priv->flc_desc[0].flc;
 
if (ipsec_xform->life.bytes_hard_limit != 0 ||
-   ipsec_xform->life.bytes_soft_limit != 0 ||
-   ipsec_xform->life.packets_hard_limit != 0 ||
-   ipsec_xform->life.packets_soft_limit != 0)
+   ipsec_xform->life.bytes_soft_limit != 0 ||
+   ipsec_xform->life.packets_hard_limit != 0 ||
+   ipsec_xform->life.packets_soft_limit != 0) {
+   rte_free(priv);
return -ENOTSUP;
+   }
 
memset(session, 0, sizeof(dpaa2_sec_session));
 
@@ -3330,24 +3336,26 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
SHR_WAIT : SHR_SERIAL,
&decap_pdb, &cipherdata, &authdata);
-   } else
+   } else {
+   ret = -EINVAL;
goto out;
+   }
 
if (bufsize < 0) {
+   ret = -EINVAL;
DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
goto out;
}
 
flc->word1_sdl = (uint8_t)bufsize;
 
-   /* Enable the stashing control bit */
+   flc_iova = DPAA2_VADDR_TO_IOVA(flc);
+   /* Enable the stashing control bit and data stashing only.*/
DPAA2_SET_FLC_RSC(flc);
-   flc->word2_rflc_31_0 = lower_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq) | 0x14);
-   flc->word3_rflc_63_32 = upper_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq));
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &flc_iova);
+   flc->word2_rflc_31_0 = lower_32_bits(flc_iova);
+   flc->word3_rflc_63_32 = upper_32_bits(flc_iova);
 
/* Set EWS bit i.e. enable write-safe */
DPAA2_SET_FLC_EWS(flc);
-- 
2.25.1



[v1 5/7] net/dpaa2: support FLC stashing API

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Configure flow steering action with FLC enabled to align stashing
setting with RSS configuration.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 44 +
 drivers/net/dpaa2/dpaa2_ethdev.c| 25 +++---
 2 files changed, 58 insertions(+), 11 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h 
b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..4c30e6db18 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -179,6 +179,7 @@ struct __rte_cache_aligned dpaa2_queue {
uint16_t resv;
uint64_t offloads;
uint64_t lpbk_cntx;
+   uint8_t data_stashing_off;
 };
 
 struct swp_active_dqs {
@@ -463,6 +464,49 @@ struct qbman_result *get_swp_active_dqs(uint16_t 
dpio_index)
return rte_global_active_dqs_list[dpio_index].global_active_dqs;
 }
 
+/* 00 00 00 - last 6 bit represent data, annotation,
+ * context stashing setting 01 01 00 (0x14)
+ * (in following order ->DS AS CS)
+ * to enable 1 line data, 1 line annotation.
+ * For LX2, this setting should be 01 00 00 (0x10)
+ */
+#define DPAA2_FLC_STASHING_MAX_BIT_SIZE 2
+#define DPAA2_FLC_STASHING_MAX_CACHE_LINE \
+   ((1ULL << DPAA2_FLC_STASHING_MAX_BIT_SIZE) - 1)
+
+enum dpaa2_flc_stashing_type {
+   DPAA2_FLC_CNTX_STASHING = 0,
+   DPAA2_FLC_ANNO_STASHING =
+   DPAA2_FLC_CNTX_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE,
+   DPAA2_FLC_DATA_STASHING =
+   DPAA2_FLC_ANNO_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE,
+   DPAA2_FLC_END_STASHING =
+   DPAA2_FLC_DATA_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE
+};
+
+#define DPAA2_STASHING_ALIGN_SIZE (1 << DPAA2_FLC_END_STASHING)
+
+static inline void
+dpaa2_flc_stashing_set(enum dpaa2_flc_stashing_type type,
+   uint8_t cache_line, uint64_t *flc)
+{
+   RTE_ASSERT(cache_line <= DPAA2_FLC_STASHING_MAX_CACHE_LINE);
+   RTE_ASSERT(type == DPAA2_FLC_CNTX_STASHING ||
+   type == DPAA2_FLC_ANNO_STASHING ||
+   type == DPAA2_FLC_DATA_STASHING);
+
+   (*flc) &= ~(DPAA2_FLC_STASHING_MAX_CACHE_LINE << type);
+   (*flc) |= (cache_line << type);
+}
+
+static inline void
+dpaa2_flc_stashing_clear_all(uint64_t *flc)
+{
+   dpaa2_flc_stashing_set(DPAA2_FLC_CNTX_STASHING, 0, flc);
+   dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 0, flc);
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0, flc);
+}
+
 static inline
 void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
 {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 449bbda7ca..726bc0cf3e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -786,17 +786,20 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
if ((dpaa2_svr_family & 0x) != SVR_LS2080A) {
options |= DPNI_QUEUE_OPT_FLC;
cfg.flc.stash_control = true;
-   cfg.flc.value &= 0xFFC0;
-   /* 00 00 00 - last 6 bit represent annotation, context stashing,
-* data stashing setting 01 01 00 (0x14)
-* (in following order ->DS AS CS)
-* to enable 1 line data, 1 line annotation.
-* For LX2, this setting should be 01 00 00 (0x10)
-*/
-   if ((dpaa2_svr_family & 0x) == SVR_LX2160A)
-   cfg.flc.value |= 0x10;
-   else
-   cfg.flc.value |= 0x14;
+   dpaa2_flc_stashing_clear_all(&cfg.flc.value);
+   if (getenv("DPAA2_DATA_STASHING_OFF")) {
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0,
+   &cfg.flc.value);
+   dpaa2_q->data_stashing_off = 1;
+   } else {
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &cfg.flc.value);
+   dpaa2_q->data_stashing_off = 0;
+   }
+   if ((dpaa2_svr_family & 0x) != SVR_LX2160A) {
+   dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 1,
+   &cfg.flc.value);
+   }
}
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
 dpaa2_q->tc_index, flow_id, options, &cfg);
-- 
2.25.1
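
The helper encodes the stashing configuration in the low six bits that were
previously hard-coded as 0x10/0x14, two bits each for context, annotation and
data stashing (from bit 0 upwards). Below is a small self-contained check of
that encoding, using local copies of the enum and setter from the patch (names
shortened, RTE_ASSERT replaced by assert):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLC_STASHING_MAX_BIT_SIZE 2
    #define FLC_STASHING_MAX_CACHE_LINE \
            ((1ULL << FLC_STASHING_MAX_BIT_SIZE) - 1)

    enum flc_stashing_type {
        FLC_CNTX_STASHING = 0,                                             /* bits 0-1 */
        FLC_ANNO_STASHING = FLC_CNTX_STASHING + FLC_STASHING_MAX_BIT_SIZE, /* bits 2-3 */
        FLC_DATA_STASHING = FLC_ANNO_STASHING + FLC_STASHING_MAX_BIT_SIZE, /* bits 4-5 */
        FLC_END_STASHING  = FLC_DATA_STASHING + FLC_STASHING_MAX_BIT_SIZE
    };

    static void flc_stashing_set(enum flc_stashing_type type,
            uint8_t cache_line, uint64_t *flc)
    {
        assert(cache_line <= FLC_STASHING_MAX_CACHE_LINE);
        *flc &= ~(FLC_STASHING_MAX_CACHE_LINE << type);
        *flc |= ((uint64_t)cache_line << type);
    }

    int main(void)
    {
        uint64_t flc = 0;

        flc_stashing_set(FLC_DATA_STASHING, 1, &flc);  /* 1 cache line of data */
        printf("data only:         0x%02" PRIx64 "\n", flc);  /* 0x10, the LX2160A value */

        flc_stashing_set(FLC_ANNO_STASHING, 1, &flc);  /* plus 1 line of annotation */
        printf("data + annotation: 0x%02" PRIx64 "\n", flc);  /* 0x14, the value for other SoCs */
        return 0;
    }

This is also why the IPsec patch (3/7) allocates the FLC context with
DPAA2_STASHING_ALIGN_SIZE (1 << 6, i.e. 64-byte) alignment: the low six bits
of the FLC IOVA are then free to carry the stashing control bits that
dpaa2_flc_stashing_set() writes into the address.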



[v1 4/7] crypto/dpaa2_sec: enhance pdcp FLC handling

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Set RFLC with FLC IOVA address and data stashing only.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 16 +---
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 0e30192b30..ff24a8919a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3389,6 +3389,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
struct alginfo *p_authdata = NULL;
int bufsize = -1;
struct sec_flow_context *flc;
+   uint64_t flc_iova;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
int swap = true;
 #else
@@ -3397,6 +3398,8 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 
PMD_INIT_FUNC_TRACE();
 
+   RTE_SET_USED(dev);
+
memset(session, 0, sizeof(dpaa2_sec_session));
 
priv = (struct ctxt_priv *)rte_zmalloc(NULL,
@@ -3646,14 +3649,13 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
goto out;
}
 
-   /* Enable the stashing control bit */
+   flc_iova = DPAA2_VADDR_TO_IOVA(flc);
+   /* Enable the stashing control bit and data stashing only.*/
DPAA2_SET_FLC_RSC(flc);
-   flc->word2_rflc_31_0 = lower_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq) | 0x14);
-   flc->word3_rflc_63_32 = upper_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq));
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &flc_iova);
+   flc->word2_rflc_31_0 = lower_32_bits(flc_iova);
+   flc->word3_rflc_63_32 = upper_32_bits(flc_iova);
 
flc->word1_sdl = (uint8_t)bufsize;
 
-- 
2.25.1



[v1 6/7] crypto/dpaa2_sec: remove prefetch code in event mode

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Do not prefetch the mbuf and crypto_op, since they are not touched
by hardware.

Signed-off-by: Jun Yang 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 20 +---
 1 file changed, 1 insertion(+), 19 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index ff24a8919a..1e28c71b53 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3991,12 +3991,6 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
 struct rte_event *ev)
 {
struct dpaa2_sec_qp *qp;
-   /* Prefetching mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
-   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
-
-   /* Prefetching ipsec crypto_op stored in priv data of mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
@@ -4010,6 +4004,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
 
qbman_swp_dqrr_consume(swp, dq);
 }
+
 static void
 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 const struct qbman_fd *fd,
@@ -4020,12 +4015,6 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp 
__rte_unused,
uint8_t dqrr_index;
struct dpaa2_sec_qp *qp;
struct rte_crypto_op *crypto_op;
-   /* Prefetching mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
-   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
-
-   /* Prefetching ipsec crypto_op stored in priv data of mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
@@ -4055,13 +4044,6 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
struct rte_crypto_op *crypto_op;
struct dpaa2_sec_qp *qp;
 
-   /* Prefetching mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
-   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
-
-   /* Prefetching ipsec crypto_op stored in priv data of mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
-
qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
ev->sub_event_type = rxq->ev.sub_event_type;
-- 
2.25.1



[v1 7/7] crypto/dpaa2_sec: rework debug code

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Output debug information according to various modes.

Signed-off-by: Jun Yang 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 63 -
 1 file changed, 48 insertions(+), 15 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e28c71b53..da3bd871ba 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1095,7 +1095,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
 
 static int
 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-   struct qbman_fd *fd, __rte_unused uint16_t bpid)
+   struct qbman_fd *fd, uint16_t bpid)
 {
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
@@ -1105,6 +1105,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
struct rte_mbuf *mbuf;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
 
data_len = sym_op->cipher.data.length;
data_offset = sym_op->cipher.data.offset;
@@ -1210,14 +1214,26 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-   DPAA2_SEC_DP_DEBUG(
-   "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
-   " off =%d, len =%d\n",
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   offset = sprintf(debug_str,
+   "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ",
DPAA2_GET_FD_ADDR(fd),
-   DPAA2_GET_FD_BPID(fd),
-   rte_dpaa2_bpid_info[bpid].meta_data_size,
-   DPAA2_GET_FD_OFFSET(fd),
-   DPAA2_GET_FD_LEN(fd));
+   bpid < MAX_BPID ? "SW" : "BMAN");
+   if (bpid < MAX_BPID) {
+   offset += sprintf(&debug_str[offset],
+   "bpid = %d ", bpid);
+   }
+   offset += sprintf(&debug_str[offset],
+   "private size = %d ",
+   mbuf->pool->private_data_size);
+   offset += sprintf(&debug_str[offset],
+   "off =%d, len =%d\n",
+   DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
+   DPAA2_SEC_DP_DEBUG("%s", debug_str);
+#else
+   RTE_SET_USED(bpid);
+#endif
+
return 0;
 }
 
@@ -1233,6 +1249,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
struct rte_mbuf *dst;
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
 
data_len = sym_op->cipher.data.length;
data_offset = sym_op->cipher.data.offset;
@@ -1324,14 +1344,23 @@ build_cipher_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
 
-   DPAA2_SEC_DP_DEBUG(
-   "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
-   " off =%d, len =%d\n",
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   offset = sprintf(debug_str,
+   "CIPHER: fdaddr =%" PRIx64 ", from %s pool ",
DPAA2_GET_FD_ADDR(fd),
-   DPAA2_GET_FD_BPID(fd),
-   rte_dpaa2_bpid_info[bpid].meta_data_size,
-   DPAA2_GET_FD_OFFSET(fd),
-   DPAA2_GET_FD_LEN(fd));
+   bpid < MAX_BPID ? "SW" : "BMAN");
+   if (bpid < MAX_BPID) {
+   offset += sprintf(&debug_str[offset],
+   "bpid = %d ", bpid);
+   }
+   offset += sprintf(&debug_str[offset],
+   "private size = %d ",
+   dst->pool->private_data_size);
+   offset += sprintf(&debug_str[offset],
+   "off =%d, len =%d\n",
+   DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
+   DPAA2_SEC_DP_DEBUG("%s", debug_str);
+#endif
 
return 0;
 }
@@ -1564,6 +1593,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct 
dpaa2_sec_qp *qp)
struct qbman_fle *fle;
struct rte_crypto_op *op;
struct rte_mbuf *dst, *src;
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
 
if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
return sec_simple_fd_to_mbuf(fd);
-- 
2.25.1



[PATCH v13 0/6] API docs generation

2024-08-06 Thread Juraj Linkeš
The generation is done with Sphinx, which DPDK already uses, with
slightly modified configuration of the sidebar present in an if block.

DTS dependencies do not need to be installed, but there is the option to
install doc build dependencies with Poetry:
poetry install --with docs

The build itself may be run with:
meson setup <build_dir> -Denable_docs=true
ninja -C <build_dir>

The above will do a full DPDK build with docs. To build just docs:
meson setup <build_dir>
ninja -C <build_dir> dts-doc

Python 3.10 is required to build the DTS API docs.

The patchset contains the .rst sources which Sphinx uses to generate the
html pages. These were first generated with the sphinx-apidoc utility
and modified to provide a better look. The documentation just doesn't
look that good without the modifications and there isn't enough
configuration options to achieve that without manual changes to the .rst
files. This introduces extra maintenance which involves adding new .rst
files when a new Python module is added or changing the .rst structure
if the Python directory/file structure is changed (moved, renamed
files). This small maintenance burden is outweighed by the flexibility
afforded by the ability to make manual changes to the .rst files.

v10:
Fix dts doc generation issue: Only copy the custom css file if it exists.

v11:
Added the config option autodoc_mock_imports, which eliminates the need
for DTS dependencies. Added a script that finds out which imports need to
be added to autodoc_mock_imports. The script also checks the required
Python version for building DTS docs.
Removed tags from the two affected patches which will need to be
reviewed again.

v12:
Added paramiko to the required dependencies of get-dts-deps.py.

v13:
Fixed build error:
TypeError: unsupported operand type(s) for |: 'NoneType' and 'Transport'

Juraj Linkeš (6):
  dts: update params and parser docstrings
  dts: replace the or operator in third party types
  dts: add doc generation dependencies
  dts: add API doc sources
  doc: meson doc API build dir variable
  dts: add API doc generation

 buildtools/call-sphinx-build.py   |  10 +-
 buildtools/get-dts-deps.py|  78 +++
 buildtools/meson.build|   1 +
 doc/api/doxy-api-index.md |   3 +
 doc/api/doxy-api.conf.in  |   2 +
 doc/api/meson.build   |   8 +-
 doc/guides/conf.py|  41 +-
 doc/guides/contributing/documentation.rst |   2 +
 doc/guides/contributing/patches.rst   |   4 +
 doc/guides/meson.build|   1 +
 doc/guides/tools/dts.rst  |  39 +-
 dts/doc/conf_yaml_schema.json |   1 +
 dts/doc/framework.config.rst  |  12 +
 dts/doc/framework.config.types.rst|   6 +
 dts/doc/framework.exception.rst   |   6 +
 dts/doc/framework.logger.rst  |   6 +
 dts/doc/framework.params.eal.rst  |   6 +
 dts/doc/framework.params.rst  |  14 +
 dts/doc/framework.params.testpmd.rst  |   6 +
 dts/doc/framework.params.types.rst|   6 +
 dts/doc/framework.parser.rst  |   6 +
 .../framework.remote_session.dpdk_shell.rst   |   6 +
 ...ote_session.interactive_remote_session.rst |   6 +
 ...ework.remote_session.interactive_shell.rst |   6 +
 .../framework.remote_session.python_shell.rst |   6 +
 ...ramework.remote_session.remote_session.rst |   6 +
 dts/doc/framework.remote_session.rst  |  18 +
 .../framework.remote_session.ssh_session.rst  |   6 +
 ...framework.remote_session.testpmd_shell.rst |   6 +
 dts/doc/framework.runner.rst  |   6 +
 dts/doc/framework.settings.rst|   6 +
 dts/doc/framework.test_result.rst |   6 +
 dts/doc/framework.test_suite.rst  |   6 +
 dts/doc/framework.testbed_model.cpu.rst   |   6 +
 .../framework.testbed_model.linux_session.rst |   6 +
 dts/doc/framework.testbed_model.node.rst  |   6 +
 .../framework.testbed_model.os_session.rst|   6 +
 dts/doc/framework.testbed_model.port.rst  |   6 +
 .../framework.testbed_model.posix_session.rst |   6 +
 dts/doc/framework.testbed_model.rst   |  26 +
 dts/doc/framework.testbed_model.sut_node.rst  |   6 +
 dts/doc/framework.testbed_model.tg_node.rst   |   6 +
 ..._generator.capturing_traffic_generator.rst |   6 +
 ...mework.testbed_model.traffic_generator.rst |  14 +
 testbed_model.traffic_generator.scapy.rst |   6 +
 ...el.traffic_generator.traffic_generator.rst |   6 +
 ...framework.testbed_model.virtual_device.rst |   6 +
 dts/doc/framework.utils.rst   |   6 +
 dts/doc/index.rst |  43 ++
 dts/doc/meson.build   |  30 +
 dts/framework/params/__init__.py  |   4 +-
 dts/framework/params/eal.py   |   7 +-
 dts/framework/params/types.py |   3 +-
 dts/framework/parser.py  

[PATCH v13 1/6] dts: update params and parser docstrings

2024-08-06 Thread Juraj Linkeš
Address a few errors reported by Sphinx when generating documentation:
framework/params/__init__.py:docstring of framework.params.modify_str:3:
WARNING: Inline interpreted text or phrase reference start-string
without end-string.
framework/params/eal.py:docstring of framework.params.eal.EalParams:35:
WARNING: Definition list ends without a blank line; unexpected
unindent.
framework/params/types.py:docstring of framework.params.types:8:
WARNING: Inline strong start-string without end-string.
framework/params/types.py:docstring of framework.params.types:9:
WARNING: Inline strong start-string without end-string.
framework/parser.py:docstring of framework.parser.TextParser:33: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:43: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:49: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:8:
ERROR: Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:9:
WARNING: Block quote ends without a blank line; unexpected unindent.

Fixes: 87ba4cdc0dbb ("dts: use Unpack for type checking and hinting")
Fixes: d70159cb62f5 ("dts: add params manipulation module")
Fixes: 967fc62b0a43 ("dts: refactor EAL parameters class")
Fixes: 818fe14e3422 ("dts: add parsing utility module")
Cc: luca.vizza...@arm.com

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
---
 dts/framework/params/__init__.py | 4 ++--
 dts/framework/params/eal.py  | 7 +--
 dts/framework/params/types.py| 3 ++-
 dts/framework/parser.py  | 4 ++--
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/dts/framework/params/__init__.py b/dts/framework/params/__init__.py
index 5a6fd93053..1ae227d7b4 100644
--- a/dts/framework/params/__init__.py
+++ b/dts/framework/params/__init__.py
@@ -53,9 +53,9 @@ def reduced_fn(value):
 
 
 def modify_str(*funcs: FnPtr) -> Callable[[T], T]:
-"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
+r"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
 
-The :attr:`FnPtr`s fed to the decorator are executed from left to right in 
the arguments list
+The :attr:`FnPtr`\s fed to the decorator are executed from left to right 
in the arguments list
 order.
 
 Args:
diff --git a/dts/framework/params/eal.py b/dts/framework/params/eal.py
index 8d7766fefc..cf1594353a 100644
--- a/dts/framework/params/eal.py
+++ b/dts/framework/params/eal.py
@@ -26,13 +26,16 @@ class EalParams(Params):
 prefix: Set the file prefix string with which to start DPDK, e.g.: 
``prefix="vf"``.
 no_pci: Switch to disable PCI bus, e.g.: ``no_pci=True``.
 vdevs: Virtual devices, e.g.::
+
 vdevs=[
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+
 ports: The list of ports to allow.
-other_eal_param: user defined DPDK EAL parameters, e.g.:
-``other_eal_param='--single-file-segments'``
+other_eal_param: user defined DPDK EAL parameters, e.g.::
+
+``other_eal_param='--single-file-segments'``
 """
 
 lcore_list: LogicalCoreList | None = field(default=None, 
metadata=Params.short("l"))
diff --git a/dts/framework/params/types.py b/dts/framework/params/types.py
index e668f658d8..d77c4625fb 100644
--- a/dts/framework/params/types.py
+++ b/dts/framework/params/types.py
@@ -6,7 +6,8 @@
 TypedDicts can be used in conjunction with Unpack and kwargs for type hinting 
on function calls.
 
 Example:
-..code:: python
+.. code:: python
+
 def create_testpmd(**kwargs: Unpack[TestPmdParamsDict]):
 params = TestPmdParams(**kwargs)
 """
diff --git a/dts/framework/parser.py b/dts/framework/parser.py
index 741dfff821..7254c75b71 100644
--- a/dts/framework/parser.py
+++ b/dts/framework/parser.py
@@ -46,7 +46,7 @@ class TextParser(ABC):
 Example:
 The following example makes use of and demonstrates every parser 
function available:
 
-..code:: python
+.. code:: python
 
 from dataclasses import dataclass, field
 from enum import Enum
@@ -90,7 +90,7 @@ def wrap(parser_fn: ParserFn, wrapper_fn: Callable) -> 
ParserFn:
 """Makes a wrapped parser function.
 
 `parser_fn` is called and if a non-None value is returned, 
`wrapper_function` is called with
-it. Otherwise the function returns early with None. In pseudo-code:
+it. Otherwise the function returns early with None. In pseudo-code::
 
 intermediate_value := parser_fn(input)
 if intermediary_value is None then
-- 
2.34.1



[PATCH v13 2/6] dts: replace the or operator in third party types

2024-08-06 Thread Juraj Linkeš
When the DTS dependencies are not installed when building DTS API
documentation, the or operator produces errors when used with types from
those libraries:
autodoc: failed to import module 'remote_session' from module
'framework'; the following exception was raised:
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for |: 'Transport' and 'NoneType'

The third-party type here is Transport from the paramiko library.

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/interactive_remote_session.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/remote_session/interactive_remote_session.py 
b/dts/framework/remote_session/interactive_remote_session.py
index 97194e6af8..4605ee14b4 100644
--- a/dts/framework/remote_session/interactive_remote_session.py
+++ b/dts/framework/remote_session/interactive_remote_session.py
@@ -5,6 +5,7 @@
 
 import socket
 import traceback
+from typing import Union
 
 from paramiko import AutoAddPolicy, SSHClient, Transport  # type: 
ignore[import-untyped]
 from paramiko.ssh_exception import (  # type: ignore[import-untyped]
@@ -52,7 +53,7 @@ class InteractiveRemoteSession:
 session: SSHClient
 _logger: DTSLogger
 _node_config: NodeConfiguration
-_transport: Transport | None
+_transport: Union[Transport, None]
 
 def __init__(self, node_config: NodeConfiguration, logger: DTSLogger) -> 
None:
 """Connect to the node during initialization.
-- 
2.34.1



[PATCH v13 3/6] dts: add doc generation dependencies

2024-08-06 Thread Juraj Linkeš
Sphinx imports every Python module (through the autodoc extension)
when generating documentation from docstrings, meaning all DTS
dependencies, including the Python version, should be satisfied. This is not
a hard requirement, as imports from dependencies may be mocked via the
autodoc_mock_imports autodoc option.
In case DTS developers want to use a Sphinx installation from their
virtualenv, we provide an optional Poetry group for doc generation. The
pyelftools package is there so that meson picks up the correct Python
installation, as pyelftools is required by the build system.

Signed-off-by: Juraj Linkeš 
---
 dts/poetry.lock| 521 +++--
 dts/pyproject.toml |   8 +
 2 files changed, 517 insertions(+), 12 deletions(-)

diff --git a/dts/poetry.lock b/dts/poetry.lock
index 5f8fa03933..2dd8bad498 100644
--- a/dts/poetry.lock
+++ b/dts/poetry.lock
@@ -1,5 +1,16 @@
 # This file is automatically @generated by Poetry 1.8.2 and should not be 
changed by hand.
 
+[[package]]
+name = "alabaster"
+version = "0.7.13"
+description = "A configurable sidebar-enabled Sphinx theme"
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "alabaster-0.7.13-py3-none-any.whl", hash = 
"sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
+{file = "alabaster-0.7.13.tar.gz", hash = 
"sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+]
+
 [[package]]
 name = "attrs"
 version = "23.1.0"
@@ -18,6 +29,23 @@ docs = ["furo", "myst-parser", "sphinx", 
"sphinx-notfound-page", "sphinxcontrib-
 tests = ["attrs[tests-no-zope]", "zope-interface"]
 tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", 
"pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
 
+[[package]]
+name = "babel"
+version = "2.13.1"
+description = "Internationalization utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+{file = "Babel-2.13.1-py3-none-any.whl", hash = 
"sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
+{file = "Babel-2.13.1.tar.gz", hash = 
"sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+]
+
+[package.dependencies]
+setuptools = {version = "*", markers = "python_version >= \"3.12\""}
+
+[package.extras]
+dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
+
 [[package]]
 name = "bcrypt"
 version = "4.0.1"
@@ -86,6 +114,17 @@ d = ["aiohttp (>=3.7.4)"]
 jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
 uvloop = ["uvloop (>=0.15.2)"]
 
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "certifi-2023.7.22-py3-none-any.whl", hash = 
"sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+{file = "certifi-2023.7.22.tar.gz", hash = 
"sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
 [[package]]
 name = "cffi"
 version = "1.15.1"
@@ -162,6 +201,105 @@ files = [
 [package.dependencies]
 pycparser = "*"
 
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and 
actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+{file = "charset-normalizer-3.3.2.tar.gz", hash = 
"sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", 
hash = 
"sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = 
"sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash 
= "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl",
 hash = 
"sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl",
 hash = 
"sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl",
 hash = 
"sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl",
 hash = 
"sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl",
 hash = 
"sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+{file = "charset_normalizer-3.3.2-cp310-cp

[PATCH v13 4/6] dts: add API doc sources

2024-08-06 Thread Juraj Linkeš
These sources could be generated with the sphinx-apidoc utility, but
that doesn't give us enough flexibility, such as sorting the order of
modules or changing the headers of the modules.

The sources included in this patch were in fact generated by said
utility, but modified to improve the look of the documentation. The
improvements are mainly in toctree definitions and the titles of the
modules/packages. These were made with specific Sphinx config options in
mind.

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
Tested-by: Luca Vizzarro 
---
 dts/doc/conf_yaml_schema.json |  1 +
 dts/doc/framework.config.rst  | 12 ++
 dts/doc/framework.config.types.rst|  6 +++
 dts/doc/framework.exception.rst   |  6 +++
 dts/doc/framework.logger.rst  |  6 +++
 dts/doc/framework.params.eal.rst  |  6 +++
 dts/doc/framework.params.rst  | 14 ++
 dts/doc/framework.params.testpmd.rst  |  6 +++
 dts/doc/framework.params.types.rst|  6 +++
 dts/doc/framework.parser.rst  |  6 +++
 .../framework.remote_session.dpdk_shell.rst   |  6 +++
 ...ote_session.interactive_remote_session.rst |  6 +++
 ...ework.remote_session.interactive_shell.rst |  6 +++
 .../framework.remote_session.python_shell.rst |  6 +++
 ...ramework.remote_session.remote_session.rst |  6 +++
 dts/doc/framework.remote_session.rst  | 18 
 .../framework.remote_session.ssh_session.rst  |  6 +++
 ...framework.remote_session.testpmd_shell.rst |  6 +++
 dts/doc/framework.runner.rst  |  6 +++
 dts/doc/framework.settings.rst|  6 +++
 dts/doc/framework.test_result.rst |  6 +++
 dts/doc/framework.test_suite.rst  |  6 +++
 dts/doc/framework.testbed_model.cpu.rst   |  6 +++
 .../framework.testbed_model.linux_session.rst |  6 +++
 dts/doc/framework.testbed_model.node.rst  |  6 +++
 .../framework.testbed_model.os_session.rst|  6 +++
 dts/doc/framework.testbed_model.port.rst  |  6 +++
 .../framework.testbed_model.posix_session.rst |  6 +++
 dts/doc/framework.testbed_model.rst   | 26 +++
 dts/doc/framework.testbed_model.sut_node.rst  |  6 +++
 dts/doc/framework.testbed_model.tg_node.rst   |  6 +++
 ..._generator.capturing_traffic_generator.rst |  6 +++
 ...mework.testbed_model.traffic_generator.rst | 14 ++
 testbed_model.traffic_generator.scapy.rst |  6 +++
 ...el.traffic_generator.traffic_generator.rst |  6 +++
 ...framework.testbed_model.virtual_device.rst |  6 +++
 dts/doc/framework.utils.rst   |  6 +++
 dts/doc/index.rst | 43 +++
 38 files changed, 314 insertions(+)
 create mode 12 dts/doc/conf_yaml_schema.json
 create mode 100644 dts/doc/framework.config.rst
 create mode 100644 dts/doc/framework.config.types.rst
 create mode 100644 dts/doc/framework.exception.rst
 create mode 100644 dts/doc/framework.logger.rst
 create mode 100644 dts/doc/framework.params.eal.rst
 create mode 100644 dts/doc/framework.params.rst
 create mode 100644 dts/doc/framework.params.testpmd.rst
 create mode 100644 dts/doc/framework.params.types.rst
 create mode 100644 dts/doc/framework.parser.rst
 create mode 100644 dts/doc/framework.remote_session.dpdk_shell.rst
 create mode 100644 
dts/doc/framework.remote_session.interactive_remote_session.rst
 create mode 100644 dts/doc/framework.remote_session.interactive_shell.rst
 create mode 100644 dts/doc/framework.remote_session.python_shell.rst
 create mode 100644 dts/doc/framework.remote_session.remote_session.rst
 create mode 100644 dts/doc/framework.remote_session.rst
 create mode 100644 dts/doc/framework.remote_session.ssh_session.rst
 create mode 100644 dts/doc/framework.remote_session.testpmd_shell.rst
 create mode 100644 dts/doc/framework.runner.rst
 create mode 100644 dts/doc/framework.settings.rst
 create mode 100644 dts/doc/framework.test_result.rst
 create mode 100644 dts/doc/framework.test_suite.rst
 create mode 100644 dts/doc/framework.testbed_model.cpu.rst
 create mode 100644 dts/doc/framework.testbed_model.linux_session.rst
 create mode 100644 dts/doc/framework.testbed_model.node.rst
 create mode 100644 dts/doc/framework.testbed_model.os_session.rst
 create mode 100644 dts/doc/framework.testbed_model.port.rst
 create mode 100644 dts/doc/framework.testbed_model.posix_session.rst
 create mode 100644 dts/doc/framework.testbed_model.rst
 create mode 100644 dts/doc/framework.testbed_model.sut_node.rst
 create mode 100644 dts/doc/framework.testbed_model.tg_node.rst
 create mode 100644 
dts/doc/framework.testbed_model.traffic_generator.capturing_traffic_generator.rst
 create mode 100644 dts/doc/framework.testbed_model.traffic_generator.rst
 create mode 100644 dts/doc/framework.testbed_model.traffic_generator.scapy.rst
 create mode 100644 
dts/doc/framework.testbed_model.traffic_generator.traffic_generator

[PATCH v13 5/6] doc: meson doc API build dir variable

2024-08-06 Thread Juraj Linkeš
The three instances of the path 'dpdk_build_root/doc/api' are replaced
with a variable, moving the definition to one place.

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
Acked-by: Bruce Richardson 
Tested-by: Luca Vizzarro 
Tested-by: Nicholas Pratte 
---
 doc/api/meson.build | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/doc/api/meson.build b/doc/api/meson.build
index 5b50692df9..b828b1ed66 100644
--- a/doc/api/meson.build
+++ b/doc/api/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2018 Luca Boccassi 
 
+doc_api_build_dir = meson.current_build_dir()
 doxygen = find_program('doxygen', required: get_option('enable_docs'))
 
 if not doxygen.found()
@@ -32,10 +33,10 @@ example = custom_target('examples.dox',
 # set up common Doxygen configuration
 cdata = configuration_data()
 cdata.set('VERSION', meson.project_version())
-cdata.set('API_EXAMPLES', join_paths(dpdk_build_root, 'doc', 'api', 
'examples.dox'))
-cdata.set('OUTPUT', join_paths(dpdk_build_root, 'doc', 'api'))
+cdata.set('API_EXAMPLES', join_paths(doc_api_build_dir, 'examples.dox'))
+cdata.set('OUTPUT', doc_api_build_dir)
 cdata.set('TOPDIR', dpdk_source_root)
-cdata.set('STRIP_FROM_PATH', ' '.join([dpdk_source_root, 
join_paths(dpdk_build_root, 'doc', 'api')]))
+cdata.set('STRIP_FROM_PATH', ' '.join([dpdk_source_root, doc_api_build_dir]))
 cdata.set('WARN_AS_ERROR', 'NO')
 if get_option('werror')
 cdata.set('WARN_AS_ERROR', 'YES')
-- 
2.34.1



[PATCH v13 6/6] dts: add API doc generation

2024-08-06 Thread Juraj Linkeš
The tool used to generate DTS API docs is Sphinx, which is already in
use in DPDK. The same configuration is used to preserve style with one
DTS-specific configuration (so that the DPDK docs are unchanged) that
modifies how the sidebar displays the content.

Sphinx generates the documentation from Python docstrings. The docstring
format is the Google format [0] which requires the sphinx.ext.napoleon
extension. The other extension, sphinx.ext.intersphinx, enables linking
to objects in external documentations, such as the Python documentation.

There is one requirement for building DTS docs - the same Python version
as DTS or higher, because Sphinx's autodoc extension imports the code.

The dependencies needed to import the code don't have to be satisfied,
as the autodoc extension allows us to mock the imports. The missing
packages are taken from the DTS pyproject.toml file.

[0] https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings

Signed-off-by: Juraj Linkeš 
---
 buildtools/call-sphinx-build.py   | 10 ++-
 buildtools/get-dts-deps.py| 78 +++
 buildtools/meson.build|  1 +
 doc/api/doxy-api-index.md |  3 +
 doc/api/doxy-api.conf.in  |  2 +
 doc/api/meson.build   |  1 +
 doc/guides/conf.py| 41 +++-
 doc/guides/contributing/documentation.rst |  2 +
 doc/guides/contributing/patches.rst   |  4 ++
 doc/guides/meson.build|  1 +
 doc/guides/tools/dts.rst  | 39 +++-
 dts/doc/meson.build   | 30 +
 dts/meson.build   | 15 +
 meson.build   |  1 +
 14 files changed, 225 insertions(+), 3 deletions(-)
 create mode 100755 buildtools/get-dts-deps.py
 create mode 100644 dts/doc/meson.build
 create mode 100644 dts/meson.build

diff --git a/buildtools/call-sphinx-build.py b/buildtools/call-sphinx-build.py
index 623e7363ee..5dd59907cd 100755
--- a/buildtools/call-sphinx-build.py
+++ b/buildtools/call-sphinx-build.py
@@ -15,6 +15,11 @@
 
 # set the version in environment for sphinx to pick up
 os.environ['DPDK_VERSION'] = version
+conf_src = src
+if src.find('dts') != -1:
+if '-c' in extra_args:
+conf_src = extra_args[extra_args.index('-c') + 1]
+os.environ['DTS_BUILD'] = "y"
 
 sphinx_cmd = [sphinx] + extra_args
 
@@ -23,6 +28,9 @@
 for root, dirs, files in os.walk(src):
 srcfiles.extend([join(root, f) for f in files])
 
+if not os.path.exists(dst):
+os.makedirs(dst)
+
 # run sphinx, putting the html output in a "html" directory
 with open(join(dst, 'sphinx_html.out'), 'w') as out:
 process = run(sphinx_cmd + ['-b', 'html', src, join(dst, 'html')],
@@ -34,7 +42,7 @@
 
 # copy custom CSS file
 css = 'custom.css'
-src_css = join(src, css)
+src_css = join(conf_src, css)
 dst_css = join(dst, 'html', '_static', 'css', css)
 if not os.path.exists(dst_css) or not filecmp.cmp(src_css, dst_css):
 os.makedirs(os.path.dirname(dst_css), exist_ok=True)
diff --git a/buildtools/get-dts-deps.py b/buildtools/get-dts-deps.py
new file mode 100755
index 00..309b83cb5c
--- /dev/null
+++ b/buildtools/get-dts-deps.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 PANTHEON.tech s.r.o.
+#
+
+"""Utilities for DTS dependencies.
+
+The module can be used as an executable script,
+which verifies that the running Python version meets the version requirement 
of DTS.
+The script returns the standard exit codes in this mode (0 is success, 1 is 
failure).
+
+The module also contains a function, get_missing_imports,
+which looks for runtime and doc generation dependencies in the DTS 
pyproject.toml file
+and returns a list of module names used in an import statement that are missing.
+"""
+
+import configparser
+import importlib.metadata
+import importlib.util
+import os.path
+import platform
+
+_VERSION_COMPARISON_CHARS = '^<>='
+_EXTRA_DEPS = {'invoke': '>=1.3', 'paramiko': '>=2.4'}
+_DPDK_ROOT = os.path.dirname(os.path.dirname(__file__))
+_DTS_DEP_FILE_PATH = os.path.join(_DPDK_ROOT, 'dts', 'pyproject.toml')
+
+
+def _get_version_tuple(version_str):
+return tuple(map(int, version_str.split(".")))
+
+
+def _get_dependencies(cfg_file_path):
+cfg = configparser.ConfigParser()
+with open(cfg_file_path) as f:
+dts_deps_file_str = f.read()
+dts_deps_file_str = dts_deps_file_str.replace("\n]", "]")
+cfg.read_string(dts_deps_file_str)
+
+deps_section = cfg['tool.poetry.dependencies']
+deps = {dep: deps_section[dep].strip('"\'') for dep in deps_section}
+doc_deps_section = cfg['tool.poetry.group.docs.dependencies']
+doc_deps = {dep: doc_deps_section[dep].strip("\"'") for dep in 
doc_deps_section}
+
+return deps | doc_deps
+
+
+def get_missing_imports():
+missing_imports = []
+req_deps = _get_dependencies(_DTS_DEP_FI

Re: [v1 7/7] crypto/dpaa2_sec: rework debug code

2024-08-06 Thread Hemant Agrawal

Hi Gagan,

On 06-08-2024 14:11, Gagandeep Singh wrote:

From: Jun Yang 

Output debug information according to various modes.

Signed-off-by: Jun Yang 
---
  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 63 -
  1 file changed, 48 insertions(+), 15 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e28c71b53..da3bd871ba 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1095,7 +1095,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
  
  static int

  build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-   struct qbman_fd *fd, __rte_unused uint16_t bpid)
+   struct qbman_fd *fd, uint16_t bpid)
  {
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
@@ -1105,6 +1105,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
struct rte_mbuf *mbuf;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
  
  	data_len = sym_op->cipher.data.length;

data_offset = sym_op->cipher.data.offset;
@@ -1210,14 +1214,26 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
  
-	DPAA2_SEC_DP_DEBUG(

-   "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
-   " off =%d, len =%d\n",
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   offset = sprintf(debug_str,
+   "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ",
DPAA2_GET_FD_ADDR(fd),
-   DPAA2_GET_FD_BPID(fd),
-   rte_dpaa2_bpid_info[bpid].meta_data_size,
-   DPAA2_GET_FD_OFFSET(fd),
-   DPAA2_GET_FD_LEN(fd));
+   bpid < MAX_BPID ? "SW" : "BMAN");
+   if (bpid < MAX_BPID) {
+   offset += sprintf(&debug_str[offset],
+   "bpid = %d ", bpid);
+   }
+   offset += sprintf(&debug_str[offset],
+   "private size = %d ",
+   mbuf->pool->private_data_size);
+   offset += sprintf(&debug_str[offset],
+   "off =%d, len =%d\n",
+   DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
+   DPAA2_SEC_DP_DEBUG("%s", debug_str);
+#else
+   RTE_SET_USED(bpid);
+#endif
+
return 0;
  }
  
@@ -1233,6 +1249,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,

uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
struct rte_mbuf *dst;
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
  
  	data_len = sym_op->cipher.data.length;

data_offset = sym_op->cipher.data.offset;
@@ -1324,14 +1344,23 @@ build_cipher_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
  
-	DPAA2_SEC_DP_DEBUG(

-   "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
-   " off =%d, len =%d\n",
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   offset = sprintf(debug_str,
+   "CIPHER: fdaddr =%" PRIx64 ", from %s pool ",
DPAA2_GET_FD_ADDR(fd),
-   DPAA2_GET_FD_BPID(fd),
-   rte_dpaa2_bpid_info[bpid].meta_data_size,
-   DPAA2_GET_FD_OFFSET(fd),
-   DPAA2_GET_FD_LEN(fd));
+   bpid < MAX_BPID ? "SW" : "BMAN");
+   if (bpid < MAX_BPID) {
+   offset += sprintf(&debug_str[offset],
+   "bpid = %d ", bpid);
+   }
+   offset += sprintf(&debug_str[offset],
+   "private size = %d ",
+   dst->pool->private_data_size);
+   offset += sprintf(&debug_str[offset],
+   "off =%d, len =%d\n",
+   DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
+   DPAA2_SEC_DP_DEBUG("%s", debug_str);
+#endif
  
  	return 0;

  }
@@ -1564,6 +1593,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct 
dpaa2_sec_qp *qp)
struct qbman_fle *fle;
struct rte_crypto_op *op;
struct rte_mbuf *dst, *src;
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif


You have defined these variables but not used them. Have you missed 
something in this patch?



  
  	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)

return sec_simple_fd_to_mbuf(fd);


Re: [v1 5/7] net/dpaa2: support FLC stashing API

2024-08-06 Thread Hemant Agrawal

Hi Gagan,


I think this patch should come before the 4th patch: [v1 4/7] 
crypto/dpaa2_sec: enhance pdcp FLC handling



- Hemant


On 06-08-2024 14:11, Gagandeep Singh wrote:

From: Jun Yang 

Configure flow steering action with FLC enabled to align stashing
setting with RSS configuration.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 44 +
  drivers/net/dpaa2/dpaa2_ethdev.c| 25 +++---
  2 files changed, 58 insertions(+), 11 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h 
b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..4c30e6db18 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -179,6 +179,7 @@ struct __rte_cache_aligned dpaa2_queue {
uint16_t resv;
uint64_t offloads;
uint64_t lpbk_cntx;
+   uint8_t data_stashing_off;
  };
  
  struct swp_active_dqs {

@@ -463,6 +464,49 @@ struct qbman_result *get_swp_active_dqs(uint16_t 
dpio_index)
return rte_global_active_dqs_list[dpio_index].global_active_dqs;
  }
  
+/* 00 00 00 - last 6 bit represent data, annotation,

+ * context stashing setting 01 01 00 (0x14)
+ * (in following order ->DS AS CS)
+ * to enable 1 line data, 1 line annotation.
+ * For LX2, this setting should be 01 00 00 (0x10)
+ */
+#define DPAA2_FLC_STASHING_MAX_BIT_SIZE 2
+#define DPAA2_FLC_STASHING_MAX_CACHE_LINE \
+   ((1ULL << DPAA2_FLC_STASHING_MAX_BIT_SIZE) - 1)
+
+enum dpaa2_flc_stashing_type {
+   DPAA2_FLC_CNTX_STASHING = 0,
+   DPAA2_FLC_ANNO_STASHING =
+   DPAA2_FLC_CNTX_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE,
+   DPAA2_FLC_DATA_STASHING =
+   DPAA2_FLC_ANNO_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE,
+   DPAA2_FLC_END_STASHING =
+   DPAA2_FLC_DATA_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE
+};
+
+#define DPAA2_STASHING_ALIGN_SIZE (1 << DPAA2_FLC_END_STASHING)
+
+static inline void
+dpaa2_flc_stashing_set(enum dpaa2_flc_stashing_type type,
+   uint8_t cache_line, uint64_t *flc)
+{
+   RTE_ASSERT(cache_line <= DPAA2_FLC_STASHING_MAX_CACHE_LINE);
+   RTE_ASSERT(type == DPAA2_FLC_CNTX_STASHING ||
+   type == DPAA2_FLC_ANNO_STASHING ||
+   type == DPAA2_FLC_DATA_STASHING);
+
+   (*flc) &= ~(DPAA2_FLC_STASHING_MAX_CACHE_LINE << type);
+   (*flc) |= (cache_line << type);
+}
+
+static inline void
+dpaa2_flc_stashing_clear_all(uint64_t *flc)
+{
+   dpaa2_flc_stashing_set(DPAA2_FLC_CNTX_STASHING, 0, flc);
+   dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 0, flc);
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0, flc);
+}
+
  static inline
  void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
  {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 449bbda7ca..726bc0cf3e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -786,17 +786,20 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
options |= DPNI_QUEUE_OPT_FLC;
cfg.flc.stash_control = true;
-   cfg.flc.value &= 0xFFFFFFC0;
-   /* 00 00 00 - last 6 bit represent annotation, context stashing,
-* data stashing setting 01 01 00 (0x14)
-* (in following order ->DS AS CS)
-* to enable 1 line data, 1 line annotation.
-* For LX2, this setting should be 01 00 00 (0x10)
-*/
-   if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
-   cfg.flc.value |= 0x10;
-   else
-   cfg.flc.value |= 0x14;
+   dpaa2_flc_stashing_clear_all(&cfg.flc.value);
+   if (getenv("DPAA2_DATA_STASHING_OFF")) {
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0,
+   &cfg.flc.value);
+   dpaa2_q->data_stashing_off = 1;
+   } else {
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &cfg.flc.value);
+   dpaa2_q->data_stashing_off = 0;
+   }
+   if ((dpaa2_svr_family & 0xffff0000) != SVR_LX2160A) {
+   dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 1,
+   &cfg.flc.value);
+   }
}
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
 dpaa2_q->tc_index, flow_id, options, &cfg);


[v2 0/7] DPAA2 crypto changes

2024-08-06 Thread Gagandeep Singh
v2 changes:
* fix patch sequence
* add missing code in
"crypto/dpaa2_sec: rework debug code"

Changes related to crypto driver

Gagandeep Singh (1):
  crypto/dpaa2_sec: fix memory leak

Jun Yang (5):
  net/dpaa2: support FLC stashing API
  crypto/dpaa2_sec: enhance IPsec RFLC handling
  crypto/dpaa2_sec: enhance pdcp FLC handling
  crypto/dpaa2_sec: remove prefetch code in event mode
  crypto/dpaa2_sec: rework debug code

Varun Sethi (1):
  common/dpaax: caamflib: fix PDCP SNOW-ZUC wdog DECO err

 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h |  44 +
 drivers/common/dpaax/caamflib/desc/pdcp.h   |  10 ++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 168 
 drivers/net/dpaa2/dpaa2_ethdev.c|  25 +--
 4 files changed, 168 insertions(+), 79 deletions(-)

-- 
2.25.1



[v2 1/7] crypto/dpaa2_sec: fix memory leak

2024-08-06 Thread Gagandeep Singh
Fix a memory leak that occurs when a PDCP session is
created with invalid data.

Fixes: bef594ec5cc8 ("crypto/dpaa2_sec: support PDCP offload")
Cc: akhil.go...@nxp.com
Cc: sta...@dpdk.org

Signed-off-by: Gagandeep Singh 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c1f7181d55..e0b8bacdb8 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3420,6 +3420,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
}
} else {
DPAA2_SEC_ERR("Invalid crypto type");
+   rte_free(priv);
return -EINVAL;
}
 
-- 
2.25.1



[v2 2/7] common/dpaax: caamflib: fix PDCP SNOW-ZUC wdog DECO err

2024-08-06 Thread Gagandeep Singh
From: Varun Sethi 

Add a JUMP instruction with the CALM flag to ensure that
the previous processing has completed.

Fixes: 8827d94398f1 ("crypto/dpaa2_sec/hw: support AES-AES 18-bit PDCP")
Cc: vakul.g...@nxp.com
Cc: sta...@dpdk.org

Signed-off-by: Gagandeep Singh 
Signed-off-by: Varun Sethi 
Acked-by: Hemant Agrawal 
---
 drivers/common/dpaax/caamflib/desc/pdcp.h | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h 
b/drivers/common/dpaax/caamflib/desc/pdcp.h
index bc35114cf4..9ada3905c5 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -1220,6 +1220,11 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
 
+   /* conditional jump with calm added to ensure that the
+* previous processing has been completed
+*/
+   JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
LOAD(p, CLRW_RESET_CLS1_CHA |
 CLRW_CLR_C1KEY |
 CLRW_CLR_C1CTX |
@@ -1921,6 +1926,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p,
 
MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED);
 
+   /* conditional jump with calm added to ensure that the
+* previous processing has been completed
+*/
+   JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
LOAD(p, CLRW_RESET_CLS1_CHA |
 CLRW_CLR_C1KEY |
 CLRW_CLR_C1CTX |
-- 
2.25.1



[v2 3/7] net/dpaa2: support FLC stashing API

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Configure flow steering action with FLC enabled to align stashing
setting with RSS configuration.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 44 +
 drivers/net/dpaa2/dpaa2_ethdev.c| 25 +++---
 2 files changed, 58 insertions(+), 11 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h 
b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..4c30e6db18 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -179,6 +179,7 @@ struct __rte_cache_aligned dpaa2_queue {
uint16_t resv;
uint64_t offloads;
uint64_t lpbk_cntx;
+   uint8_t data_stashing_off;
 };
 
 struct swp_active_dqs {
@@ -463,6 +464,49 @@ struct qbman_result *get_swp_active_dqs(uint16_t 
dpio_index)
return rte_global_active_dqs_list[dpio_index].global_active_dqs;
 }
 
+/* 00 00 00 - last 6 bit represent data, annotation,
+ * context stashing setting 01 01 00 (0x14)
+ * (in following order ->DS AS CS)
+ * to enable 1 line data, 1 line annotation.
+ * For LX2, this setting should be 01 00 00 (0x10)
+ */
+#define DPAA2_FLC_STASHING_MAX_BIT_SIZE 2
+#define DPAA2_FLC_STASHING_MAX_CACHE_LINE \
+   ((1ULL << DPAA2_FLC_STASHING_MAX_BIT_SIZE) - 1)
+
+enum dpaa2_flc_stashing_type {
+   DPAA2_FLC_CNTX_STASHING = 0,
+   DPAA2_FLC_ANNO_STASHING =
+   DPAA2_FLC_CNTX_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE,
+   DPAA2_FLC_DATA_STASHING =
+   DPAA2_FLC_ANNO_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE,
+   DPAA2_FLC_END_STASHING =
+   DPAA2_FLC_DATA_STASHING + DPAA2_FLC_STASHING_MAX_BIT_SIZE
+};
+
+#define DPAA2_STASHING_ALIGN_SIZE (1 << DPAA2_FLC_END_STASHING)
+
+static inline void
+dpaa2_flc_stashing_set(enum dpaa2_flc_stashing_type type,
+   uint8_t cache_line, uint64_t *flc)
+{
+   RTE_ASSERT(cache_line <= DPAA2_FLC_STASHING_MAX_CACHE_LINE);
+   RTE_ASSERT(type == DPAA2_FLC_CNTX_STASHING ||
+   type == DPAA2_FLC_ANNO_STASHING ||
+   type == DPAA2_FLC_DATA_STASHING);
+
+   (*flc) &= ~(DPAA2_FLC_STASHING_MAX_CACHE_LINE << type);
+   (*flc) |= (cache_line << type);
+}
+
+static inline void
+dpaa2_flc_stashing_clear_all(uint64_t *flc)
+{
+   dpaa2_flc_stashing_set(DPAA2_FLC_CNTX_STASHING, 0, flc);
+   dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 0, flc);
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0, flc);
+}
+
 static inline
 void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
 {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 449bbda7ca..726bc0cf3e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -786,17 +786,20 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
options |= DPNI_QUEUE_OPT_FLC;
cfg.flc.stash_control = true;
-   cfg.flc.value &= 0xFFFFFFC0;
-   /* 00 00 00 - last 6 bit represent annotation, context stashing,
-* data stashing setting 01 01 00 (0x14)
-* (in following order ->DS AS CS)
-* to enable 1 line data, 1 line annotation.
-* For LX2, this setting should be 01 00 00 (0x10)
-*/
-   if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
-   cfg.flc.value |= 0x10;
-   else
-   cfg.flc.value |= 0x14;
+   dpaa2_flc_stashing_clear_all(&cfg.flc.value);
+   if (getenv("DPAA2_DATA_STASHING_OFF")) {
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 0,
+   &cfg.flc.value);
+   dpaa2_q->data_stashing_off = 1;
+   } else {
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &cfg.flc.value);
+   dpaa2_q->data_stashing_off = 0;
+   }
+   if ((dpaa2_svr_family & 0xffff0000) != SVR_LX2160A) {
+   dpaa2_flc_stashing_set(DPAA2_FLC_ANNO_STASHING, 1,
+   &cfg.flc.value);
+   }
}
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
 dpaa2_q->tc_index, flow_id, options, &cfg);
-- 
2.25.1
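
A minimal sketch, not part of the patch, of the 6-bit stashing encoding the
new helpers produce; the field offsets mirror the dpaa2_flc_stashing_type
enum and the 0x14/0x10 values are the legacy constants being replaced. The
Python names below are illustrative only:

    # Two bits per field, counted from the LSB: CS (context), AS (annotation),
    # DS (data); each field holds the number of cache lines to stash.
    CNTX, ANNO, DATA = 0, 2, 4

    def flc_stashing_value(cntx=0, anno=0, data=0):
        return (cntx << CNTX) | (anno << ANNO) | (data << DATA)

    assert flc_stashing_value(data=1, anno=1) == 0x14  # 1 line data + 1 line annotation
    assert flc_stashing_value(data=1) == 0x10          # LX2160A: data stashing only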



[v2 4/7] crypto/dpaa2_sec: enhance IPsec RFLC handling

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Point the response FLC to the FLC (SEC descriptor context).
The response FLC can be used for debugging in the dequeue process.
Enable data stashing only; annotation stashing is disabled because
it is not suitable for SEC and impacts performance.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 44 -
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index e0b8bacdb8..0e30192b30 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2005,7 +2005,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, 
uint16_t qp_id,
struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
struct dpseci_rx_queue_cfg cfg;
int32_t retcode;
-   char str[30];
+   char str[RTE_MEMZONE_NAMESIZE];
 
PMD_INIT_FUNC_TRACE();
 
@@ -2065,8 +2065,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, 
uint16_t qp_id,
return -ENOMEM;
}
 
-   cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
-   cfg.user_ctx = (size_t)(&qp->rx_vq);
+   cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
  qp_id, &cfg);
return retcode;
@@ -3060,14 +3059,19 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
struct alginfo authdata, cipherdata;
int bufsize;
struct sec_flow_context *flc;
+   uint64_t flc_iova;
int ret = -1;
 
PMD_INIT_FUNC_TRACE();
 
-   priv = (struct ctxt_priv *)rte_zmalloc(NULL,
-   sizeof(struct ctxt_priv) +
-   sizeof(struct sec_flc_desc),
-   RTE_CACHE_LINE_SIZE);
+   RTE_SET_USED(dev);
+
+   /** Make the FLC address align with stashing; the low 6 bits are
+* used to control stashing.
+*/
+   priv = rte_zmalloc(NULL, sizeof(struct ctxt_priv) +
+   sizeof(struct sec_flc_desc),
+   DPAA2_STASHING_ALIGN_SIZE);
 
if (priv == NULL) {
DPAA2_SEC_ERR("No memory for priv CTXT");
@@ -3077,10 +3081,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
flc = &priv->flc_desc[0].flc;
 
if (ipsec_xform->life.bytes_hard_limit != 0 ||
-   ipsec_xform->life.bytes_soft_limit != 0 ||
-   ipsec_xform->life.packets_hard_limit != 0 ||
-   ipsec_xform->life.packets_soft_limit != 0)
+   ipsec_xform->life.bytes_soft_limit != 0 ||
+   ipsec_xform->life.packets_hard_limit != 0 ||
+   ipsec_xform->life.packets_soft_limit != 0) {
+   rte_free(priv);
return -ENOTSUP;
+   }
 
memset(session, 0, sizeof(dpaa2_sec_session));
 
@@ -3330,24 +3336,26 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
SHR_WAIT : SHR_SERIAL,
&decap_pdb, &cipherdata, &authdata);
-   } else
+   } else {
+   ret = -EINVAL;
goto out;
+   }
 
if (bufsize < 0) {
+   ret = -EINVAL;
DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
goto out;
}
 
flc->word1_sdl = (uint8_t)bufsize;
 
-   /* Enable the stashing control bit */
+   flc_iova = DPAA2_VADDR_TO_IOVA(flc);
+   /* Enable the stashing control bit and data stashing only.*/
DPAA2_SET_FLC_RSC(flc);
-   flc->word2_rflc_31_0 = lower_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq) | 0x14);
-   flc->word3_rflc_63_32 = upper_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq));
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &flc_iova);
+   flc->word2_rflc_31_0 = lower_32_bits(flc_iova);
+   flc->word3_rflc_63_32 = upper_32_bits(flc_iova);
 
/* Set EWS bit i.e. enable write-safe */
DPAA2_SET_FLC_EWS(flc);
-- 
2.25.1
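
The low 6 bits of the FLC IOVA now carry the stashing configuration, which
is why the descriptor is allocated with DPAA2_STASHING_ALIGN_SIZE (64-byte)
alignment before the data-stashing bit is OR-ed in and the result is split
into the two RFLC words. A small illustrative sketch in Python; the IOVA
value is hypothetical:

    FLC_END_STASHING_BITS = 6
    ALIGN = 1 << FLC_END_STASHING_BITS      # 64-byte alignment keeps the low 6 bits free
    DATA_STASHING_ONE_LINE = 0x10           # DS = 1 cache line, AS = CS = 0

    flc_iova = 0x12345000                   # hypothetical 64-byte aligned IOVA
    assert flc_iova % ALIGN == 0

    rflc = flc_iova | DATA_STASHING_ONE_LINE
    word2_rflc_31_0 = rflc & 0xFFFFFFFF     # lower_32_bits()
    word3_rflc_63_32 = rflc >> 32           # upper_32_bits()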



[v2 6/7] crypto/dpaa2_sec: remove prefetch code in event mode

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Do not prefetch the mbuf and crypto_op, which are not touched
by hardware.

Signed-off-by: Jun Yang 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 20 +---
 1 file changed, 1 insertion(+), 19 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index ff24a8919a..1e28c71b53 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3991,12 +3991,6 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
 struct rte_event *ev)
 {
struct dpaa2_sec_qp *qp;
-   /* Prefetching mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
-   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
-
-   /* Prefetching ipsec crypto_op stored in priv data of mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
@@ -4010,6 +4004,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
 
qbman_swp_dqrr_consume(swp, dq);
 }
+
 static void
 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 const struct qbman_fd *fd,
@@ -4020,12 +4015,6 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp 
__rte_unused,
uint8_t dqrr_index;
struct dpaa2_sec_qp *qp;
struct rte_crypto_op *crypto_op;
-   /* Prefetching mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
-   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
-
-   /* Prefetching ipsec crypto_op stored in priv data of mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
 
qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
@@ -4055,13 +4044,6 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
struct rte_crypto_op *crypto_op;
struct dpaa2_sec_qp *qp;
 
-   /* Prefetching mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
-   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
-
-   /* Prefetching ipsec crypto_op stored in priv data of mbuf */
-   rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
-
qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
ev->sub_event_type = rxq->ev.sub_event_type;
-- 
2.25.1



[v2 5/7] crypto/dpaa2_sec: enhance pdcp FLC handling

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Set the RFLC to the FLC IOVA address, with data stashing only.

Signed-off-by: Jun Yang 
Acked-by: Hemant Agrawal 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 16 +---
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 0e30192b30..ff24a8919a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3389,6 +3389,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
struct alginfo *p_authdata = NULL;
int bufsize = -1;
struct sec_flow_context *flc;
+   uint64_t flc_iova;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
int swap = true;
 #else
@@ -3397,6 +3398,8 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 
PMD_INIT_FUNC_TRACE();
 
+   RTE_SET_USED(dev);
+
memset(session, 0, sizeof(dpaa2_sec_session));
 
priv = (struct ctxt_priv *)rte_zmalloc(NULL,
@@ -3646,14 +3649,13 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
goto out;
}
 
-   /* Enable the stashing control bit */
+   flc_iova = DPAA2_VADDR_TO_IOVA(flc);
+   /* Enable the stashing control bit and data stashing only.*/
DPAA2_SET_FLC_RSC(flc);
-   flc->word2_rflc_31_0 = lower_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq) | 0x14);
-   flc->word3_rflc_63_32 = upper_32_bits(
-   (size_t)&(((struct dpaa2_sec_qp *)
-   dev->data->queue_pairs[0])->rx_vq));
+   dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
+   &flc_iova);
+   flc->word2_rflc_31_0 = lower_32_bits(flc_iova);
+   flc->word3_rflc_63_32 = upper_32_bits(flc_iova);
 
flc->word1_sdl = (uint8_t)bufsize;
 
-- 
2.25.1



[v2 7/7] crypto/dpaa2_sec: rework debug code

2024-08-06 Thread Gagandeep Singh
From: Jun Yang 

Output debug information according to various modes.

Signed-off-by: Jun Yang 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 87 +++--
 1 file changed, 63 insertions(+), 24 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e28c71b53..a293a21881 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1095,7 +1095,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
 
 static int
 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
-   struct qbman_fd *fd, __rte_unused uint16_t bpid)
+   struct qbman_fd *fd, uint16_t bpid)
 {
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
@@ -1105,6 +1105,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
struct rte_mbuf *mbuf;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
 
data_len = sym_op->cipher.data.length;
data_offset = sym_op->cipher.data.offset;
@@ -1210,14 +1214,26 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-   DPAA2_SEC_DP_DEBUG(
-   "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
-   " off =%d, len =%d\n",
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   offset = sprintf(debug_str,
+   "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ",
DPAA2_GET_FD_ADDR(fd),
-   DPAA2_GET_FD_BPID(fd),
-   rte_dpaa2_bpid_info[bpid].meta_data_size,
-   DPAA2_GET_FD_OFFSET(fd),
-   DPAA2_GET_FD_LEN(fd));
+   bpid < MAX_BPID ? "SW" : "BMAN");
+   if (bpid < MAX_BPID) {
+   offset += sprintf(&debug_str[offset],
+   "bpid = %d ", bpid);
+   }
+   offset += sprintf(&debug_str[offset],
+   "private size = %d ",
+   mbuf->pool->private_data_size);
+   offset += sprintf(&debug_str[offset],
+   "off =%d, len =%d\n",
+   DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
+   DPAA2_SEC_DP_DEBUG("%s", debug_str);
+#else
+   RTE_SET_USED(bpid);
+#endif
+
return 0;
 }
 
@@ -1233,6 +1249,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
struct rte_mbuf *dst;
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
 
data_len = sym_op->cipher.data.length;
data_offset = sym_op->cipher.data.offset;
@@ -1324,14 +1344,23 @@ build_cipher_fd(dpaa2_sec_session *sess, struct 
rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
 
-   DPAA2_SEC_DP_DEBUG(
-   "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
-   " off =%d, len =%d\n",
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   offset = sprintf(debug_str,
+   "CIPHER: fdaddr =%" PRIx64 ", from %s pool ",
DPAA2_GET_FD_ADDR(fd),
-   DPAA2_GET_FD_BPID(fd),
-   rte_dpaa2_bpid_info[bpid].meta_data_size,
-   DPAA2_GET_FD_OFFSET(fd),
-   DPAA2_GET_FD_LEN(fd));
+   bpid < MAX_BPID ? "SW" : "BMAN");
+   if (bpid < MAX_BPID) {
+   offset += sprintf(&debug_str[offset],
+   "bpid = %d ", bpid);
+   }
+   offset += sprintf(&debug_str[offset],
+   "private size = %d ",
+   dst->pool->private_data_size);
+   offset += sprintf(&debug_str[offset],
+   "off =%d, len =%d\n",
+   DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
+   DPAA2_SEC_DP_DEBUG("%s", debug_str);
+#endif
 
return 0;
 }
@@ -1564,6 +1593,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct 
dpaa2_sec_qp *qp)
struct qbman_fle *fle;
struct rte_crypto_op *op;
struct rte_mbuf *dst, *src;
+#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
+   char debug_str[1024];
+   int offset;
+#endif
 
if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
return sec_simple_fd_to_mbuf(fd);
@@ -1602,15 +1635,21 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct 
dpaa2_sec_qp *qp)
dst->data_len = len;
}
 
-   DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
-   " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
-   (void *)dst,
-   dst->buf_addr,
-   DPAA2_GET_FD_ADDR(fd),
-   

[DPDK/meson Bug 1515] Meson install replaces api doc index.html

2024-08-06 Thread bugzilla
https://bugs.dpdk.org/show_bug.cgi?id=1515

Bug ID: 1515
   Summary: Meson install replaces api doc index.html
   Product: DPDK
   Version: 24.07
  Hardware: All
OS: All
Status: UNCONFIRMED
  Severity: normal
  Priority: Normal
 Component: meson
  Assignee: dev@dpdk.org
  Reporter: juraj.lin...@pantheon.tech
  Target Milestone: ---

When running meson install such as:
meson setup ci_docs -Denable_docs=true -Dexamples=all -Dplatform=generic
-Ddefault_library=shared -Dbuildtype=debug -Dcheck_includes=true
-Denable_deprecated_libs=* -Dlibdir=lib -Dwerror=true
DESTDIR=/home/jlinkes/workspace/dpdk/build-install meson install -C ci_docs

The index.html file from API docs get overwritten by the guides index.html:
Installing /home/jlinkes/workspace/dpdk/dpdk/ci_docs/doc/api/html/index.html to
/home/jlinkes/workspace/dpdk/build-install/usr/local/share/doc/dpdk/html
...
Installing /home/jlinkes/workspace/dpdk/dpdk/ci_docs/doc/guides/html/index.html
to /home/jlinkes/workspace/dpdk/build-install/usr/local/share/doc/dpdk/html

The resulting installed API docs are thus missing their index.html page.

-- 
You are receiving this mail because:
You are the assignee for the bug.

[PATCH v14 0/6] API docs generation

2024-08-06 Thread Juraj Linkeš
The generation is done with Sphinx, which DPDK already uses, with
slightly modified configuration of the sidebar present in an if block.

DTS dependencies do not need to be installed, but there is the option to
install doc build dependencies with Poetry:
poetry install --with docs

The build itself may be run with:
meson setup <build_dir> -Denable_docs=true
ninja -C <build_dir>

The above will do a full DPDK build with docs. To build just docs:
meson setup <build_dir>
ninja -C <build_dir> dts-doc

Python3.10 is required to build the DTS API docs.

The patchset contains the .rst sources which Sphinx uses to generate the
html pages. These were first generated with the sphinx-apidoc utility
and modified to provide a better look. The documentation just doesn't
look that good without the modifications and there aren't enough
configuration options to achieve that without manual changes to the .rst
files. This introduces extra maintenance which involves adding new .rst
files when a new Python module is added or changing the .rst structure
if the Python directory/file structure is changed (moved, renamed
files). This small maintenance burden is outweighed by the flexibility
afforded by the ability to make manual changes to the .rst files.

v10:
Fix dts doc generation issue: Only copy the custom css file if it exists.

v11:
Added the config option autodoc_mock_imports, which eliminates the need
for DTS dependencies. Added a script that finds out which imports need to
be added to autodoc_mock_imports. The script also checks the required
Python version for building DTS docs.
Removed tags from the two affected patches which will need to be
reviewed again.

v12:
Added paramiko to the required dependencies of get-dts-deps.py.

v13:
Fixed build error:
TypeError: unsupported operand type(s) for |: 'NoneType' and 'Transport'

v14:
Fixed install error:
ERROR: File 'dts/doc/html' could not be found
This required me to put the built docs into dts/doc which is outside the
DPDK API doc dir, resulting in linking between DPDK and DTS api docs not
working properly. I addressed this by adding a symlink to the build dir.
This way the link works after installing the docs and the symlink is
just one extra file in the build dir.

Juraj Linkeš (6):
  dts: update params and parser docstrings
  dts: replace the or operator in third party types
  dts: add doc generation dependencies
  dts: add API doc sources
  doc: meson doc API build dir variable
  dts: add API doc generation

 buildtools/call-sphinx-build.py   |  10 +-
 buildtools/get-dts-deps.py|  78 +++
 buildtools/meson.build|   1 +
 doc/api/doxy-api-index.md |   3 +
 doc/api/doxy-api.conf.in  |   2 +
 doc/api/meson.build   |   8 +-
 doc/guides/conf.py|  41 +-
 doc/guides/contributing/documentation.rst |   2 +
 doc/guides/contributing/patches.rst   |   4 +
 doc/guides/meson.build|   1 +
 doc/guides/tools/dts.rst  |  39 +-
 dts/doc/conf_yaml_schema.json |   1 +
 dts/doc/framework.config.rst  |  12 +
 dts/doc/framework.config.types.rst|   6 +
 dts/doc/framework.exception.rst   |   6 +
 dts/doc/framework.logger.rst  |   6 +
 dts/doc/framework.params.eal.rst  |   6 +
 dts/doc/framework.params.rst  |  14 +
 dts/doc/framework.params.testpmd.rst  |   6 +
 dts/doc/framework.params.types.rst|   6 +
 dts/doc/framework.parser.rst  |   6 +
 .../framework.remote_session.dpdk_shell.rst   |   6 +
 ...ote_session.interactive_remote_session.rst |   6 +
 ...ework.remote_session.interactive_shell.rst |   6 +
 .../framework.remote_session.python_shell.rst |   6 +
 ...ramework.remote_session.remote_session.rst |   6 +
 dts/doc/framework.remote_session.rst  |  18 +
 .../framework.remote_session.ssh_session.rst  |   6 +
 ...framework.remote_session.testpmd_shell.rst |   6 +
 dts/doc/framework.runner.rst  |   6 +
 dts/doc/framework.settings.rst|   6 +
 dts/doc/framework.test_result.rst |   6 +
 dts/doc/framework.test_suite.rst  |   6 +
 dts/doc/framework.testbed_model.cpu.rst   |   6 +
 .../framework.testbed_model.linux_session.rst |   6 +
 dts/doc/framework.testbed_model.node.rst  |   6 +
 .../framework.testbed_model.os_session.rst|   6 +
 dts/doc/framework.testbed_model.port.rst  |   6 +
 .../framework.testbed_model.posix_session.rst |   6 +
 dts/doc/framework.testbed_model.rst   |  26 +
 dts/doc/framework.testbed_model.sut_node.rst  |   6 +
 dts/doc/framework.testbed_model.tg_node.rst   |   6 +
 ..._generator.capturing_traffic_generator.rst |   6 +
 ...mework.testbed_model.traffic_generator.rst |  14 +
 testbed_model.traffic_generator.scapy.rst |   6 +
 ...el.traffic_generator.traffic_generator.rst |   6 +
 ...framework.testbe

[PATCH v14 1/6] dts: update params and parser docstrings

2024-08-06 Thread Juraj Linkeš
Address a few errors reported by Sphinx when generating documentation:
framework/params/__init__.py:docstring of framework.params.modify_str:3:
WARNING: Inline interpreted text or phrase reference start-string
without end-string.
framework/params/eal.py:docstring of framework.params.eal.EalParams:35:
WARNING: Definition list ends without a blank line; unexpected
unindent.
framework/params/types.py:docstring of framework.params.types:8:
WARNING: Inline strong start-string without end-string.
framework/params/types.py:docstring of framework.params.types:9:
WARNING: Inline strong start-string without end-string.
framework/parser.py:docstring of framework.parser.TextParser:33: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:43: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:49: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:8:
ERROR: Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:9:
WARNING: Block quote ends without a blank line; unexpected unindent.

Fixes: 87ba4cdc0dbb ("dts: use Unpack for type checking and hinting")
Fixes: d70159cb62f5 ("dts: add params manipulation module")
Fixes: 967fc62b0a43 ("dts: refactor EAL parameters class")
Fixes: 818fe14e3422 ("dts: add parsing utility module")
Cc: luca.vizza...@arm.com

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
---
 dts/framework/params/__init__.py | 4 ++--
 dts/framework/params/eal.py  | 7 +--
 dts/framework/params/types.py| 3 ++-
 dts/framework/parser.py  | 4 ++--
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/dts/framework/params/__init__.py b/dts/framework/params/__init__.py
index 5a6fd93053..1ae227d7b4 100644
--- a/dts/framework/params/__init__.py
+++ b/dts/framework/params/__init__.py
@@ -53,9 +53,9 @@ def reduced_fn(value):
 
 
 def modify_str(*funcs: FnPtr) -> Callable[[T], T]:
-"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
+r"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
 
-The :attr:`FnPtr`s fed to the decorator are executed from left to right in 
the arguments list
+The :attr:`FnPtr`\s fed to the decorator are executed from left to right 
in the arguments list
 order.
 
 Args:
diff --git a/dts/framework/params/eal.py b/dts/framework/params/eal.py
index 8d7766fefc..cf1594353a 100644
--- a/dts/framework/params/eal.py
+++ b/dts/framework/params/eal.py
@@ -26,13 +26,16 @@ class EalParams(Params):
 prefix: Set the file prefix string with which to start DPDK, e.g.: 
``prefix="vf"``.
 no_pci: Switch to disable PCI bus, e.g.: ``no_pci=True``.
 vdevs: Virtual devices, e.g.::
+
 vdevs=[
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+
 ports: The list of ports to allow.
-other_eal_param: user defined DPDK EAL parameters, e.g.:
-``other_eal_param='--single-file-segments'``
+other_eal_param: user defined DPDK EAL parameters, e.g.::
+
+``other_eal_param='--single-file-segments'``
 """
 
 lcore_list: LogicalCoreList | None = field(default=None, 
metadata=Params.short("l"))
diff --git a/dts/framework/params/types.py b/dts/framework/params/types.py
index e668f658d8..d77c4625fb 100644
--- a/dts/framework/params/types.py
+++ b/dts/framework/params/types.py
@@ -6,7 +6,8 @@
 TypedDicts can be used in conjunction with Unpack and kwargs for type hinting 
on function calls.
 
 Example:
-..code:: python
+.. code:: python
+
 def create_testpmd(**kwargs: Unpack[TestPmdParamsDict]):
 params = TestPmdParams(**kwargs)
 """
diff --git a/dts/framework/parser.py b/dts/framework/parser.py
index 741dfff821..7254c75b71 100644
--- a/dts/framework/parser.py
+++ b/dts/framework/parser.py
@@ -46,7 +46,7 @@ class TextParser(ABC):
 Example:
 The following example makes use of and demonstrates every parser 
function available:
 
-..code:: python
+.. code:: python
 
 from dataclasses import dataclass, field
 from enum import Enum
@@ -90,7 +90,7 @@ def wrap(parser_fn: ParserFn, wrapper_fn: Callable) -> 
ParserFn:
 """Makes a wrapped parser function.
 
 `parser_fn` is called and if a non-None value is returned, 
`wrapper_function` is called with
-it. Otherwise the function returns early with None. In pseudo-code:
+it. Otherwise the function returns early with None. In pseudo-code::
 
 intermediate_value := parser_fn(input)
 if intermediary_value is None then
-- 
2.34.1
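
For reference, a short hypothetical docstring that follows the conventions
the fixes above converge on: a raw string when a literal backslash appears,
a blank line before an indented block, and the code directive spelled
".. code:: python" with the space. The function below is made up:

    def send_all(packets):
        r"""Send ``packets`` one by one, splitting on whitespace (``\s``) if a string is given.

        Example:
            .. code:: python

                send_all([pkt1, pkt2])
        """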



[PATCH v14 2/6] dts: replace the or operator in third party types

2024-08-06 Thread Juraj Linkeš
When the DTS dependencies are not installed while building the DTS API
documentation, the or operator produces errors when used with types from
those libraries:
autodoc: failed to import module 'remote_session' from module
'framework'; the following exception was raised:
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for |: 'Transport' and 'NoneType'

The third-party type here is Transport from the paramiko library.
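
The PEP 604 spelling fails because a class-level annotation is evaluated at
import time, so the | operator is invoked on whatever object Transport is
bound to; when autodoc has mocked paramiko away, that object does not
support the operator, while typing.Union builds the same annotation without
calling __or__ on its arguments. A rough, self-contained illustration; the
stand-in class is made up and the exact mock behaviour depends on the
Sphinx version:

    from typing import Union

    class MockStandIn:
        """Stands in for the object autodoc substitutes when paramiko is missing."""

    mock_instance = MockStandIn()

    try:
        mock_instance | None                 # PEP 604 union, evaluated eagerly
    except TypeError as err:
        print(err)                           # unsupported operand type(s) for |: ...

    annotation = Union[MockStandIn, None]    # typing builds the union, no __or__ needed
    print(annotation)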

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/interactive_remote_session.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/remote_session/interactive_remote_session.py 
b/dts/framework/remote_session/interactive_remote_session.py
index 97194e6af8..4605ee14b4 100644
--- a/dts/framework/remote_session/interactive_remote_session.py
+++ b/dts/framework/remote_session/interactive_remote_session.py
@@ -5,6 +5,7 @@
 
 import socket
 import traceback
+from typing import Union
 
 from paramiko import AutoAddPolicy, SSHClient, Transport  # type: 
ignore[import-untyped]
 from paramiko.ssh_exception import (  # type: ignore[import-untyped]
@@ -52,7 +53,7 @@ class InteractiveRemoteSession:
 session: SSHClient
 _logger: DTSLogger
 _node_config: NodeConfiguration
-_transport: Transport | None
+_transport: Union[Transport, None]
 
 def __init__(self, node_config: NodeConfiguration, logger: DTSLogger) -> 
None:
 """Connect to the node during initialization.
-- 
2.34.1



[PATCH v14 3/6] dts: add doc generation dependencies

2024-08-06 Thread Juraj Linkeš
Sphinx imports every Python module (through the autodoc extension)
when generating documentation from docstrings, meaning all DTS
dependencies, including Python version, should be satisfied. This is not
a hard requirement, as imports from dependencies may be mocked in the
autodoc_mock_imports autodoc option.
In case DTS developers want to use a Sphinx installation from their
virtualenv, we provide an optional Poetry group for doc generation. The
pyelftools package is there so that meson picks up the correct Python
installation, as pyelftools is required by the build system.

Signed-off-by: Juraj Linkeš 
---
 dts/poetry.lock| 521 +++--
 dts/pyproject.toml |   8 +
 2 files changed, 517 insertions(+), 12 deletions(-)

diff --git a/dts/poetry.lock b/dts/poetry.lock
index 5f8fa03933..2dd8bad498 100644
--- a/dts/poetry.lock
+++ b/dts/poetry.lock
@@ -1,5 +1,16 @@
 # This file is automatically @generated by Poetry 1.8.2 and should not be 
changed by hand.
 
+[[package]]
+name = "alabaster"
+version = "0.7.13"
+description = "A configurable sidebar-enabled Sphinx theme"
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "alabaster-0.7.13-py3-none-any.whl", hash = 
"sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
+{file = "alabaster-0.7.13.tar.gz", hash = 
"sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+]
+
 [[package]]
 name = "attrs"
 version = "23.1.0"
@@ -18,6 +29,23 @@ docs = ["furo", "myst-parser", "sphinx", 
"sphinx-notfound-page", "sphinxcontrib-
 tests = ["attrs[tests-no-zope]", "zope-interface"]
 tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", 
"pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
 
+[[package]]
+name = "babel"
+version = "2.13.1"
+description = "Internationalization utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+{file = "Babel-2.13.1-py3-none-any.whl", hash = 
"sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
+{file = "Babel-2.13.1.tar.gz", hash = 
"sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+]
+
+[package.dependencies]
+setuptools = {version = "*", markers = "python_version >= \"3.12\""}
+
+[package.extras]
+dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
+
 [[package]]
 name = "bcrypt"
 version = "4.0.1"
@@ -86,6 +114,17 @@ d = ["aiohttp (>=3.7.4)"]
 jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
 uvloop = ["uvloop (>=0.15.2)"]
 
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "certifi-2023.7.22-py3-none-any.whl", hash = 
"sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+{file = "certifi-2023.7.22.tar.gz", hash = 
"sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
 [[package]]
 name = "cffi"
 version = "1.15.1"
@@ -162,6 +201,105 @@ files = [
 [package.dependencies]
 pycparser = "*"
 
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and 
actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+{file = "charset-normalizer-3.3.2.tar.gz", hash = 
"sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", 
hash = 
"sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = 
"sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash 
= "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl",
 hash = 
"sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl",
 hash = 
"sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl",
 hash = 
"sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl",
 hash = 
"sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl",
 hash = 
"sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+{file = "charset_normalizer-3.3.2-cp310-cp

[PATCH v14 5/6] doc: meson doc API build dir variable

2024-08-06 Thread Juraj Linkeš
The three instances of the path 'dpdk_build_root/doc/api' are replaced
with a variable, moving the definition to one place.

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
Acked-by: Bruce Richardson 
Tested-by: Luca Vizzarro 
Tested-by: Nicholas Pratte 
---
 doc/api/meson.build | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/doc/api/meson.build b/doc/api/meson.build
index 5b50692df9..b828b1ed66 100644
--- a/doc/api/meson.build
+++ b/doc/api/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2018 Luca Boccassi 
 
+doc_api_build_dir = meson.current_build_dir()
 doxygen = find_program('doxygen', required: get_option('enable_docs'))
 
 if not doxygen.found()
@@ -32,10 +33,10 @@ example = custom_target('examples.dox',
 # set up common Doxygen configuration
 cdata = configuration_data()
 cdata.set('VERSION', meson.project_version())
-cdata.set('API_EXAMPLES', join_paths(dpdk_build_root, 'doc', 'api', 
'examples.dox'))
-cdata.set('OUTPUT', join_paths(dpdk_build_root, 'doc', 'api'))
+cdata.set('API_EXAMPLES', join_paths(doc_api_build_dir, 'examples.dox'))
+cdata.set('OUTPUT', doc_api_build_dir)
 cdata.set('TOPDIR', dpdk_source_root)
-cdata.set('STRIP_FROM_PATH', ' '.join([dpdk_source_root, 
join_paths(dpdk_build_root, 'doc', 'api')]))
+cdata.set('STRIP_FROM_PATH', ' '.join([dpdk_source_root, doc_api_build_dir]))
 cdata.set('WARN_AS_ERROR', 'NO')
 if get_option('werror')
 cdata.set('WARN_AS_ERROR', 'YES')
-- 
2.34.1



[PATCH v14 4/6] dts: add API doc sources

2024-08-06 Thread Juraj Linkeš
These sources could be generated with the sphinx-apidoc utility, but
that doesn't give us enough flexibility, such as sorting the order of
modules or changing the headers of the modules.

The sources included in this patch were in fact generated by said
utility, but modified to improve the look of the documentation. The
improvements are mainly in toctree definitions and the titles of the
modules/packages. These were made with specific Sphinx config options in
mind.

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
Tested-by: Luca Vizzarro 
---
 dts/doc/conf_yaml_schema.json |  1 +
 dts/doc/framework.config.rst  | 12 ++
 dts/doc/framework.config.types.rst|  6 +++
 dts/doc/framework.exception.rst   |  6 +++
 dts/doc/framework.logger.rst  |  6 +++
 dts/doc/framework.params.eal.rst  |  6 +++
 dts/doc/framework.params.rst  | 14 ++
 dts/doc/framework.params.testpmd.rst  |  6 +++
 dts/doc/framework.params.types.rst|  6 +++
 dts/doc/framework.parser.rst  |  6 +++
 .../framework.remote_session.dpdk_shell.rst   |  6 +++
 ...ote_session.interactive_remote_session.rst |  6 +++
 ...ework.remote_session.interactive_shell.rst |  6 +++
 .../framework.remote_session.python_shell.rst |  6 +++
 ...ramework.remote_session.remote_session.rst |  6 +++
 dts/doc/framework.remote_session.rst  | 18 
 .../framework.remote_session.ssh_session.rst  |  6 +++
 ...framework.remote_session.testpmd_shell.rst |  6 +++
 dts/doc/framework.runner.rst  |  6 +++
 dts/doc/framework.settings.rst|  6 +++
 dts/doc/framework.test_result.rst |  6 +++
 dts/doc/framework.test_suite.rst  |  6 +++
 dts/doc/framework.testbed_model.cpu.rst   |  6 +++
 .../framework.testbed_model.linux_session.rst |  6 +++
 dts/doc/framework.testbed_model.node.rst  |  6 +++
 .../framework.testbed_model.os_session.rst|  6 +++
 dts/doc/framework.testbed_model.port.rst  |  6 +++
 .../framework.testbed_model.posix_session.rst |  6 +++
 dts/doc/framework.testbed_model.rst   | 26 +++
 dts/doc/framework.testbed_model.sut_node.rst  |  6 +++
 dts/doc/framework.testbed_model.tg_node.rst   |  6 +++
 ..._generator.capturing_traffic_generator.rst |  6 +++
 ...mework.testbed_model.traffic_generator.rst | 14 ++
 testbed_model.traffic_generator.scapy.rst |  6 +++
 ...el.traffic_generator.traffic_generator.rst |  6 +++
 ...framework.testbed_model.virtual_device.rst |  6 +++
 dts/doc/framework.utils.rst   |  6 +++
 dts/doc/index.rst | 43 +++
 38 files changed, 314 insertions(+)
 create mode 12 dts/doc/conf_yaml_schema.json
 create mode 100644 dts/doc/framework.config.rst
 create mode 100644 dts/doc/framework.config.types.rst
 create mode 100644 dts/doc/framework.exception.rst
 create mode 100644 dts/doc/framework.logger.rst
 create mode 100644 dts/doc/framework.params.eal.rst
 create mode 100644 dts/doc/framework.params.rst
 create mode 100644 dts/doc/framework.params.testpmd.rst
 create mode 100644 dts/doc/framework.params.types.rst
 create mode 100644 dts/doc/framework.parser.rst
 create mode 100644 dts/doc/framework.remote_session.dpdk_shell.rst
 create mode 100644 
dts/doc/framework.remote_session.interactive_remote_session.rst
 create mode 100644 dts/doc/framework.remote_session.interactive_shell.rst
 create mode 100644 dts/doc/framework.remote_session.python_shell.rst
 create mode 100644 dts/doc/framework.remote_session.remote_session.rst
 create mode 100644 dts/doc/framework.remote_session.rst
 create mode 100644 dts/doc/framework.remote_session.ssh_session.rst
 create mode 100644 dts/doc/framework.remote_session.testpmd_shell.rst
 create mode 100644 dts/doc/framework.runner.rst
 create mode 100644 dts/doc/framework.settings.rst
 create mode 100644 dts/doc/framework.test_result.rst
 create mode 100644 dts/doc/framework.test_suite.rst
 create mode 100644 dts/doc/framework.testbed_model.cpu.rst
 create mode 100644 dts/doc/framework.testbed_model.linux_session.rst
 create mode 100644 dts/doc/framework.testbed_model.node.rst
 create mode 100644 dts/doc/framework.testbed_model.os_session.rst
 create mode 100644 dts/doc/framework.testbed_model.port.rst
 create mode 100644 dts/doc/framework.testbed_model.posix_session.rst
 create mode 100644 dts/doc/framework.testbed_model.rst
 create mode 100644 dts/doc/framework.testbed_model.sut_node.rst
 create mode 100644 dts/doc/framework.testbed_model.tg_node.rst
 create mode 100644 
dts/doc/framework.testbed_model.traffic_generator.capturing_traffic_generator.rst
 create mode 100644 dts/doc/framework.testbed_model.traffic_generator.rst
 create mode 100644 dts/doc/framework.testbed_model.traffic_generator.scapy.rst
 create mode 100644 
dts/doc/framework.testbed_model.traffic_generator.traffic_generator

[PATCH v14 6/6] dts: add API doc generation

2024-08-06 Thread Juraj Linkeš
The tool used to generate DTS API docs is Sphinx, which is already in
use in DPDK. The same configuration is used to preserve style with one
DTS-specific configuration (so that the DPDK docs are unchanged) that
modifies how the sidebar displays the content.

Sphinx generates the documentation from Python docstrings. The docstring
format is the Google format [0] which requires the sphinx.ext.napoleon
extension. The other extension, sphinx.ext.intersphinx, enables linking
to objects in external documentations, such as the Python documentation.

There is one requirement for building DTS docs - the same Python version
as DTS or higher, because Sphinx's autodoc extension imports the code.

The dependencies needed to import the code don't have to be satisfied,
as the autodoc extension allows us to mock the imports. The missing
packages are taken from the DTS pyproject.toml file.

The generated DTS API docs are linked with the DPDK API docs according
to their placement after installing them with 'meson install'. However,
the build path differs from the install path, requiring a symlink from
DPDK API doc build path to DTS API build path to produce the proper link
in the build directory.

[0] https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings
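
A rough sketch, under stated assumptions, of how get_missing_imports() is
presumably consumed on the Sphinx side (the patch modifies doc/guides/conf.py
and sets DTS_BUILD in call-sphinx-build.py, but the exact wiring below, the
guard and the relative path, are guesses):

    # Hypothetical conf.py fragment; paths and guard are assumptions.
    import importlib
    import os
    import sys

    if os.environ.get('DTS_BUILD') == 'y':
        sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'buildtools'))
        # the hyphenated file name cannot be used with a plain import statement
        get_dts_deps = importlib.import_module('get-dts-deps')
        # dependencies that are not installed get mocked instead of imported
        autodoc_mock_imports = get_dts_deps.get_missing_imports()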

Signed-off-by: Juraj Linkeš 
---
 buildtools/call-sphinx-build.py   | 10 ++-
 buildtools/get-dts-deps.py| 78 +++
 buildtools/meson.build|  1 +
 doc/api/doxy-api-index.md |  3 +
 doc/api/doxy-api.conf.in  |  2 +
 doc/api/meson.build   |  1 +
 doc/guides/conf.py| 41 +++-
 doc/guides/contributing/documentation.rst |  2 +
 doc/guides/contributing/patches.rst   |  4 ++
 doc/guides/meson.build|  1 +
 doc/guides/tools/dts.rst  | 39 +++-
 dts/doc/meson.build   | 43 +
 dts/meson.build   | 15 +
 meson.build   |  1 +
 14 files changed, 238 insertions(+), 3 deletions(-)
 create mode 100755 buildtools/get-dts-deps.py
 create mode 100644 dts/doc/meson.build
 create mode 100644 dts/meson.build

diff --git a/buildtools/call-sphinx-build.py b/buildtools/call-sphinx-build.py
index 623e7363ee..5dd59907cd 100755
--- a/buildtools/call-sphinx-build.py
+++ b/buildtools/call-sphinx-build.py
@@ -15,6 +15,11 @@
 
 # set the version in environment for sphinx to pick up
 os.environ['DPDK_VERSION'] = version
+conf_src = src
+if src.find('dts') != -1:
+if '-c' in extra_args:
+conf_src = extra_args[extra_args.index('-c') + 1]
+os.environ['DTS_BUILD'] = "y"
 
 sphinx_cmd = [sphinx] + extra_args
 
@@ -23,6 +28,9 @@
 for root, dirs, files in os.walk(src):
 srcfiles.extend([join(root, f) for f in files])
 
+if not os.path.exists(dst):
+os.makedirs(dst)
+
 # run sphinx, putting the html output in a "html" directory
 with open(join(dst, 'sphinx_html.out'), 'w') as out:
 process = run(sphinx_cmd + ['-b', 'html', src, join(dst, 'html')],
@@ -34,7 +42,7 @@
 
 # copy custom CSS file
 css = 'custom.css'
-src_css = join(src, css)
+src_css = join(conf_src, css)
 dst_css = join(dst, 'html', '_static', 'css', css)
 if not os.path.exists(dst_css) or not filecmp.cmp(src_css, dst_css):
 os.makedirs(os.path.dirname(dst_css), exist_ok=True)
diff --git a/buildtools/get-dts-deps.py b/buildtools/get-dts-deps.py
new file mode 100755
index 00..309b83cb5c
--- /dev/null
+++ b/buildtools/get-dts-deps.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 PANTHEON.tech s.r.o.
+#
+
+"""Utilities for DTS dependencies.
+
+The module can be used as an executable script,
+which verifies that the running Python version meets the version requirement 
of DTS.
+The script returns the standard exit codes in this mode (0 is success, 1 is 
failure).
+
+The module also contains a function, get_missing_imports,
+which looks for runtime and doc generation dependencies in the DTS 
pyproject.toml file
+and returns a list of module names used in an import statement that are missing.
+"""
+
+import configparser
+import importlib.metadata
+import importlib.util
+import os.path
+import platform
+
+_VERSION_COMPARISON_CHARS = '^<>='
+_EXTRA_DEPS = {'invoke': '>=1.3', 'paramiko': '>=2.4'}
+_DPDK_ROOT = os.path.dirname(os.path.dirname(__file__))
+_DTS_DEP_FILE_PATH = os.path.join(_DPDK_ROOT, 'dts', 'pyproject.toml')
+
+
+def _get_version_tuple(version_str):
+return tuple(map(int, version_str.split(".")))
+
+
+def _get_dependencies(cfg_file_path):
+cfg = configparser.ConfigParser()
+with open(cfg_file_path) as f:
+dts_deps_file_str = f.read()
+dts_deps_file_str = dts_deps_file_str.replace("\n]", "]")
+cfg.read_string(dts_deps_file_str)
+
+deps_section = cfg['tool.poetry.dependencies']
+deps = {dep: deps_section[dep].strip('"\

[PATCH 0/4] dts: add pktgen and testpmd changes

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

Hello,

sending some framework changes that will be required in my upcoming
l2fwd test suite.

Best,
Luca

Luca Vizzarro (5):
  dts: add ability to send/receive multiple packets
  dts: add random generation seed setting
  dts: add random packet generator
  dts: add ability to start/stop testpmd ports
  dts: add testpmd set ports queues

 doc/guides/tools/dts.rst  |   5 +
 dts/framework/config/__init__.py  |   4 +
 dts/framework/config/conf_yaml_schema.json|   4 +
 dts/framework/config/types.py |   2 +
 dts/framework/remote_session/testpmd_shell.py | 102 +-
 dts/framework/runner.py   |   8 ++
 dts/framework/settings.py |  17 +++
 dts/framework/test_suite.py   |  68 +++-
 dts/framework/testbed_model/tg_node.py|  14 +--
 .../capturing_traffic_generator.py|  31 --
 dts/framework/utils.py|  79 +-
 11 files changed, 288 insertions(+), 46 deletions(-)

-- 
2.34.1



[PATCH 1/5] dts: add ability to send/receive multiple packets

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

The framework only allows sending one packet at a time via Scapy. This
change adds the ability to send multiple packets, and also introduces a
new, fast way to verify that several expected packets were received.

Moreover, it reduces code duplication by keeping a single packet-sending
method at the test suite level only.
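
As a usage sketch, a hypothetical test case body showing the call shape of
the two additions; the suite, packet contents and counts are made up, and
whether the expected list needs the same address adjustment as the sent one
is left to the framework:

    from scapy.layers.inet import IP
    from scapy.layers.l2 import Ether
    from scapy.packet import Raw

    def test_burst_forwarding(self):  # method of a hypothetical TestSuite subclass
        """Send ten distinct packets in one call and verify all of them came back."""
        packets = [Ether() / IP() / Raw(load=str(i)) for i in range(10)]
        received = self.send_packets_and_capture(packets)
        # noise is ignored; only missing expected packets fail the case
        self.match_all_packets(packets, received)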

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczpanek 
Reviewed-by: Alex Chapman 
---
 dts/framework/test_suite.py   | 68 +--
 dts/framework/testbed_model/tg_node.py| 14 ++--
 .../capturing_traffic_generator.py| 31 -
 3 files changed, 71 insertions(+), 42 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 694b2eba65..051509fb86 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -13,12 +13,13 @@
 * Test case verification.
 """
 
+from collections import Counter
 from ipaddress import IPv4Interface, IPv6Interface, ip_interface
 from typing import ClassVar, Union
 
 from scapy.layers.inet import IP  # type: ignore[import-untyped]
 from scapy.layers.l2 import Ether  # type: ignore[import-untyped]
-from scapy.packet import Packet, Padding  # type: ignore[import-untyped]
+from scapy.packet import Packet, Padding, raw  # type: ignore[import-untyped]
 
 from framework.testbed_model.port import Port, PortLink
 from framework.testbed_model.sut_node import SutNode
@@ -199,9 +200,34 @@ def send_packet_and_capture(
 Returns:
 A list of received packets.
 """
-packet = self._adjust_addresses(packet)
-return self.tg_node.send_packet_and_capture(
-packet,
+return self.send_packets_and_capture(
+[packet],
+filter_config,
+duration,
+)
+
+def send_packets_and_capture(
+self,
+packets: list[Packet],
+filter_config: PacketFilteringConfig = PacketFilteringConfig(),
+duration: float = 1,
+) -> list[Packet]:
+"""Send and receive `packets` using the associated TG.
+
+Send `packets` through the appropriate interface and receive on the 
appropriate interface.
+Modify the packets with l3/l2 addresses corresponding to the testbed 
and desired traffic.
+
+Args:
+packets: The packets to send.
+filter_config: The filter to use when capturing packets.
+duration: Capture traffic for this amount of time after sending 
`packets`.
+
+Returns:
+A list of received packets.
+"""
+packets = [self._adjust_addresses(packet) for packet in packets]
+return self.tg_node.send_packets_and_capture(
+packets,
 self._tg_port_egress,
 self._tg_port_ingress,
 filter_config,
@@ -303,6 +329,40 @@ def verify_packets(self, expected_packet: Packet, 
received_packets: list[Packet]
 )
 self._fail_test_case_verify("An expected packet not found among 
received packets.")
 
+def match_all_packets(
+self, expected_packets: list[Packet], received_packets: list[Packet]
+) -> None:
+"""Matches all the expected packets against the received ones.
+
+Matching is performed by counting down the occurrences in a dictionary 
whose keys are the
+raw packet bytes. No deep packet comparison is performed. All the 
unexpected packets (noise)
+are automatically ignored.
+
+Args:
+expected_packets: The packets we are expecting to receive.
+received_packets: All the packets that were received.
+
+Raises:
+TestCaseVerifyError: if not all the `expected_packets` were 
found in
+`received_packets`.
+"""
+expected_packets_counters = Counter(map(raw, expected_packets))
+received_packets_counters = Counter(map(raw, received_packets))
+# The number of received packets is subtracted from the number of 
expected packets, ignoring
+# any unexpected packets and capping at zero.
+missing_packets_counters = expected_packets_counters - 
received_packets_counters
+missing_packets_count = missing_packets_counters.total()
+self._logger.debug(
+f"match_all_packets: expected {len(expected_packets)}, "
+f"received {len(received_packets)}, missing 
{missing_packets_count}"
+)
+
+if missing_packets_count != 0:
+self._fail_test_case_verify(
+f"Not all packets were received, expected 
{len(expected_packets)} "
+f"but {missing_packets_count} were missing."
+)
+
 def _compare_packets(self, expected_packet: Packet, received_packet: 
Packet) -> bool:
 self._logger.debug(
 f"Comparing packets: 
\n{expected_packet.summary()}\n{received_packet.summary()}"
diff --git a/dts/framework/testbed_model/tg_node.py 
b/dts/framework/testbed_

[PATCH 2/5] dts: add random generation seed setting

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

When introducing pseudo-random generation in the test runs, we need to
ensure that these runs can be reproduced by setting a pre-defined seed.
This commit adds the ability to set one, or to have one generated and
reported back to the user.
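
The seed is then applied once at start-up, roughly as follows (a sketch of
the runner logic only; the exact attribute names in runner.py and
settings.py may differ):

    import random

    # Prefer the --random-seed/DTS_RANDOM_SEED value, then the test run
    # configuration; otherwise generate a fresh seed so that it can be
    # reported and reused to reproduce the run.
    seed = settings.random_seed
    if seed is None:
        seed = test_run_config.random_seed
    if seed is None:
        seed = random.randrange(0xFFFF_FFFF)
    logger.info(f"Initializing test run with random seed {seed}")
    random.seed(seed)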

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
Reviewed-by: Alex Chapman 
---
 doc/guides/tools/dts.rst   |  5 +
 dts/framework/config/__init__.py   |  4 
 dts/framework/config/conf_yaml_schema.json |  4 
 dts/framework/config/types.py  |  2 ++
 dts/framework/runner.py|  8 
 dts/framework/settings.py  | 17 +
 6 files changed, 40 insertions(+)

diff --git a/doc/guides/tools/dts.rst b/doc/guides/tools/dts.rst
index 515b15e4d8..9b5ea9779c 100644
--- a/doc/guides/tools/dts.rst
+++ b/doc/guides/tools/dts.rst
@@ -251,6 +251,8 @@ DTS is run with ``main.py`` located in the ``dts`` 
directory after entering Poet
... | DTS_TEST_SUITES='suite, suite case, ...' 
(default: [])
  --re-run N_TIMES, --re_run N_TIMES
[DTS_RERUN] Re-run each test case the specified 
number of times if a test failure occurs. (default: 0)
+ --random-seed NUMBER  [DTS_RANDOM_SEED] The seed to use with the 
pseudo-random generator. If not specified, the configuration value is
+   used instead. If that's also not specified, a 
random seed is generated. (default: None)
 
 
 The brackets contain the names of environment variables that set the same 
thing.
@@ -548,6 +550,9 @@ involved in the testing. These can be defined with the 
following mappings:

++---+---+
| ``traffic_generator_node`` | Node name for the traffic generator node.
 |

++---+
+   | ``random_seed``| (*optional*) *int* – Allows you to set a 
seed for pseudo-random   |
+   || generation.  
 |
+   
++---+
 
 ``nodes``
`sequence 
`_
 listing
diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py
index df60a5030e..269d9ec318 100644
--- a/dts/framework/config/__init__.py
+++ b/dts/framework/config/__init__.py
@@ -445,6 +445,7 @@ class TestRunConfiguration:
 system_under_test_node: The SUT node to use in this test run.
 traffic_generator_node: The TG node to use in this test run.
 vdevs: The names of virtual devices to test.
+random_seed: The seed to use for pseudo-random generation.
 """
 
 build_targets: list[BuildTargetConfiguration]
@@ -455,6 +456,7 @@ class TestRunConfiguration:
 system_under_test_node: SutNodeConfiguration
 traffic_generator_node: TGNodeConfiguration
 vdevs: list[str]
+random_seed: int | None
 
 @classmethod
 def from_dict(
@@ -497,6 +499,7 @@ def from_dict(
 vdevs = (
 d["system_under_test_node"]["vdevs"] if "vdevs" in 
d["system_under_test_node"] else []
 )
+random_seed = d.get("random_seed", None)
 return cls(
 build_targets=build_targets,
 perf=d["perf"],
@@ -506,6 +509,7 @@ def from_dict(
 system_under_test_node=system_under_test_node,
 traffic_generator_node=traffic_generator_node,
 vdevs=vdevs,
+random_seed=random_seed,
 )
 
 def copy_and_modify(self, **kwargs) -> Self:
diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json
index f02a310bb5..df390e8ae2 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -379,6 +379,10 @@
   },
   "traffic_generator_node": {
 "$ref": "#/definitions/node_name"
+  },
+  "random_seed": {
+"type": "integer",
+"description": "Optional field. Allows you to set a seed for 
pseudo-random generation."
   }
 },
 "additionalProperties": false,
diff --git a/dts/framework/config/types.py b/dts/framework/config/types.py
index cf16556403..ce7b784ac8 100644
--- a/dts/framework/config/types.py
+++ b/dts/framework/config/types.py
@@ -121,6 +121,8 @@ class TestRunConfigDict(TypedDict):
 system_under_test_node: TestRunSUTConfigDict
 #:
 traffic_generator_node: str
+#:
+random_seed: int
 
 
 class ConfigurationDict(TypedDict):
diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 6b6f6a05f5..34b1dad5c4 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py

[PATCH 3/5] dts: add random packet generator

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

Add a basic utility that can create random L3 and L4 packets with random
payloads and port numbers (if L4).
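
For example, using only what this patch adds:

    from framework.utils import PacketProtocols, generate_random_packets

    # Twenty UDP-only packets with 64-byte random payloads, sized for a
    # 1500-byte MTU; L4 ports are picked at random from the default
    # range(1024, 49152).
    packets = generate_random_packets(
        20, payload_size=64, protocols=PacketProtocols.UDP, mtu=1500
    )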

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
Reviewed-by: Alex Chapman 
---
 dts/framework/utils.py | 79 --
 1 file changed, 77 insertions(+), 2 deletions(-)

diff --git a/dts/framework/utils.py b/dts/framework/utils.py
index 6b5d5a805f..c768dd0c99 100644
--- a/dts/framework/utils.py
+++ b/dts/framework/utils.py
@@ -17,14 +17,16 @@
 import atexit
 import json
 import os
+import random
 import subprocess
-from enum import Enum
+from enum import Enum, Flag
 from pathlib import Path
 from subprocess import SubprocessError
 
+from scapy.layers.inet import IP, TCP, UDP, Ether  # type: 
ignore[import-untyped]
 from scapy.packet import Packet  # type: ignore[import-untyped]
 
-from .exception import ConfigurationError
+from .exception import ConfigurationError, InternalError
 
 REGEX_FOR_PCI_ADDRESS: str = 
"/[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}.[0-9]{1}/"
 
@@ -244,3 +246,76 @@ def _delete_tarball(self) -> None:
 def __fspath__(self) -> str:
 """The os.PathLike protocol implementation."""
 return str(self._tarball_path)
+
+
+class PacketProtocols(Flag):
+"""Flag specifying which protocols to use for packet generation."""
+
+#:
+IP = 1
+#:
+TCP = 2 | IP
+#:
+UDP = 4 | IP
+#:
+ALL = TCP | UDP
+
+
+def generate_random_packets(
+number_of: int,
+payload_size: int = 1500,
+protocols: PacketProtocols = PacketProtocols.ALL,
+ports_range: range = range(1024, 49152),
+mtu: int = 1500,
+) -> list[Packet]:
+"""Generate a number of random packets.
+
+The payload of the packets will consist of random bytes. If `payload_size` 
is too big, then the
+maximum payload size allowed for the specific packet type is used. The 
size is calculated based
+on the specified `mtu`, therefore it is essential that `mtu` is set 
correctly to match the MTU
+of the port that will send out the generated packets.
+
+If `protocols` has any L4 protocol enabled then all the packets are 
generated with any of
+the specified L4 protocols chosen at random. If only 
:attr:`~PacketProtocols.IP` is set, then
+only L3 packets are generated.
+
+If L4 packets will be generated, then the TCP/UDP ports to be used will be 
chosen at random from
+`ports_range`.
+
+Args:
+number_of: The number of packets to generate.
+payload_size: The packet payload size to generate, capped based on 
`mtu`.
+protocols: The protocols to use for the generated packets.
+ports_range: The range of L4 port numbers to use. Used only if 
`protocols` has L4 protocols.
+mtu: The MTU of the NIC port that will send out the generated packets.
+
+Raises:
+InternalError: If the `payload_size` is invalid.
+
+Returns:
+A list containing the randomly generated packets.
+"""
+if payload_size < 0:
+raise InternalError(f"An invalid payload_size of {payload_size} was 
given.")
+
+l4_factories = []
+if protocols & PacketProtocols.TCP:
+l4_factories.append(TCP)
+if protocols & PacketProtocols.UDP:
+l4_factories.append(UDP)
+
+def _make_packet() -> Packet:
+packet = Ether()
+
+if protocols & PacketProtocols.IP:
+packet /= IP()
+
+if len(l4_factories) > 0:
+src_port, dst_port = random.choices(ports_range, k=2)
+packet /= random.choice(l4_factories)(sport=src_port, 
dport=dst_port)
+
+max_payload_size = mtu - len(packet)
+usable_payload_size = payload_size if payload_size < max_payload_size 
else max_payload_size
+return packet / random.randbytes(usable_payload_size)
+
+return [_make_packet() for _ in range(number_of)]
-- 
2.34.1



[PATCH 4/5] dts: add ability to start/stop ports

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

Add the commands to start and stop all the ports, so that they can be
configured. Because some commands require the ports to be stopped and
others require them to be started, also add decorators for commands that
require a specific state, removing this logic from the test writer's duty.
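
For example, the last patch of this series uses the new decorator so that
reconfiguring the queues transparently stops the ports first (condensed
from that patch):

    @requires_stopped_ports
    def set_ports_queues(self, number_of: int) -> None:
        """Set the number of RX/TX queues per port; stops the ports if needed."""
        self.send_command(f"port config all rxq {number_of}")
        self.send_command(f"port config all txq {number_of}")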

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
Reviewed-by: Alex Chapman 
---
 dts/framework/remote_session/testpmd_shell.py | 86 ++-
 1 file changed, 84 insertions(+), 2 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index eda6eb320f..293c7b9dff 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -14,16 +14,17 @@
 testpmd_shell.close()
 """
 
+import functools
 import re
 import time
 from dataclasses import dataclass, field
 from enum import Flag, auto
 from pathlib import PurePath
-from typing import ClassVar
+from typing import Any, Callable, ClassVar, Concatenate, ParamSpec
 
 from typing_extensions import Self, Unpack
 
-from framework.exception import InteractiveCommandExecutionError
+from framework.exception import InteractiveCommandExecutionError, InternalError
 from framework.params.testpmd import SimpleForwardingModes, TestPmdParams
 from framework.params.types import TestPmdParamsDict
 from framework.parser import ParserFn, TextParser
@@ -33,6 +34,9 @@
 from framework.testbed_model.sut_node import SutNode
 from framework.utils import StrEnum
 
+P = ParamSpec("P")
+TestPmdShellMethod = Callable[Concatenate["TestPmdShell", P], Any]
+
 
 class TestPmdDevice:
 """The data of a device that testpmd can recognize.
@@ -577,12 +581,51 @@ class TestPmdPortStats(TextParser):
 tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)"))
 
 
+def requires_stopped_ports(func: TestPmdShellMethod) -> TestPmdShellMethod:
+"""Decorator for :class:`TestPmdShell` command methods that require 
stopped ports.
+
+If the decorated method is called while the ports are started, then these 
are stopped before
+continuing.
+"""
+
+@functools.wraps(func)
+def _wrapper(self: "TestPmdShell", *args: P.args, **kwargs: P.kwargs):
+if self.ports_started:
+self._logger.debug("Ports need to be stopped to continue")
+self.stop_all_ports()
+
+return func(self, *args, **kwargs)
+
+return _wrapper
+
+
+def requires_started_ports(func: TestPmdShellMethod) -> TestPmdShellMethod:
+"""Decorator for :class:`TestPmdShell` command methods that require 
started ports.
+
+If the decorated method is called while the ports are stopped, then these 
are started before
+continuing.
+"""
+
+@functools.wraps(func)
+def _wrapper(self: "TestPmdShell", *args: P.args, **kwargs: P.kwargs):
+if not self.ports_started:
+self._logger.debug("Ports need to be started to continue")
+self.start_all_ports()
+
+return func(self, *args, **kwargs)
+
+return _wrapper
+
+
 class TestPmdShell(DPDKShell):
 """Testpmd interactive shell.
 
 The testpmd shell users should never use
 the :meth:`~.interactive_shell.InteractiveShell.send_command` method 
directly, but rather
 call specialized methods. If there isn't one that satisfies a need, it 
should be added.
+
+Attributes:
+ports_started: Indicates whether the ports are started.
 """
 
 _app_params: TestPmdParams
@@ -617,6 +660,9 @@ def __init__(
 TestPmdParams(**app_params),
 )
 
+self.ports_started = not self._app_params.disable_device_start
+
+@requires_started_ports
 def start(self, verify: bool = True) -> None:
 """Start packet forwarding with the current configuration.
 
@@ -721,6 +767,42 @@ def set_forward_mode(self, mode: SimpleForwardingModes, 
verify: bool = True):
 f"Test pmd failed to set fwd mode to {mode.value}"
 )
 
+def stop_all_ports(self, verify: bool = True) -> None:
+"""Stops all the ports.
+
+Args:
+verify: If :data:`True`, the output of the command will be checked 
for a successful
+execution.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
the ports were not
+stopped successfully.
+"""
+self._logger.debug("Stopping all the ports...")
+output = self.send_command("port stop all")
+if verify and not output.strip().endswith("Done"):
+raise InteractiveCommandExecutionError("Ports were not stopped 
successfully")
+
+self.ports_started = False
+
+def start_all_ports(self, verify: bool = True) -> None:
+"""Starts all the ports.
+
+Args:
+verify: If :data:`True`, the output of the command will be checked 
for a successful
+execution.
+
+Raises:
+In

[PATCH 4/5] dts: add ability to start/stop testpmd ports

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

Add testpmd commands to start and stop all the ports, so that they can
be configured. Because some commands require the ports to be stopped and
others require them to be started, also add decorators for commands that
require a specific state, removing this logic from the test writer's duty.

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
---
 dts/framework/remote_session/testpmd_shell.py | 86 ++-
 1 file changed, 84 insertions(+), 2 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index eda6eb320f..293c7b9dff 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -14,16 +14,17 @@
 testpmd_shell.close()
 """
 
+import functools
 import re
 import time
 from dataclasses import dataclass, field
 from enum import Flag, auto
 from pathlib import PurePath
-from typing import ClassVar
+from typing import Any, Callable, ClassVar, Concatenate, ParamSpec
 
 from typing_extensions import Self, Unpack
 
-from framework.exception import InteractiveCommandExecutionError
+from framework.exception import InteractiveCommandExecutionError, InternalError
 from framework.params.testpmd import SimpleForwardingModes, TestPmdParams
 from framework.params.types import TestPmdParamsDict
 from framework.parser import ParserFn, TextParser
@@ -33,6 +34,9 @@
 from framework.testbed_model.sut_node import SutNode
 from framework.utils import StrEnum
 
+P = ParamSpec("P")
+TestPmdShellMethod = Callable[Concatenate["TestPmdShell", P], Any]
+
 
 class TestPmdDevice:
 """The data of a device that testpmd can recognize.
@@ -577,12 +581,51 @@ class TestPmdPortStats(TextParser):
 tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)"))
 
 
+def requires_stopped_ports(func: TestPmdShellMethod) -> TestPmdShellMethod:
+"""Decorator for :class:`TestPmdShell` command methods that require 
stopped ports.
+
+If the decorated method is called while the ports are started, then these 
are stopped before
+continuing.
+"""
+
+@functools.wraps(func)
+def _wrapper(self: "TestPmdShell", *args: P.args, **kwargs: P.kwargs):
+if self.ports_started:
+self._logger.debug("Ports need to be stopped to continue")
+self.stop_all_ports()
+
+return func(self, *args, **kwargs)
+
+return _wrapper
+
+
+def requires_started_ports(func: TestPmdShellMethod) -> TestPmdShellMethod:
+"""Decorator for :class:`TestPmdShell` command methods that require 
started ports.
+
+If the decorated method is called while the ports are stopped, then these 
are started before
+continuing.
+"""
+
+@functools.wraps(func)
+def _wrapper(self: "TestPmdShell", *args: P.args, **kwargs: P.kwargs):
+if not self.ports_started:
+self._logger.debug("Ports need to be started to continue")
+self.start_all_ports()
+
+return func(self, *args, **kwargs)
+
+return _wrapper
+
+
 class TestPmdShell(DPDKShell):
 """Testpmd interactive shell.
 
 The testpmd shell users should never use
 the :meth:`~.interactive_shell.InteractiveShell.send_command` method 
directly, but rather
 call specialized methods. If there isn't one that satisfies a need, it 
should be added.
+
+Attributes:
+ports_started: Indicates whether the ports are started.
 """
 
 _app_params: TestPmdParams
@@ -617,6 +660,9 @@ def __init__(
 TestPmdParams(**app_params),
 )
 
+self.ports_started = not self._app_params.disable_device_start
+
+@requires_started_ports
 def start(self, verify: bool = True) -> None:
 """Start packet forwarding with the current configuration.
 
@@ -721,6 +767,42 @@ def set_forward_mode(self, mode: SimpleForwardingModes, 
verify: bool = True):
 f"Test pmd failed to set fwd mode to {mode.value}"
 )
 
+def stop_all_ports(self, verify: bool = True) -> None:
+"""Stops all the ports.
+
+Args:
+verify: If :data:`True`, the output of the command will be checked 
for a successful
+execution.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
the ports were not
+stopped successfully.
+"""
+self._logger.debug("Stopping all the ports...")
+output = self.send_command("port stop all")
+if verify and not output.strip().endswith("Done"):
+raise InteractiveCommandExecutionError("Ports were not stopped 
successfully")
+
+self.ports_started = False
+
+def start_all_ports(self, verify: bool = True) -> None:
+"""Starts all the ports.
+
+Args:
+verify: If :data:`True`, the output of the command will be checked 
for a successful
+execution.
+
+Raises:
+InteractiveCommandExecuti

[PATCH 5/5] dts: add testpmd set ports queues

2024-08-06 Thread Luca Vizzarro
From: Luca Vizzarro 

Add a facility to update the number of TX/RX queues during the runtime
of testpmd.
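
From a test suite this combines with the port start/stop handling added in
the previous patch, e.g. (a trimmed-down sketch of the usage in the l2fwd
suite that depends on this series):

    with TestPmdShell(self.sut_node, disable_device_start=True) as testpmd:
        for number_of_queues in (1, 2, 4, 8):
            # Ports are stopped automatically before the queues are changed.
            testpmd.set_ports_queues(number_of_queues)
            testpmd.start()
            # ... send and verify traffic here ...
            testpmd.stop()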

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
---
 dts/framework/remote_session/testpmd_shell.py | 16 
 1 file changed, 16 insertions(+)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 293c7b9dff..40e850502c 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -803,6 +803,22 @@ def start_all_ports(self, verify: bool = True) -> None:
 
 self.ports_started = True
 
+@requires_stopped_ports
+def set_ports_queues(self, number_of: int) -> None:
+"""Sets the number of queues per port.
+
+Args:
+number_of: The number of RX/TX queues to create per port.
+
+Raises:
+InternalError: If `number_of` is invalid.
+"""
+if number_of < 1:
+raise InternalError("The number of queues must be positive and 
non-zero")
+
+self.send_command(f"port config all rxq {number_of}")
+self.send_command(f"port config all txq {number_of}")
+
 def show_port_info_all(self) -> list[TestPmdPort]:
 """Returns the information of all the ports.
 
-- 
2.34.1



Re: [PATCH 3/4] test: make red test part of fast suite

2024-08-06 Thread David Marchand
On Thu, Jul 18, 2024 at 9:11 PM Stephen Hemminger
 wrote:
>
> The red tests were not run because not part of any suite.
> Meson warning is:
>  WARNING: Test "red_autotest" is not defined in any test suite
>
> Signed-off-by: Stephen Hemminger 

There is an open bz about this unit test.
https://bugs.dpdk.org/show_bug.cgi?id=826


-- 
David Marchand



Re: [PATCH 1/4] test: update alarm test

2024-08-06 Thread David Marchand
On Thu, Jul 18, 2024 at 9:11 PM Stephen Hemminger
 wrote:
>
> This test should be using the TEST_ASSERT macros, and can be
> run as part of the fast test suite now.
>
> Signed-off-by: Stephen Hemminger 

[...]

> +   ret = rte_eal_alarm_cancel(test_alarm_callback, NULL);
> +   /* return is the number of the alarm set (or 0 if none or -1 if 
> error) */
> +   TEST_ASSERT(ret > 0, "could not cancel an alarm: %d", ret);
>
> return 0;
>  }
>
> -REGISTER_TEST_COMMAND(alarm_autotest, test_alarm);
> +REGISTER_FAST_TEST(alarm_autotest, true, true, test_alarm);

This test was removed from the fast-tests list as it was triggering
false failures in some CI.
ee00af60170b ("test: remove strict timing requirements some tests")


-- 
David Marchand



[PATCH v2 0/5] dts: add pktgen and testpmd changes

2024-08-06 Thread Luca Vizzarro
Apologies, re-sending due to errors in sending v1.

v2:
- rebased

Luca Vizzarro (5):
  dts: add ability to send/receive multiple packets
  dts: add random generation seed setting
  dts: add random packet generator
  dts: add ability to start/stop testpmd ports
  dts: add testpmd set ports queues

 doc/guides/tools/dts.rst  |   5 +
 dts/framework/config/__init__.py  |   4 +
 dts/framework/config/conf_yaml_schema.json|   4 +
 dts/framework/config/types.py |   2 +
 dts/framework/remote_session/testpmd_shell.py | 102 +-
 dts/framework/runner.py   |   8 ++
 dts/framework/settings.py |  17 +++
 dts/framework/test_suite.py   |  68 +++-
 dts/framework/testbed_model/tg_node.py|  14 +--
 .../capturing_traffic_generator.py|  31 --
 dts/framework/utils.py|  79 +-
 11 files changed, 288 insertions(+), 46 deletions(-)

-- 
2.34.1



[PATCH v2 1/5] dts: add ability to send/receive multiple packets

2024-08-06 Thread Luca Vizzarro
The framework only allows sending one packet at a time via Scapy. This
change adds the ability to send multiple packets, and also introduces a
new, fast way to verify that several expected packets were received.

Moreover, it reduces code duplication by keeping a single packet sending
method only at the test suite level.
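
The verification itself is plain Counter arithmetic over the raw packet
bytes; for example:

    from collections import Counter

    expected = Counter([b"A", b"A", b"B"])
    received = Counter([b"A", b"B", b"C"])  # one b"A" missing, b"C" is noise
    missing = expected - received           # Counter({b"A": 1})
    assert missing.total() == 1             # total() needs Python >= 3.10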

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
Reviewed-by: Alex Chapman 
---
 dts/framework/test_suite.py   | 68 +--
 dts/framework/testbed_model/tg_node.py| 14 ++--
 .../capturing_traffic_generator.py| 31 -
 3 files changed, 71 insertions(+), 42 deletions(-)

diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py
index 694b2eba65..051509fb86 100644
--- a/dts/framework/test_suite.py
+++ b/dts/framework/test_suite.py
@@ -13,12 +13,13 @@
 * Test case verification.
 """
 
+from collections import Counter
 from ipaddress import IPv4Interface, IPv6Interface, ip_interface
 from typing import ClassVar, Union
 
 from scapy.layers.inet import IP  # type: ignore[import-untyped]
 from scapy.layers.l2 import Ether  # type: ignore[import-untyped]
-from scapy.packet import Packet, Padding  # type: ignore[import-untyped]
+from scapy.packet import Packet, Padding, raw  # type: ignore[import-untyped]
 
 from framework.testbed_model.port import Port, PortLink
 from framework.testbed_model.sut_node import SutNode
@@ -199,9 +200,34 @@ def send_packet_and_capture(
 Returns:
 A list of received packets.
 """
-packet = self._adjust_addresses(packet)
-return self.tg_node.send_packet_and_capture(
-packet,
+return self.send_packets_and_capture(
+[packet],
+filter_config,
+duration,
+)
+
+def send_packets_and_capture(
+self,
+packets: list[Packet],
+filter_config: PacketFilteringConfig = PacketFilteringConfig(),
+duration: float = 1,
+) -> list[Packet]:
+"""Send and receive `packets` using the associated TG.
+
+Send `packets` through the appropriate interface and receive on the 
appropriate interface.
+Modify the packets with l3/l2 addresses corresponding to the testbed 
and desired traffic.
+
+Args:
+packets: The packets to send.
+filter_config: The filter to use when capturing packets.
+duration: Capture traffic for this amount of time after sending 
`packets`.
+
+Returns:
+A list of received packets.
+"""
+packets = [self._adjust_addresses(packet) for packet in packets]
+return self.tg_node.send_packets_and_capture(
+packets,
 self._tg_port_egress,
 self._tg_port_ingress,
 filter_config,
@@ -303,6 +329,40 @@ def verify_packets(self, expected_packet: Packet, 
received_packets: list[Packet]
 )
 self._fail_test_case_verify("An expected packet not found among 
received packets.")
 
+def match_all_packets(
+self, expected_packets: list[Packet], received_packets: list[Packet]
+) -> None:
+"""Matches all the expected packets against the received ones.
+
+Matching is performed by counting down the occurrences in a dictionary 
whose keys are the
+raw packet bytes. No deep packet comparison is performed. All the 
unexpected packets (noise)
+are automatically ignored.
+
+Args:
+expected_packets: The packets we are expecting to receive.
+received_packets: All the packets that were received.
+
+Raises:
+TestCaseVerifyError: if not all the `expected_packets` were 
found in
+`received_packets`.
+"""
+expected_packets_counters = Counter(map(raw, expected_packets))
+received_packets_counters = Counter(map(raw, received_packets))
+# The number of received packets is subtracted from the number of 
expected packets, ignoring
+# any unexpected packets and capping at zero.
+missing_packets_counters = expected_packets_counters - 
received_packets_counters
+missing_packets_count = missing_packets_counters.total()
+self._logger.debug(
+f"match_all_packets: expected {len(expected_packets)}, "
+f"received {len(received_packets)}, missing 
{missing_packets_count}"
+)
+
+if missing_packets_count != 0:
+self._fail_test_case_verify(
+f"Not all packets were received, expected 
{len(expected_packets)} "
+f"but {missing_packets_count} were missing."
+)
+
 def _compare_packets(self, expected_packet: Packet, received_packet: 
Packet) -> bool:
 self._logger.debug(
 f"Comparing packets: 
\n{expected_packet.summary()}\n{received_packet.summary()}"
diff --git a/dts/framework/testbed_model/tg_node.py 
b/dts/framework/testbed_model/tg_node.py
inde

[PATCH v2 2/5] dts: add random generation seed setting

2024-08-06 Thread Luca Vizzarro
When introducing pseudo-random generation in the test runs, we need to
ensure that these runs can be reproduced by setting a pre-defined seed.
This commit adds the ability to set one, or to have one generated and
reported back to the user.

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
Reviewed-by: Alex Chapman 
---
 doc/guides/tools/dts.rst   |  5 +
 dts/framework/config/__init__.py   |  4 
 dts/framework/config/conf_yaml_schema.json |  4 
 dts/framework/config/types.py  |  2 ++
 dts/framework/runner.py|  8 
 dts/framework/settings.py  | 17 +
 6 files changed, 40 insertions(+)

diff --git a/doc/guides/tools/dts.rst b/doc/guides/tools/dts.rst
index 515b15e4d8..9b5ea9779c 100644
--- a/doc/guides/tools/dts.rst
+++ b/doc/guides/tools/dts.rst
@@ -251,6 +251,8 @@ DTS is run with ``main.py`` located in the ``dts`` 
directory after entering Poet
... | DTS_TEST_SUITES='suite, suite case, ...' 
(default: [])
  --re-run N_TIMES, --re_run N_TIMES
[DTS_RERUN] Re-run each test case the specified 
number of times if a test failure occurs. (default: 0)
+ --random-seed NUMBER  [DTS_RANDOM_SEED] The seed to use with the 
pseudo-random generator. If not specified, the configuration value is
+   used instead. If that's also not specified, a 
random seed is generated. (default: None)
 
 
 The brackets contain the names of environment variables that set the same 
thing.
@@ -548,6 +550,9 @@ involved in the testing. These can be defined with the 
following mappings:

++---+---+
| ``traffic_generator_node`` | Node name for the traffic generator node.
 |

++---+
+   | ``random_seed``| (*optional*) *int* – Allows you to set a 
seed for pseudo-random   |
+   || generation.  
 |
+   
++---+
 
 ``nodes``
`sequence 
`_
 listing
diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py
index df60a5030e..269d9ec318 100644
--- a/dts/framework/config/__init__.py
+++ b/dts/framework/config/__init__.py
@@ -445,6 +445,7 @@ class TestRunConfiguration:
 system_under_test_node: The SUT node to use in this test run.
 traffic_generator_node: The TG node to use in this test run.
 vdevs: The names of virtual devices to test.
+random_seed: The seed to use for pseudo-random generation.
 """
 
 build_targets: list[BuildTargetConfiguration]
@@ -455,6 +456,7 @@ class TestRunConfiguration:
 system_under_test_node: SutNodeConfiguration
 traffic_generator_node: TGNodeConfiguration
 vdevs: list[str]
+random_seed: int | None
 
 @classmethod
 def from_dict(
@@ -497,6 +499,7 @@ def from_dict(
 vdevs = (
 d["system_under_test_node"]["vdevs"] if "vdevs" in 
d["system_under_test_node"] else []
 )
+random_seed = d.get("random_seed", None)
 return cls(
 build_targets=build_targets,
 perf=d["perf"],
@@ -506,6 +509,7 @@ def from_dict(
 system_under_test_node=system_under_test_node,
 traffic_generator_node=traffic_generator_node,
 vdevs=vdevs,
+random_seed=random_seed,
 )
 
 def copy_and_modify(self, **kwargs) -> Self:
diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json
index f02a310bb5..df390e8ae2 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -379,6 +379,10 @@
   },
   "traffic_generator_node": {
 "$ref": "#/definitions/node_name"
+  },
+  "random_seed": {
+"type": "integer",
+"description": "Optional field. Allows you to set a seed for 
pseudo-random generation."
   }
 },
 "additionalProperties": false,
diff --git a/dts/framework/config/types.py b/dts/framework/config/types.py
index cf16556403..ce7b784ac8 100644
--- a/dts/framework/config/types.py
+++ b/dts/framework/config/types.py
@@ -121,6 +121,8 @@ class TestRunConfigDict(TypedDict):
 system_under_test_node: TestRunSUTConfigDict
 #:
 traffic_generator_node: str
+#:
+random_seed: int
 
 
 class ConfigurationDict(TypedDict):
diff --git a/dts/framework/runner.py b/dts/framework/runner.py
index 6b6f6a05f5..34b1dad5c4 100644
--- a/dts/framework/runner.py
+++ b/dts/framework/runner.py
@@ -20,6 +20,7 @@
 i

[PATCH v2 3/5] dts: add random packet generator

2024-08-06 Thread Luca Vizzarro
Add a basic utility that can create random L3 and L4 packets with random
payloads and port numbers (if L4).

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
Reviewed-by: Alex Chapman 
---
 dts/framework/utils.py | 79 --
 1 file changed, 77 insertions(+), 2 deletions(-)

diff --git a/dts/framework/utils.py b/dts/framework/utils.py
index 6b5d5a805f..c768dd0c99 100644
--- a/dts/framework/utils.py
+++ b/dts/framework/utils.py
@@ -17,14 +17,16 @@
 import atexit
 import json
 import os
+import random
 import subprocess
-from enum import Enum
+from enum import Enum, Flag
 from pathlib import Path
 from subprocess import SubprocessError
 
+from scapy.layers.inet import IP, TCP, UDP, Ether  # type: 
ignore[import-untyped]
 from scapy.packet import Packet  # type: ignore[import-untyped]
 
-from .exception import ConfigurationError
+from .exception import ConfigurationError, InternalError
 
 REGEX_FOR_PCI_ADDRESS: str = 
"/[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}.[0-9]{1}/"
 
@@ -244,3 +246,76 @@ def _delete_tarball(self) -> None:
 def __fspath__(self) -> str:
 """The os.PathLike protocol implementation."""
 return str(self._tarball_path)
+
+
+class PacketProtocols(Flag):
+"""Flag specifying which protocols to use for packet generation."""
+
+#:
+IP = 1
+#:
+TCP = 2 | IP
+#:
+UDP = 4 | IP
+#:
+ALL = TCP | UDP
+
+
+def generate_random_packets(
+number_of: int,
+payload_size: int = 1500,
+protocols: PacketProtocols = PacketProtocols.ALL,
+ports_range: range = range(1024, 49152),
+mtu: int = 1500,
+) -> list[Packet]:
+"""Generate a number of random packets.
+
+The payload of the packets will consist of random bytes. If `payload_size` 
is too big, then the
+maximum payload size allowed for the specific packet type is used. The 
size is calculated based
+on the specified `mtu`, therefore it is essential that `mtu` is set 
correctly to match the MTU
+of the port that will send out the generated packets.
+
+If `protocols` has any L4 protocol enabled then all the packets are 
generated with any of
+the specified L4 protocols chosen at random. If only 
:attr:`~PacketProtocols.IP` is set, then
+only L3 packets are generated.
+
+If L4 packets will be generated, then the TCP/UDP ports to be used will be 
chosen at random from
+`ports_range`.
+
+Args:
+number_of: The number of packets to generate.
+payload_size: The packet payload size to generate, capped based on 
`mtu`.
+protocols: The protocols to use for the generated packets.
+ports_range: The range of L4 port numbers to use. Used only if 
`protocols` has L4 protocols.
+mtu: The MTU of the NIC port that will send out the generated packets.
+
+Raises:
+InternalError: If the `payload_size` is invalid.
+
+Returns:
+A list containing the randomly generated packets.
+"""
+if payload_size < 0:
+raise InternalError(f"An invalid payload_size of {payload_size} was 
given.")
+
+l4_factories = []
+if protocols & PacketProtocols.TCP:
+l4_factories.append(TCP)
+if protocols & PacketProtocols.UDP:
+l4_factories.append(UDP)
+
+def _make_packet() -> Packet:
+packet = Ether()
+
+if protocols & PacketProtocols.IP:
+packet /= IP()
+
+if len(l4_factories) > 0:
+src_port, dst_port = random.choices(ports_range, k=2)
+packet /= random.choice(l4_factories)(sport=src_port, 
dport=dst_port)
+
+max_payload_size = mtu - len(packet)
+usable_payload_size = payload_size if payload_size < max_payload_size 
else max_payload_size
+return packet / random.randbytes(usable_payload_size)
+
+return [_make_packet() for _ in range(number_of)]
-- 
2.34.1



[PATCH v2 4/5] dts: add ability to start/stop testpmd ports

2024-08-06 Thread Luca Vizzarro
Add testpmd commands to start and stop all the ports, so that they can
be configured. Because some commands require the ports to be stopped and
others require them to be started, also add decorators for commands that
require a specific state, removing this logic from the test writer's duty.

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
---
 dts/framework/remote_session/testpmd_shell.py | 86 ++-
 1 file changed, 84 insertions(+), 2 deletions(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 43e9f56517..ca24b28070 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -14,16 +14,17 @@
 testpmd_shell.close()
 """
 
+import functools
 import re
 import time
 from dataclasses import dataclass, field
 from enum import Flag, auto
 from pathlib import PurePath
-from typing import ClassVar
+from typing import Any, Callable, ClassVar, Concatenate, ParamSpec
 
 from typing_extensions import Self, Unpack
 
-from framework.exception import InteractiveCommandExecutionError
+from framework.exception import InteractiveCommandExecutionError, InternalError
 from framework.params.testpmd import SimpleForwardingModes, TestPmdParams
 from framework.params.types import TestPmdParamsDict
 from framework.parser import ParserFn, TextParser
@@ -33,6 +34,9 @@
 from framework.testbed_model.sut_node import SutNode
 from framework.utils import StrEnum
 
+P = ParamSpec("P")
+TestPmdShellMethod = Callable[Concatenate["TestPmdShell", P], Any]
+
 
 class TestPmdDevice:
 """The data of a device that testpmd can recognize.
@@ -577,12 +581,51 @@ class TestPmdPortStats(TextParser):
 tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)"))
 
 
+def requires_stopped_ports(func: TestPmdShellMethod) -> TestPmdShellMethod:
+"""Decorator for :class:`TestPmdShell` command methods that require 
stopped ports.
+
+If the decorated method is called while the ports are started, then these 
are stopped before
+continuing.
+"""
+
+@functools.wraps(func)
+def _wrapper(self: "TestPmdShell", *args: P.args, **kwargs: P.kwargs):
+if self.ports_started:
+self._logger.debug("Ports need to be stopped to continue")
+self.stop_all_ports()
+
+return func(self, *args, **kwargs)
+
+return _wrapper
+
+
+def requires_started_ports(func: TestPmdShellMethod) -> TestPmdShellMethod:
+"""Decorator for :class:`TestPmdShell` command methods that require 
started ports.
+
+If the decorated method is called while the ports are stopped, then these 
are started before
+continuing.
+"""
+
+@functools.wraps(func)
+def _wrapper(self: "TestPmdShell", *args: P.args, **kwargs: P.kwargs):
+if not self.ports_started:
+self._logger.debug("Ports need to be started to continue")
+self.start_all_ports()
+
+return func(self, *args, **kwargs)
+
+return _wrapper
+
+
 class TestPmdShell(DPDKShell):
 """Testpmd interactive shell.
 
 The testpmd shell users should never use
 the :meth:`~.interactive_shell.InteractiveShell.send_command` method 
directly, but rather
 call specialized methods. If there isn't one that satisfies a need, it 
should be added.
+
+Attributes:
+ports_started: Indicates whether the ports are started.
 """
 
 _app_params: TestPmdParams
@@ -619,6 +662,9 @@ def __init__(
 name,
 )
 
+self.ports_started = not self._app_params.disable_device_start
+
+@requires_started_ports
 def start(self, verify: bool = True) -> None:
 """Start packet forwarding with the current configuration.
 
@@ -723,6 +769,42 @@ def set_forward_mode(self, mode: SimpleForwardingModes, 
verify: bool = True):
 f"Test pmd failed to set fwd mode to {mode.value}"
 )
 
+def stop_all_ports(self, verify: bool = True) -> None:
+"""Stops all the ports.
+
+Args:
+verify: If :data:`True`, the output of the command will be checked 
for a successful
+execution.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
the ports were not
+stopped successfully.
+"""
+self._logger.debug("Stopping all the ports...")
+output = self.send_command("port stop all")
+if verify and not output.strip().endswith("Done"):
+raise InteractiveCommandExecutionError("Ports were not stopped 
successfully")
+
+self.ports_started = False
+
+def start_all_ports(self, verify: bool = True) -> None:
+"""Starts all the ports.
+
+Args:
+verify: If :data:`True`, the output of the command will be checked 
for a successful
+execution.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
th

[PATCH v2 5/5] dts: add testpmd set ports queues

2024-08-06 Thread Luca Vizzarro
Add a facility to update the number of TX/RX queues during the runtime
of testpmd.

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
---
 dts/framework/remote_session/testpmd_shell.py | 16 
 1 file changed, 16 insertions(+)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index ca24b28070..85fbc42696 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -805,6 +805,22 @@ def start_all_ports(self, verify: bool = True) -> None:
 
 self.ports_started = True
 
+@requires_stopped_ports
+def set_ports_queues(self, number_of: int) -> None:
+"""Sets the number of queues per port.
+
+Args:
+number_of: The number of RX/TX queues to create per port.
+
+Raises:
+InternalError: If `number_of` is invalid.
+"""
+if number_of < 1:
+raise InternalError("The number of queues must be positive and 
non-zero")
+
+self.send_command(f"port config all rxq {number_of}")
+self.send_command(f"port config all txq {number_of}")
+
 def show_port_info_all(self) -> list[TestPmdPort]:
 """Returns the information of all the ports.
 
-- 
2.34.1



Re: [PATCH 3/4] test: make red test part of fast suite

2024-08-06 Thread Bruce Richardson
On Tue, Aug 06, 2024 at 02:28:13PM +0200, David Marchand wrote:
> On Thu, Jul 18, 2024 at 9:11 PM Stephen Hemminger
>  wrote:
> >
> > The red tests were not run because not part of any suite.
> > Meson warning is:
> >  WARNING: Test "red_autotest" is not defined in any test suite
> >
> > Signed-off-by: Stephen Hemminger 
> 
> There is an open bz about this unit test.
> https://bugs.dpdk.org/show_bug.cgi?id=826
> 
Do we need a "broken tests" placeholder suite, or is it better to just
leave the broken tests unassigned, with the warning in meson?

/Bruce


[PATCH] dts: add l2fwd test suite

2024-08-06 Thread Luca Vizzarro
Add a basic L2 forwarding test suite which tests the correct
functionality of the forwarding facility built into DPDK.

The tests are performed with different numbers of queues per port.

Bugzilla ID: 1481

Signed-off-by: Luca Vizzarro 
Reviewed-by: Paul Szczepanek 
---
Depends-on: series-32714 ("dts: add pktgen and testpmd changes")
---
 dts/framework/config/conf_yaml_schema.json |  3 +-
 dts/tests/TestSuite_l2fwd.py   | 58 ++
 2 files changed, 60 insertions(+), 1 deletion(-)
 create mode 100644 dts/tests/TestSuite_l2fwd.py

diff --git a/dts/framework/config/conf_yaml_schema.json 
b/dts/framework/config/conf_yaml_schema.json
index df390e8ae2..58a719a923 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -187,7 +187,8 @@
   "enum": [
 "hello_world",
 "os_udp",
-"pmd_buffer_scatter"
+"pmd_buffer_scatter",
+"l2fwd"
   ]
 },
 "test_target": {
diff --git a/dts/tests/TestSuite_l2fwd.py b/dts/tests/TestSuite_l2fwd.py
new file mode 100644
index 00..46f07b78eb
--- /dev/null
+++ b/dts/tests/TestSuite_l2fwd.py
@@ -0,0 +1,58 @@
+"""Basic L2 forwarding test suite.
+
+This test suite runs basic L2 forwarding on testpmd with different numbers of
queues per port.
+The forwarding test is performed with several packets being sent at once.
+"""
+
+from framework.params.testpmd import EthPeer, SimpleForwardingModes
+from framework.remote_session.testpmd_shell import TestPmdShell
+from framework.test_suite import TestSuite
+from framework.testbed_model.cpu import LogicalCoreCount
+from framework.utils import generate_random_packets
+
+
+class TestL2fwd(TestSuite):
+"""L2 forwarding test suite."""
+
+#: The total number of packets to generate and send for forwarding.
+NUMBER_OF_PACKETS_TO_SEND = 50
+#: The payload size to use for the generated packets in bytes.
+PAYLOAD_SIZE = 100
+
+def set_up_suite(self) -> None:
+"""Set up the test suite.
+
+Setup:
+Verify that we have at least 2 ports in the current test. Generate 
the random packets
+that will be sent.
+"""
+self.verify(len(self.sut_node.ports) >= 2, "At least 2 ports are 
required for this test.")
+self.packets = generate_random_packets(self.NUMBER_OF_PACKETS_TO_SEND, 
self.PAYLOAD_SIZE)
+
+def test_l2fwd_integrity(self) -> None:
+"""Test the L2 forwarding integrity.
+
+Test:
+Configure a testpmd shell with a different numbers of queues per 
run. Start up L2
+forwarding, send random packets from the TG and verify they were 
all received back.
+"""
+queues = [1, 2, 4, 8]
+
+with TestPmdShell(
+self.sut_node,
+lcore_filter_specifier=LogicalCoreCount(cores_per_socket=4),
+forward_mode=SimpleForwardingModes.mac,
+eth_peer=[EthPeer(1, self.tg_node.ports[1].mac_address)],
+disable_device_start=True,
+) as shell:
+for queues_num in queues:
+self._logger.info(f"Testing L2 forwarding with {queues_num} 
queue(s)")
+shell.set_ports_queues(queues_num)
+shell.start()
+
+received_packets = self.send_packets_and_capture(self.packets)
+
+expected_packets = [self.get_expected_packet(packet) for 
packet in self.packets]
+self.match_all_packets(expected_packets, received_packets)
+
+shell.stop()
-- 
2.34.1



[v1 0/3] meson options related changes

2024-08-06 Thread Gagandeep Singh
meson options related changes

Gagandeep Singh (3):
  meson: add a meson option to install examples
  meson: add a meson option to install examples source
  meson: skip symlinks to PMDs script on no sub directory

 config/meson.build   |  7 +--
 examples/meson.build | 13 -
 meson.build  |  7 ---
 meson_options.txt|  4 
 4 files changed, 25 insertions(+), 6 deletions(-)

-- 
2.25.1



[v1 1/3] meson: add a meson option to install examples

2024-08-06 Thread Gagandeep Singh
Adding a meson option "enable_examples_bin_install"
to install the example binaries in bin.

Default value is false.

Signed-off-by: Gagandeep Singh 
---
 examples/meson.build | 13 -
 meson_options.txt|  2 ++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/examples/meson.build b/examples/meson.build
index 8e8968a1fa..0d0df4e36d 100644
--- a/examples/meson.build
+++ b/examples/meson.build
@@ -124,10 +124,21 @@ foreach example: examples
 if allow_experimental_apis
 cflags += '-DALLOW_EXPERIMENTAL_API'
 endif
-executable('dpdk-' + name, sources,
+if get_option('enable_examples_bin_install')
+  executable('dpdk-' + name, sources,
+include_directories: includes,
+link_whole: link_whole_libs,
+link_args: ldflags,
+c_args: cflags,
+dependencies: dep_objs,
+   install_rpath: join_paths(get_option('prefix'), 
driver_install_path),
+   install: true)
+else
+  executable('dpdk-' + name, sources,
 include_directories: includes,
 link_whole: link_whole_libs,
 link_args: ldflags,
 c_args: cflags,
 dependencies: dep_objs)
+endif
 endforeach
diff --git a/meson_options.txt b/meson_options.txt
index e49b2fc089..e6f83f3f92 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -24,6 +24,8 @@ option('enable_drivers', type: 'string', value: '', 
description:
'Comma-separated list of drivers to build. If unspecified, build all 
drivers.')
 option('enable_driver_sdk', type: 'boolean', value: false, description:
'Install headers to build drivers.')
+option('enable_examples_bin_install', type: 'boolean', value: false, 
description:
+   'Install examples binaries')
 option('enable_kmods', type: 'boolean', value: true, description:
'[Deprecated - will be removed in future release] build kernel modules')
 option('enable_libs', type: 'string', value: '', description:
-- 
2.25.1



[v1 2/3] meson: add a meson option to install examples source

2024-08-06 Thread Gagandeep Singh
Adding a meson option "enable_examples_source_install"
to enable or disable installation of examples source code.

Default value is true.

Signed-off-by: Gagandeep Singh 
---
 meson.build   | 7 ---
 meson_options.txt | 2 ++
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/meson.build b/meson.build
index 8b248d4505..7cdd54f088 100644
--- a/meson.build
+++ b/meson.build
@@ -88,12 +88,13 @@ subdir('app')
 # build docs
 subdir('doc')
 
-# build any examples explicitly requested - useful for developers - and
-# install any example code into the appropriate install path
+# build any examples explicitly requested
 subdir('examples')
-install_subdir('examples',
+if get_option('enable_examples_source_install')
+  install_subdir('examples',
 install_dir: get_option('datadir') + '/dpdk',
 exclude_files: ex_file_excludes)
+endif
 
 # build kernel modules
 subdir('kernel')
diff --git a/meson_options.txt b/meson_options.txt
index e6f83f3f92..4f498093e6 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -26,6 +26,8 @@ option('enable_driver_sdk', type: 'boolean', value: false, 
description:
'Install headers to build drivers.')
 option('enable_examples_bin_install', type: 'boolean', value: false, 
description:
'Install examples binaries')
+option('enable_examples_source_install', type: 'boolean', value: true, 
description:
+   'Install examples source code')
 option('enable_kmods', type: 'boolean', value: true, description:
'[Deprecated - will be removed in future release] build kernel modules')
 option('enable_libs', type: 'string', value: '', description:
-- 
2.25.1



[v1 3/3] meson: skip symlinks to PMDs script on no sub directory

2024-08-06 Thread Gagandeep Singh
If the user gives the option -Ddrivers_install_subdir= or
-Ddrivers_install_subdir=. to install all the PMDs in the
parent directory, the post-installation script
"symlink-drivers-solibs.sh" can create an invalid library file
named 'librte_*.so*' or cause meson installation errors:

ln: './librte_*' and './librte_*' are the same file
FAILED: install script '/bin/sh /home/nxa12342/upstream/dpdk/config/.
/buildtools/symlink-drivers-solibs.sh lib/x86_64-linux-gnu .' exit code 1,
stopped
FAILED: meson-install

This patch skips execution of the post-install script that symlinks the
PMD libraries when no sub directory is given.

Signed-off-by: Gagandeep Singh 
---
 config/meson.build | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/config/meson.build b/config/meson.build
index 8c8b019c25..4c1c6848ad 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -95,8 +95,11 @@ eal_pmd_path = join_paths(get_option('prefix'), 
driver_install_path)
 # e.g. ixgbe depends on librte_bus_pci. This means that the bus drivers need
 # to be in the library path, so symlink the drivers from the main lib 
directory.
 if not is_windows
-meson.add_install_script('../buildtools/symlink-drivers-solibs.sh',
-get_option('libdir'), pmd_subdir_opt)
+# skip symlink-drivers-solibs.sh execution on no sub directory
+if pmd_subdir_opt != '' and pmd_subdir_opt != '.'
+   meson.add_install_script('../buildtools/symlink-drivers-solibs.sh',
+   get_option('libdir'), pmd_subdir_opt)
+endif
 elif meson.version().version_compare('>=0.55.0')
 # 0.55.0 is required to use external program with add_install_script
 meson.add_install_script(py3,
-- 
2.25.1



[DPDK/DTS Bug 1516] Port over checksum offload suite to new DTS

2024-08-06 Thread bugzilla
https://bugs.dpdk.org/show_bug.cgi?id=1516

Bug ID: 1516
   Summary: Port over checksum offload suite to new DTS
   Product: DPDK
   Version: unspecified
  Hardware: All
OS: All
Status: UNCONFIRMED
  Severity: normal
  Priority: Normal
 Component: DTS
  Assignee: dev@dpdk.org
  Reporter: dm...@iol.unh.edu
CC: juraj.lin...@pantheon.tech, pr...@iol.unh.edu
  Target Milestone: ---

-- 
You are receiving this mail because:
You are the assignee for the bug.

Re: [v1 3/3] meson: skip symlinks to PMDs script on no sub directory

2024-08-06 Thread Bruce Richardson
On Tue, Aug 06, 2024 at 07:12:18PM +0530, Gagandeep Singh wrote:
> if user gives option -Ddrivers_install_subdir= or
> -Ddrivers_install_subdir=. to install all the PMDs in
> parent directory, The post installation script
> "symlink-drivers-solibs.sh" can create invalid library file
> named 'librte_*.so*' or meson installation errors:
> 
> ln: './librte_*' and './librte_*' are the same file
> FAILED: install script '/bin/sh /home/nxa12342/upstream/dpdk/config/.
> /buildtools/symlink-drivers-solibs.sh lib/x86_64-linux-gnu .' exit code 1,
> stopped
> FAILED: meson-install
> 
> This patch removes this post-install script execution to symlink
> the PMDs libraries when no sub directory is given.
> 
> Signed-off-by: Gagandeep Singh 
> ---

Acked-by: Bruce Richardson 


Re: [v1 1/3] meson: add a meson option to install examples

2024-08-06 Thread Bruce Richardson
On Tue, Aug 06, 2024 at 07:12:16PM +0530, Gagandeep Singh wrote:
> Adding a meson option "enable_examples_bin_install"
> to install the examples binaries in bin.
> 
> Default value is false.
> 
> Signed-off-by: Gagandeep Singh 
> ---
>  examples/meson.build | 13 -
>  meson_options.txt|  2 ++
>  2 files changed, 14 insertions(+), 1 deletion(-)
> 
Is there a particular reason we might want to do this? Installing sample
code binaries in bin seems rather strange to me.

/Bruce


Re: [v1 2/3] meson: add a meson option to install examples source

2024-08-06 Thread Bruce Richardson
On Tue, Aug 06, 2024 at 07:12:17PM +0530, Gagandeep Singh wrote:
> Adding a meson option "enable_examples_source_install"
> to enable or disable installation of examples source code.
> 
> Default value is true.
> 
> Signed-off-by: Gagandeep Singh 
> ---
>  meson.build   | 7 ---
>  meson_options.txt | 2 ++
>  2 files changed, 6 insertions(+), 3 deletions(-)
> 
Is installing the DPDK sample code such a problem that we need to disable it? I
was expecting that such filtering out of unwanted files could be done via
packaging rather than us having to add lots of DPDK build options to
control these things.

/Bruce


Re: [PATCH v5] net/gve : Update EOP & csum bit in txd rte_mbuf chain

2024-08-06 Thread Tathagat Priyadarshi
Hi Ferruh,

Could you please accept the updated patch? Let us know what's pending.

https://patches.dpdk.org/project/dpdk/patch/1722575288-2408630-1-git-send-email-tathagat.d...@gmail.com/

TIA

On Fri, Aug 2, 2024 at 10:40 AM Tathagat Priyadarshi
 wrote:
>
> Updated the if-else block with an optimised inverse operator. Thanks
> for your suggestion Stephen.
>
> On Fri, Aug 2, 2024 at 10:36 AM Tathagat Priyadarshi
>  wrote:
> >
> > The EOP and csum bit was not set for all the packets in mbuf chain
> > causing packet transmission stalls for packets split across
> > mbuf in chain.
> >
> > Fixes: 4022f99 ("net/gve: support basic Tx data path for DQO")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Tathagat Priyadarshi 
> > Signed-off-by: Varun Lakkur Ambaji Rao 
> >
> > Acked-by: Joshua Washington 
> > ---
> >  drivers/net/gve/gve_tx_dqo.c | 9 ++---
> >  1 file changed, 6 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
> > index a65e6aa..bbaf46d 100644
> > --- a/drivers/net/gve/gve_tx_dqo.c
> > +++ b/drivers/net/gve/gve_tx_dqo.c
> > @@ -89,6 +89,7 @@
> > uint16_t sw_id;
> > uint64_t bytes;
> > uint16_t first_sw_id;
> > +   uint8_t csum;
> >
> > sw_ring = txq->sw_ring;
> > txr = txq->tx_ring;
> > @@ -114,6 +115,9 @@
> > ol_flags = tx_pkt->ol_flags;
> > nb_used = tx_pkt->nb_segs;
> > first_sw_id = sw_id;
> > +
> > +   csum = !!(ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO);
> > +
> > do {
> > if (sw_ring[sw_id] != NULL)
> > PMD_DRV_LOG(DEBUG, "Overwriting an entry in 
> > sw_ring");
> > @@ -126,6 +130,8 @@
> > txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
> > txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
> > txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, 
> > GVE_TX_MAX_BUF_SIZE_DQO);
> > +   txd->pkt.end_of_packet = 0;
> > +   txd->pkt.checksum_offload_enable = csum;
> >
> > /* size of desc_ring and sw_ring could be different 
> > */
> > tx_id = (tx_id + 1) & mask;
> > @@ -138,9 +144,6 @@
> > /* fill the last descriptor with End of Packet (EOP) bit */
> > txd->pkt.end_of_packet = 1;
> >
> > -   if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO)
> > -   txd->pkt.checksum_offload_enable = 1;
> > -
> > txq->nb_free -= nb_used;
> > txq->nb_used += nb_used;
> > }
> > --
> > 1.8.3.1
> >


[PATCH] net/octeon_ep: extend mailbox functionality

2024-08-06 Thread Vamsi Attunuru
This patch extends the mailbox functionality to handle PF-to-VF mailbox
messages and also updates the current mailbox version to V3.

As part of the PF FLR notification event, the event handler invokes the
device removal event callback to tear down the driver.

Signed-off-by: Vamsi Attunuru 
---
 drivers/net/octeon_ep/cnxk_ep_vf.h|  5 ++
 drivers/net/octeon_ep/otx_ep_ethdev.c |  4 +-
 drivers/net/octeon_ep/otx_ep_mbox.c   | 70 +--
 drivers/net/octeon_ep/otx_ep_mbox.h   | 11 -
 4 files changed, 82 insertions(+), 8 deletions(-)

diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h 
b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 41d8fbbb3a..8981dd7e86 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -134,6 +134,9 @@
 #define CNXK_EP_R_MBOX_VF_PF_DATA(ring)  \
(CNXK_EP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
 
+#define CNXK_EP_R_MBOX_PF_VF_DATA(ring)  \
+   (CNXK_EP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
+
 #define CNXK_EP_R_MBOX_PF_VF_INT(ring)   \
(CNXK_EP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_EP_RING_OFFSET))
 
@@ -195,5 +198,7 @@ struct cnxk_ep_instr_32B {
 #define CNXK_EP_OQ_ISM_OFFSET(queue)(RTE_CACHE_LINE_SIZE * (queue))
 #define CNXK_EP_ISM_EN  (0x1)
 #define CNXK_EP_ISM_MSIX_DIS(0x2)
+#define CNXK_EP_MBOX_INTR   (0x1)
+#define CNXK_EP_MBOX_ENAB   (0x2)
 
 #endif /*_CNXK_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c 
b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 46211361a0..196ed69123 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -656,6 +656,7 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
 
otx_epvf = OTX_EP_DEV(eth_dev);
otx_ep_mbox_send_dev_exit(eth_dev);
+   otx_ep_mbox_uninit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
for (q_no = 0; q_no < num_queues; q_no++) {
@@ -725,6 +726,7 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
 
+   otx_ep_mbox_uninit(eth_dev);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
@@ -826,7 +828,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
return -EINVAL;
}
 
-   if (otx_ep_mbox_version_check(eth_dev))
+   if (otx_ep_mbox_init(eth_dev))
return -EINVAL;
 
if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.c 
b/drivers/net/octeon_ep/otx_ep_mbox.c
index 4118645dc7..0474419599 100644
--- a/drivers/net/octeon_ep/otx_ep_mbox.c
+++ b/drivers/net/octeon_ep/otx_ep_mbox.c
@@ -17,7 +17,10 @@
  * with new command and it's version info.
  */
 static uint32_t otx_ep_cmd_versions[OTX_EP_MBOX_CMD_MAX] = {
-   [0 ... OTX_EP_MBOX_CMD_DEV_REMOVE] = OTX_EP_MBOX_VERSION_V1
+   [0 ... OTX_EP_MBOX_CMD_DEV_REMOVE] = OTX_EP_MBOX_VERSION_V1,
+   [OTX_EP_MBOX_CMD_GET_FW_INFO ... OTX_EP_MBOX_NOTIF_LINK_STATUS] = 
OTX_EP_MBOX_VERSION_V2,
+   [OTX_EP_MBOX_NOTIF_PF_FLR] = OTX_EP_MBOX_VERSION_V3
+
 };
 
 static int
@@ -288,10 +291,9 @@ otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
return rsp.s_get_mtu.mtu;
 }
 
-int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
+static void
+otx_ep_mbox_version_check(struct otx_ep_device *otx_ep)
 {
-   struct otx_ep_device *otx_ep =
-   (struct otx_ep_device *)(eth_dev)->data->dev_private;
union otx_ep_mbox_word cmd;
union otx_ep_mbox_word rsp;
int ret;
@@ -312,15 +314,73 @@ int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
if (ret == OTX_EP_MBOX_CMD_STATUS_NACK || rsp.s_version.version == 0) {
otx_ep_dbg("VF Mbox version fallback to base version from:%u\n",
(uint32_t)cmd.s_version.version);
-   return 0;
+   return;
}
otx_ep->mbox_neg_ver = (uint32_t)rsp.s_version.version;
otx_ep_dbg("VF Mbox version:%u Negotiated VF version with PF:%u\n",
(uint32_t)cmd.s_version.version,
(uint32_t)rsp.s_version.version);
+}
+
+static void
+otx_ep_mbox_intr_handler(void *param)
+{
+   struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+   struct otx_ep_device *otx_ep = (struct otx_ep_device 
*)eth_dev->data->dev_private;
+   struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
+   union otx_ep_mbox_word mbox_cmd;
+
+   if (otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0)) & 
CNXK_EP_MBOX_INTR) {
+   mbox_cmd.u64 = otx2_read64(otx_ep->hw_addr + 
CNXK_EP_R_MBOX_PF_VF_DATA(0));
+   otx2_write64(CNXK_EP_MBOX_ENAB | CNXK_EP_MBOX_INTR,
+otx_ep->hw_addr + CNXK_EP_R_MBOX_PF_VF_INT(0));
+   if (mbox_cmd.s.opcode == OTX_EP_MBOX_NOT

[PATCH v15 0/5] API docs generation

2024-08-06 Thread Juraj Linkeš
The generation is done with Sphinx, which DPDK already uses, with a
slightly modified configuration of the sidebar placed in an if block.

DTS dependencies do not need to be installed, but there is the option to
install doc build dependencies with Poetry:
poetry install --with docs

The build itself may be run with:
meson setup <build_dir> -Denable_docs=true
ninja -C <build_dir>

The above will do a full DPDK build with docs. To build just docs:
meson setup <build_dir>
ninja -C <build_dir> dts-doc

Python 3.10 is required to build the DTS API docs.

The patchset contains the .rst sources which Sphinx uses to generate the
html pages. These were first generated with the sphinx-apidoc utility
and modified to provide a better look. The documentation just doesn't
look that good without the modifications, and there aren't enough
configuration options to achieve that without manual changes to the .rst
files. This introduces extra maintenance which involves adding new .rst
files when a new Python module is added or changing the .rst structure
if the Python directory/file structure is changed (moved, renamed
files). This small maintenance burden is outweighed by the flexibility
afforded by the ability to make manual changes to the .rst files.

v10:
Fix DTS doc generation issue: only copy the custom css file if it exists.

v11:
Added the config option autodoc_mock_imports, which eliminates the need
for DTS dependencies. Added a script that finds out which imports need to
be added to autodoc_mock_imports. The script also checks the required
Python version for building DTS docs.
Removed tags from the two affected patches which will need to be
reviewed again.

v12:
Added paramiko to the required dependencies of get-dts-deps.py.

v13:
Fixed build error:
TypeError: unsupported operand type(s) for |: 'NoneType' and 'Transport'

v14:
Fixed install error:
ERROR: File 'dts/doc/html' could not be found
This required me to put the built docs into dts/doc which is outside the
DPDK API doc dir, resulting in linking between DPDK and DTS api docs not
working properly. I addressed this by adding a symlink to the build dir.
This way the link works after installing the docs and the symlink is
just one extra file in the build dir.

v15:
Moved DTS API sources to doc/api/dts. This simplifies a lot of things in
the build, but mainly makes a lot of sense. Now the source, build and
install paths are the same so there isn't any need for any symlinks or
other workarounds.
Also added a symlink to the custom.css file so that it works with
call-sphinx-build.py without any modifications.

Juraj Linkeš (5):
  dts: update params and parser docstrings
  dts: replace the or operator in third party types
  dts: add doc generation dependencies
  dts: add API doc sources
  dts: add API doc generation

 buildtools/call-sphinx-build.py   |   2 +
 buildtools/get-dts-deps.py|  78 +++
 buildtools/meson.build|   1 +
 doc/api/doxy-api-index.md |   3 +
 doc/api/doxy-api.conf.in  |   2 +
 doc/api/dts/conf_yaml_schema.json |   1 +
 doc/api/dts/custom.css|   1 +
 doc/api/dts/framework.config.rst  |  12 +
 doc/api/dts/framework.config.types.rst|   6 +
 doc/api/dts/framework.exception.rst   |   6 +
 doc/api/dts/framework.logger.rst  |   6 +
 doc/api/dts/framework.params.eal.rst  |   6 +
 doc/api/dts/framework.params.rst  |  14 +
 doc/api/dts/framework.params.testpmd.rst  |   6 +
 doc/api/dts/framework.params.types.rst|   6 +
 doc/api/dts/framework.parser.rst  |   6 +
 .../framework.remote_session.dpdk_shell.rst   |   6 +
 ...ote_session.interactive_remote_session.rst |   6 +
 ...ework.remote_session.interactive_shell.rst |   6 +
 .../framework.remote_session.python_shell.rst |   6 +
 ...ramework.remote_session.remote_session.rst |   6 +
 doc/api/dts/framework.remote_session.rst  |  18 +
 .../framework.remote_session.ssh_session.rst  |   6 +
 ...framework.remote_session.testpmd_shell.rst |   6 +
 doc/api/dts/framework.runner.rst  |   6 +
 doc/api/dts/framework.settings.rst|   6 +
 doc/api/dts/framework.test_result.rst |   6 +
 doc/api/dts/framework.test_suite.rst  |   6 +
 doc/api/dts/framework.testbed_model.cpu.rst   |   6 +
 .../framework.testbed_model.linux_session.rst |   6 +
 doc/api/dts/framework.testbed_model.node.rst  |   6 +
 .../framework.testbed_model.os_session.rst|   6 +
 doc/api/dts/framework.testbed_model.port.rst  |   6 +
 .../framework.testbed_model.posix_session.rst |   6 +
 doc/api/dts/framework.testbed_model.rst   |  26 +
 .../dts/framework.testbed_model.sut_node.rst  |   6 +
 .../dts/framework.testbed_model.tg_node.rst   |   6 +
 ..._generator.capturing_traffic_generator.rst |   6 +
 ...mework.testbed_model.traffic_generator.rst |  14 +
 testbed_model.traffic_generator.scapy.rst |   6 +
 ...el.traffic_generator.traffic_generato

[PATCH v15 1/5] dts: update params and parser docstrings

2024-08-06 Thread Juraj Linkeš
Address a few errors reported by Sphinx when generating documentation:
framework/params/__init__.py:docstring of framework.params.modify_str:3:
WARNING: Inline interpreted text or phrase reference start-string
without end-string.
framework/params/eal.py:docstring of framework.params.eal.EalParams:35:
WARNING: Definition list ends without a blank line; unexpected
unindent.
framework/params/types.py:docstring of framework.params.types:8:
WARNING: Inline strong start-string without end-string.
framework/params/types.py:docstring of framework.params.types:9:
WARNING: Inline strong start-string without end-string.
framework/parser.py:docstring of framework.parser.TextParser:33: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:43: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser:49: ERROR:
Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:8:
ERROR: Unexpected indentation.
framework/parser.py:docstring of framework.parser.TextParser.wrap:9:
WARNING: Block quote ends without a blank line; unexpected unindent.

Fixes: 87ba4cdc0dbb ("dts: use Unpack for type checking and hinting")
Fixes: d70159cb62f5 ("dts: add params manipulation module")
Fixes: 967fc62b0a43 ("dts: refactor EAL parameters class")
Fixes: 818fe14e3422 ("dts: add parsing utility module")
Cc: luca.vizza...@arm.com

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
---
 dts/framework/params/__init__.py | 4 ++--
 dts/framework/params/eal.py  | 7 +--
 dts/framework/params/types.py| 3 ++-
 dts/framework/parser.py  | 4 ++--
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/dts/framework/params/__init__.py b/dts/framework/params/__init__.py
index 5a6fd93053..1ae227d7b4 100644
--- a/dts/framework/params/__init__.py
+++ b/dts/framework/params/__init__.py
@@ -53,9 +53,9 @@ def reduced_fn(value):
 
 
 def modify_str(*funcs: FnPtr) -> Callable[[T], T]:
-"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
+r"""Class decorator modifying the ``__str__`` method with a function 
created from its arguments.
 
-The :attr:`FnPtr`s fed to the decorator are executed from left to right in 
the arguments list
+The :attr:`FnPtr`\s fed to the decorator are executed from left to right 
in the arguments list
 order.
 
 Args:
diff --git a/dts/framework/params/eal.py b/dts/framework/params/eal.py
index 8d7766fefc..cf1594353a 100644
--- a/dts/framework/params/eal.py
+++ b/dts/framework/params/eal.py
@@ -26,13 +26,16 @@ class EalParams(Params):
 prefix: Set the file prefix string with which to start DPDK, e.g.: 
``prefix="vf"``.
 no_pci: Switch to disable PCI bus, e.g.: ``no_pci=True``.
 vdevs: Virtual devices, e.g.::
+
 vdevs=[
 VirtualDevice('net_ring0'),
 VirtualDevice('net_ring1')
 ]
+
 ports: The list of ports to allow.
-other_eal_param: user defined DPDK EAL parameters, e.g.:
-``other_eal_param='--single-file-segments'``
+other_eal_param: user defined DPDK EAL parameters, e.g.::
+
+``other_eal_param='--single-file-segments'``
 """
 
 lcore_list: LogicalCoreList | None = field(default=None, 
metadata=Params.short("l"))
diff --git a/dts/framework/params/types.py b/dts/framework/params/types.py
index e668f658d8..d77c4625fb 100644
--- a/dts/framework/params/types.py
+++ b/dts/framework/params/types.py
@@ -6,7 +6,8 @@
 TypedDicts can be used in conjunction with Unpack and kwargs for type hinting 
on function calls.
 
 Example:
-..code:: python
+.. code:: python
+
 def create_testpmd(**kwargs: Unpack[TestPmdParamsDict]):
 params = TestPmdParams(**kwargs)
 """
diff --git a/dts/framework/parser.py b/dts/framework/parser.py
index 741dfff821..7254c75b71 100644
--- a/dts/framework/parser.py
+++ b/dts/framework/parser.py
@@ -46,7 +46,7 @@ class TextParser(ABC):
 Example:
 The following example makes use of and demonstrates every parser 
function available:
 
-..code:: python
+.. code:: python
 
 from dataclasses import dataclass, field
 from enum import Enum
@@ -90,7 +90,7 @@ def wrap(parser_fn: ParserFn, wrapper_fn: Callable) -> 
ParserFn:
 """Makes a wrapped parser function.
 
 `parser_fn` is called and if a non-None value is returned, 
`wrapper_function` is called with
-it. Otherwise the function returns early with None. In pseudo-code:
+it. Otherwise the function returns early with None. In pseudo-code::
 
 intermediate_value := parser_fn(input)
 if intermediary_value is None then
-- 
2.34.1



[PATCH v15 2/5] dts: replace the or operator in third party types

2024-08-06 Thread Juraj Linkeš
When the DTS dependencies are not installed while building the DTS API
documentation, the or operator (|) produces errors when used with types from
those libraries:
autodoc: failed to import module 'remote_session' from module
'framework'; the following exception was raised:
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for |: 'Transport' and 'NoneType'

The third-party type here is Transport from the paramiko library.
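
A minimal sketch (not part of the patch) of the failure mode, assuming autodoc's
mock replaces Transport with a callable placeholder object: the PEP 604 "|"
operator is evaluated eagerly on that object and fails, while typing.Union
accepts any callable argument.

    from typing import Union

    class Transport:
        """Stand-in for the mock object autodoc substitutes for paramiko.Transport."""

        def __call__(self, *args, **kwargs):
            return self

    mocked = Transport()  # the module attribute seen at import time

    try:
        mocked | None  # what evaluating "Transport | None" amounts to
    except TypeError as err:
        print(err)  # unsupported operand type(s) for |: 'Transport' and 'NoneType'

    print(Union[mocked, None])  # Union[...] tolerates the callable mock, so the import succeeds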

Signed-off-by: Juraj Linkeš 
---
 dts/framework/remote_session/interactive_remote_session.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/remote_session/interactive_remote_session.py 
b/dts/framework/remote_session/interactive_remote_session.py
index 97194e6af8..4605ee14b4 100644
--- a/dts/framework/remote_session/interactive_remote_session.py
+++ b/dts/framework/remote_session/interactive_remote_session.py
@@ -5,6 +5,7 @@
 
 import socket
 import traceback
+from typing import Union
 
 from paramiko import AutoAddPolicy, SSHClient, Transport  # type: 
ignore[import-untyped]
 from paramiko.ssh_exception import (  # type: ignore[import-untyped]
@@ -52,7 +53,7 @@ class InteractiveRemoteSession:
 session: SSHClient
 _logger: DTSLogger
 _node_config: NodeConfiguration
-_transport: Transport | None
+_transport: Union[Transport, None]
 
 def __init__(self, node_config: NodeConfiguration, logger: DTSLogger) -> 
None:
 """Connect to the node during initialization.
-- 
2.34.1



[PATCH v15 3/5] dts: add doc generation dependencies

2024-08-06 Thread Juraj Linkeš
Sphinx imports every Python module (through the autodoc extension)
when generating documentation from docstrings, meaning all DTS
dependencies, including the Python version, should be satisfied. This is not
a hard requirement, as imports from dependencies may be mocked via the
autodoc_mock_imports autodoc option.
In case DTS developers want to use a Sphinx installation from their
virtualenv, we provide an optional Poetry group for doc generation. The
pyelftools package is there so that meson picks up the correct Python
installation, as pyelftools is required by the build system.

Signed-off-by: Juraj Linkeš 
---
 dts/poetry.lock| 521 +++--
 dts/pyproject.toml |   8 +
 2 files changed, 517 insertions(+), 12 deletions(-)

diff --git a/dts/poetry.lock b/dts/poetry.lock
index 5f8fa03933..2dd8bad498 100644
--- a/dts/poetry.lock
+++ b/dts/poetry.lock
@@ -1,5 +1,16 @@
 # This file is automatically @generated by Poetry 1.8.2 and should not be 
changed by hand.
 
+[[package]]
+name = "alabaster"
+version = "0.7.13"
+description = "A configurable sidebar-enabled Sphinx theme"
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "alabaster-0.7.13-py3-none-any.whl", hash = 
"sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
+{file = "alabaster-0.7.13.tar.gz", hash = 
"sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+]
+
 [[package]]
 name = "attrs"
 version = "23.1.0"
@@ -18,6 +29,23 @@ docs = ["furo", "myst-parser", "sphinx", 
"sphinx-notfound-page", "sphinxcontrib-
 tests = ["attrs[tests-no-zope]", "zope-interface"]
 tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", 
"pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
 
+[[package]]
+name = "babel"
+version = "2.13.1"
+description = "Internationalization utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+{file = "Babel-2.13.1-py3-none-any.whl", hash = 
"sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
+{file = "Babel-2.13.1.tar.gz", hash = 
"sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+]
+
+[package.dependencies]
+setuptools = {version = "*", markers = "python_version >= \"3.12\""}
+
+[package.extras]
+dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
+
 [[package]]
 name = "bcrypt"
 version = "4.0.1"
@@ -86,6 +114,17 @@ d = ["aiohttp (>=3.7.4)"]
 jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
 uvloop = ["uvloop (>=0.15.2)"]
 
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+{file = "certifi-2023.7.22-py3-none-any.whl", hash = 
"sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+{file = "certifi-2023.7.22.tar.gz", hash = 
"sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
 [[package]]
 name = "cffi"
 version = "1.15.1"
@@ -162,6 +201,105 @@ files = [
 [package.dependencies]
 pycparser = "*"
 
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and 
actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+{file = "charset-normalizer-3.3.2.tar.gz", hash = 
"sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", 
hash = 
"sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = 
"sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash 
= "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl",
 hash = 
"sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl",
 hash = 
"sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl",
 hash = 
"sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl",
 hash = 
"sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+{file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl",
 hash = 
"sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+{file = "charset_normalizer-3.3.2-cp310-cp

[PATCH v15 4/5] dts: add API doc sources

2024-08-06 Thread Juraj Linkeš
These sources could be generated with the sphinx-apidoc utility, but
that doesn't give us enough flexibility, such as sorting the order of
modules or changing the headers of the modules.

The sources included in this patch were in fact generated by said
utility, but modified to improve the look of the documentation. The
improvements are mainly in toctree definitions and the titles of the
modules/packages. These were made with specific Sphinx config options in
mind.

Signed-off-by: Juraj Linkeš 
Reviewed-by: Luca Vizzarro 
Reviewed-by: Jeremy Spewock 
Tested-by: Luca Vizzarro 
---
 doc/api/dts/conf_yaml_schema.json |  1 +
 doc/api/dts/framework.config.rst  | 12 ++
 doc/api/dts/framework.config.types.rst|  6 +++
 doc/api/dts/framework.exception.rst   |  6 +++
 doc/api/dts/framework.logger.rst  |  6 +++
 doc/api/dts/framework.params.eal.rst  |  6 +++
 doc/api/dts/framework.params.rst  | 14 ++
 doc/api/dts/framework.params.testpmd.rst  |  6 +++
 doc/api/dts/framework.params.types.rst|  6 +++
 doc/api/dts/framework.parser.rst  |  6 +++
 .../framework.remote_session.dpdk_shell.rst   |  6 +++
 ...ote_session.interactive_remote_session.rst |  6 +++
 ...ework.remote_session.interactive_shell.rst |  6 +++
 .../framework.remote_session.python_shell.rst |  6 +++
 ...ramework.remote_session.remote_session.rst |  6 +++
 doc/api/dts/framework.remote_session.rst  | 18 
 .../framework.remote_session.ssh_session.rst  |  6 +++
 ...framework.remote_session.testpmd_shell.rst |  6 +++
 doc/api/dts/framework.runner.rst  |  6 +++
 doc/api/dts/framework.settings.rst|  6 +++
 doc/api/dts/framework.test_result.rst |  6 +++
 doc/api/dts/framework.test_suite.rst  |  6 +++
 doc/api/dts/framework.testbed_model.cpu.rst   |  6 +++
 .../framework.testbed_model.linux_session.rst |  6 +++
 doc/api/dts/framework.testbed_model.node.rst  |  6 +++
 .../framework.testbed_model.os_session.rst|  6 +++
 doc/api/dts/framework.testbed_model.port.rst  |  6 +++
 .../framework.testbed_model.posix_session.rst |  6 +++
 doc/api/dts/framework.testbed_model.rst   | 26 +++
 .../dts/framework.testbed_model.sut_node.rst  |  6 +++
 .../dts/framework.testbed_model.tg_node.rst   |  6 +++
 ..._generator.capturing_traffic_generator.rst |  6 +++
 ...mework.testbed_model.traffic_generator.rst | 14 ++
 testbed_model.traffic_generator.scapy.rst |  6 +++
 ...el.traffic_generator.traffic_generator.rst |  6 +++
 ...framework.testbed_model.virtual_device.rst |  6 +++
 doc/api/dts/framework.utils.rst   |  6 +++
 doc/api/dts/index.rst | 43 +++
 38 files changed, 314 insertions(+)
 create mode 12 doc/api/dts/conf_yaml_schema.json
 create mode 100644 doc/api/dts/framework.config.rst
 create mode 100644 doc/api/dts/framework.config.types.rst
 create mode 100644 doc/api/dts/framework.exception.rst
 create mode 100644 doc/api/dts/framework.logger.rst
 create mode 100644 doc/api/dts/framework.params.eal.rst
 create mode 100644 doc/api/dts/framework.params.rst
 create mode 100644 doc/api/dts/framework.params.testpmd.rst
 create mode 100644 doc/api/dts/framework.params.types.rst
 create mode 100644 doc/api/dts/framework.parser.rst
 create mode 100644 doc/api/dts/framework.remote_session.dpdk_shell.rst
 create mode 100644 
doc/api/dts/framework.remote_session.interactive_remote_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.interactive_shell.rst
 create mode 100644 doc/api/dts/framework.remote_session.python_shell.rst
 create mode 100644 doc/api/dts/framework.remote_session.remote_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.ssh_session.rst
 create mode 100644 doc/api/dts/framework.remote_session.testpmd_shell.rst
 create mode 100644 doc/api/dts/framework.runner.rst
 create mode 100644 doc/api/dts/framework.settings.rst
 create mode 100644 doc/api/dts/framework.test_result.rst
 create mode 100644 doc/api/dts/framework.test_suite.rst
 create mode 100644 doc/api/dts/framework.testbed_model.cpu.rst
 create mode 100644 doc/api/dts/framework.testbed_model.linux_session.rst
 create mode 100644 doc/api/dts/framework.testbed_model.node.rst
 create mode 100644 doc/api/dts/framework.testbed_model.os_session.rst
 create mode 100644 doc/api/dts/framework.testbed_model.port.rst
 create mode 100644 doc/api/dts/framework.testbed_model.posix_session.rst
 create mode 100644 doc/api/dts/framework.testbed_model.rst
 create mode 100644 doc/api/dts/framework.testbed_model.sut_node.rst
 create mode 100644 doc/api/dts/framework.testbed_model.tg_node.rst
 create mode 100644 
doc/api/dts/framework.testbed_model.traffic_generator.capturing_traffic_generator.rst
 create mode 100644 doc/api/dts/framework.testbed_model.traffic_generator.rst
 create mode 100644 
doc/api/dts/fra

[PATCH v15 5/5] dts: add API doc generation

2024-08-06 Thread Juraj Linkeš
The tool used to generate DTS API docs is Sphinx, which is already in
use in DPDK. The same configuration is used to preserve the style, with one
DTS-specific option (applied so that the DPDK docs are unchanged) that
modifies how the sidebar displays the content.

Sphinx generates the documentation from Python docstrings. The docstring
format is the Google format [0] which requires the sphinx.ext.napoleon
extension. The other extension, sphinx.ext.intersphinx, enables linking
to objects in external documentation, such as the Python documentation.
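
A minimal illustration (not taken from the DTS code) of the Google docstring
style that sphinx.ext.napoleon parses:

    def connect(host: str, retries: int = 3) -> bool:
        """Open a session to the remote host.

        Args:
            host: Hostname or IP address of the node.
            retries: How many times to retry before giving up.

        Returns:
            True if the session was established.

        Raises:
            TimeoutError: If the host did not answer within the retry budget.
        """
        # Hypothetical helper, used purely to illustrate the docstring style.
        return True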

There is one requirement for building DTS docs - the same Python version
as DTS or higher, because Sphinx's autodoc extension imports the code.

The dependencies needed to import the code don't have to be satisfied,
as the autodoc extension allows us to mock the imports. The missing
packages are taken from the DTS pyproject.toml file.

[0] https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings
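
A rough sketch of how the mocking could be wired into the Sphinx configuration;
the actual code lives in doc/guides/conf.py and buildtools/get-dts-deps.py, so
the module name and paths below are assumptions:

    # DTS-specific part of a Sphinx conf.py (sketch only)
    import os
    import sys

    dpdk_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    if os.environ.get('DTS_BUILD') == 'y':
        extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
        # Make the DTS framework and the dependency helper importable.
        sys.path.extend([os.path.join(dpdk_root, 'dts'), os.path.join(dpdk_root, 'buildtools')])
        # Hypothetical import name for buildtools/get-dts-deps.py.
        from get_dts_deps import get_missing_imports
        # Mock whichever DTS runtime/doc dependencies are missing locally.
        autodoc_mock_imports = get_missing_imports()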

Signed-off-by: Juraj Linkeš 
---
 buildtools/call-sphinx-build.py   |  2 +
 buildtools/get-dts-deps.py| 78 +++
 buildtools/meson.build|  1 +
 doc/api/doxy-api-index.md |  3 +
 doc/api/doxy-api.conf.in  |  2 +
 doc/api/dts/custom.css|  1 +
 doc/api/dts/meson.build   | 29 +
 doc/api/meson.build   | 13 
 doc/guides/conf.py| 41 +++-
 doc/guides/contributing/documentation.rst |  2 +
 doc/guides/contributing/patches.rst   |  4 ++
 doc/guides/tools/dts.rst  | 39 +++-
 doc/meson.build   |  1 +
 13 files changed, 214 insertions(+), 2 deletions(-)
 create mode 100755 buildtools/get-dts-deps.py
 create mode 12 doc/api/dts/custom.css
 create mode 100644 doc/api/dts/meson.build

diff --git a/buildtools/call-sphinx-build.py b/buildtools/call-sphinx-build.py
index 623e7363ee..45724ffcd4 100755
--- a/buildtools/call-sphinx-build.py
+++ b/buildtools/call-sphinx-build.py
@@ -15,6 +15,8 @@
 
 # set the version in environment for sphinx to pick up
 os.environ['DPDK_VERSION'] = version
+if src.find('dts') != -1:
+os.environ['DTS_BUILD'] = "y"
 
 sphinx_cmd = [sphinx] + extra_args
 
diff --git a/buildtools/get-dts-deps.py b/buildtools/get-dts-deps.py
new file mode 100755
index 00..309b83cb5c
--- /dev/null
+++ b/buildtools/get-dts-deps.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 PANTHEON.tech s.r.o.
+#
+
+"""Utilities for DTS dependencies.
+
+The module can be used as an executable script,
+which verifies that the running Python version meets the version requirement 
of DTS.
+The script returns the standard exit codes in this mode (0 is success, 1 is 
failure).
+
+The module also contains a function, get_missing_imports,
+which looks for runtime and doc generation dependencies in the DTS 
pyproject.toml file
+and returns a list of module names used in an import statement that are missing.
+"""
+
+import configparser
+import importlib.metadata
+import importlib.util
+import os.path
+import platform
+
+_VERSION_COMPARISON_CHARS = '^<>='
+_EXTRA_DEPS = {'invoke': '>=1.3', 'paramiko': '>=2.4'}
+_DPDK_ROOT = os.path.dirname(os.path.dirname(__file__))
+_DTS_DEP_FILE_PATH = os.path.join(_DPDK_ROOT, 'dts', 'pyproject.toml')
+
+
+def _get_version_tuple(version_str):
+return tuple(map(int, version_str.split(".")))
+
+
+def _get_dependencies(cfg_file_path):
+cfg = configparser.ConfigParser()
+with open(cfg_file_path) as f:
+dts_deps_file_str = f.read()
+dts_deps_file_str = dts_deps_file_str.replace("\n]", "]")
+cfg.read_string(dts_deps_file_str)
+
+deps_section = cfg['tool.poetry.dependencies']
+deps = {dep: deps_section[dep].strip('"\'') for dep in deps_section}
+doc_deps_section = cfg['tool.poetry.group.docs.dependencies']
+doc_deps = {dep: doc_deps_section[dep].strip("\"'") for dep in 
doc_deps_section}
+
+return deps | doc_deps
+
+
+def get_missing_imports():
+missing_imports = []
+req_deps = _get_dependencies(_DTS_DEP_FILE_PATH)
+req_deps.pop('python')
+
+for req_dep, req_ver in (req_deps | _EXTRA_DEPS).items():
+try:
+req_ver = 
_get_version_tuple(req_ver.strip(_VERSION_COMPARISON_CHARS))
+found_dep_ver = 
_get_version_tuple(importlib.metadata.version(req_dep))
+if found_dep_ver < req_ver:
+print(
+f'The version "{found_dep_ver}" of package "{req_dep}" '
+f'is lower than required "{req_ver}".'
+)
+except importlib.metadata.PackageNotFoundError:
+print(f'Package "{req_dep}" not found.')
+missing_imports.append(req_dep.lower().replace('-', '_'))
+
+return missing_imports
+
+
+if __name__ == '__main__':
+python_version = _get_dependencies(_DTS_DE

[PATCH 0/4] improve rte_tm APIs

2024-08-06 Thread Bruce Richardson
This patchset makes some small updates to the traffic manager (TM) APIs
in ethdev.

* For functions for creating profiles, shapers and hierarchy
  nodes, make the parameter structure pointer a pointer to a const
  object. This guarantees to the user that the struct won't be modified
  by the function, which allows the user to re-use the same parameters
  multiple times without having to constantly reinitialize it.
* Add a function to allow the user to query the previously provided
  parameters used to create a TM node. This saves applications from
  having to provide a "shadow" hierarchy in the app mirroring that in
  the library.

Bruce Richardson (4):
  ethdev: make parameters to TM node add fn constant
  ethdev: make parameters to TM profile add fn constant
  ethdev: make TM shaper parameters constant
  ethdev: add traffic manager query function

 drivers/net/cnxk/cnxk_tm.c   |  4 +--
 drivers/net/dpaa2/dpaa2_tm.c |  6 ++--
 drivers/net/hns3/hns3_tm.c   | 22 +++---
 drivers/net/i40e/i40e_tm.c   | 12 
 drivers/net/iavf/iavf_tm.c   | 12 
 drivers/net/ice/ice_dcf_sched.c  | 12 
 drivers/net/ice/ice_tm.c | 12 
 drivers/net/ipn3ke/ipn3ke_tm.c   | 12 
 drivers/net/ixgbe/ixgbe_tm.c | 12 
 drivers/net/mvpp2/mrvl_tm.c  |  2 +-
 drivers/net/txgbe/txgbe_tm.c | 12 
 lib/ethdev/ethdev_trace.h| 16 ++
 lib/ethdev/ethdev_trace_points.c |  3 ++
 lib/ethdev/rte_tm.c  | 31 +--
 lib/ethdev/rte_tm.h  | 52 ++--
 lib/ethdev/rte_tm_driver.h   | 18 +--
 16 files changed, 171 insertions(+), 67 deletions(-)

--
2.43.0



[PATCH 1/4] ethdev: make parameters to TM node add fn constant

2024-08-06 Thread Bruce Richardson
The function to add a new scheduling node in rte_tm should not (and
does not) modify the actual node parameters passed in via struct
pointer. We should guarantee this by marking the parameter pointer as
const. This allows SW to create multiple scheduling nodes using the same
parameter struct without having to reset it each time.

Signed-off-by: Bruce Richardson 
---
 drivers/net/cnxk/cnxk_tm.c  |  2 +-
 drivers/net/dpaa2/dpaa2_tm.c|  4 ++--
 drivers/net/hns3/hns3_tm.c  | 16 
 drivers/net/i40e/i40e_tm.c  |  6 +++---
 drivers/net/iavf/iavf_tm.c  |  6 +++---
 drivers/net/ice/ice_dcf_sched.c |  6 +++---
 drivers/net/ice/ice_tm.c|  6 +++---
 drivers/net/ipn3ke/ipn3ke_tm.c  |  4 ++--
 drivers/net/ixgbe/ixgbe_tm.c|  6 +++---
 drivers/net/txgbe/txgbe_tm.c|  6 +++---
 lib/ethdev/rte_tm.c |  2 +-
 lib/ethdev/rte_tm.h |  2 +-
 lib/ethdev/rte_tm_driver.h  |  2 +-
 13 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index c799193cb8..9293b3e8f2 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -336,7 +336,7 @@ static int
 cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
 uint32_t parent_node_id, uint32_t priority,
 uint32_t weight, uint32_t lvl,
-struct rte_tm_node_params *params,
+const struct rte_tm_node_params *params,
 struct rte_tm_error *error)
 {
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index cb854964b4..22337097e5 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -359,7 +359,7 @@ static int
 dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
__rte_unused uint32_t priority, uint32_t weight,
   uint32_t level_id,
-  struct rte_tm_node_params *params,
+  const struct rte_tm_node_params *params,
   struct rte_tm_error *error)
 {
if (node_id == RTE_TM_NODE_ID_NULL)
@@ -431,7 +431,7 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t 
node_id,
 static int
 dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
  uint32_t parent_node_id, uint32_t priority, uint32_t weight,
- uint32_t level_id, struct rte_tm_node_params *params,
+ uint32_t level_id, const struct rte_tm_node_params *params,
  struct rte_tm_error *error)
 {
struct dpaa2_dev_priv *priv = dev->data->dev_private;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index 92a668538f..06df32bbcd 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -329,7 +329,7 @@ hns3_tm_node_search(struct rte_eth_dev *dev,
 
 static int
 hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
-struct rte_tm_node_params *params,
+const struct rte_tm_node_params *params,
 struct rte_tm_error *error)
 {
struct hns3_tm_shaper_profile *shaper_profile;
@@ -364,7 +364,7 @@ hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
 
 static int
 hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
- struct rte_tm_node_params *params,
+ const struct rte_tm_node_params *params,
  struct rte_tm_error *error)
 
 {
@@ -408,7 +408,7 @@ hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev 
__rte_unused,
 static int
 hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
 uint32_t priority, uint32_t weight,
-struct rte_tm_node_params *params,
+const struct rte_tm_node_params *params,
 struct rte_tm_error *error)
 {
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -457,7 +457,7 @@ hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t 
node_id,
 
 static int
 hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
- uint32_t level_id, struct rte_tm_node_params *params,
+ uint32_t level_id, const struct rte_tm_node_params 
*params,
  struct rte_tm_error *error)
 {
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -503,7 +503,7 @@ hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t 
node_id,
 static int
 hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
uint32_t level_id, struct hns3_tm_node *parent_node,
-   struct rte_tm_node_params *params,
+   const struct rte_tm_node_params *params,
struct rte_tm_error *error)
 {
stru

[PATCH 2/4] ethdev: make parameters to TM profile add fn constant

2024-08-06 Thread Bruce Richardson
The function to add a new profile in rte_tm should not (and does not)
modify the profile parameters passed in via struct pointer. We should
guarantee this by marking the parameter pointer as const.  This allows
SW to create multiple profiles using the same parameter struct without
having to reset it each time.

Signed-off-by: Bruce Richardson 
---
 drivers/net/ipn3ke/ipn3ke_tm.c | 4 ++--
 lib/ethdev/rte_tm.c| 2 +-
 lib/ethdev/rte_tm.h| 2 +-
 lib/ethdev/rte_tm_driver.h | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ipn3ke/ipn3ke_tm.c b/drivers/net/ipn3ke/ipn3ke_tm.c
index cffe1fdaa4..20a0ed0467 100644
--- a/drivers/net/ipn3ke/ipn3ke_tm.c
+++ b/drivers/net/ipn3ke/ipn3ke_tm.c
@@ -848,7 +848,7 @@ ipn3ke_tm_shaper_profile_delete(struct rte_eth_dev *dev,

 static int
 ipn3ke_tm_tdrop_profile_check(__rte_unused struct rte_eth_dev *dev,
-   uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
+   uint32_t tdrop_profile_id, const struct rte_tm_wred_params *profile,
struct rte_tm_error *error)
 {
enum rte_color color;
@@ -931,7 +931,7 @@ ipn3ke_hw_tm_tdrop_wr(struct ipn3ke_hw *hw,
 /* Traffic manager TDROP profile add */
 static int
 ipn3ke_tm_tdrop_profile_add(struct rte_eth_dev *dev,
-   uint32_t tdrop_profile_id, struct rte_tm_wred_params *profile,
+   uint32_t tdrop_profile_id, const struct rte_tm_wred_params *profile,
struct rte_tm_error *error)
 {
struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
diff --git a/lib/ethdev/rte_tm.c b/lib/ethdev/rte_tm.c
index 74e6f4d610..d221b1e553 100644
--- a/lib/ethdev/rte_tm.c
+++ b/lib/ethdev/rte_tm.c
@@ -153,7 +153,7 @@ int rte_tm_node_capabilities_get(uint16_t port_id,
 /* Add WRED profile */
 int rte_tm_wred_profile_add(uint16_t port_id,
uint32_t wred_profile_id,
-   struct rte_tm_wred_params *profile,
+   const struct rte_tm_wred_params *profile,
struct rte_tm_error *error)
 {
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
diff --git a/lib/ethdev/rte_tm.h b/lib/ethdev/rte_tm.h
index c52acd1b4f..f6f3f6a8d4 100644
--- a/lib/ethdev/rte_tm.h
+++ b/lib/ethdev/rte_tm.h
@@ -1347,7 +1347,7 @@ rte_tm_node_capabilities_get(uint16_t port_id,
 int
 rte_tm_wred_profile_add(uint16_t port_id,
uint32_t wred_profile_id,
-   struct rte_tm_wred_params *profile,
+   const struct rte_tm_wred_params *profile,
struct rte_tm_error *error);

 /**
diff --git a/lib/ethdev/rte_tm_driver.h b/lib/ethdev/rte_tm_driver.h
index 25d688516b..b6ecf1bd4d 100644
--- a/lib/ethdev/rte_tm_driver.h
+++ b/lib/ethdev/rte_tm_driver.h
@@ -51,7 +51,7 @@ typedef int (*rte_tm_node_capabilities_get_t)(struct 
rte_eth_dev *dev,
 /** @internal Traffic manager WRED profile add */
 typedef int (*rte_tm_wred_profile_add_t)(struct rte_eth_dev *dev,
uint32_t wred_profile_id,
-   struct rte_tm_wred_params *profile,
+   const struct rte_tm_wred_params *profile,
struct rte_tm_error *error);

 /** @internal Traffic manager WRED profile delete */
--
2.43.0



[PATCH 3/4] ethdev: make TM shaper parameters constant

2024-08-06 Thread Bruce Richardson
The function to add a new shaper profile in rte_tm should not (and does
not) modify the profile parameters passed in via struct pointer. We
should guarantee this by marking the parameter pointer as const. This
allows SW to create multiple profiles using the same parameter
struct without having to reset it each time.

Signed-off-by: Bruce Richardson 
---
 drivers/net/cnxk/cnxk_tm.c  | 2 +-
 drivers/net/dpaa2/dpaa2_tm.c| 2 +-
 drivers/net/hns3/hns3_tm.c  | 6 +++---
 drivers/net/i40e/i40e_tm.c  | 6 +++---
 drivers/net/iavf/iavf_tm.c  | 6 +++---
 drivers/net/ice/ice_dcf_sched.c | 6 +++---
 drivers/net/ice/ice_tm.c| 6 +++---
 drivers/net/ipn3ke/ipn3ke_tm.c  | 4 ++--
 drivers/net/ixgbe/ixgbe_tm.c| 6 +++---
 drivers/net/mvpp2/mrvl_tm.c | 2 +-
 drivers/net/txgbe/txgbe_tm.c| 6 +++---
 lib/ethdev/rte_tm.c | 2 +-
 lib/ethdev/rte_tm.h | 2 +-
 lib/ethdev/rte_tm_driver.h  | 2 +-
 14 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index 9293b3e8f2..0ed6732dda 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -267,7 +267,7 @@ cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, 
uint32_t node_id,

 static int
 cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
-  struct rte_tm_shaper_params *params,
+  const struct rte_tm_shaper_params *params,
   struct rte_tm_error *error)
 {
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index 22337097e5..115397ce47 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -268,7 +268,7 @@ dpaa2_shaper_profile_from_id(struct dpaa2_dev_priv *priv,

 static int
 dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
-struct rte_tm_shaper_params *params,
+const struct rte_tm_shaper_params *params,
struct rte_tm_error *error)
 {
struct dpaa2_dev_priv *priv = dev->data->dev_private;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index 06df32bbcd..1c2ad71133 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -166,7 +166,7 @@ hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,

 static int
 hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
-  struct rte_tm_shaper_params *profile,
+  const struct rte_tm_shaper_params *profile,
   struct rte_tm_error *error)
 {
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -220,7 +220,7 @@ hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
 static int
 hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
   uint32_t shaper_profile_id,
-  struct rte_tm_shaper_params *profile,
+  const struct rte_tm_shaper_params *profile,
   struct rte_tm_error *error)
 {
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -1198,7 +1198,7 @@ hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev,
 static int
 hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev,
uint32_t shaper_profile_id,
-   struct rte_tm_shaper_params *profile,
+   const struct rte_tm_shaper_params *profile,
struct rte_tm_error *error)
 {
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index c7d4680fb4..4c0940f355 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -12,7 +12,7 @@ static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_error *error);
 static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
   uint32_t shaper_profile_id,
-  struct rte_tm_shaper_params *profile,
+  const struct rte_tm_shaper_params *profile,
   struct rte_tm_error *error);
 static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
   uint32_t shaper_profile_id,
@@ -217,7 +217,7 @@ i40e_shaper_profile_search(struct rte_eth_dev *dev,
 }

 static int
-i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+i40e_shaper_profile_param_check(const struct rte_tm_shaper_params *profile,
struct rte_tm_error *error)
 {
/* min rate not supported */
@@ -251,7 +251,7 @@ i40e_shaper_profile_param_check(struct rte_tm_shaper_params 
*profile,

[PATCH 4/4] ethdev: add traffic manager query function

2024-08-06 Thread Bruce Richardson
Add a function to allow querying a node in the scheduler tree. It returns
the parameters as they were given to the add function. Adding this function
allows apps to simply query the hierarchy rather than having to maintain
their own copies of it internally.

Signed-off-by: Bruce Richardson 
---
 lib/ethdev/ethdev_trace.h| 16 +++
 lib/ethdev/ethdev_trace_points.c |  3 ++
 lib/ethdev/rte_tm.c  | 25 +
 lib/ethdev/rte_tm.h  | 48 
 lib/ethdev/rte_tm_driver.h   | 12 
 5 files changed, 104 insertions(+)

diff --git a/lib/ethdev/ethdev_trace.h b/lib/ethdev/ethdev_trace.h
index 3bec87bfdb..dee2819531 100644
--- a/lib/ethdev/ethdev_trace.h
+++ b/lib/ethdev/ethdev_trace.h
@@ -1903,6 +1903,22 @@ RTE_TRACE_POINT(
rte_trace_point_emit_int(ret);
 )
 
+RTE_TRACE_POINT(
+   rte_tm_trace_node_query,
+   RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t node_id,
+   uint32_t *parent_node_id, uint32_t *priority,
+   uint32_t *weight, uint32_t *level_id,
+   struct rte_tm_node_params *params, int ret),
+   rte_trace_point_emit_u16(port_id);
+   rte_trace_point_emit_u32(node_id);
+   rte_trace_point_emit_ptr(parent_node_id);
+   rte_trace_point_emit_ptr(priority);
+   rte_trace_point_emit_ptr(weight);
+   rte_trace_point_emit_ptr(level_id);
+   rte_trace_point_emit_ptr(params);
+   rte_trace_point_emit_int(ret);
+)
+
 RTE_TRACE_POINT(
rte_tm_trace_node_delete,
RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t node_id, int ret),
diff --git a/lib/ethdev/ethdev_trace_points.c b/lib/ethdev/ethdev_trace_points.c
index 99e04f5893..f5ed7ca637 100644
--- a/lib/ethdev/ethdev_trace_points.c
+++ b/lib/ethdev/ethdev_trace_points.c
@@ -694,6 +694,9 @@ RTE_TRACE_POINT_REGISTER(rte_tm_trace_mark_vlan_dei,
 RTE_TRACE_POINT_REGISTER(rte_tm_trace_node_add,
lib.ethdev.tm.node_add)
 
+RTE_TRACE_POINT_REGISTER(rte_tm_trace_node_query,
+   lib.ethdev.tm.node_query)
+
 RTE_TRACE_POINT_REGISTER(rte_tm_trace_node_capabilities_get,
lib.ethdev.tm.node_capabilities_get)
 
diff --git a/lib/ethdev/rte_tm.c b/lib/ethdev/rte_tm.c
index 3eb98e618a..8000b66af9 100644
--- a/lib/ethdev/rte_tm.c
+++ b/lib/ethdev/rte_tm.c
@@ -301,6 +301,31 @@ int rte_tm_node_add(uint16_t port_id,
return ret;
 }
 
+int rte_tm_node_query(uint16_t port_id,
+   uint32_t node_id,
+   uint32_t *parent_node_id,
+   uint32_t *priority,
+   uint32_t *weight,
+   uint32_t *level_id,
+   struct rte_tm_node_params *params,
+   struct rte_tm_error *error)
+{
+   struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+   int ret;
+
+   if (dev == NULL)
+   return -EINVAL;
+
+   ret = RTE_TM_FUNC(port_id, node_query)(dev,
+   node_id, parent_node_id, priority, weight, level_id,
+   params, error);
+
+   rte_tm_trace_node_query(port_id, node_id, parent_node_id, priority,
+ weight, level_id, params, ret);
+
+   return ret;
+}
+
 /* Delete node from traffic manager hierarchy */
 int rte_tm_node_delete(uint16_t port_id,
uint32_t node_id,
diff --git a/lib/ethdev/rte_tm.h b/lib/ethdev/rte_tm.h
index e5da9b8323..419e491043 100644
--- a/lib/ethdev/rte_tm.h
+++ b/lib/ethdev/rte_tm.h
@@ -1599,6 +1599,54 @@ rte_tm_node_add(uint16_t port_id,
const struct rte_tm_node_params *params,
struct rte_tm_error *error);
 
+/**
+ * Return information about a traffic management node
+ *
+ * Return information about a hierarchy node, using the same format of 
parameters
+ * as was passed to the rte_tm_node_add() function.
+ * Each of the "out" parameter pointers (except error) may be passed as NULL 
if the
+ * information is not needed by the caller. For example, one may check if a 
node id
+ * is in use by:
+ *
+ *  struct rte_tm_error error;
+ *  int ret = rte_tm_node_query(port, node_id, NULL, NULL, NULL, NULL, NULL, 
&error);
+ *  if (ret == ENOENT) ...
+ *
+ * @param[in] port_id
+ *   The port identifier of the Ethernet device.
+ * @param[in] node_id
+ *   Node ID. Should be a valid node id.
+ * @param[out] parent_node_id
+ *   Parent node ID.
+ * @param[out] priority
+ *   Node priority. The highest node priority is zero. Used by the SP algorithm
+ *   running on the parent of the current node for scheduling this child node.
+ * @param[out] weight
+ *   Node weight. The node weight is relative to the weight sum of all siblings
+ *   that have the same priority. The lowest weight is one. Used by the WFQ
+ *   algorithm running on the parent of the current node for scheduling this
+ *   child node.
+ * @param[out] level_id
+ *   The node level in the scheduler hierarchy.
+ * @param[out] params
+ *   Node parameters, as would be used when creating the node.
+ * @param[out] error
+ *   Error details. Filled in only on error, when not NULL.
+ * @return
+ *   0 on success,

[PATCH v2] dts: add flow rule dataclass to testpmd shell

2024-08-06 Thread Dean Marx
Add a dataclass for passing in flow rule creation arguments, as well as a
__str__ method for converting it to a sendable testpmd command. Add a
flow_create method to the TestPmdShell class for initializing flow rules.
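
A hypothetical usage sketch (not part of the patch) of the new dataclass together
with flow_create(); the pattern and actions strings are illustrative only:

    # Assumes an already opened TestPmdShell instance named `testpmd`.
    flow = flow_func(
        port_id=0,
        ingress=True,
        pattern="eth / ipv4 src is 192.168.0.1",
        actions="queue index 2",
    )
    # str(flow) renders roughly:
    # flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1  / end actions queue index 2 / end
    testpmd.flow_create(flow)  # raises InteractiveCommandExecutionError if testpmd rejects the rule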

Signed-off-by: Dean Marx 
---
 dts/framework/remote_session/testpmd_shell.py | 58 ++-
 1 file changed, 57 insertions(+), 1 deletion(-)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 43e9f56517..59b2bb914b 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -19,7 +19,7 @@
 from dataclasses import dataclass, field
 from enum import Flag, auto
 from pathlib import PurePath
-from typing import ClassVar
+from typing import ClassVar, Optional
 
 from typing_extensions import Self, Unpack
 
@@ -577,6 +577,43 @@ class TestPmdPortStats(TextParser):
 tx_bps: int = field(metadata=TextParser.find_int(r"Tx-bps:\s+(\d+)"))
 
 
+@dataclass
+class flow_func:
+"""Dataclass for setting flow rule parameters."""
+
+#:
+port_id: int
+#:
+ingress: bool
+#:
+pattern: str
+#:
+actions: str
+
+#:
+group_id: Optional[int] = None
+#:
+priority_level: Optional[int] = None
+#:
+user_id: Optional[int] = None
+
+def __str__(self) -> str:
+"""Returns the string representation of a flow_func instance.
+
+In this case, a properly formatted flow create command that can be 
sent to testpmd.
+"""
+ret = []
+ret.append(f"flow create {self.port_id} ")
+ret.append(f"group {self.group_id} " if self.group_id is not None else 
"")
+ret.append(f"priority {self.priority_level} " if self.priority_level 
is not None else "")
+ret.append("ingress " if self.ingress else "egress ")
+ret.append(f"user_id {self.user_id} " if self.user_id is not None else 
"")
+ret.append(f"pattern {self.pattern} ")
+ret.append(" / end actions ")
+ret.append(f"{self.actions} / end")
+return "".join(ret)
+
+
 class TestPmdShell(DPDKShell):
 """Testpmd interactive shell.
 
@@ -806,6 +843,25 @@ def show_port_stats(self, port_id: int) -> 
TestPmdPortStats:
 
 return TestPmdPortStats.parse(output)
 
+def flow_create(self, cmd: flow_func, verify: bool = True) -> None:
+"""Creates a flow rule in the testpmd session.
+
+Args:
+cmd: String from flow_func instance to send as a flow rule.
+verify: If :data:`True`, the output of the command is scanned
+to ensure the flow rule was created successfully.
+
+Raises:
+InteractiveCommandExecutionError: If flow rule is invalid.
+"""
+flow_output = self.send_command(str(cmd))
+if verify:
+if "created" not in flow_output:
+self._logger.debug(f"Failed to create flow 
rule:\n{flow_output}")
+raise InteractiveCommandExecutionError(
+f"Failed to create flow rule:\n{flow_output}"
+)
+
 def _close(self) -> None:
 """Overrides :meth:`~.interactive_shell.close`."""
 self.stop()
-- 
2.44.0



[RFC PATCH v1 0/3] dts: pf_smoke port

2024-08-06 Thread jspewock
From: Jeremy Spewock 

This series ports the functionality of the pf_smoke test suite from old
DTS to the new framework. It is listed as an RFC mainly due to the fact
that it uses different verification steps than most other test suites
by utilizing checksums to differentiate packets sent by the framework
and ones that are just noise on the wire. It should be noted however
that this will not work as expected on mlx5 due to the following
bugzilla ticket:

https://bugs.dpdk.org/show_bug.cgi?id=1514

Jeremy Spewock (3):
  dts: add ability to modify number of queues on a port to testpmd
  dts: add pf smoke testing suite
  dts: added pf_smoke_tests to yaml schema

 dts/framework/config/conf_yaml_schema.json|   3 +-
 dts/framework/remote_session/testpmd_shell.py |  36 +
 dts/tests/TestSuite_pf_smoke_tests.py | 129 ++
 3 files changed, 167 insertions(+), 1 deletion(-)
 create mode 100644 dts/tests/TestSuite_pf_smoke_tests.py

-- 
2.45.2



[RFC PATCH v1 1/3] dts: add ability to modify number of queues on a port to testpmd

2024-08-06 Thread jspewock
From: Jeremy Spewock 

The ability to change the configuration of a port at runtime is a
crucial aspect of DPDK. This patch adds both the steps required to
modify the number of queues on a port at runtime and the
verification steps to ensure that the command behaved as expected.
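
A hypothetical usage sketch (not from the patch); the shell arguments and queue
counts are illustrative only:

    # Assumes a TestPmdShell started against the SUT node.
    with TestPmdShell(self.sut_node, rx_queues=2, tx_queues=2) as testpmd:
        # Reconfigure every port to 4 Rx and 4 Tx queues at runtime; the helper
        # stops all ports, applies the change and starts them again.
        testpmd.set_num_queues_all(4, is_rx=True)
        testpmd.set_num_queues_all(4, is_rx=False)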

Depends-on: patch-142762 ("dts: add text parser for testpmd verbose
 output")
Depends-on: patch-142696 ("dts: add VLAN methods to testpmd shell")

Signed-off-by: Jeremy Spewock 
---
 dts/framework/remote_session/testpmd_shell.py | 36 +++
 1 file changed, 36 insertions(+)

diff --git a/dts/framework/remote_session/testpmd_shell.py 
b/dts/framework/remote_session/testpmd_shell.py
index 6bde7f536f..6eb6360bf7 100644
--- a/dts/framework/remote_session/testpmd_shell.py
+++ b/dts/framework/remote_session/testpmd_shell.py
@@ -1191,6 +1191,42 @@ def set_verbose(self, level: int, verify: bool = True) 
-> None:
 f"Testpmd failed to set verbose level to {level}."
 )
 
+def set_num_queues_all(self, num_queues: int, is_rx: bool, verify: bool = 
True) -> None:
+"""Modify the number of Rx/Tx queues configured on all ports.
+
+Args:
+num_queues: Number of queues to set on all ports.
+is_rx: If :data:`True` then the number of Rx queues will be 
modified, otherwise the
+number of Tx queues will be modified.
+verify: If :data:`True` then an additional command will be sent to 
check the info of
+`port_id` and verify that the number of queues is equal to 
`num_queues`.
+
+Raises:
+InteractiveCommandExecutionError: If `verify` is :data:`True` and 
testpmd failed to
+update the number of queues on the ports.
+"""
+queue_type = "rxq" if is_rx else "txq"
+self.port_stop_all(verify=verify)
+port_config_output = self.send_command(f"port config all {queue_type} 
{num_queues}")
+# ports have to be started before the output can be verified.
+self.port_start_all(verify=verify)
+if verify:
+all_ports_modified = all(
+queues == num_queues
+for queues in map(
+lambda info: info.rx_queues_num if is_rx else 
info.tx_queues_num,
+self.show_port_info_all(),
+)
+)
+if not all_ports_modified:
+self._logger.debug(
+f"Failed to set number of queues on all ports to "
+f"{num_queues}:\n{port_config_output}"
+)
+raise InteractiveCommandExecutionError(
+"Testpmd failed to update the number of queues on all 
ports."
+)
+
 def _close(self) -> None:
 """Overrides :meth:`~.interactive_shell.close`."""
 self.stop()
-- 
2.45.2



[RFC PATCH v1 2/3] dts: add pf smoke testing suite

2024-08-06 Thread jspewock
From: Jeremy Spewock 

This patch adds a smoke testing suite for Physical Function features.
The goal of this suite is to test some of the most basic features of
DPDK on a physical function and bail out early if any of these features
aren't supported as expected. Unlike the DTS smoke tests, these are not
included as a switch in the config file and thus form an additional test
suite that developers can include alongside others at their own
discretion.

Depends-on: patch-142691 ("dts: add send_packets to test suites and
 rework packet addressing")

Signed-off-by: Jeremy Spewock 
---
 dts/tests/TestSuite_pf_smoke_tests.py | 129 ++
 1 file changed, 129 insertions(+)
 create mode 100644 dts/tests/TestSuite_pf_smoke_tests.py

diff --git a/dts/tests/TestSuite_pf_smoke_tests.py 
b/dts/tests/TestSuite_pf_smoke_tests.py
new file mode 100644
index 00..82c84c7c8d
--- /dev/null
+++ b/dts/tests/TestSuite_pf_smoke_tests.py
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 University of New Hampshire
+"""Physical Function (PF) smoke testing suite.
+
+This test suite tests some of the more common DPDK functionality on a PF. 
Things such as
+jumbroframes, Receive Side Scaling (RSS) functions, and being able to modify 
the number of queues
+at runtime should all be supported by PMDs that are capable of running DPDK. 
Since this is a smoke
+testing suite, it is considered a blocking suite that will stop following ones 
from running.
+"""
+
+from typing import ClassVar
+
+from scapy.layers.inet import IP  # type: ignore[import-untyped]
+from scapy.layers.l2 import Ether  # type: ignore[import-untyped]
+from scapy.packet import Raw  # type: ignore[import-untyped]
+
+from framework.exception import InteractiveCommandExecutionError, 
TestCaseVerifyError
+from framework.params.testpmd import SimpleForwardingModes
+from framework.remote_session.testpmd_shell import TestPmdShell, VerboseOLFlag
+from framework.test_suite import TestSuite
+
+
+class TestPfSmokeTests(TestSuite):
+"""DPDK Physical Function Testing Suite.
+
+This test suite is designed to verify the basic functions of DPDK on a PF. 
The MTU of the ports
+on the traffic generator are increased to 9000 to support jumboframes for 
one of the test
+cases, and then reverted back to 1500 once the test suite is complete. 
Some functionality in
+this test suite also relies on the ability of testpmd to recognize and 
flag invalid checksum
+values in its verbose output.
+
+Attributes:
+is_blocking: This test suite will block the execution of all other 
test suites
+in the build target after it.
+"""
+
+is_blocking: ClassVar[bool] = True
+jumbo_frame_len: ClassVar[int] = 9000
+num_queues: int = 4
+rx_port: int = 0
+
+def set_up_suite(self) -> None:
+"""Increase the MTU of the traffic generator to support jumboframes."""
+for port_link in self._port_links:
+self.tg_node.main_session.configure_port_mtu(self.jumbo_frame_len, 
port_link.tg_port)
+
+def test_jumbo_frame_support(self) -> None:
+"""Verify that the PF is able to send and receive jumboframes."""
+with TestPmdShell(
+self.sut_node,
+max_pkt_len=self.jumbo_frame_len,
+mbuf_size=[self.jumbo_frame_len + 128],
+forward_mode=SimpleForwardingModes.mac,
+) as testpmd:
+testpmd.start()
+# Take 26 bytes off the MTU size to account for Ethernet headers
+payload_len = self.jumbo_frame_len - 26
+packet = Ether() / Raw("X" * payload_len)
+recv = self.send_packet_and_capture(packet)
+self.verify(
+any(hasattr(p, "load") and "X" * 20 in str(p.load) for p in 
recv),
+f"Jumboframe was not received even when MTU was set to 
{self.jumbo_frame_len}.",
+)
+
+def test_rss_functionality(self) -> None:
+"""Test that Receive Side Scaling functions are working as intended.
+
+The primary things to test in this case are that packets that are sent 
with different
+destination IP addresses are handled by different queues and that the 
RSS hash of every
+packet is unique. Verification of these functionalities is done by 
sending packets with
+invalid checksums so that the packets sent by this test suite can be 
differentiated from
+other packets sent to the same port. This makes the assumption that 
other packets sent to
+the port will all have valid checksums.
+"""
+with TestPmdShell(
+self.sut_node,
+forward_mode=SimpleForwardingModes.rxonly,
+rx_queues=self.num_queues,
+tx_queues=self.num_queues,
+) as testpmd:
+testpmd.set_verbose(1)
+send_pkts = [
+Ether() / IP(dst=f"192.168.0.{i+1}", chksum=0x0) for i in 
range(self.num_queues * 4)

[RFC PATCH v1 3/3] dts: added pf_smoke_tests to yaml schema

2024-08-06 Thread jspewock
From: Jeremy Spewock 

Add the PF smoke testing suite to the yaml schema so that it can be
specified in conf.yaml.

Signed-off-by: Jeremy Spewock 
---
 dts/framework/config/conf_yaml_schema.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json
index f02a310bb5..910134f9e4 100644
--- a/dts/framework/config/conf_yaml_schema.json
+++ b/dts/framework/config/conf_yaml_schema.json
@@ -187,7 +187,8 @@
   "enum": [
 "hello_world",
 "os_udp",
-"pmd_buffer_scatter"
+"pmd_buffer_scatter",
+"pf_smoke_tests"
   ]
 },
 "test_target": {
-- 
2.45.2
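
For context, once the schema accepts the new name, a conf.yaml fragment opting into the suite
could look like the sketch below. This is illustrative only and not part of the patch: the
surrounding execution keys (build targets, SUT/TG nodes, ports, and so on) are omitted, and the
exact layout should be checked against conf_yaml_schema.json.

    # illustrative conf.yaml fragment -- not part of this patch
    executions:
      - test_suites:
          - hello_world
          - pf_smoke_tests   # newly allowed by the schema change above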



RE: [v1 2/3] meson: add a meson option to install examples source

2024-08-06 Thread Gagandeep Singh
Hi,

> -Original Message-
> From: Bruce Richardson 
> Sent: Tuesday, August 6, 2024 7:32 PM
> To: Gagandeep Singh 
> Cc: dev@dpdk.org
> Subject: Re: [v1 2/3] meson: add a meson option to install examples source
> 
> On Tue, Aug 06, 2024 at 07:12:17PM +0530, Gagandeep Singh wrote:
> > Adding a meson option "enable_examples_source_install"
> > to enable or disable installation of examples source code.
> >
> > Default value is true.
> >
> > Signed-off-by: Gagandeep Singh 
> > ---
> >  meson.build   | 7 ---
> >  meson_options.txt | 2 ++
> >  2 files changed, 6 insertions(+), 3 deletions(-)
> >
> Is installing sample code for DPDK a problem that we need to disable it? I
> was expecting that such filtering out of unwanted files could be done via
> packaging rather than us having to add lots of DPDK build options to control
> these things.
> 
I understand your point that packaging could handle filtering out unwanted 
files.
However, adding a build option to disable DPDK examples source code 
installation provides
more control and flexibility for users who may have specific requirements or 
constraints.

While packaging can filter out files, this option lets users avoid installing the examples
source code altogether, which can be beneficial for:

- Users with limited disk space or network bandwidth
- Users who only need the core DPDK libraries and binaries and don't want the 
examples source code.

By default, the option is set to true, so the behavior remains unchanged.
This option is only for those who need more control over their build process.
I believe this optional flag adds flexibility without imposing unnecessary 
files on users and is a minor
and useful extension to the build system.

> /Bruce
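
For readers who have not seen the patch itself, the kind of change under discussion could look
roughly like the sketch below. The option name follows the commit message; the install path and
the guarded install call are placeholders rather than the submitted code.

    # meson_options.txt
    option('enable_examples_source_install', type: 'boolean', value: true,
           description: 'Install the examples source code')

    # meson.build -- guard the examples source install behind the new option
    if get_option('enable_examples_source_install')
        install_subdir('examples', install_dir: join_paths(get_option('datadir'), 'dpdk'))
    endif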


RE: [v1 1/3] meson: add a meson option to install examples

2024-08-06 Thread Gagandeep Singh
Hi,

> -Original Message-
> From: Bruce Richardson 
> Sent: Tuesday, August 6, 2024 7:28 PM
> To: Gagandeep Singh 
> Cc: dev@dpdk.org
> Subject: Re: [v1 1/3] meson: add a meson option to install examples
> 
> On Tue, Aug 06, 2024 at 07:12:16PM +0530, Gagandeep Singh wrote:
> > Adding a meson option "enable_examples_bin_install"
> > to install the examples binaries in bin.
> >
> > Default value is false.
> >
> > Signed-off-by: Gagandeep Singh 
> > ---
> >  examples/meson.build | 13 -
> >  meson_options.txt|  2 ++
> >  2 files changed, 14 insertions(+), 1 deletion(-)
> >
> Is there a particular reason we might want to do this? Installing sample code
> binaries in bin seems rather strange to me.
> 
Currently, only the app binaries are installed in bin, not the example binaries, and I could not
find a particular reason for this.
The main reason to install the examples is to provide a convenient way for users to explore
DPDK's examples without having to manually scan each example build directory and copy the
binaries into their package.
The default behavior remains unchanged.

> /Bruce
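
For reference, the kind of change under discussion could look roughly like the sketch below. The
option name follows the commit message; the target name, sources, and dependency object are
placeholders rather than the submitted code.

    # meson_options.txt
    option('enable_examples_bin_install', type: 'boolean', value: false,
           description: 'Install the example binaries alongside the DPDK apps')

    # examples/meson.build -- per-example executable target
    # example_name, example_sources and dpdk_dep_placeholder are placeholders
    executable('dpdk-' + example_name, example_sources,
               dependencies: dpdk_dep_placeholder,
               install: get_option('enable_examples_bin_install'))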