Re: [RFC] eventdev: add atomic queue to test-eventdev app

2024-12-16 Thread Luka Jankovic
Thank you for the feedback. I will re-implement the test by not checking 
port-flow-queue combination and generally clean-up the code based on your 
comments.

On Tue, 2024-12-10 at 11:37 +0100, Mattias Rönnblom wrote:
> 
> > +{
> > +   struct rte_event_dev_info dev_info;
> > +
> > +   rte_event_dev_info_get(dev_id, &dev_info);
> > +   return (dev_info.event_dev_cap & 
> > RTE_EVENT_DEV_CAP_MAINTENANCE_FREE) ?
> > +   true : false;
> 
> return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
> 
> will work fine.
> 
> 
I decided against it in order to maintain consistent styling with similar 
functions in the file.

> 
> > +static int
> > +worker_wrapper(void *arg)
> 
> Delete "wrapper".

All other eventdev-tests name their equivalent functions "worker_wrapper", so I 
picked it to be consistent with the other tests.

> 
> > +
> > +   /* setup one port per worker, linking to all queues */
> > +   ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
> 
> "order"?

This function is declared in test_order_common.h and is used in all tests. It 
is not specific for "ordered" ports, so I thought it was OK to use.


> > +
> > +static void
> > +atomic_queue_opt_dump(struct evt_options *opt)
> > +{
> > +   order_opt_dump(opt);
> 
> "order"?

Same thing here.



[RFC v2 1/1] eventdev: add atomic queue to test-eventdev app

2024-12-19 Thread Luka Jankovic
From 2e55ecd0e522f50cbb3635f53b025e165db7cf3e Mon Sep 17 00:00:00 2001
In-Reply-To: <228d44a6f2f1f6a4fb5519d9a91c99973f8d7352.ca...@ericsson.com>
References: <228d44a6f2f1f6a4fb5519d9a91c99973f8d7352.ca...@ericsson.com>
From: Luka Jankovic 
Date: Thu, 19 Dec 2024 13:31:26 +
Subject: [RFC v2 1/1] eventdev: add atomic queue to test-eventdev app
To: luka.janko...@ericsson.com
Cc: dev@dpdk.org,
mattias.ronnb...@ericsson.com

Add an atomic queue test based on the order queue test but use exclusively 
atomic queues.
This makes it compatible with event devices such as the distributed software 
eventdev.

The test detects whether port maintenance is required.

To verify atomicity, a spinlock is set up for each combination of queue and 
flow.
It is taken whenever an event is dequeued for processing and released when 
processing is finished.
The test will fail if a port attempts to take a lock which is already taken.

Signed-off-by: Luka Jankovic 
---
v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.
---
 app/test-eventdev/evt_common.h|  10 +
 app/test-eventdev/meson.build |   1 +
 app/test-eventdev/test_atomic_queue.c | 372 ++
 3 files changed, 383 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 901b8ba55d..adb024c011 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,16 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }
 
+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE) ?
+   true : false;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index ab8769c755..db5add39eb 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..51e988c527
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,372 @@
+#include 
+#include 
+#include 
+
+#include "test_order_common.h"
+
+#define NB_QUEUES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline uint64_t
+event_data_create(flow_id_t flow, uint32_t seq)
+{
+   return ((uint64_t)flow << 32) | seq;
+}
+
+static inline uint32_t
+event_data_get_seq(struct rte_event *const ev)
+{
+   return ev->u64 & 0x;
+}
+
+static inline uint32_t
+event_data_get_flow(struct rte_event *const ev)
+{
+   return ev->u64 >> 32;
+}
+
+static inline uint32_t
+get_lock_idx(int queue, flow_id_t flow, uint32_t nb_flows)
+{
+   return (queue * nb_flows) + flow;
+}
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n", __func__, 
rte_lcore_id(),
+   dev_id, port, p->queue_id);
+
+   ev = (struct rte_event){
+   .event = 0,
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ORDERED,
+   .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+   .event_type = RTE_EVENT_TYPE_CPU,
+   .sub_event_type = 0 /* stage 0 */
+   };
+
+   while (count < nb_pkts && t->err == false) {
+   const flow_id_t flow = rte_rand_max(nb_flows);
+
+   /* Maintain seq number per flow */
+   ev.u64 = event_data_create(flow, producer_flow_seq[flow]++);
+   ev.flow_id = flow;
+

Re: [RFC] eventdev: add atomic queue to test-eventdev app

2024-12-19 Thread Luka Jankovic
On Tue, 2024-12-10 at 11:37 +0100, Mattias Rönnblom wrote:
>
> > +{
> > +   struct rte_event_dev_info dev_info;
> > +
> > +   rte_event_dev_info_get(dev_id, &dev_info);
> > +   return (dev_info.event_dev_cap & 
> > RTE_EVENT_DEV_CAP_MAINTENANCE_FREE) ?
> > +   true : false;
>
> return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
>
> will work fine.
>
>
I decided against it in order to maintain consistent styling with similar 
functions in the file.

>
> > +static int
> > +worker_wrapper(void *arg)
>
> Delete "wrapper".

All other eventdev-tests name their equivalent functions "worker_wrapper", so I 
picked it to be consistent with the other tests.

>
> > +
> > +   /* setup one port per worker, linking to all queues */
> > +   ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
>
> "order"?

This function is declared in test_order_common.h and is used in all tests. It 
is not specific for "ordered" ports, so I thought it was OK to use.


> > +
> > +static void
> > +atomic_queue_opt_dump(struct evt_options *opt)
> > +{
> > +   order_opt_dump(opt);
>
> "order"?

Same thing here.

Thank you for the feedback. I will re-implement the test by not checking 
port-flow-queue combination and generally clean-up the code based on your 
comments.



[RFC] eventdev: add atomic queue to test-eventdev app

2024-12-05 Thread Luka Jankovic
From 753273ab9af49e16d7f7b577d6263e3db51257d7 Mon Sep 17 00:00:00 2001
From: Luka Jankovic 
Date: Thu, 5 Dec 2024 13:05:35 +
Subject: [RFC] eventdev: add atomic queue to test-eventdev app

Add an atomic queue test based on the order queue test but use exclusively 
atomic queues.
This makes it compatible with event devices such as the distributed software 
eventdev.

The test detects whether port maintenance is required.

To verify atomicity, a spinlock is set up for each combination of port, queue, 
and flow.
It is taken whenever an event enters a new flow and released when all events 
from a flow are processed.
The test will fail if a port attempts to take the lock for a given flow which 
is already taken by another port.
In the end, it is verified that an equal number of locks and unlocks occurred,
and that all events have been processed.

Signed-off-by: Luka Jankovic 
---
 app/test-eventdev/evt_common.h|  10 +
 app/test-eventdev/meson.build |   1 +
 app/test-eventdev/test_atomic_queue.c | 569 ++
 app/test-eventdev/test_order_common.h |   1 +
 4 files changed, 581 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 901b8ba55d..f0036fb620 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,16 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }
 
+static inline bool
+evt_has_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE) ?
+   true : false;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index ab8769c755..db5add39eb 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..02aec95d59
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,569 @@
+#include 
+#include 
+#include 
+
+#include "test_order_common.h"
+
+#define NB_QUEUES 2
+
+rte_spinlock_t *atomic_locks;
+
+struct port_stat_counters {
+   uint32_t num_locked[NB_QUEUES];
+   uint32_t num_unlocked[NB_QUEUES];
+   uint64_t *num_pkts;
+};
+
+static RTE_LCORE_VAR_HANDLE(struct port_stat_counters, port_counters);
+
+static inline int
+get_num_pkts_index(int queue, uint32_t flow, uint32_t nb_flows)
+{
+   return (queue * nb_flows) + flow;
+}
+
+static inline uint32_t
+get_lock_idx(int queue, uint32_t nb_ports, uint32_t nb_flows, uint32_t port, 
flow_id_t flow)
+{
+   return (queue * nb_ports * nb_flows) + (port * nb_flows) + flow;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   struct rte_mempool *pool = t->pool;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_mbuf *m;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n", __func__, 
rte_lcore_id(),
+   dev_id, port, p->queue_id);
+
+   ev.event = 0;
+   ev.op = RTE_EVENT_OP_NEW;
+   ev.queue_id = p->queue_id;
+   ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+   ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+   ev.event_type = RTE_EVENT_TYPE_CPU;
+   ev.sub_event_type = 0; /* stage 0 */
+
+   while (count < nb_pkts && t->err == false) {
+   m = rte_pktmbuf_alloc(pool);
+   if (m == NULL)
+   continue;
+
+   const flow_id_t flow = (uintptr_t)m % nb_flows;
+   /* Maintain seq number per flow */
+   *order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
+   order_flow_id_save(t, flow, m, &ev);
+
+   while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+   if (t->err)
+   break;
+   rte_pause();
+   }
+
+   count++;
+   }
+
+   if (p->mai

Re: [EXTERNAL] [RFC v5 1/2] eventdev: add atomic queue to test-eventdev app

2025-01-22 Thread Luka Jankovic
On Wed, 2025-01-22 at 10:20 +, Pavan Nikhilesh Bhagavatula wrote:
> > Add an atomic queue test based on the order queue test that exclusively uses
> > atomic queues.
> > This makes it compatible with event devices such as the distributed software
> > eventdev.
> >
> > The test detects if port maintenance is required.
> >
> > To verify atomicity, a spinlock is set up for each combination of queue and
> > flow.
> > It is taken whenever an event is dequeued for processing and released when
> > processing is finished.
> > The test will fail if a port attempts to take a lock which is already taken.
> >
> > Signed-off-by: Luka Jankovic 
> > ---
> > v5:
> >  * Updated documentation for dpdk-test-eventdev
> > v4:
> >  * Fix code style issues.
> >  * Remove unused imports.
> > v3:
> >  * Use struct to avoid bit operations when accessing event u64.
> >  * Changed __rte_always_inline to inline for processing stages.
> >  * Introduce idle timeout constant.
> >  * Formatting and cleanup.
> > v2:
> >  * Changed to only check queue, flow combination, not port, queue, flow.
> >  * Lock is only held when a packet is processed.
> >  * Utilize event u64 instead of mbuf.
>
> Hi Luka,
>
> This test fails on Marvell CNXK platform because HW assumes that value of
> event.u64
> will be 8byte aligned and upper bits as per[1], for optimizations purposes.
> Could you go back to using mbuf similar to ordered_atq/queue
>
> Thanks,
> Pavan.

Thanks for bringing it up, I wasn't aware. I will revert back to the mbuf
implementation.

>
> >  * General cleanup.
>
> [1]
> https://docs.kernel.org/arch/arm64/memory.html



[RFC PATCH v6 1/2] eventdev: add atomic queue to test-eventdev app

2025-01-24 Thread Luka Jankovic
Add an atomic queue test to the test-eventdev app, which is based on the
order queue test that exclusively uses atomic queues.

This makes it compatible with event devices such as the
distributed software eventdev.

The test detects if port maintenance is required.

To verify atomicity, a spinlock is used for each combination of queue and flow.
It is acquired whenever an event is dequeued for processing and
released when processing is finished.

The test will fail if a port attempts to acquire a lock which is already held.

Signed-off-by: Luka Jankovic 
---
v6:
 * Revert the use of event.u64 to mbufs as the Marvell CNXK platform assumes
   event.u64 to be 8-byte aligned, which causes the test to fail.
 * Clarified deadlock error message.
v5:
 * Updated documentation for dpdk-test-eventdev
v4:
 * Fix code style issues.
 * Remove unused imports.
v3:
 * Use struct to avoid bit operations when accessing event u64.
 * Changed __rte_always_inline to inline for processing stages.
 * Introduce idle timeout constant.
 * Formatting and cleanup.
v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.
---
 app/test-eventdev/evt_common.h|   9 +
 app/test-eventdev/meson.build |   1 +
 app/test-eventdev/test_atomic_queue.c | 390 ++
 app/test-eventdev/test_order_common.h |   6 +
 4 files changed, 406 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 63b782f11a..74f9d187f3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,15 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }

+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index ab8769c755..db5add39eb 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..d923df23cd
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,390 @@
+#include 
+#include 
+
+#include "test_order_common.h"
+
+#define IDLE_TIMEOUT 1
+#define NB_QUEUES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline uint32_t
+get_lock_idx(int queue, flow_id_t flow, uint32_t nb_flows)
+{
+   return (queue * nb_flows) + flow;
+}
+
+static inline bool
+atomic_spinlock_trylock(uint32_t queue, uint32_t flow, uint32_t nb_flows)
+{
+   return rte_spinlock_trylock(&atomic_locks[get_lock_idx(queue, flow, 
nb_flows)]);
+}
+
+static inline void
+atomic_spinlock_unlock(uint32_t queue, uint32_t flow, uint32_t nb_flows)
+{
+   rte_spinlock_unlock(&atomic_locks[get_lock_idx(queue, flow, nb_flows)]);
+}
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   struct rte_mempool *pool = t->pool;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_mbuf *m;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+   __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+   ev = (struct rte_event) {
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ATOMIC,
+   .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+   .event_type = RTE_EVENT_TYPE_CPU
+   };
+
+   while (count < nb_pkts && t->err == false) {
+   m = rte_pktmbuf_alloc(pool);
+   if (m == NULL)
+   continue;
+
+   /* Maintain seq number per flow */
+
+   const flow_id_t flow = rte_rand_m

[RFC PATCH v6 2/2] eventdev: documentation for atomic queue test

2025-01-24 Thread Luka Jankovic
Add relevant documentation to the tools/testeventdev page.

Signed-off-by: Luka Jankovic 
---
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |   92 +
 2 files changed, 1793 insertions(+)
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

diff --git a/doc/guides/tools/img/eventdev_atomic_queue_test.svg 
b/doc/guides/tools/img/eventdev_atomic_queue_test.svg
new file mode 100644
index 00..38d9a555df
--- /dev/null
+++ b/doc/guides/tools/img/eventdev_atomic_queue_test.svg
@@ -0,0 +1,1701 @@
+
+
+
+
+
+http://www.inkscape.org/namespaces/inkscape";
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd";
+   xmlns:xlink="http://www.w3.org/1999/xlink";
+   xmlns="http://www.w3.org/2000/svg";
+   xmlns:svg="http://www.w3.org/2000/svg";
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#";
+   xmlns:cc="http://creativecommons.org/ns#";
+   xmlns:dc="http://purl.org/dc/elements/1.1/";>
+  
+
+
+
+  
+
+
+  
+
+
+
+
+
+
+
+  
+
+
+
+
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+  
+
+
+  
+  
+
+
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+  
+
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+  
+  
+  
+
+  
+image/svg+xml
+http://purl.org/dc/dcmitype/StillImage"; />
+
+  
+
+  
+  
+
+test: 
atomic_queue
+ 
+
+
+
+
+
+
+
+
+
+
+
+
+
+  producer_flow_seq
+  
+producer maintains per 
flow sequence number
+
+
+flow 0
+
+flow 1
+flow 2
+
+flow n
+
+  
+
+producer0
+atomic queue 0
+atomic queue 1
+worker 0
+port n+1
+worker 1
+worker 2
+worker n
+port 0
+port 1
+port 2
+port n
+expected_flow_seq
+per flow expected sequence 
number
+
+
+flow 0
+
+flow 1
+flow 2
+
+flow n
+
+
+
+
+
+enqueue(step 1)
+produce atomic flows(step 
0)
+
+dequeue, lock(step 
2)
+
+
+
+
+
+
+
+
+
+update queue id, enqueue, 
unlock(step 3)
+
+dequeue, lock(step 
4)
+
+
+unlock(step 5)
+
+  
+
diff --git a/doc/guides/tools/testeventdev.rst 
b/doc/guides/tools/testeventdev.rst
index aaa0e5f24c..2f47c0aae8 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -55,6 +55,7 @@ The following are the application command-line options:
 
  order_queue
  order_atq
+ atomic_queue
  perf_queue
  perf_atq
  pipeline_atq
@@ -326,6 +327,97 @@ Example command to run order queue test:
sudo /app/dpdk-test-eventdev -c 0x1f -s 0x10 --vdev=event_sw0 -- 
\
 --test=order_queue --plcores 1 --wlcores 2,3
 
+ORDER_ATOMIC Test
+~
+
+This functional test is similar to the ORDER_QUEUE test, but differs in two
+critical ways:
+
+#. Both queues (q0 and q1) are atomic. This makes it compatible with the
+   distributed software event device (dsw).
+#. Atomicity is verified using spinlocks for each combination of flow id and
+   queue id.
+
+.. _table_eventdev_atomic_queue_test:
+
+.. table:: Atomic queue test eventdev configuration.
+
+   +---+--++---+
+   | # | Items| Value  | Comments  |
+   |   |  ||   |
+   +===+==++===+
+   | 1 | nb_queues| 2  | q0(atomic), q1(atomic)|
+   |   |  ||   |
+   +---+-

[RFC v4 1/1] eventdev: add atomic queue to test-eventdev app

2025-01-13 Thread Luka Jankovic
Add an atomic queue test based on the order queue test that exclusively uses 
atomic queues.
This makes it compatible with event devices such as the distributed software 
eventdev.

The test detects if port maintenance is required.

To verify atomicity, a spinlock is set up for each combination of queue and 
flow.
It is taken whenever an event is dequeued for processing and released when 
processing is finished.
The test will fail if a port attempts to take a lock which is already taken.

Signed-off-by: Luka Jankovic 
---
v4:
 * Fix code style issues.
 * Remove unused imports.
v3:
 * Use struct to avoid bit operations when accessing event u64.
 * Changed __rte_always_inline to inline for processing stages.
 * Introduce idle timeout constant.
 * Formatting and cleanup.

v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.
---
 app/test-eventdev/evt_common.h|   9 +
 app/test-eventdev/meson.build |   1 +
 app/test-eventdev/test_atomic_queue.c | 412 ++
 3 files changed, 422 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 63b782f11a..74f9d187f3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,15 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }

+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index ab8769c755..db5add39eb 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..4059a28a43
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,412 @@
+#include 
+#include 
+
+#include "test_order_common.h"
+
+#define IDLE_TIMEOUT 1
+#define NB_QUEUES 2
+
+static rte_spinlock_t *atomic_locks;
+
+struct event_data {
+   union {
+   struct {
+   uint32_t flow;
+   uint32_t seq;
+   };
+   uint64_t raw;
+   };
+};
+
+static inline uint64_t
+event_data_create(flow_id_t flow, uint32_t seq)
+{
+   struct event_data data = {.flow = flow, .seq = seq};
+   return data.raw;
+}
+
+static inline uint32_t
+event_data_get_seq(struct rte_event *const ev)
+{
+   struct event_data data = {.raw = ev->u64};
+   return data.seq;
+}
+
+static inline uint32_t
+event_data_get_flow(struct rte_event *const ev)
+{
+   struct event_data data = {.raw = ev->u64};
+   return data.flow;
+}
+
+static inline uint32_t
+get_lock_idx(int queue, flow_id_t flow, uint32_t nb_flows)
+{
+   return (queue * nb_flows) + flow;
+}
+
+static inline bool
+atomic_spinlock_trylock(uint32_t queue, uint32_t flow, uint32_t nb_flows)
+{
+   return rte_spinlock_trylock(&atomic_locks[get_lock_idx(queue, flow, 
nb_flows)]);
+}
+
+static inline void
+atomic_spinlock_unlock(uint32_t queue, uint32_t flow, uint32_t nb_flows)
+{
+   rte_spinlock_unlock(&atomic_locks[get_lock_idx(queue, flow, nb_flows)]);
+}
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+   __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+   ev = (struct rte_event) {
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ATOMIC,
+   .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+   .event_t

[RFC v5 1/2] eventdev: add atomic queue to test-eventdev app

2025-01-15 Thread Luka Jankovic
Add an atomic queue test based on the order queue test that exclusively uses 
atomic queues.
This makes it compatible with event devices such as the distributed software 
eventdev.

The test detects if port maintenance is required.

To verify atomicity, a spinlock is set up for each combination of queue and 
flow.
It is taken whenever an event is dequeued for processing and released when 
processing is finished.
The test will fail if a port attempts to take a lock which is already taken.

Signed-off-by: Luka Jankovic 
---
v5:
 * Updated documentation for dpdk-test-eventdev
v4:
 * Fix code style issues.
 * Remove unused imports.
v3:
 * Use struct to avoid bit operations when accessing event u64.
 * Changed __rte_always_inline to inline for processing stages.
 * Introduce idle timeout constant.
 * Formatting and cleanup.
v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.
---
 app/test-eventdev/evt_common.h|   9 +
 app/test-eventdev/meson.build |   1 +
 app/test-eventdev/test_atomic_queue.c | 412 ++
 3 files changed, 422 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 63b782f11a..74f9d187f3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,15 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }
 
+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index ab8769c755..db5add39eb 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..4059a28a43
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,412 @@
+#include 
+#include 
+
+#include "test_order_common.h"
+
+#define IDLE_TIMEOUT 1
+#define NB_QUEUES 2
+
+static rte_spinlock_t *atomic_locks;
+
+struct event_data {
+   union {
+   struct {
+   uint32_t flow;
+   uint32_t seq;
+   };
+   uint64_t raw;
+   };
+};
+
+static inline uint64_t
+event_data_create(flow_id_t flow, uint32_t seq)
+{
+   struct event_data data = {.flow = flow, .seq = seq};
+   return data.raw;
+}
+
+static inline uint32_t
+event_data_get_seq(struct rte_event *const ev)
+{
+   struct event_data data = {.raw = ev->u64};
+   return data.seq;
+}
+
+static inline uint32_t
+event_data_get_flow(struct rte_event *const ev)
+{
+   struct event_data data = {.raw = ev->u64};
+   return data.flow;
+}
+
+static inline uint32_t
+get_lock_idx(int queue, flow_id_t flow, uint32_t nb_flows)
+{
+   return (queue * nb_flows) + flow;
+}
+
+static inline bool
+atomic_spinlock_trylock(uint32_t queue, uint32_t flow, uint32_t nb_flows)
+{
+   return rte_spinlock_trylock(&atomic_locks[get_lock_idx(queue, flow, 
nb_flows)]);
+}
+
+static inline void
+atomic_spinlock_unlock(uint32_t queue, uint32_t flow, uint32_t nb_flows)
+{
+   rte_spinlock_unlock(&atomic_locks[get_lock_idx(queue, flow, nb_flows)]);
+}
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+   __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+   ev = (struct rte_event) {
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ATOMIC,
+   .priority = RTE_EVENT_DEV_PRIORITY_NOR

[RFC v5 2/2] eventdev: documentation for atomic queue test

2025-01-15 Thread Luka Jankovic
Signed-off-by: Luka Jankovic 
---
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |   92 +
 2 files changed, 1793 insertions(+)
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

diff --git a/doc/guides/tools/img/eventdev_atomic_queue_test.svg 
b/doc/guides/tools/img/eventdev_atomic_queue_test.svg
new file mode 100644
index 00..38d9a555df
--- /dev/null
+++ b/doc/guides/tools/img/eventdev_atomic_queue_test.svg
@@ -0,0 +1,1701 @@
+
+
+
+
+
+http://www.inkscape.org/namespaces/inkscape";
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd";
+   xmlns:xlink="http://www.w3.org/1999/xlink";
+   xmlns="http://www.w3.org/2000/svg";
+   xmlns:svg="http://www.w3.org/2000/svg";
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#";
+   xmlns:cc="http://creativecommons.org/ns#";
+   xmlns:dc="http://purl.org/dc/elements/1.1/";>
+  
+
+
+
+  
+
+
+  
+
+
+
+
+
+
+
+  
+
+
+
+
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+  
+
+
+  
+  
+
+
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+  
+
+
+
+  
+
+
+  
+
+
+  
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+
+
+
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+
+  
+
+
+  
+  
+  
+
+  
+image/svg+xml
+http://purl.org/dc/dcmitype/StillImage"; />
+
+  
+
+  
+  
+
+test: 
atomic_queue
+ 
+
+
+
+
+
+
+
+
+
+
+
+
+
+  producer_flow_seq
+  
+producer maintains per 
flow sequence number
+
+
+flow 0
+
+flow 1
+flow 2
+
+flow n
+
+  
+
+producer0
+atomic queue 0
+atomic queue 1
+worker 0
+port n+1
+worker 1
+worker 2
+worker n
+port 0
+port 1
+port 2
+port n
+expected_flow_seq
+per flow expected sequence 
number
+
+
+flow 0
+
+flow 1
+flow 2
+
+flow n
+
+
+
+
+
+enqueue(step 1)
+produce atomic flows(step 
0)
+
+dequeue, lock(step 
2)
+
+
+
+
+
+
+
+
+
+update queue id, enqueue, 
unlock(step 3)
+
+dequeue, lock(step 
4)
+
+
+unlock(step 5)
+
+  
+
diff --git a/doc/guides/tools/testeventdev.rst 
b/doc/guides/tools/testeventdev.rst
index aaa0e5f24c..6f9268e320 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -55,6 +55,7 @@ The following are the application command-line options:
 
  order_queue
  order_atq
+ order_atomic
  perf_queue
  perf_atq
  pipeline_atq
@@ -326,6 +327,97 @@ Example command to run order queue test:
sudo /app/dpdk-test-eventdev -c 0x1f -s 0x10 --vdev=event_sw0 -- 
\
 --test=order_queue --plcores 1 --wlcores 2,3
 
+ORDER_ATOMIC Test
+~
+
+This is a functional test similar to the ORDER_QUEUE test, but it differs in 
two
+critical ways:
+
+#. Both queues (q0 and q1) are atomic. This makes it compatible with the
+   distributed software event device (dsw).
+#. Atomicity is verified using spinlocks for each combination of flow id and
+   queue id.
+
+.. _table_eventdev_atomic_queue_test:
+
+.. table:: Atomic queue test eventdev configuration.
+
+   +---+--++---+
+   | # | Items| Value  | Comments  |
+   |   |  ||   |
+   +===+==++===+
+   | 1 | nb_queues| 2  | q0(atomic), q1(atomic)|
+   |   |  ||   |
+   +---+-

Re: [RFC v2 1/1] eventdev: add atomic queue to test-eventdev app

2025-01-09 Thread Luka Jankovic


On Mon, 2024-12-23 at 12:16 +0100, Mattias Rönnblom wrote:
> 
> > +static __rte_always_inline void
> 
> Why is this __rte_always_inline?
> 

The stage functions are based on the ones defined in test_order_queue.c and the
test_order_common.h respectively, where they were defined with
__rte_always_inline. I tried using normal inline and it works, so I will change
it.

Thank you for the feedback. I will revise and send a new version.
> 



[PATCH] eventdev: fix dereferencing null atomic locks pointer in test-eventdev

2025-03-26 Thread Luka Jankovic
Update atomic_init_locks to immediately return if memory allocation
fails. Atomic queue and atq tests updated to handle atomic locks being
null.

Coverity issue: 457876
Fixes: 9d619f82321b ("app/eventdev: introduce atomic tests")

Signed-off-by: Luka Jankovic 
---
 app/test-eventdev/test_atomic_atq.c| 2 ++
 app/test-eventdev/test_atomic_common.h | 4 +++-
 app/test-eventdev/test_atomic_queue.c  | 2 ++
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/app/test-eventdev/test_atomic_atq.c 
b/app/test-eventdev/test_atomic_atq.c
index 4810d2eaae..73e2a53ae4 100644
--- a/app/test-eventdev/test_atomic_atq.c
+++ b/app/test-eventdev/test_atomic_atq.c
@@ -175,6 +175,8 @@ atomic_atq_eventdev_setup(struct evt_test *test, struct 
evt_options *opt)
}

atomic_locks = atomic_init_locks(NB_STAGES, opt->nb_flows);
+   if (atomic_locks == NULL)
+   return -1;

return 0;
 }
diff --git a/app/test-eventdev/test_atomic_common.h 
b/app/test-eventdev/test_atomic_common.h
index a3cec4791d..0ee81328a4 100644
--- a/app/test-eventdev/test_atomic_common.h
+++ b/app/test-eventdev/test_atomic_common.h
@@ -66,8 +66,10 @@ atomic_init_locks(uint32_t nb_stages, uint32_t nb_flows)

rte_spinlock_t *atomic_locks = rte_calloc(NULL, num_locks, 
sizeof(rte_spinlock_t), 0);

-   if (atomic_locks == NULL)
+   if (atomic_locks == NULL) {
evt_err("Unable to allocate memory for spinlocks.");
+   return NULL;
+   }

for (uint32_t i = 0; i < num_locks; i++)
rte_spinlock_init(&atomic_locks[i]);
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
index c1a447bbac..8ce0849664 100644
--- a/app/test-eventdev/test_atomic_queue.c
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -189,6 +189,8 @@ atomic_queue_eventdev_setup(struct evt_test *test, struct 
evt_options *opt)
}

atomic_locks = atomic_init_locks(NB_STAGES, opt->nb_flows);
+   if (atomic_locks == NULL)
+   return -1;

return 0;
 }
--
2.34.1



[RFC v7 1/4] eventdev: atomic common for test-eventdev app

2025-02-19 Thread Luka Jankovic
Introduce the common changes required to run the atomic tests.

- Producer port maintenance
- Common spinlock implementation

Signed-off-by: Luka Jankovic 
---
 app/test-eventdev/evt_common.h |   9 ++
 app/test-eventdev/meson.build  |   1 +
 app/test-eventdev/test_atomic_common.c | 134 +
 app/test-eventdev/test_atomic_common.h |  85 
 4 files changed, 229 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_common.c
 create mode 100644 app/test-eventdev/test_atomic_common.h

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 63b782f11a..74f9d187f3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,15 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }
 
+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index ab8769c755..78af30cb10 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_common.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_common.c 
b/app/test-eventdev/test_atomic_common.c
new file mode 100644
index 00..79b7b77101
--- /dev/null
+++ b/app/test-eventdev/test_atomic_common.c
@@ -0,0 +1,134 @@
+#include "test_atomic_common.h"
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   struct rte_mempool *pool = t->pool;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_mbuf *m;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+   __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+   ev = (struct rte_event) {
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ATOMIC,
+   .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+   .event_type = RTE_EVENT_TYPE_CPU,
+   .sub_event_type = 0
+   };
+
+   while (count < nb_pkts && t->err == false) {
+   m = rte_pktmbuf_alloc(pool);
+   if (m == NULL)
+   continue;
+
+   /* Maintain seq number per flow */
+
+   const flow_id_t flow = rte_rand_max(nb_flows);
+
+   *order_mbuf_flow_id(t, m) = flow;
+   *order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
+
+   ev.flow_id = flow;
+   ev.mbuf = m;
+
+   while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+   if (t->err)
+   break;
+   rte_pause();
+   }
+
+   count++;
+   }
+
+   if (!evt_is_maintenance_free(dev_id)) {
+   while (!test_done(t)) {
+   rte_event_maintain(dev_id, port, 
RTE_EVENT_DEV_MAINT_OP_FLUSH);
+   rte_pause();
+   }
+   }
+
+   return 0;
+}
+
+int
+atomic_launch_lcores(struct evt_test *test, struct evt_options *opt,
+   int (*worker)(void *))
+{
+   int ret, lcore_id;
+   struct test_order *t = evt_test_priv(test);
+
+   /* launch workers */
+
+   int wkr_idx = 0;
+   RTE_LCORE_FOREACH_WORKER(lcore_id) {
+   if (!(opt->wlcores[lcore_id]))
+   continue;
+
+   ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], 
lcore_id);
+   if (ret) {
+   evt_err("failed to launch worker %d", lcore_id);
+   return ret;
+   }
+   wkr_idx++;
+   }
+
+   /* launch producer */
+   int plcore = evt_get_first_active_lcore(opt->plcores);
+
+   ret = rte_eal_remote_launch(atomic_producer, &t->prod, plcore);
+  

[RFC v7 2/4] eventdev: add atomic queue test to test-eventdev app

2025-02-19 Thread Luka Jankovic
Add an atomic queue test to the test-eventdev app, which is based on the
order queue test that exclusively uses atomic queues.

Signed-off-by: Luka Jankovic 
---
 app/test-eventdev/meson.build |   1 +
 app/test-eventdev/test_atomic_queue.c | 230 ++
 2 files changed, 231 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c

diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index 78af30cb10..c5d3974bb4 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -16,6 +16,7 @@ sources = files(
 'test_order_common.c',
 'test_order_queue.c',
 'test_atomic_common.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..46f9ca98fa
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,230 @@
+#include 
+#include 
+
+#include "test_atomic_common.h"
+
+#define NB_QUEUES 2
+#define NB_STAGES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline void
+atomic_queue_process_stage_0(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 0, flow, nb_flows, t, port);
+
+   ev->queue_id = 1;
+   ev->op = RTE_EVENT_OP_FORWARD;
+   ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev->event_type = RTE_EVENT_TYPE_CPU;
+
+   atomic_spinlock_unlock(atomic_locks, 0, flow, nb_flows);
+}
+
+static inline void
+atomic_queue_process_stage_1(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   rte_spinlock_t *atomic_locks,
+   uint32_t *const expected_flow_seq,
+   RTE_ATOMIC(uint64_t) *const outstand_pkts,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+   const uint32_t seq = *order_mbuf_seqn(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 1, flow, nb_flows, t, port);
+
+   /* compare the seqn against expected value */
+   if (seq != expected_flow_seq[flow]) {
+   evt_err("flow=%x seqn mismatch got=%x expected=%x", flow, seq,
+   expected_flow_seq[flow]);
+   t->err = true;
+   }
+
+   expected_flow_seq[flow]++;
+   rte_pktmbuf_free(ev->mbuf);
+
+   rte_atomic_fetch_sub_explicit(outstand_pkts, 1, 
rte_memory_order_relaxed);
+
+   ev->op = RTE_EVENT_OP_RELEASE;
+
+   atomic_spinlock_unlock(atomic_locks, 1, flow, nb_flows);
+}
+
+static int
+atomic_queue_worker_burst(void *arg, bool flow_id_cap, uint32_t max_burst)
+{
+   ORDER_WORKER_INIT;
+   struct rte_event ev[BURST_SIZE];
+   uint16_t i;
+
+   while (t->err == false) {
+
+   uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, 
ev, max_burst, 0);
+
+   if (nb_rx == 0) {
+   if (rte_atomic_load_explicit(outstand_pkts, 
rte_memory_order_relaxed) <= 0)
+   break;
+   rte_pause();
+   continue;
+   }
+
+   for (i = 0; i < nb_rx; i++) {
+   if (!flow_id_cap)
+   order_flow_id_copy_from_mbuf(t, &ev[i]);
+
+   switch (ev[i].queue_id) {
+   case 0:
+   atomic_queue_process_stage_0(t, &ev[i], 
nb_flows, port);
+   break;
+   case 1:
+   atomic_queue_process_stage_1(t, &ev[i], 
nb_flows, atomic_locks, expected_flow_seq,
+   outstand_pkts, port);
+   break;
+   default:
+   order_process_stage_invalid(t, &ev[i]);
+   break;
+   }
+   }
+
+   uint16_t total_enq = 0;
+
+   do {
+   total_enq += rte_event_enqueue_burst(
+   dev_id, port, ev + total_enq, nb_rx - 
total_enq);
+   } while (total_enq < nb_rx);
+   }
+
+   return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+   struct worker_data *w = arg;
+   int max_burst = evt_has_burst_mode(w->dev_id) ? BURST_SIZE : 1;
+   const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+   return atomic_queue_worker_burst(arg, flow_id_cap, max_burst);
+}
+
+static int
+atomic_

[RFC v7 0/4] eventdev: atomic tests to test-eventdev app

2025-02-19 Thread Luka Jankovic
Add atomic tests to the test-eventdev app which correspond to the order
tests but use exclusively atomic queues.

The test detects if port maintenance is required, and so they are compatible
with event devices such as the distributed software eventdev.

To verify atomicity, a spinlock is used for each combination of stage and flow.
It is acquired whenever an event is dequeued for processing and released when
processing is finished.

The tests will fail if a port attempts to acquire a lock which is already held.

Luka Jankovic (4):
  eventdev: atomic common for test-eventdev app
  eventdev: add atomic queue test to test-eventdev app
  eventdev: add atomic atq to test-eventdev app
  eventdev: documentation for atomic queue and atomic atq tests

 app/test-eventdev/evt_common.h|9 +
 app/test-eventdev/meson.build |3 +
 app/test-eventdev/test_atomic_atq.c   |  216 +++
 app/test-eventdev/test_atomic_common.c|  134 ++
 app/test-eventdev/test_atomic_common.h|   85 +
 app/test-eventdev/test_atomic_queue.c |  230 +++
 .../tools/img/eventdev_atomic_atq_test.svg| 1588 +++
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |  155 ++
 9 files changed, 4121 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_atq.c
 create mode 100644 app/test-eventdev/test_atomic_common.c
 create mode 100644 app/test-eventdev/test_atomic_common.h
 create mode 100644 app/test-eventdev/test_atomic_queue.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_atq_test.svg
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

---
v7:
 * Refactor common atomic test functionality into separate common file.
 * Implement atomic atq

v6:
 * Revert the use of event.u64 to mbufs as the Marvell CNXK platform assumes
   event.u64 to be 8-byte aligned, which causes the test to fail.
 * Clarified deadlock error message.

v5:
 * Updated documentation for dpdk-test-eventdev

v4:
 * Fix code style issues.
 * Remove unused imports.

v3:
 * Use struct to avoid bit operations when accessing event u64.
 * Changed __rte_always_inline to inline for processing stages.
 * Introduce idle timeout constant.
 * Formatting and cleanup.

v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.

---
2.34.1



[RFC v7 3/4] eventdev: add atomic atq to test-eventdev app

2025-02-19 Thread Luka Jankovic
Add an atomic atq test to the test-eventdev app. The test works
in the same way as atomic queue, the difference being that only one
queue capable of all types for both stages.

Signed-off-by: Luka Jankovic 
---
 app/test-eventdev/meson.build   |   1 +
 app/test-eventdev/test_atomic_atq.c | 216 
 2 files changed, 217 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_atq.c

diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index c5d3974bb4..b7f0929583 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_atq.c',
 'test_atomic_common.c',
 'test_atomic_queue.c',
 'test_perf_atq.c',
diff --git a/app/test-eventdev/test_atomic_atq.c 
b/app/test-eventdev/test_atomic_atq.c
new file mode 100644
index 00..ff9723ca7b
--- /dev/null
+++ b/app/test-eventdev/test_atomic_atq.c
@@ -0,0 +1,216 @@
+#include 
+#include 
+
+#include "test_atomic_common.h"
+
+#define NB_QUEUES 1
+#define NB_STAGES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline void
+atomic_atq_process_stage_0(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 0, flow, nb_flows, t, port);
+
+   ev->sub_event_type = 1;
+   ev->op = RTE_EVENT_OP_FORWARD;
+   ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev->event_type = RTE_EVENT_TYPE_CPU;
+
+   atomic_spinlock_unlock(atomic_locks, 0, flow, nb_flows);
+}
+
+static inline void
+atomic_atq_process_stage_1(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t *const expected_flow_seq,
+   RTE_ATOMIC(uint64_t) *const outstand_pkts,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+   const uint32_t seq = *order_mbuf_seqn(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 1, flow, nb_flows, t, port);
+
+   /* compare the seqn against expected value */
+   if (seq != expected_flow_seq[flow]) {
+   evt_err("flow=%x seqn mismatch got=%x expected=%x", flow, seq,
+   expected_flow_seq[flow]);
+   t->err = true;
+   }
+
+   expected_flow_seq[flow]++;
+   rte_pktmbuf_free(ev->mbuf);
+
+   rte_atomic_fetch_sub_explicit(outstand_pkts, 1, 
rte_memory_order_relaxed);
+
+   ev->op = RTE_EVENT_OP_RELEASE;
+
+   atomic_spinlock_unlock(atomic_locks, 1, flow, nb_flows);
+}
+
+static int
+atomic_atq_worker_burst(void *arg, bool flow_id_cap, uint32_t max_burst)
+{
+   ORDER_WORKER_INIT;
+   struct rte_event ev[BURST_SIZE];
+   uint16_t i;
+
+   while (t->err == false) {
+
+   uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, 
ev, max_burst, 0);
+
+   if (nb_rx == 0) {
+   if (rte_atomic_load_explicit(outstand_pkts, 
rte_memory_order_relaxed) <= 0)
+   break;
+   rte_pause();
+   continue;
+   }
+
+   for (i = 0; i < nb_rx; i++) {
+   if (!flow_id_cap)
+   order_flow_id_copy_from_mbuf(t, &ev[i]);
+
+   switch (ev[i].sub_event_type) {
+   case 0:
+   atomic_atq_process_stage_0(t, &ev[i], nb_flows, 
port);
+   break;
+   case 1:
+   atomic_atq_process_stage_1(t, &ev[i], nb_flows, 
expected_flow_seq,
+   outstand_pkts, port);
+   break;
+   default:
+   order_process_stage_invalid(t, &ev[i]);
+   break;
+   }
+   }
+
+   uint16_t total_enq = 0;
+
+   do {
+   total_enq += rte_event_enqueue_burst(
+   dev_id, port, ev + total_enq, nb_rx - 
total_enq);
+   } while (total_enq < nb_rx);
+   }
+
+   return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+   struct worker_data *w = arg;
+   int max_burst = evt_has_burst_mode(w->dev_id) ? BURST_SIZE : 1;
+   const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+   return atomic_atq_worker_burst(arg, flow_id_cap, max_burst);
+}
+
+static int
+atomic_atq_launch_lcores(struct evt_tes

Re: [PATCH v9 0/3] eventdev: atomic tests to test-eventdev app

2025-03-06 Thread Luka Jankovic
I accidentally sent the last version (v9) with an incorrect git name, but the
commits are signed correctly. Will this be an issue or should I upload a new
version with the correct name?


[PATCH v10 3/3] eventdev: add atomic atq to test-eventdev app

2025-03-06 Thread Luka Jankovic
Add an atomic atq test to the test-eventdev app. The test works
in the same way as atomic queue, the difference being that only one
queue capable of all types for both stages.

Signed-off-by: Luka Jankovic 
Tested-by: Pavan Nikhilesh 
---
 app/test-eventdev/meson.build |1 +
 app/test-eventdev/test_atomic_atq.c   |  220 +++
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |   63 +
 4 files changed, 1985 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_atq.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index 1f13e1700c..10658f69ce 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_atq.c',
 'test_atomic_common.c',
 'test_atomic_queue.c',
 'test_perf_atq.c',
diff --git a/app/test-eventdev/test_atomic_atq.c 
b/app/test-eventdev/test_atomic_atq.c
new file mode 100644
index 00..4810d2eaae
--- /dev/null
+++ b/app/test-eventdev/test_atomic_atq.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include 
+#include 
+
+#include "test_atomic_common.h"
+
+#define NB_QUEUES 1
+#define NB_STAGES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline void
+atomic_atq_process_stage_0(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 0, flow, nb_flows, t, port);
+
+   ev->sub_event_type = 1;
+   ev->op = RTE_EVENT_OP_FORWARD;
+   ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev->event_type = RTE_EVENT_TYPE_CPU;
+
+   atomic_spinlock_unlock(atomic_locks, 0, flow, nb_flows);
+}
+
+static inline void
+atomic_atq_process_stage_1(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t *const expected_flow_seq,
+   RTE_ATOMIC(uint64_t) * const outstand_pkts,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+   const uint32_t seq = *order_mbuf_seqn(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 1, flow, nb_flows, t, port);
+
+   /* compare the seqn against expected value */
+   if (seq != expected_flow_seq[flow]) {
+   evt_err("flow=%x seqn mismatch got=%x expected=%x", flow, seq,
+   expected_flow_seq[flow]);
+   t->err = true;
+   }
+
+   expected_flow_seq[flow]++;
+   rte_pktmbuf_free(ev->mbuf);
+
+   rte_atomic_fetch_sub_explicit(outstand_pkts, 1, 
rte_memory_order_relaxed);
+
+   ev->op = RTE_EVENT_OP_RELEASE;
+
+   atomic_spinlock_unlock(atomic_locks, 1, flow, nb_flows);
+}
+
+static int
+atomic_atq_worker_burst(void *arg, bool flow_id_cap, uint32_t max_burst)
+{
+   ORDER_WORKER_INIT;
+   struct rte_event ev[BURST_SIZE];
+   uint16_t i;
+
+   while (t->err == false) {
+
+   uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, 
ev, max_burst, 0);
+
+   if (nb_rx == 0) {
+   if (rte_atomic_load_explicit(outstand_pkts, 
rte_memory_order_relaxed) <= 0)
+   break;
+   rte_pause();
+   continue;
+   }
+
+   for (i = 0; i < nb_rx; i++) {
+   if (!flow_id_cap)
+   order_flow_id_copy_from_mbuf(t, &ev[i]);
+
+   switch (ev[i].sub_event_type) {
+   case 0:
+   atomic_atq_process_stage_0(t, &ev[i], nb_flows, 
port);
+   break;
+   case 1:
+   atomic_atq_process_stage_1(t, &ev[i], nb_flows, 
expected_flow_seq,
+   outstand_pkts, port);
+   break;
+   default:
+   order_process_stage_invalid(t, &ev[i]);
+   break;
+   }
+   }
+
+   uint16_t total_enq = 0;
+
+   do {
+   total_enq += rte_event_enqueue_burst(
+   dev_id, port, ev + total_enq, nb_rx - 
total_enq);
+   } while (total_enq < nb_rx);
+   }
+
+   return 0;
+}
+
+static int
+worker_wrapper(void 

[PATCH v10 0/3] eventdev: atomic tests to test-eventdev app

2025-03-06 Thread Luka Jankovic
Add atomic tests to the test-eventdev app which correspond to the order
tests but use exclusively atomic queues.

The test detects if port maintenance is required, and so they are compatible
with event devices such as the distributed software eventdev.

To verify atomicity, a spinlock is used for each combination of stage and flow.
It is acquired whenever an event is dequeued for processing and released when
processing is finished.

The tests will fail if a port attempts to acquire a lock which is already held.

Luka Jankovic (3):
  eventdev: atomic common for test-eventdev app
  eventdev: add atomic queue test to test-eventdev app
  eventdev: add atomic atq to test-eventdev app

 app/test-eventdev/evt_common.h|9 +
 app/test-eventdev/meson.build |3 +
 app/test-eventdev/test_atomic_atq.c   |  220 +++
 app/test-eventdev/test_atomic_common.c|  136 ++
 app/test-eventdev/test_atomic_common.h|   87 +
 app/test-eventdev/test_atomic_queue.c |  234 +++
 doc/guides/rel_notes/release_25_03.rst|5 +
 .../tools/img/eventdev_atomic_atq_test.svg| 1588 +++
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |  155 ++
 10 files changed, 4138 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_atq.c
 create mode 100644 app/test-eventdev/test_atomic_common.c
 create mode 100644 app/test-eventdev/test_atomic_common.h
 create mode 100644 app/test-eventdev/test_atomic_queue.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_atq_test.svg
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

---
v10:
 * No changes.

v9:
 * Fixed incorrect patch.

v8:
 * Add copyright notice.
 * Update changelog.
 * Prepare for merge.

v7:
 * Refactor common atomic test functionality into separate common file.
 * Implement atomic atq.

v6:
 * Revert the use of event.u64 to mbufs as the Marvell CNXK platform assumes
   event.u64 to be 8-byte aligned, which causes the test to fail.
 * Clarified deadlock error message.

v5:
 * Updated documentation for dpdk-test-eventdev

v4:
 * Fix code style issues.
 * Remove unused imports.

v3:
 * Use struct to avoid bit operations when accessing event u64.
 * Changed __rte_always_inline to inline for processing stages.
 * Introduce idle timeout constant.
 * Formatting and cleanup.

v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.

---
2.34.1



[PATCH v10 2/3] eventdev: add atomic queue test to test-eventdev app

2025-03-11 Thread Luka Jankovic
Add an atomic queue test to the test-eventdev app, which is based on the
order queue test that exclusively uses atomic queues.

Signed-off-by: Luka Jankovic 
Tested-by: Pavan Nikhilesh 
---
 app/test-eventdev/meson.build |1 +
 app/test-eventdev/test_atomic_queue.c |  234 +++
 .../tools/img/eventdev_atomic_atq_test.svg| 1588 +
 doc/guides/tools/testeventdev.rst |   92 +
 4 files changed, 1915 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_atq_test.svg

diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index 926593b1a6..1f13e1700c 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -16,6 +16,7 @@ sources = files(
 'test_order_common.c',
 'test_order_queue.c',
 'test_atomic_common.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..c1a447bbac
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include 
+#include 
+
+#include "test_atomic_common.h"
+
+#define NB_QUEUES 2
+#define NB_STAGES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline void
+atomic_queue_process_stage_0(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 0, flow, nb_flows, t, port);
+
+   ev->queue_id = 1;
+   ev->op = RTE_EVENT_OP_FORWARD;
+   ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev->event_type = RTE_EVENT_TYPE_CPU;
+
+   atomic_spinlock_unlock(atomic_locks, 0, flow, nb_flows);
+}
+
+static inline void
+atomic_queue_process_stage_1(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   rte_spinlock_t *atomic_locks,
+   uint32_t *const expected_flow_seq,
+   RTE_ATOMIC(uint64_t) * const outstand_pkts,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+   const uint32_t seq = *order_mbuf_seqn(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 1, flow, nb_flows, t, port);
+
+   /* compare the seqn against expected value */
+   if (seq != expected_flow_seq[flow]) {
+   evt_err("flow=%x seqn mismatch got=%x expected=%x", flow, seq,
+   expected_flow_seq[flow]);
+   t->err = true;
+   }
+
+   expected_flow_seq[flow]++;
+   rte_pktmbuf_free(ev->mbuf);
+
+   rte_atomic_fetch_sub_explicit(outstand_pkts, 1, 
rte_memory_order_relaxed);
+
+   ev->op = RTE_EVENT_OP_RELEASE;
+
+   atomic_spinlock_unlock(atomic_locks, 1, flow, nb_flows);
+}
+
+static int
+atomic_queue_worker_burst(void *arg, bool flow_id_cap, uint32_t max_burst)
+{
+   ORDER_WORKER_INIT;
+   struct rte_event ev[BURST_SIZE];
+   uint16_t i;
+
+   while (t->err == false) {
+
+   uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, 
ev, max_burst, 0);
+
+   if (nb_rx == 0) {
+   if (rte_atomic_load_explicit(outstand_pkts, 
rte_memory_order_relaxed) <= 0)
+   break;
+   rte_pause();
+   continue;
+   }
+
+   for (i = 0; i < nb_rx; i++) {
+   if (!flow_id_cap)
+   order_flow_id_copy_from_mbuf(t, &ev[i]);
+
+   switch (ev[i].queue_id) {
+   case 0:
+   atomic_queue_process_stage_0(t, &ev[i], 
nb_flows, port);
+   break;
+   case 1:
+   atomic_queue_process_stage_1(t, &ev[i], 
nb_flows, atomic_locks,
+   expected_flow_seq, 
outstand_pkts, port);
+   break;
+   default:
+   order_process_stage_invalid(t, &ev[i]);
+   break;
+   }
+   }
+
+   uint16_t total_enq = 0;
+
+   do {
+   total_enq += rte_event_enqueue_burst(
+   dev_id, port, ev + total_enq, nb_rx - 
total_enq);
+   } while (total_enq < nb_rx);
+   }
+
+   return 0;
+}
+
+static int
+wor

[PATCH v10 1/3] eventdev: atomic common for test-eventdev app

2025-03-11 Thread Luka Jankovic
Introduce the common changes required to run the atomic tests.

- Producer port maintenance
- Common spinlock implementation

Signed-off-by: Luka Jankovic 
Tested-by: Pavan Nikhilesh 
---
 app/test-eventdev/evt_common.h |   9 ++
 app/test-eventdev/meson.build  |   1 +
 app/test-eventdev/test_atomic_common.c | 136 +
 app/test-eventdev/test_atomic_common.h |  87 
 doc/guides/rel_notes/release_25_03.rst |   5 +
 5 files changed, 238 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_common.c
 create mode 100644 app/test-eventdev/test_atomic_common.h

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 63b782f11a..74f9d187f3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,15 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }
 
+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index f0da9fadf2..926593b1a6 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_common.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_common.c 
b/app/test-eventdev/test_atomic_common.c
new file mode 100644
index 00..618b92750c
--- /dev/null
+++ b/app/test-eventdev/test_atomic_common.c
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include "test_atomic_common.h"
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   struct rte_mempool *pool = t->pool;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_mbuf *m;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+   __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+   ev = (struct rte_event) {
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ATOMIC,
+   .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+   .event_type = RTE_EVENT_TYPE_CPU,
+   .sub_event_type = 0
+   };
+
+   while (count < nb_pkts && t->err == false) {
+   m = rte_pktmbuf_alloc(pool);
+   if (m == NULL)
+   continue;
+
+   /* Maintain seq number per flow */
+
+   const flow_id_t flow = rte_rand_max(nb_flows);
+
+   *order_mbuf_flow_id(t, m) = flow;
+   *order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
+
+   ev.flow_id = flow;
+   ev.mbuf = m;
+
+   while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+   if (t->err)
+   break;
+   rte_pause();
+   }
+
+   count++;
+   }
+
+   if (!evt_is_maintenance_free(dev_id)) {
+   while (!test_done(t)) {
+   rte_event_maintain(dev_id, port, 
RTE_EVENT_DEV_MAINT_OP_FLUSH);
+   rte_pause();
+   }
+   }
+
+   return 0;
+}
+
+int
+atomic_launch_lcores(struct evt_test *test, struct evt_options *opt,
+   int (*worker)(void *))
+{
+   int ret, lcore_id;
+   struct test_order *t = evt_test_priv(test);
+
+   /* launch workers */
+
+   int wkr_idx = 0;
+   RTE_LCORE_FOREACH_WORKER(lcore_id) {
+   if (!(opt->wlcores[lcore_id]))
+   continue;
+
+   ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], 
lcore_id);
+   if (ret) {
+   evt_err("failed to launch worker %d", lcore_id);
+   return ret;
+   }
+   wkr_idx++;
+   }
+
+

[PATCH v8 2/3] eventdev: add atomic queue test to test-eventdev app

2025-03-05 Thread Luka Jankovic
Add an atomic queue test to the test-eventdev app, which is based on the
order queue test that exclusively uses atomic queues.

Signed-off-by: Luka Jankovic 
Tested-by: Pavan Nikhilesh 
---
 app/test-eventdev/meson.build |1 +
 app/test-eventdev/test_atomic_queue.c |  234 +++
 .../tools/img/eventdev_atomic_atq_test.svg| 1588 +
 doc/guides/tools/testeventdev.rst |   92 +
 4 files changed, 1915 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_queue.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_atq_test.svg

diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index 926593b1a6..1f13e1700c 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -16,6 +16,7 @@ sources = files(
 'test_order_common.c',
 'test_order_queue.c',
 'test_atomic_common.c',
+'test_atomic_queue.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_queue.c 
b/app/test-eventdev/test_atomic_queue.c
new file mode 100644
index 00..c1a447bbac
--- /dev/null
+++ b/app/test-eventdev/test_atomic_queue.c
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include 
+#include 
+
+#include "test_atomic_common.h"
+
+#define NB_QUEUES 2
+#define NB_STAGES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline void
+atomic_queue_process_stage_0(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 0, flow, nb_flows, t, port);
+
+   ev->queue_id = 1;
+   ev->op = RTE_EVENT_OP_FORWARD;
+   ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev->event_type = RTE_EVENT_TYPE_CPU;
+
+   atomic_spinlock_unlock(atomic_locks, 0, flow, nb_flows);
+}
+
+static inline void
+atomic_queue_process_stage_1(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   rte_spinlock_t *atomic_locks,
+   uint32_t *const expected_flow_seq,
+   RTE_ATOMIC(uint64_t) * const outstand_pkts,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+   const uint32_t seq = *order_mbuf_seqn(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 1, flow, nb_flows, t, port);
+
+   /* compare the seqn against expected value */
+   if (seq != expected_flow_seq[flow]) {
+   evt_err("flow=%x seqn mismatch got=%x expected=%x", flow, seq,
+   expected_flow_seq[flow]);
+   t->err = true;
+   }
+
+   expected_flow_seq[flow]++;
+   rte_pktmbuf_free(ev->mbuf);
+
+   rte_atomic_fetch_sub_explicit(outstand_pkts, 1, 
rte_memory_order_relaxed);
+
+   ev->op = RTE_EVENT_OP_RELEASE;
+
+   atomic_spinlock_unlock(atomic_locks, 1, flow, nb_flows);
+}
+
+static int
+atomic_queue_worker_burst(void *arg, bool flow_id_cap, uint32_t max_burst)
+{
+   ORDER_WORKER_INIT;
+   struct rte_event ev[BURST_SIZE];
+   uint16_t i;
+
+   while (t->err == false) {
+
+   uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, 
ev, max_burst, 0);
+
+   if (nb_rx == 0) {
+   if (rte_atomic_load_explicit(outstand_pkts, 
rte_memory_order_relaxed) <= 0)
+   break;
+   rte_pause();
+   continue;
+   }
+
+   for (i = 0; i < nb_rx; i++) {
+   if (!flow_id_cap)
+   order_flow_id_copy_from_mbuf(t, &ev[i]);
+
+   switch (ev[i].queue_id) {
+   case 0:
+   atomic_queue_process_stage_0(t, &ev[i], 
nb_flows, port);
+   break;
+   case 1:
+   atomic_queue_process_stage_1(t, &ev[i], 
nb_flows, atomic_locks,
+   expected_flow_seq, 
outstand_pkts, port);
+   break;
+   default:
+   order_process_stage_invalid(t, &ev[i]);
+   break;
+   }
+   }
+
+   uint16_t total_enq = 0;
+
+   do {
+   total_enq += rte_event_enqueue_burst(
+   dev_id, port, ev + total_enq, nb_rx - 
total_enq);
+   } while (total_enq < nb_rx);
+   }
+
+   return 0;
+}
+
+static int
+wor

[PATCH v8 3/3] eventdev: add atomic atq to test-eventdev app

2025-03-05 Thread Luka Jankovic
Add an atomic atq test to the test-eventdev app. The test works
in the same way as the atomic queue test, the difference being that a
single queue capable of all types is used for both stages.

Signed-off-by: Luka Jankovic 
Tested-by: Pavan Nikhilesh 
---
 app/test-eventdev/meson.build |1 +
 app/test-eventdev/test_atomic_atq.c   |  220 +++
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |   63 +
 4 files changed, 1985 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_atq.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index 1f13e1700c..10658f69ce 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_atq.c',
 'test_atomic_common.c',
 'test_atomic_queue.c',
 'test_perf_atq.c',
diff --git a/app/test-eventdev/test_atomic_atq.c 
b/app/test-eventdev/test_atomic_atq.c
new file mode 100644
index 00..4810d2eaae
--- /dev/null
+++ b/app/test-eventdev/test_atomic_atq.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include 
+#include 
+
+#include "test_atomic_common.h"
+
+#define NB_QUEUES 1
+#define NB_STAGES 2
+
+static rte_spinlock_t *atomic_locks;
+
+static inline void
+atomic_atq_process_stage_0(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 0, flow, nb_flows, t, port);
+
+   ev->sub_event_type = 1;
+   ev->op = RTE_EVENT_OP_FORWARD;
+   ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev->event_type = RTE_EVENT_TYPE_CPU;
+
+   atomic_spinlock_unlock(atomic_locks, 0, flow, nb_flows);
+}
+
+static inline void
+atomic_atq_process_stage_1(struct test_order *const t,
+   struct rte_event *const ev,
+   uint32_t nb_flows,
+   uint32_t *const expected_flow_seq,
+   RTE_ATOMIC(uint64_t) * const outstand_pkts,
+   uint32_t port)
+{
+   const uint32_t flow = *order_mbuf_flow_id(t, ev->mbuf);
+   const uint32_t seq = *order_mbuf_seqn(t, ev->mbuf);
+
+   atomic_lock_verify(atomic_locks, 1, flow, nb_flows, t, port);
+
+   /* compare the seqn against expected value */
+   if (seq != expected_flow_seq[flow]) {
+   evt_err("flow=%x seqn mismatch got=%x expected=%x", flow, seq,
+   expected_flow_seq[flow]);
+   t->err = true;
+   }
+
+   expected_flow_seq[flow]++;
+   rte_pktmbuf_free(ev->mbuf);
+
+   rte_atomic_fetch_sub_explicit(outstand_pkts, 1, 
rte_memory_order_relaxed);
+
+   ev->op = RTE_EVENT_OP_RELEASE;
+
+   atomic_spinlock_unlock(atomic_locks, 1, flow, nb_flows);
+}
+
+static int
+atomic_atq_worker_burst(void *arg, bool flow_id_cap, uint32_t max_burst)
+{
+   ORDER_WORKER_INIT;
+   struct rte_event ev[BURST_SIZE];
+   uint16_t i;
+
+   while (t->err == false) {
+
+   uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, 
ev, max_burst, 0);
+
+   if (nb_rx == 0) {
+   if (rte_atomic_load_explicit(outstand_pkts, 
rte_memory_order_relaxed) <= 0)
+   break;
+   rte_pause();
+   continue;
+   }
+
+   for (i = 0; i < nb_rx; i++) {
+   if (!flow_id_cap)
+   order_flow_id_copy_from_mbuf(t, &ev[i]);
+
+   switch (ev[i].sub_event_type) {
+   case 0:
+   atomic_atq_process_stage_0(t, &ev[i], nb_flows, 
port);
+   break;
+   case 1:
+   atomic_atq_process_stage_1(t, &ev[i], nb_flows, 
expected_flow_seq,
+   outstand_pkts, port);
+   break;
+   default:
+   order_process_stage_invalid(t, &ev[i]);
+   break;
+   }
+   }
+
+   uint16_t total_enq = 0;
+
+   do {
+   total_enq += rte_event_enqueue_burst(
+   dev_id, port, ev + total_enq, nb_rx - 
total_enq);
+   } while (total_enq < nb_rx);
+   }
+
+   return 0;
+}
+
+static int
+worker_wrapper(void 

[PATCH v8 0/3] eventdev: atomic tests to test-eventdev app

2025-03-05 Thread Luka Jankovic
Add atomic tests to the test-eventdev app which correspond to the order
tests but use exclusively atomic queues.

The test detects if port maintenance is required, and so they are compatible
with event devices such as the distributed software eventdev.

To verify atomicity, a spinlock is used for each combination of stage and flow.
It is acquired whenever an event is dequeued for processing and released when
processing is finished.

The tests will fail if a port attempts to acquire a lock which is already held.

Luka Jankovic (3):
  eventdev: atomic common for test-eventdev app
  eventdev: add atomic queue test to test-eventdev app
  eventdev: add atomic atq to test-eventdev app

 app/test-eventdev/evt_common.h|9 +
 app/test-eventdev/meson.build |3 +
 app/test-eventdev/test_atomic_atq.c   |  220 +++
 app/test-eventdev/test_atomic_common.c|  138 ++
 app/test-eventdev/test_atomic_common.h|   87 +
 app/test-eventdev/test_atomic_queue.c |  234 +++
 doc/guides/rel_notes/release_25_03.rst|5 +
 .../tools/img/eventdev_atomic_atq_test.svg| 1588 +++
 .../tools/img/eventdev_atomic_queue_test.svg  | 1701 +
 doc/guides/tools/testeventdev.rst |  155 ++
 10 files changed, 4140 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_atq.c
 create mode 100644 app/test-eventdev/test_atomic_common.c
 create mode 100644 app/test-eventdev/test_atomic_common.h
 create mode 100644 app/test-eventdev/test_atomic_queue.c
 create mode 100644 doc/guides/tools/img/eventdev_atomic_atq_test.svg
 create mode 100644 doc/guides/tools/img/eventdev_atomic_queue_test.svg

---
v8:
 * Add copyright notice.
 * Update changelog.
 * Prepare for merge.

v7:
 * Refactor common atomic test functionality into separate common file.
 * Implement atomic atq.

v6:
 * Revert the use of event.u64 to mbufs as the Marvell CNXK platform assumes
   event.u64 to be 8-byte aligned, which causes the test to fail.
 * Clarified deadlock error message.

v5:
 * Updated documentation for dpdk-test-eventdev

v4:
 * Fix code style issues.
 * Remove unused imports.

v3:
 * Use struct to avoid bit operations when accessing event u64.
 * Changed __rte_always_inline to inline for processing stages.
 * Introduce idle timeout constant.
 * Formatting and cleanup.

v2:
 * Changed to only check queue, flow combination, not port, queue, flow.
 * Lock is only held when a packet is processed.
 * Utilize event u64 instead of mbuf.
 * General cleanup.

---
2.34.1



[PATCH] eventdev: atomic common for test-eventdev app

2025-03-05 Thread Luka Jankovic
Introduce the common changes required to run the atomic tests.

- Producer port maintenance
- Common spinlock implementation

Signed-off-by: Luka Jankovic 
Tested-by: Pavan Nikhilesh 
---
 app/test-eventdev/evt_common.h |   9 ++
 app/test-eventdev/meson.build  |   1 +
 app/test-eventdev/test_atomic_common.c | 136 +
 app/test-eventdev/test_atomic_common.h |  87 
 doc/guides/rel_notes/release_25_03.rst |   5 +
 5 files changed, 238 insertions(+)
 create mode 100644 app/test-eventdev/test_atomic_common.c
 create mode 100644 app/test-eventdev/test_atomic_common.h

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 63b782f11a..74f9d187f3 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -138,6 +138,15 @@ evt_has_flow_id(uint8_t dev_id)
true : false;
 }

+static inline bool
+evt_is_maintenance_free(uint8_t dev_id)
+{
+   struct rte_event_dev_info dev_info;
+
+   rte_event_dev_info_get(dev_id, &dev_info);
+   return dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index f0da9fadf2..926593b1a6 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -15,6 +15,7 @@ sources = files(
 'test_order_atq.c',
 'test_order_common.c',
 'test_order_queue.c',
+'test_atomic_common.c',
 'test_perf_atq.c',
 'test_perf_common.c',
 'test_perf_queue.c',
diff --git a/app/test-eventdev/test_atomic_common.c 
b/app/test-eventdev/test_atomic_common.c
new file mode 100644
index 00..618b92750c
--- /dev/null
+++ b/app/test-eventdev/test_atomic_common.c
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2025 Ericsson AB
+ */
+
+#include "test_atomic_common.h"
+
+static inline bool
+test_done(struct test_order *const t)
+{
+   return t->err || t->result == EVT_TEST_SUCCESS;
+}
+
+static inline int
+atomic_producer(void *arg)
+{
+   struct prod_data *p = arg;
+   struct test_order *t = p->t;
+   struct evt_options *opt = t->opt;
+   const uint8_t dev_id = p->dev_id;
+   const uint8_t port = p->port_id;
+   struct rte_mempool *pool = t->pool;
+   const uint64_t nb_pkts = t->nb_pkts;
+   uint32_t *producer_flow_seq = t->producer_flow_seq;
+   const uint32_t nb_flows = t->nb_flows;
+   uint64_t count = 0;
+   struct rte_mbuf *m;
+   struct rte_event ev;
+
+   if (opt->verbose_level > 1)
+   printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+   __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+   ev = (struct rte_event) {
+   .op = RTE_EVENT_OP_NEW,
+   .queue_id = p->queue_id,
+   .sched_type = RTE_SCHED_TYPE_ATOMIC,
+   .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+   .event_type = RTE_EVENT_TYPE_CPU,
+   .sub_event_type = 0
+   };
+
+   while (count < nb_pkts && t->err == false) {
+   m = rte_pktmbuf_alloc(pool);
+   if (m == NULL)
+   continue;
+
+   /* Maintain seq number per flow */
+
+   const flow_id_t flow = rte_rand_max(nb_flows);
+
+   *order_mbuf_flow_id(t, m) = flow;
+   *order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
+
+   ev.flow_id = flow;
+   ev.mbuf = m;
+
+   while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+   if (t->err)
+   break;
+   rte_pause();
+   }
+
+   count++;
+   }
+
+   if (!evt_is_maintenance_free(dev_id)) {
+   while (!test_done(t)) {
+   rte_event_maintain(dev_id, port, 
RTE_EVENT_DEV_MAINT_OP_FLUSH);
+   rte_pause();
+   }
+   }
+
+   return 0;
+}
+
+int
+atomic_launch_lcores(struct evt_test *test, struct evt_options *opt,
+   int (*worker)(void *))
+{
+   int ret, lcore_id;
+   struct test_order *t = evt_test_priv(test);
+
+   /* launch workers */
+
+   int wkr_idx = 0;
+   RTE_LCORE_FOREACH_WORKER(lcore_id) {
+   if (!(opt->wlcores[lcore_id]))
+   continue;
+
+   ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], 
lcore_id);
+   if (ret) {
+   evt_err("failed to launch worker %d", lcore_id);
+   return ret;
+   }
+   wkr_idx++;
+   }
+
+   /* launch producer *

Re: [EXTERNAL] [RFC PATCH v6 1/2] eventdev: add atomic queue to test-eventdev app

2025-02-14 Thread Luka Jankovic


On Tue, 2025-02-04 at 16:11 +, Pavan Nikhilesh Bhagavatula wrote:
> > Add an atomic queue test to the test-eventdev app, which is based on the
> > order queue test that exclusively uses atomic queues.
> > 
> > This makes it compatible with event devices such as the
> > distributed software eventdev.
> > 
> > The test detects if port maintenance is required.
> > 
> > To verify atomicity, a spinlock is used for each combination of queue and
> > flow.
> > It is acquired whenever an event is dequeued for processing and
> > released when processing is finished.
> > 
> > The test will fail if a port attempts to acquire a lock which is already
> > held.
> > 
> > Signed-off-by: Luka Jankovic 
> 
> It would be great if you could add atomic-atq test too.

I will add atomic-atq, but I only have access to sw and dsw, so I am unable to
test it locally.