This commit implements the eventdev ABI changes required by
the DLB/DLB2 PMDs.  Several data structures and constants are modified
or added in this patch, thereby requiring modifications to the
dependent apps and examples.

The DLB/DLB2 hardware does not conform exactly to the eventdev interface.
1) It has a limit on the number of queues that may be linked to a port.
2) Some ports are further restricted to a maximum of one linked queue.
3) DLB does not have the ability to carry the flow_id as part
   of the event (QE) payload. Note that the DLB2 hardware is capable of
   carrying the flow_id.
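
Because of limitation 3, applications that need a stable flow_id on such
devices must check the new RTE_EVENT_DEV_CAP_CARRY_FLOW_ID capability and,
when it is absent, carry the flow ID themselves (the test-eventdev changes
below stash it in mbuf->udata64). A minimal sketch of the capability check
follows; the helper name is illustrative:

    #include <stdbool.h>
    #include <rte_eventdev.h>

    /* True if the device preserves ev.flow_id from enqueue to dequeue. */
    static inline bool
    has_flow_id(uint8_t dev_id)
    {
        struct rte_event_dev_info info;

        rte_event_dev_info_get(dev_id, &info);
        return (info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) != 0;
    }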

The following is a detailed description of the changes that have been made.

1) Add new fields to the rte_event_dev_info struct. These fields allow
the device to advertise its capabilities so that applications can take
the appropriate actions based on those capabilities.

    struct rte_event_dev_info {
        uint8_t max_event_port_links;
        /**< Maximum number of queues that can be linked to a single event
         * port by this device.
         */

        uint8_t max_single_link_event_port_queue_pairs;
        /**< Maximum number of event ports and queues that are optimized for
         * (and only capable of) single-link configurations supported by this
         * device. These ports and queues are not accounted for in
         * max_event_ports or max_event_queues.
         */
    }
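
For example, an application sizing its port and queue layout could read
these fields as follows (a sketch; dev_id is assumed to be a valid device
identifier):

    struct rte_event_dev_info info;

    rte_event_dev_info_get(dev_id, &info);

    /* Single-link-only pairs are in addition to the regular counts. */
    unsigned int total_ports = info.max_event_ports +
                               info.max_single_link_event_port_queue_pairs;

    /* No single port may be linked to more queues than this. */
    unsigned int max_links = info.max_event_port_links;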

2) Add a new field to the rte_event_dev_config struct. This field allows
the application to specify how many of its ports are limited to a single
link, or will be used in single-link mode.

    /** Event device configuration structure */
    struct rte_event_dev_config {
        uint8_t nb_single_link_event_port_queues;
        /**< Number of event ports and queues that will be singly-linked to
         * each other. These are a subset of the overall event ports and
         * queues; this value cannot exceed *nb_event_ports* or
         * *nb_event_queues*. If the device has ports and queues that are
         * optimized for single-link usage, this field is a hint for how many
         * to allocate; otherwise, regular event ports and queues can be used.
         */
    }
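
As an illustration, a pipeline with one singly-linked TX port/queue pair
(mirroring the eventdev_pipeline change below) might configure the device
along these lines (a sketch; nb_queues and nb_ports are assumed to already
include the TX queue and port):

    struct rte_event_dev_config config = {
        .nb_event_queues = nb_queues,
        .nb_event_ports = nb_ports,
        .nb_single_link_event_port_queues = 1, /* the TX port/queue pair */
        .nb_events_limit = 4096,
        .nb_event_queue_flows = 1024,
        .nb_event_port_dequeue_depth = 128,
        .nb_event_port_enqueue_depth = 128,
    };

    int ret = rte_event_dev_configure(dev_id, &config);
    if (ret < 0)
        return ret;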

3) Replace the dedicated disable_implicit_release field with a bit field
of explicit port configuration flags. The implicit-release-disable
functionality is assigned to one bit, and a port-is-single-link-only
attribute is assigned to another, with the remaining bits available for
future assignment.

        /** Event port configuration bitmap flags */
        #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
        /**< Configure the port not to release outstanding events in
         * rte_event_dev_dequeue_burst(). If set, all events received through
         * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
         * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
         * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
         */

        #define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
        /**< This event port links only to a single event queue.
         *
         *  @see rte_event_port_setup(), rte_event_port_link()
         */

        /**
         * The implicit release disable attribute of the port
         */
        #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

        struct rte_event_port_conf {
                uint32_t event_port_cfg;
                /**< Port cfg flags(EVENT_PORT_CFG_) */
        }
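
Taken together, a port that must release events explicitly and links to
exactly one queue could be set up and verified as follows (a sketch; it
assumes the device reports RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE and
that dev_id/port_id are valid):

    struct rte_event_port_conf port_conf;
    uint32_t impl_rel;

    rte_event_port_default_conf_get(dev_id, port_id, &port_conf);
    port_conf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL |
                               RTE_EVENT_PORT_CFG_SINGLE_LINK;

    if (rte_event_port_setup(dev_id, port_id, &port_conf) < 0)
        return -1;

    /* Read the setting back through the new port attribute. */
    rte_event_port_attr_get(dev_id, port_id,
                            RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
                            &impl_rel); /* impl_rel == 1 here */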

Signed-off-by: Timothy McDaniel <timothy.mcdan...@intel.com>
Acked-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
Acked-by: Harry van Haaren <harry.van.haa...@intel.com>
---
 app/test-eventdev/evt_common.h                     | 11 ++++
 app/test-eventdev/test_order_atq.c                 | 28 +++++++---
 app/test-eventdev/test_order_common.c              |  1 +
 app/test-eventdev/test_order_queue.c               | 29 +++++++---
 app/test/test_eventdev.c                           |  4 +-
 drivers/event/dpaa/dpaa_eventdev.c                 |  3 +-
 drivers/event/dpaa2/dpaa2_eventdev.c               |  5 +-
 drivers/event/dsw/dsw_evdev.c                      |  3 +-
 drivers/event/octeontx/ssovf_evdev.c               |  5 +-
 drivers/event/octeontx2/otx2_evdev.c               |  3 +-
 drivers/event/opdl/opdl_evdev.c                    |  3 +-
 drivers/event/skeleton/skeleton_eventdev.c         |  5 +-
 drivers/event/sw/sw_evdev.c                        |  8 ++-
 drivers/event/sw/sw_evdev_selftest.c               |  6 +-
 .../eventdev_pipeline/pipeline_worker_generic.c    |  6 +-
 examples/eventdev_pipeline/pipeline_worker_tx.c    |  1 +
 examples/l2fwd-event/l2fwd_event_generic.c         |  7 ++-
 examples/l2fwd-event/l2fwd_event_internal_port.c   |  6 +-
 examples/l3fwd/l3fwd_event_generic.c               |  7 ++-
 examples/l3fwd/l3fwd_event_internal_port.c         |  6 +-
 lib/librte_eventdev/rte_event_eth_tx_adapter.c     |  2 +-
 lib/librte_eventdev/rte_eventdev.c                 | 65 +++++++++++++++++++---
 lib/librte_eventdev/rte_eventdev.h                 | 51 ++++++++++++++---
 lib/librte_eventdev/rte_eventdev_pmd_pci.h         |  1 -
 lib/librte_eventdev/rte_eventdev_trace.h           |  7 ++-
 lib/librte_eventdev/rte_eventdev_version.map       |  4 +-
 26 files changed, 213 insertions(+), 64 deletions(-)

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index f9d7378..a1da1cf 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -104,6 +104,16 @@ evt_has_all_types_queue(uint8_t dev_id)
                        true : false;
 }
 
+static inline bool
+evt_has_flow_id(uint8_t dev_id)
+{
+       struct rte_event_dev_info dev_info;
+
+       rte_event_dev_info_get(dev_id, &dev_info);
+       return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
+                       true : false;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
@@ -169,6 +179,7 @@ evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
                        .dequeue_timeout_ns = opt->deq_tmo_nsec,
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
+                       .nb_single_link_event_port_queues = 0,
                        .nb_events_limit  = info.max_num_events,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth =
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 3366cfc..cfcb1dc 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -19,7 +19,7 @@ order_atq_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_atq_worker(void *arg)
+order_atq_worker(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev;
@@ -34,6 +34,9 @@ order_atq_worker(void *arg)
                        continue;
                }
 
+               if (!flow_id_cap)
+                       ev.flow_id = ev.mbuf->udata64;
+
                if (ev.sub_event_type == 0) { /* stage 0 from producer */
                        order_atq_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ order_atq_worker(void *arg)
 }
 
 static int
-order_atq_worker_burst(void *arg)
+order_atq_worker_burst(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,9 @@ order_atq_worker_burst(void *arg)
                }
 
                for (i = 0; i < nb_rx; i++) {
+                       if (!flow_id_cap)
+                               ev[i].flow_id = ev[i].mbuf->udata64;
+
                        if (ev[i].sub_event_type == 0) { /*stage 0 */
                                order_atq_process_stage_0(&ev[i]);
                        } else if (ev[i].sub_event_type == 1) { /* stage 1 */
@@ -95,11 +101,19 @@ worker_wrapper(void *arg)
 {
        struct worker_data *w  = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);
-
-       if (burst)
-               return order_atq_worker_burst(arg);
-       else
-               return order_atq_worker(arg);
+       const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+       if (burst) {
+               if (flow_id_cap)
+                       return order_atq_worker_burst(arg, true);
+               else
+                       return order_atq_worker_burst(arg, false);
+       } else {
+               if (flow_id_cap)
+                       return order_atq_worker(arg, true);
+               else
+                       return order_atq_worker(arg, false);
+       }
 }
 
 static int
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index 4190f9a..7942390 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -49,6 +49,7 @@ order_producer(void *arg)
                const uint32_t flow = (uintptr_t)m % nb_flows;
                /* Maintain seq number per flow */
                m->seqn = producer_flow_seq[flow]++;
+               m->udata64 = flow;
 
                ev.flow_id = flow;
                ev.mbuf = m;
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 495efd9..1511c00 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -19,7 +19,7 @@ order_queue_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev;
@@ -34,6 +34,9 @@ order_queue_worker(void *arg)
                        continue;
                }
 
+               if (!flow_id_cap)
+                       ev.flow_id = ev.mbuf->udata64;
+
                if (ev.queue_id == 0) { /* from ordered queue */
                        order_queue_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ order_queue_worker(void *arg)
 }
 
 static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,10 @@ order_queue_worker_burst(void *arg)
                }
 
                for (i = 0; i < nb_rx; i++) {
+
+                       if (!flow_id_cap)
+                               ev[i].flow_id = ev[i].mbuf->udata64;
+
                        if (ev[i].queue_id == 0) { /* from ordered queue */
                                order_queue_process_stage_0(&ev[i]);
                        } else if (ev[i].queue_id == 1) {/* from atomic queue */
@@ -95,11 +102,19 @@ worker_wrapper(void *arg)
 {
        struct worker_data *w  = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);
-
-       if (burst)
-               return order_queue_worker_burst(arg);
-       else
-               return order_queue_worker(arg);
+       const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+       if (burst) {
+               if (flow_id_cap)
+                       return order_queue_worker_burst(arg, true);
+               else
+                       return order_queue_worker_burst(arg, false);
+       } else {
+               if (flow_id_cap)
+                       return order_queue_worker(arg, true);
+               else
+                       return order_queue_worker(arg, false);
+       }
 }
 
 static int
diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 43ccb1c..62019c1 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -559,10 +559,10 @@ test_eventdev_port_setup(void)
        if (!(info.event_dev_cap &
              RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
                pconf.enqueue_depth = info.max_event_port_enqueue_depth;
-               pconf.disable_implicit_release = 1;
+               pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
                ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
                TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
-               pconf.disable_implicit_release = 0;
+               pconf.event_port_cfg = 0;
        }
 
        ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index b5ae87a..07cd079 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -355,7 +355,8 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-               RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+               RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index f7383ca..95f03c8 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -406,7 +406,8 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE |
-               RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+               RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 
 }
 
@@ -536,7 +537,7 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        port_conf->enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static int
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index e796975..933a5a5 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -224,7 +224,8 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
                .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
                RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
                RTE_EVENT_DEV_CAP_NONSEQ_MODE|
-               RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
+               RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
+               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
        };
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 33cb502..6f242aa 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -152,7 +152,8 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
                                        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 
 }
 
@@ -218,7 +219,7 @@ ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
        port_conf->new_event_threshold = edev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static void
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 256b6a5..b31c26e 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -501,7 +501,8 @@ otx2_sso_info_get(struct rte_eventdev *event_dev,
                                        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+                                       RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static void
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 9b2f75f..3050578 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -374,7 +374,8 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
                .max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
                .max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
                .max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
-               .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+               .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
+                                RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
        };
 
        *info = evdev_opdl_info;
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c889220..6fd1102 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -101,7 +101,8 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
        dev_info->max_num_events = (1ULL << 20);
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                        RTE_EVENT_DEV_CAP_BURST_MODE |
-                                       RTE_EVENT_DEV_CAP_EVENT_QOS;
+                                       RTE_EVENT_DEV_CAP_EVENT_QOS |
+                                       RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
@@ -209,7 +210,7 @@ skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
        port_conf->new_event_threshold = 32 * 1024;
        port_conf->dequeue_depth = 16;
        port_conf->enqueue_depth = 16;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static void
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index e310c8c..0d8013a 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -179,7 +179,8 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
        }
 
        p->inflight_max = conf->new_event_threshold;
-       p->implicit_release = !conf->disable_implicit_release;
+       p->implicit_release = !(conf->event_port_cfg &
+                               RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
 
        /* check if ring exists, same as rx_worker above */
        snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -501,7 +502,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
        port_conf->new_event_threshold = 1024;
        port_conf->dequeue_depth = 16;
        port_conf->enqueue_depth = 16;
-       port_conf->disable_implicit_release = 0;
+       port_conf->event_port_cfg = 0;
 }
 
 static int
@@ -608,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
                                RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
                                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-                               RTE_EVENT_DEV_CAP_NONSEQ_MODE),
+                               RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+                               RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
        };
 
        *info = evdev_sw_info;
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index 38c21fa..4a7d823 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -172,7 +172,6 @@ create_ports(struct test *t, int num_ports)
                        .new_event_threshold = 1024,
                        .dequeue_depth = 32,
                        .enqueue_depth = 64,
-                       .disable_implicit_release = 0,
        };
        if (num_ports > MAX_PORTS)
                return -1;
@@ -1227,7 +1226,6 @@ port_reconfig_credits(struct test *t)
                                .new_event_threshold = 128,
                                .dequeue_depth = 32,
                                .enqueue_depth = 64,
-                               .disable_implicit_release = 0,
                };
                if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
                        printf("%d Error setting up port\n", __LINE__);
@@ -1317,7 +1315,6 @@ port_single_lb_reconfig(struct test *t)
                .new_event_threshold = 128,
                .dequeue_depth = 32,
                .enqueue_depth = 64,
-               .disable_implicit_release = 0,
        };
        if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
                printf("%d Error setting up port\n", __LINE__);
@@ -3079,7 +3076,8 @@ worker_loopback(struct test *t, uint8_t disable_implicit_release)
         * only be initialized once - and this needs to be set for multiple runs
         */
        conf.new_event_threshold = 512;
-       conf.disable_implicit_release = disable_implicit_release;
+       conf.event_port_cfg = disable_implicit_release ?
+               RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
        if (rte_event_port_setup(evdev, 0, &conf) < 0) {
                printf("Error setting up RX port\n");
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index 42ff4ee..f70ab0c 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -129,6 +129,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
        struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
+                       .nb_single_link_event_port_queues = 1,
                        .nb_events_limit  = 4096,
                        .nb_event_queue_flows = 1024,
                        .nb_event_port_dequeue_depth = 128,
@@ -143,7 +144,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
                        .schedule_type = cdata.queue_type,
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .nb_atomic_flows = 1024,
-               .nb_atomic_order_sequences = 1024,
+                       .nb_atomic_order_sequences = 1024,
        };
        struct rte_event_queue_conf tx_q_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
@@ -167,7 +168,8 @@ setup_eventdev_generic(struct worker_data *worker_data)
        disable_implicit_release = (dev_info.event_dev_cap &
                        RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
 
-       wkr_p_conf.disable_implicit_release = disable_implicit_release;
+       wkr_p_conf.event_port_cfg = disable_implicit_release ?
+               RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
        if (dev_info.max_num_events < config.nb_events_limit)
                config.nb_events_limit = dev_info.max_num_events;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 55bb2f7..ca6cd20 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -436,6 +436,7 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
        struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
+                       .nb_single_link_event_port_queues = 0,
                        .nb_events_limit  = 4096,
                        .nb_event_queue_flows = 1024,
                        .nb_event_port_dequeue_depth = 128,
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 2dc95e5..9a3167c 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -126,8 +126,11 @@ l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
        evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index 63d57b4..203a14c 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -123,8 +123,10 @@ l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
                                                                event_p_id++) {
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index f8c9843..c80573f 100644
--- a/examples/l3fwd/l3fwd_event_generic.c
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -115,8 +115,11 @@ l3fwd_event_port_setup_generic(void)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
        evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 03ac581..9916a7f 100644
--- a/examples/l3fwd/l3fwd_event_internal_port.c
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -113,8 +113,10 @@ l3fwd_event_port_setup_internal_port(void)
        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-       event_p_conf.disable_implicit_release =
-               evt_rsrc->disable_implicit_release;
+       event_p_conf.event_port_cfg = 0;
+       if (evt_rsrc->disable_implicit_release)
+               event_p_conf.event_port_cfg |=
+                       RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
                                                                event_p_id++) {
diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
index 86287b4..cc27bbc 100644
--- a/lib/librte_eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
@@ -286,7 +286,7 @@ txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id,
                return ret;
        }
 
-       pc->disable_implicit_release = 0;
+       pc->event_port_cfg = 0;
        ret = rte_event_port_setup(dev_id, port_id, pc);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 557198f..322453c 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -438,9 +438,29 @@ rte_event_dev_configure(uint8_t dev_id,
                                        dev_id);
                return -EINVAL;
        }
-       if (dev_conf->nb_event_queues > info.max_event_queues) {
-               RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
-               dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+       if (dev_conf->nb_event_queues > info.max_event_queues +
+                       info.max_single_link_event_port_queue_pairs) {
+               RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + 
max_single_link_event_port_queue_pairs=%d",
+                                dev_id, dev_conf->nb_event_queues,
+                                info.max_event_queues,
+                                info.max_single_link_event_port_queue_pairs);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_event_queues -
+                       dev_conf->nb_single_link_event_port_queues >
+                       info.max_event_queues) {
+               RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - 
nb_single_link_event_port_queues=%d > max_event_queues=%d",
+                                dev_id, dev_conf->nb_event_queues,
+                                dev_conf->nb_single_link_event_port_queues,
+                                info.max_event_queues);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_single_link_event_port_queues >
+                       dev_conf->nb_event_queues) {
+               RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > 
nb_event_queues=%d",
+                                dev_id,
+                                dev_conf->nb_single_link_event_port_queues,
+                                dev_conf->nb_event_queues);
                return -EINVAL;
        }
 
@@ -449,9 +469,31 @@ rte_event_dev_configure(uint8_t dev_id,
                RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
                return -EINVAL;
        }
-       if (dev_conf->nb_event_ports > info.max_event_ports) {
-               RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
-               dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+       if (dev_conf->nb_event_ports > info.max_event_ports +
+                       info.max_single_link_event_port_queue_pairs) {
+               RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + 
max_single_link_event_port_queue_pairs=%d",
+                                dev_id, dev_conf->nb_event_ports,
+                                info.max_event_ports,
+                                info.max_single_link_event_port_queue_pairs);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_event_ports -
+                       dev_conf->nb_single_link_event_port_queues
+                       > info.max_event_ports) {
+               RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - 
nb_single_link_event_port_queues=%d > max_event_ports=%d",
+                                dev_id, dev_conf->nb_event_ports,
+                                dev_conf->nb_single_link_event_port_queues,
+                                info.max_event_ports);
+               return -EINVAL;
+       }
+
+       if (dev_conf->nb_single_link_event_port_queues >
+           dev_conf->nb_event_ports) {
+               RTE_EDEV_LOG_ERR(
+                                "dev%d nb_single_link_event_port_queues=%d > 
nb_event_ports=%d",
+                                dev_id,
+                                dev_conf->nb_single_link_event_port_queues,
+                                dev_conf->nb_event_ports);
                return -EINVAL;
        }
 
@@ -738,7 +780,8 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                return -EINVAL;
        }
 
-       if (port_conf && port_conf->disable_implicit_release &&
+       if (port_conf &&
+           (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
            !(dev->data->event_dev_cap &
              RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
                RTE_EDEV_LOG_ERR(
@@ -831,6 +874,14 @@ rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
        case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
                *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
                break;
+       case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
+       {
+               uint32_t config;
+
+               config = dev->data->ports_cfg[port_id].event_port_cfg;
+               *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
+               break;
+       }
        default:
                return -EINVAL;
        };
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 7dc8323..ce1fc2c 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -291,6 +291,12 @@ struct rte_event;
  * single queue to each port or map a single queue to many port.
  */
 
+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
+/**< Event device preserves the flow ID from the enqueued
+ * event to the dequeued event if the flag is set. Otherwise,
+ * the content of this field is implementation dependent.
+ */
+
 /* Event device priority levels */
 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
 /**< Highest priority expressed across eventdev subsystem
@@ -380,6 +386,10 @@ struct rte_event_dev_info {
         * event port by this device.
         * A device that does not support bulk enqueue will set this as 1.
         */
+       uint8_t max_event_port_links;
+       /**< Maximum number of queues that can be linked to a single event
+        * port by this device.
+        */
        int32_t max_num_events;
        /**< A *closed system* event dev has a limit on the number of events it
         * can manage at a time. An *open system* event dev does not have a
@@ -387,6 +397,12 @@ struct rte_event_dev_info {
         */
        uint32_t event_dev_cap;
        /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+       uint8_t max_single_link_event_port_queue_pairs;
+       /**< Maximum number of event ports and queues that are optimized for
+        * (and only capable of) single-link configurations supported by this
+        * device. These ports and queues are not accounted for in
+        * max_event_ports or max_event_queues.
+        */
 };
 
 /**
@@ -494,6 +510,14 @@ struct rte_event_dev_config {
         */
        uint32_t event_dev_cfg;
        /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+       uint8_t nb_single_link_event_port_queues;
+       /**< Number of event ports and queues that will be singly-linked to
+        * each other. These are a subset of the overall event ports and
+        * queues; this value cannot exceed *nb_event_ports* or
+        * *nb_event_queues*. If the device has ports and queues that are
+        * optimized for single-link usage, this field is a hint for how many
+        * to allocate; otherwise, regular event ports and queues can be used.
+        */
 };
 
 /**
@@ -519,7 +543,6 @@ int
 rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf);
 
-
 /* Event queue specific APIs */
 
 /* Event queue configuration bitmap flags */
@@ -671,6 +694,20 @@ rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
 
 /* Event port specific APIs */
 
+/* Event port configuration bitmap flags */
+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
+/**< Configure the port not to release outstanding events in
+ * rte_event_dev_dequeue_burst(). If set, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
+/**< This event port links only to a single event queue.
+ *
+ *  @see rte_event_port_setup(), rte_event_port_link()
+ */
+
 /** Event port configuration structure */
 struct rte_event_port_conf {
        int32_t new_event_threshold;
@@ -698,13 +735,7 @@ struct rte_event_port_conf {
         * which previously supplied to rte_event_dev_configure().
         * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
         */
-       uint8_t disable_implicit_release;
-       /**< Configure the port not to release outstanding events in
-        * rte_event_dev_dequeue_burst(). If true, all events received through
-        * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
-        * RTE_EVENT_OP_FORWARD. Must be false when the device is not
-        * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
-        */
+       uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
 };
 
 /**
@@ -769,6 +800,10 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
  * The new event threshold of the port
  */
 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+/**
+ * The implicit release disable attribute of the port
+ */
+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
 
 /**
  * Get an attribute from a port.
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
index 443cd38..a3f9244 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd_pci.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -88,7 +88,6 @@ rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
        return -ENXIO;
 }
 
-
 /**
  * @internal
  * Wrapper for use by pci drivers as a .remove function to detach a event
diff --git a/lib/librte_eventdev/rte_eventdev_trace.h b/lib/librte_eventdev/rte_eventdev_trace.h
index 4de6341..5ec43d8 100644
--- a/lib/librte_eventdev/rte_eventdev_trace.h
+++ b/lib/librte_eventdev/rte_eventdev_trace.h
@@ -34,6 +34,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_u32(dev_conf->nb_event_port_dequeue_depth);
        rte_trace_point_emit_u32(dev_conf->nb_event_port_enqueue_depth);
        rte_trace_point_emit_u32(dev_conf->event_dev_cfg);
+       rte_trace_point_emit_u8(dev_conf->nb_single_link_event_port_queues);
        rte_trace_point_emit_int(rc);
 )
 
@@ -59,7 +60,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_i32(port_conf->new_event_threshold);
        rte_trace_point_emit_u16(port_conf->dequeue_depth);
        rte_trace_point_emit_u16(port_conf->enqueue_depth);
-       rte_trace_point_emit_u8(port_conf->disable_implicit_release);
+       rte_trace_point_emit_u32(port_conf->event_port_cfg);
        rte_trace_point_emit_int(rc);
 )
 
@@ -165,7 +166,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_i32(port_conf->new_event_threshold);
        rte_trace_point_emit_u16(port_conf->dequeue_depth);
        rte_trace_point_emit_u16(port_conf->enqueue_depth);
-       rte_trace_point_emit_u8(port_conf->disable_implicit_release);
+       rte_trace_point_emit_u32(port_conf->event_port_cfg);
        rte_trace_point_emit_ptr(conf_cb);
        rte_trace_point_emit_int(rc);
 )
@@ -257,7 +258,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_i32(port_conf->new_event_threshold);
        rte_trace_point_emit_u16(port_conf->dequeue_depth);
        rte_trace_point_emit_u16(port_conf->enqueue_depth);
-       rte_trace_point_emit_u8(port_conf->disable_implicit_release);
+       rte_trace_point_emit_u32(port_conf->event_port_cfg);
 )
 
 RTE_TRACE_POINT(
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 3d9d0ca..2846d04 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -100,7 +100,6 @@ EXPERIMENTAL {
        # added in 20.05
        __rte_eventdev_trace_configure;
        __rte_eventdev_trace_queue_setup;
-       __rte_eventdev_trace_port_setup;
        __rte_eventdev_trace_port_link;
        __rte_eventdev_trace_port_unlink;
        __rte_eventdev_trace_start;
@@ -134,4 +133,7 @@ EXPERIMENTAL {
        __rte_eventdev_trace_crypto_adapter_queue_pair_del;
        __rte_eventdev_trace_crypto_adapter_start;
        __rte_eventdev_trace_crypto_adapter_stop;
+
+       # changed in 20.11
+       __rte_eventdev_trace_port_setup;
 };
-- 
2.6.4
