From: Pavan Nikhilesh <pbhagavat...@marvell.com>

A collection of event queues linked to an event port can be
associated with a unique identifier called a link profile. Multiple
such profiles can be created, based on the event device capability,
using the function `rte_event_port_profile_links_set`, which takes
the same arguments as `rte_event_port_link` plus a profile
identifier.

The maximum number of link profiles supported by an event device
is advertised through the structure member
`rte_event_dev_info::max_profiles_per_port`.
By default, event ports are configured to use link profile 0
on initialization.
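
As an illustration, a minimal config-path sketch (the device, port and
queue identifiers below are hypothetical and only for illustration, not
part of this patch) that checks the capability and populates two profiles:

    struct rte_event_dev_info info;
    uint8_t dev_id = 0, port_id = 0;   /* hypothetical IDs */
    uint8_t hi_queues[2] = {0, 1};     /* hypothetical queue lists */
    uint8_t lo_queues[2] = {2, 3};

    rte_event_dev_info_get(dev_id, &info);
    if (info.max_profiles_per_port < 2)
        return -ENOTSUP;               /* only the default profile 0 */

    /* Profile 0 (the default) serves the high priority queues. */
    rte_event_port_profile_links_set(dev_id, port_id, hi_queues, NULL, 2, 0);
    /* Profile 1 serves the low priority queues. */
    rte_event_port_profile_links_set(dev_id, port_id, lo_queues, NULL, 2, 1);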

Once multiple link profiles are set up and the event device is started,
the application can use the function `rte_event_port_profile_switch`
to change the currently active profile on an event port. This affects
the next `rte_event_dequeue_burst` call: only the event queues
associated with the newly active link profile participate in
scheduling.
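
A minimal worker-path sketch (reusing the hypothetical dev_id/port_id
above; pick_next_profile() is an application-defined helper and also an
assumption made for illustration):

    struct rte_event ev;
    uint16_t deq;

    for (;;) {
        deq = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0);
        if (deq == 0) {
            /* Nothing scheduled on the active profile; switch before
             * the next dequeue so the other profile's queues take part
             * in scheduling.
             */
            rte_event_port_profile_switch(dev_id, port_id, pick_next_profile());
            continue;
        }
        /* Process the dequeued event. */
    }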

An unlink function `rte_event_port_profile_unlink` is provided
to modify the links associated with a profile, and
`rte_event_port_profile_links_get` can be used to retrieve the
links associated with a profile.
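
For example, with the same hypothetical identifiers as above, the links of
profile 1 could be inspected and then torn down as follows:

    uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
    int nb_links;

    /* Retrieve the queues currently linked to profile 1 of this port. */
    nb_links = rte_event_port_profile_links_get(dev_id, port_id, queues, priorities, 1);
    if (nb_links > 0)
        rte_event_port_profile_unlink(dev_id, port_id, queues, (uint16_t)nb_links, 1);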

Using link profiles reduces the overhead of linking/unlinking and of
waiting for in-progress unlinks in the fast path, and gives applications
the ability to switch between preset profiles on the fly.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
Acked-by: Jerin Jacob <jer...@marvell.com>
---
 config/rte_config.h                       |   1 +
 doc/guides/eventdevs/features/default.ini |   1 +
 doc/guides/prog_guide/eventdev.rst        |  40 ++++
 doc/guides/rel_notes/release_23_11.rst    |  11 ++
 drivers/event/cnxk/cnxk_eventdev.c        |   2 +-
 lib/eventdev/eventdev_pmd.h               |  59 +++++-
 lib/eventdev/eventdev_private.c           |   9 +
 lib/eventdev/eventdev_trace.h             |  32 +++
 lib/eventdev/eventdev_trace_points.c      |  12 ++
 lib/eventdev/rte_eventdev.c               | 150 +++++++++++---
 lib/eventdev/rte_eventdev.h               | 231 ++++++++++++++++++++++
 lib/eventdev/rte_eventdev_core.h          |   5 +
 lib/eventdev/rte_eventdev_trace_fp.h      |   8 +
 lib/eventdev/version.map                  |   4 +
 14 files changed, 536 insertions(+), 29 deletions(-)

diff --git a/config/rte_config.h b/config/rte_config.h
index 401727703f..a06189d0b5 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -73,6 +73,7 @@
 #define RTE_EVENT_MAX_DEVS 16
 #define RTE_EVENT_MAX_PORTS_PER_DEV 255
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
+#define RTE_EVENT_MAX_PROFILES_PER_PORT 8
 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
 #define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32
diff --git a/doc/guides/eventdevs/features/default.ini b/doc/guides/eventdevs/features/default.ini
index 73a52d915b..e980ae134a 100644
--- a/doc/guides/eventdevs/features/default.ini
+++ b/doc/guides/eventdevs/features/default.ini
@@ -18,6 +18,7 @@ multiple_queue_port        =
 carry_flow_id              =
 maintenance_free           =
 runtime_queue_attr         =
+profile_links              =
 
 ;
 ; Features of a default Ethernet Rx adapter.
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
index ff55115d0d..8c15c678bf 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -317,6 +317,46 @@ can be achieved like this:
         }
        int links_made = rte_event_port_link(dev_id, tx_port_id, &single_link_q, &priority, 1);
 
+Linking Queues to Ports with link profiles
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If supported by the underlying event device, an application can set up multiple
+link profiles per port and switch between them at run time based on heuristic data.
+Using link profiles reduces the overhead of linking/unlinking and of waiting for
+in-progress unlinks in the fast path, and lets applications switch between preset
+profiles on the fly.
+
+An example use case could be as follows.
+
+Config path:
+
+.. code-block:: c
+
+        struct rte_event_dev_info info;
+        uint8_t lq[4] = {4, 5, 6, 7};
+        uint8_t hq[4] = {0, 1, 2, 3};
+
+        rte_event_dev_info_get(0, &info);
+        if (info.max_profiles_per_port < 2)
+            return -ENOTSUP;
+
+        rte_event_port_profile_links_set(0, 0, hq, NULL, 4, 0);
+        rte_event_port_profile_links_set(0, 0, lq, NULL, 4, 1);
+
+Worker path:
+
+.. code-block:: c
+
+        uint8_t profile_id_to_switch;
+        struct rte_event ev;
+        uint16_t deq;
+
+        while (1) {
+            deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
+            if (deq == 0) {
+                profile_id_to_switch = app_find_profile_id_to_switch();
+                rte_event_port_profile_switch(0, 0, profile_id_to_switch);
+                continue;
+            }
+
+            /* Process the event received. */
+        }
+
 Starting the EventDev
 ~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index b66c364e21..fe6656bed2 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -90,6 +90,17 @@ New Features
     model by introducing APIs that allow applications to enqueue/dequeue DMA
     operations to/from dmadev as events scheduled by an event device.
 
+* **Added eventdev support to link queues to port with link profile.**
+
+  Introduced event link profiles that can be used to associate links between
+  event queues and an event port with a unique identifier termed a link profile.
+  The profile can be used to switch between the associated links in the fast path
+  without the additional overhead of linking/unlinking and waiting for unlinks to
+  complete.
+
+  * Added ``rte_event_port_profile_links_set``, ``rte_event_port_profile_unlink``,
+    ``rte_event_port_profile_links_get`` and ``rte_event_port_profile_switch``
+    APIs to enable this feature.
+
 * **Updated Marvell cnxk eventdev driver.**
 
   * Added support for ``remaining_ticks_get`` timer adapter PMD callback
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 9c9192bd40..e8ea7e0efb 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -133,7 +133,7 @@ cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;
 
-               links_map = event_dev->data->links_map;
+               links_map = event_dev->data->links_map[0];
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
 
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index f7227c0bfd..30bd90085c 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -119,8 +119,8 @@ struct rte_eventdev_data {
        /**< Array of port configuration structures. */
        struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
        /**< Array of queue configuration structures. */
-       uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
-                          RTE_EVENT_MAX_QUEUES_PER_DEV];
+       uint16_t links_map[RTE_EVENT_MAX_PROFILES_PER_PORT]
+                         [RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV];
        /**< Memory to store queues to port connections. */
        void *dev_private;
        /**< PMD-specific private data */
@@ -179,9 +179,10 @@ struct rte_eventdev {
        /**< Pointer to PMD eth Tx adapter enqueue function. */
        event_crypto_adapter_enqueue_t ca_enqueue;
        /**< Pointer to PMD crypto adapter enqueue function. */
-
        event_dma_adapter_enqueue_t dma_enqueue;
        /**< Pointer to PMD DMA adapter enqueue function. */
+       event_profile_switch_t profile_switch;
+       /**< Pointer to PMD Event switch profile function. */
 
        uint64_t reserved_64s[3]; /**< Reserved for future fields */
        void *reserved_ptrs[3];   /**< Reserved for future fields */
@@ -441,6 +442,32 @@ typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
                const uint8_t queues[], const uint8_t priorities[],
                uint16_t nb_links);
 
+/**
+ * Link multiple source event queues associated with a link profile to a
+ * destination event port.
+ *
+ * @param dev
+ *   Event device pointer
+ * @param port
+ *   Event port pointer
+ * @param queues
+ *   Points to an array of *nb_links* event queues to be linked
+ *   to the event port.
+ * @param priorities
+ *   Points to an array of *nb_links* service priorities associated with each
+ *   event queue link to event port.
+ * @param nb_links
+ *   The number of links to establish.
+ * @param profile_id
+ *   The profile ID with which to associate the links.
+ *
+ * @return
+ *   Returns 0 on success.
+ */
+typedef int (*eventdev_port_link_profile_t)(struct rte_eventdev *dev, void *port,
+                                           const uint8_t queues[], const uint8_t priorities[],
+                                           uint16_t nb_links, uint8_t profile_id);
+
 /**
  * Unlink multiple source event queues from destination event port.
  *
@@ -459,6 +486,28 @@ typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
 typedef int (*eventdev_port_unlink_t)(struct rte_eventdev *dev, void *port,
                uint8_t queues[], uint16_t nb_unlinks);
 
+/**
+ * Unlink multiple source event queues associated with a link profile from
+ * destination event port.
+ *
+ * @param dev
+ *   Event device pointer
+ * @param port
+ *   Event port pointer
+ * @param queues
+ *   An array of *nb_unlinks* event queues to be unlinked from the event port.
+ * @param nb_unlinks
+ *   The number of unlinks to establish
+ * @param profile_id
+ *   The profile ID of the associated links.
+ *
+ * @return
+ *   Returns 0 on success.
+ */
+typedef int (*eventdev_port_unlink_profile_t)(struct rte_eventdev *dev, void *port,
+                                             uint8_t queues[], uint16_t nb_unlinks,
+                                             uint8_t profile_id);
+
 /**
  * Unlinks in progress. Returns number of unlinks that the PMD is currently
  * performing, but have not yet been completed.
@@ -1502,8 +1551,12 @@ struct eventdev_ops {
 
        eventdev_port_link_t port_link;
        /**< Link event queues to an event port. */
+       eventdev_port_link_profile_t port_link_profile;
+       /**< Link event queues associated with a profile to an event port. */
        eventdev_port_unlink_t port_unlink;
        /**< Unlink event queues from an event port. */
+       eventdev_port_unlink_profile_t port_unlink_profile;
+       /**< Unlink event queues associated with a profile from an event port. */
        eventdev_port_unlinks_in_progress_t port_unlinks_in_progress;
        /**< Unlinks in progress on an event port. */
        eventdev_dequeue_timeout_ticks_t timeout_ticks;
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index 18ed8bf3c8..017f97ccab 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -89,6 +89,13 @@ dummy_event_dma_adapter_enqueue(__rte_unused void *port, __rte_unused struct rte
        return 0;
 }
 
+static int
+dummy_event_port_profile_switch(__rte_unused void *port, __rte_unused uint8_t profile_id)
+{
+       RTE_EDEV_LOG_ERR("change profile requested for unconfigured event device");
+       return -EINVAL;
+}
+
 void
 event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 {
@@ -106,6 +113,7 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
                        dummy_event_tx_adapter_enqueue_same_dest,
                .ca_enqueue = dummy_event_crypto_adapter_enqueue,
                .dma_enqueue = dummy_event_dma_adapter_enqueue,
+               .profile_switch = dummy_event_port_profile_switch,
                .data = dummy_data,
        };
 
@@ -127,5 +135,6 @@ event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
        fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
        fp_op->ca_enqueue = dev->ca_enqueue;
        fp_op->dma_enqueue = dev->dma_enqueue;
+       fp_op->profile_switch = dev->profile_switch;
        fp_op->data = dev->data->ports;
 }
diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
index f008ef0091..9c2b261c06 100644
--- a/lib/eventdev/eventdev_trace.h
+++ b/lib/eventdev/eventdev_trace.h
@@ -76,6 +76,17 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_int(rc);
 )
 
+RTE_TRACE_POINT(
+       rte_eventdev_trace_port_profile_links_set,
+       RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
+               uint16_t nb_links, uint8_t profile_id, int rc),
+       rte_trace_point_emit_u8(dev_id);
+       rte_trace_point_emit_u8(port_id);
+       rte_trace_point_emit_u16(nb_links);
+       rte_trace_point_emit_u8(profile_id);
+       rte_trace_point_emit_int(rc);
+)
+
 RTE_TRACE_POINT(
        rte_eventdev_trace_port_unlink,
        RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
@@ -86,6 +97,17 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_int(rc);
 )
 
+RTE_TRACE_POINT(
+       rte_eventdev_trace_port_profile_unlink,
+       RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
+               uint16_t nb_unlinks, uint8_t profile_id, int rc),
+       rte_trace_point_emit_u8(dev_id);
+       rte_trace_point_emit_u8(port_id);
+       rte_trace_point_emit_u16(nb_unlinks);
+       rte_trace_point_emit_u8(profile_id);
+       rte_trace_point_emit_int(rc);
+)
+
 RTE_TRACE_POINT(
        rte_eventdev_trace_start,
        RTE_TRACE_POINT_ARGS(uint8_t dev_id, int rc),
@@ -487,6 +509,16 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_int(count);
 )
 
+RTE_TRACE_POINT(
+       rte_eventdev_trace_port_profile_links_get,
+       RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, uint8_t profile_id,
+               int count),
+       rte_trace_point_emit_u8(dev_id);
+       rte_trace_point_emit_u8(port_id);
+       rte_trace_point_emit_u8(profile_id);
+       rte_trace_point_emit_int(count);
+)
+
 RTE_TRACE_POINT(
        rte_eventdev_trace_port_unlinks_in_progress,
        RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id),
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index 76144cfe75..8024e07531 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -19,9 +19,15 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_setup,
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_link,
        lib.eventdev.port.link)
 
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_links_set,
+       lib.eventdev.port.profile.links.set)
+
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_unlink,
        lib.eventdev.port.unlink)
 
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_unlink,
+       lib.eventdev.port.profile.unlink)
+
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_start,
        lib.eventdev.start)
 
@@ -40,6 +46,9 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_deq_burst,
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_maintain,
        lib.eventdev.maintain)
 
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_switch,
+       lib.eventdev.port.profile.switch)
+
 /* Eventdev Rx adapter trace points */
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_eth_rx_adapter_create,
        lib.eventdev.rx.adapter.create)
@@ -206,6 +215,9 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_default_conf_get,
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_links_get,
        lib.eventdev.port.links.get)
 
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_profile_links_get,
+       lib.eventdev.port.profile.links.get)
+
 RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_unlinks_in_progress,
        lib.eventdev.port.unlinks.in.progress)
 
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 60509c6efb..5ee8bd665b 100644
--- a/lib/eventdev/rte_eventdev.c
+++ b/lib/eventdev/rte_eventdev.c
@@ -96,6 +96,7 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
                return -EINVAL;
 
        memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+       dev_info->max_profiles_per_port = 1;
 
        if (*dev->dev_ops->dev_infos_get == NULL)
                return -ENOTSUP;
@@ -293,7 +294,7 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
        void **ports;
        uint16_t *links_map;
        struct rte_event_port_conf *ports_cfg;
-       unsigned int i;
+       unsigned int i, j;
 
        RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
                         dev->data->dev_id);
@@ -304,7 +305,6 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 
                ports = dev->data->ports;
                ports_cfg = dev->data->ports_cfg;
-               links_map = dev->data->links_map;
 
                for (i = nb_ports; i < old_nb_ports; i++)
                        (*dev->dev_ops->port_release)(ports[i]);
@@ -320,9 +320,11 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
                                sizeof(ports[0]) * new_ps);
                        memset(ports_cfg + old_nb_ports, 0,
                                sizeof(ports_cfg[0]) * new_ps);
-                       for (i = old_links_map_end; i < links_map_end; i++)
-                               links_map[i] =
-                                       EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+                       for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) {
+                               links_map = dev->data->links_map[i];
+                               for (j = old_links_map_end; j < links_map_end; j++)
+                                       links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+                       }
                }
        } else {
                if (*dev->dev_ops->port_release == NULL)
@@ -976,21 +978,45 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
 {
-       struct rte_eventdev *dev;
-       uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+       return rte_event_port_profile_links_set(dev_id, port_id, queues, priorities, nb_links, 0);
+}
+
+int
+rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
+                                const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id)
+{
        uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+       uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+       struct rte_event_dev_info info;
+       struct rte_eventdev *dev;
        uint16_t *links_map;
        int i, diag;
 
        RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
        dev = &rte_eventdevs[dev_id];
 
+       if (*dev->dev_ops->dev_infos_get == NULL)
+               return -ENOTSUP;
+
+       (*dev->dev_ops->dev_infos_get)(dev, &info);
+       if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
+           profile_id >= info.max_profiles_per_port) {
+               RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
+               return -EINVAL;
+       }
+
        if (*dev->dev_ops->port_link == NULL) {
                RTE_EDEV_LOG_ERR("Function not supported\n");
                rte_errno = ENOTSUP;
                return 0;
        }
 
+       if (profile_id && *dev->dev_ops->port_link_profile == NULL) {
+               RTE_EDEV_LOG_ERR("Function not supported\n");
+               rte_errno = ENOTSUP;
+               return 0;
+       }
+
        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                rte_errno = EINVAL;
@@ -1018,18 +1044,22 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                        return 0;
                }
 
-       diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
-                                               queues, priorities, nb_links);
+       if (profile_id)
+               diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues,
+                                                         priorities, nb_links, profile_id);
+       else
+               diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues,
+                                                 priorities, nb_links);
        if (diag < 0)
                return diag;
 
-       links_map = dev->data->links_map;
+       links_map = dev->data->links_map[profile_id];
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < diag; i++)
                links_map[queues[i]] = (uint8_t)priorities[i];
 
-       rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
+       rte_eventdev_trace_port_profile_links_set(dev_id, port_id, nb_links, profile_id, diag);
        return diag;
 }
 
@@ -1037,27 +1067,51 @@ int
 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                      uint8_t queues[], uint16_t nb_unlinks)
 {
-       struct rte_eventdev *dev;
+       return rte_event_port_profile_unlink(dev_id, port_id, queues, nb_unlinks, 0);
+}
+
+int
+rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+                             uint16_t nb_unlinks, uint8_t profile_id)
+{
        uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
-       int i, diag, j;
+       struct rte_event_dev_info info;
+       struct rte_eventdev *dev;
        uint16_t *links_map;
+       int i, diag, j;
 
        RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
        dev = &rte_eventdevs[dev_id];
 
+       if (*dev->dev_ops->dev_infos_get == NULL)
+               return -ENOTSUP;
+
+       (*dev->dev_ops->dev_infos_get)(dev, &info);
+       if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
+           profile_id >= info.max_profiles_per_port) {
+               RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
+               return -EINVAL;
+       }
+
        if (*dev->dev_ops->port_unlink == NULL) {
                RTE_EDEV_LOG_ERR("Function not supported");
                rte_errno = ENOTSUP;
                return 0;
        }
 
+       if (profile_id && *dev->dev_ops->port_unlink_profile == NULL) {
+               RTE_EDEV_LOG_ERR("Function not supported");
+               rte_errno = ENOTSUP;
+               return 0;
+       }
+
        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                rte_errno = EINVAL;
                return 0;
        }
 
-       links_map = dev->data->links_map;
+       links_map = dev->data->links_map[profile_id];
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
 
@@ -1086,16 +1140,19 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                        return 0;
                }
 
-       diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
-                                       queues, nb_unlinks);
-
+       if (profile_id)
+               diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues,
+                                                           nb_unlinks, profile_id);
+       else
+               diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues,
+                                                   nb_unlinks);
        if (diag < 0)
                return diag;
 
        for (i = 0; i < diag; i++)
                links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 
-       rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
+       rte_eventdev_trace_port_profile_unlink(dev_id, port_id, nb_unlinks, profile_id, diag);
        return diag;
 }
 
@@ -1139,7 +1196,8 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                return -EINVAL;
        }
 
-       links_map = dev->data->links_map;
+       /* Use the default profile_id. */
+       links_map = dev->data->links_map[0];
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < dev->data->nb_queues; i++) {
@@ -1155,6 +1213,49 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
        return count;
 }
 
+int
+rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+                                uint8_t priorities[], uint8_t profile_id)
+{
+       struct rte_event_dev_info info;
+       struct rte_eventdev *dev;
+       uint16_t *links_map;
+       int i, count = 0;
+
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+       dev = &rte_eventdevs[dev_id];
+       if (*dev->dev_ops->dev_infos_get == NULL)
+               return -ENOTSUP;
+
+       (*dev->dev_ops->dev_infos_get)(dev, &info);
+       if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT ||
+           profile_id >= info.max_profiles_per_port) {
+               RTE_EDEV_LOG_ERR("Invalid profile_id=%" PRIu8, profile_id);
+               return -EINVAL;
+       }
+
+       if (!is_valid_port(dev, port_id)) {
+               RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+               return -EINVAL;
+       }
+
+       links_map = dev->data->links_map[profile_id];
+       /* Point links_map to this port specific area */
+       links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+       for (i = 0; i < dev->data->nb_queues; i++) {
+               if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+                       queues[count] = i;
+                       priorities[count] = (uint8_t)links_map[i];
+                       ++count;
+               }
+       }
+
+       rte_eventdev_trace_port_profile_links_get(dev_id, port_id, profile_id, count);
+
+       return count;
+}
+
 int
 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                                 uint64_t *timeout_ticks)
@@ -1463,7 +1564,7 @@ eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
 {
        char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
        const struct rte_memzone *mz;
-       int n;
+       int i, n;
 
        /* Generate memzone name */
        n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
@@ -1483,11 +1584,10 @@ eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
        *data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                memset(*data, 0, sizeof(struct rte_eventdev_data));
-               for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
-                                       RTE_EVENT_MAX_QUEUES_PER_DEV;
-                    n++)
-                       (*data)->links_map[n] =
-                               EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+               for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++)
+                       for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV;
+                            n++)
+                               (*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
        }
 
        return 0;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 41743f91b1..2ea98302b8 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -320,6 +320,12 @@ struct rte_event;
  * rte_event_queue_setup().
  */
 
+#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
+/**< Event device is capable of supporting multiple link profiles per event port
+ * i.e., the value of `rte_event_dev_info::max_profiles_per_port` is greater
+ * than one.
+ */
+
 /* Event device priority levels */
 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
 /**< Highest priority expressed across eventdev subsystem
@@ -446,6 +452,10 @@ struct rte_event_dev_info {
         * device. These ports and queues are not accounted for in
         * max_event_ports or max_event_queues.
         */
+       uint8_t max_profiles_per_port;
+       /**< Maximum number of event queue profiles per event port.
+        * A device that doesn't support multiple profiles will set this to 1.
+        */
 };
 
 /**
@@ -1580,6 +1590,10 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
  * latency of critical work by establishing the link with more event ports
  * at runtime.
  *
+ * When ``rte_event_dev_info::max_profiles_per_port`` is greater than one,
+ * this function links the event queues to the default profile, i.e. profile_id 0,
+ * of the event port.
+ *
  * @param dev_id
  *   The identifier of the device.
  *
@@ -1637,6 +1651,10 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
  * Event queue(s) to event port unlink establishment can be changed at runtime
  * without re-configuring the device.
  *
+ * When ``rte_event_dev_info::max_profiles_per_port`` is greater than one,
+ * this function unlinks the event queues from the default profile identifier,
+ * i.e. profile 0, of the event port.
+ *
  * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
  *
  * @param dev_id
@@ -1670,6 +1688,136 @@ int
 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                      uint8_t queues[], uint16_t nb_unlinks);
 
+/**
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated profile identifier
+ * supplied in *profile_id* with service priorities supplied in *priorities*
+ * on the event device designated by its *dev_id*.
+ *
+ * If *profile_id* is set to 0, the links created by the call ``rte_event_port_link``
+ * will be overwritten.
+ *
+ * Event ports by default use profile_id 0 unless it is changed using the
+ * call ``rte_event_port_profile_switch()``.
+ *
+ * The link establishment shall enable the event port *port_id* to receive events
+ * from the specified event queue(s) supplied in *queues*.
+ *
+ * An event queue may link to one or more event ports.
+ * The number of links that can be established from an event queue to an event
+ * port is implementation defined.
+ *
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param port_id
+ *   Event port identifier to select the destination port to link.
+ *
+ * @param queues
+ *   Points to an array of *nb_links* event queues to be linked
+ *   to the event port.
+ *   NULL value is allowed, in which case this function links all the configured
+ *   event queues *nb_event_queues* which were previously supplied to
+ *   rte_event_dev_configure() to the event port *port_id*.
+ *
+ * @param priorities
+ *   Points to an array of *nb_links* service priorities associated with each
+ *   event queue link to event port.
+ *   The priority defines the event port's servicing priority for
+ *   event queue, which may be ignored by an implementation.
+ *   The requested priority should be in the range of
+ *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ *   The implementation shall normalize the requested priority to
+ *   implementation supported priority value.
+ *   NULL value is allowed, in which case this function links the event queues
+ *   with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
+ *
+ * @param nb_links
+ *   The number of links to establish. This parameter is ignored if queues is
+ *   NULL.
+ *
+ * @param profile_id
+ *   The profile identifier associated with the links between event queues and
+ *   event port. Should be less than the max capability reported by
+ *   ``rte_event_dev_info::max_profiles_per_port``
+ *
+ * @return
+ * The number of links actually established. The return value can be less than
+ * the value of the *nb_links* parameter when the implementation has a
+ * limitation on a specific queue to port link establishment or if invalid
+ * parameters are specified in *queues*.
+ * If the return value is less than *nb_links*, the remaining links at the end
+ * of link[] are not established, and the caller has to take care of them.
+ * If the return value is less than *nb_links* then the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are:
+ * (EDQUOT) Quota exceeded (the application tried to link a queue configured with
+ *  RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
+ * (EINVAL) Invalid parameter
+ *
+ */
+__rte_experimental
+int
+rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
+                                const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
+
+/**
+ * Unlink multiple source event queues supplied in *queues* that belong to the profile
+ * designated by *profile_id* from the destination event port designated by its
+ * *port_id* on the event device designated by its *dev_id*.
+ *
+ * If *profile_id* is set to 0, i.e. the default profile, then this function
+ * behaves like ``rte_event_port_unlink``.
+ *
+ * The unlink call issues an async request to disable the event port *port_id*
+ * from receiving events from the specified event queue(s) supplied in *queues*.
+ * Event queue(s) to event port unlink establishment can be changed at runtime
+ * without re-configuring the device.
+ *
+ * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param port_id
+ *   Event port identifier to select the destination port to unlink.
+ *
+ * @param queues
+ *   Points to an array of *nb_unlinks* event queues to be unlinked
+ *   from the event port.
+ *   NULL value is allowed, in which case this function unlinks all the
+ *   event queue(s) from the event port *port_id*.
+ *
+ * @param nb_unlinks
+ *   The number of unlinks to establish. This parameter is ignored if queues is
+ *   NULL.
+ *
+ * @param profile_id
+ *   The profile identifier associated with the links between event queues and
+ *   event port. Should be less than the max capability reported by
+ *   ``rte_event_dev_info::max_profiles_per_port``
+ *
+ * @return
+ * The number of unlinks successfully requested. The return value can be less
+ * than the value of the *nb_unlinks* parameter when the implementation has a
+ * limitation on a specific queue to port unlink establishment or
+ * if invalid parameters are specified.
+ * If the return value is less than *nb_unlinks*, the remaining queues at the
+ * end of queues[] are not unlinked, and the caller has to take care of them.
+ * If the return value is less than *nb_unlinks* then the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are:
+ * (EINVAL) Invalid parameter
+ *
+ */
+__rte_experimental
+int
+rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+                             uint16_t nb_unlinks, uint8_t profile_id);
+
 /**
  * Returns the number of unlinks in progress.
  *
@@ -1724,6 +1872,42 @@ int
 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                         uint8_t queues[], uint8_t priorities[]);
 
+/**
+ * Retrieve the list of source event queues and their service priorities
+ * associated with a *profile_id* and linked to the destination event port
+ * designated by its *port_id* on the event device designated by its *dev_id*.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param port_id
+ *   Event port identifier.
+ *
+ * @param[out] queues
+ *   Points to an array of *queues* for output.
+ *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ *   store the event queue(s) linked with event port *port_id*
+ *
+ * @param[out] priorities
+ *   Points to an array of *priorities* for output.
+ *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ *   store the service priority associated with each event queue linked
+ *
+ * @param profile_id
+ *   The profile identifier associated with the links between event queues and
+ *   event port. Should be less than the max capability reported by
+ *   ``rte_event_dev_info::max_profiles_per_port``
+ *
+ * @return
+ * The number of links established on the event port designated by its
+ *  *port_id*.
+ * - <0 on failure.
+ */
+__rte_experimental
+int
+rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+                                uint8_t priorities[], uint8_t profile_id);
+
 /**
  * Retrieve the service ID of the event dev. If the adapter doesn't use
  * a rte_service function, this function returns -ESRCH.
@@ -2309,6 +2493,53 @@ rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
        return 0;
 }
 
+/**
+ * Change the active profile on an event port.
+ *
+ * This function is used to change the current active profile on an event port
+ * when multiple link profiles are configured on an event port through the
+ * function call ``rte_event_port_profile_links_set``.
+ *
+ * On the subsequent ``rte_event_dequeue_burst`` call, only the event queues
+ * that were associated with the newly active profile will participate in
+ * scheduling.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param profile_id
+ *   The identifier of the profile.
+ * @return
+ *  - 0 on success.
+ *  - -EINVAL if *dev_id*,  *port_id*, or *profile_id* is invalid.
+ */
+__rte_experimental
+static inline uint8_t
+rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
+{
+       const struct rte_event_fp_ops *fp_ops;
+       void *port;
+
+       fp_ops = &rte_event_fp_ops[dev_id];
+       port = fp_ops->data[port_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+       if (dev_id >= RTE_EVENT_MAX_DEVS ||
+           port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
+               return -EINVAL;
+
+       if (port == NULL)
+               return -EINVAL;
+
+       if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
+               return -EINVAL;
+#endif
+       rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
+
+       return fp_ops->profile_switch(port, profile_id);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index 83e8736c71..5b405518d1 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -46,6 +46,9 @@ typedef uint16_t (*event_dma_adapter_enqueue_t)(void *port, struct rte_event ev[
                                                uint16_t nb_events);
 /**< @internal Enqueue burst of events on DMA adapter */
 
+typedef int (*event_profile_switch_t)(void *port, uint8_t profile);
+/**< @internal Switch active link profile on the event port. */
+
 struct rte_event_fp_ops {
        void **data;
        /**< points to array of internal port data pointers */
@@ -71,6 +74,8 @@ struct rte_event_fp_ops {
        /**< PMD Crypto adapter enqueue function. */
        event_dma_adapter_enqueue_t dma_enqueue;
        /**< PMD DMA adapter enqueue function. */
+       event_profile_switch_t profile_switch;
+       /**< PMD Event switch profile function. */
        uintptr_t reserved[4];
 } __rte_cache_aligned;
 
diff --git a/lib/eventdev/rte_eventdev_trace_fp.h b/lib/eventdev/rte_eventdev_trace_fp.h
index af2172d2a5..04d510ad00 100644
--- a/lib/eventdev/rte_eventdev_trace_fp.h
+++ b/lib/eventdev/rte_eventdev_trace_fp.h
@@ -46,6 +46,14 @@ RTE_TRACE_POINT_FP(
        rte_trace_point_emit_int(op);
 )
 
+RTE_TRACE_POINT_FP(
+       rte_eventdev_trace_port_profile_switch,
+       RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, uint8_t profile),
+       rte_trace_point_emit_u8(dev_id);
+       rte_trace_point_emit_u8(port_id);
+       rte_trace_point_emit_u8(profile);
+)
+
 RTE_TRACE_POINT_FP(
        rte_eventdev_trace_eth_tx_adapter_enqueue,
        RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, void *ev_table,
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index b81eb2919c..59ee8b86cf 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -150,6 +150,10 @@ EXPERIMENTAL {
        rte_event_dma_adapter_vchan_add;
        rte_event_dma_adapter_vchan_del;
        rte_event_eth_rx_adapter_create_ext_with_params;
+       rte_event_port_profile_links_get;
+       rte_event_port_profile_links_set;
+       rte_event_port_profile_unlink;
+       __rte_eventdev_trace_port_profile_switch;
 };
 
 INTERNAL {
-- 
2.25.1

