From: Lukasz Bartosik <lbarto...@marvell.com>

Add support for Rx and Tx internal ports. When internal ports are
available, a packet can be received from an eth port and forwarded
to an event queue by HW without any software intervention. The same
applies on the Tx side, where a packet sent to an event queue can be
forwarded by HW to an eth port without any software intervention.
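
For illustration only (not part of the diff below), a minimal sketch of
the capability probe this feature relies on. The helper name is
hypothetical; the APIs and capability flags are the ones used by the
patch itself:

  #include <stdbool.h>
  #include <stdint.h>
  #include <rte_ethdev.h>
  #include <rte_event_eth_rx_adapter.h>
  #include <rte_event_eth_tx_adapter.h>

  /* Hypothetical helper: true when every eth dev served by the given
   * event device has a HW internal port on both the Rx and Tx side.
   */
  static bool
  evdev_has_all_internal_ports(uint8_t eventdev_id)
  {
          uint32_t caps;
          uint16_t port_id;

          RTE_ETH_FOREACH_DEV(port_id) {
                  /* Rx: can HW deliver packets straight to event queues? */
                  caps = 0;
                  rte_event_eth_rx_adapter_caps_get(eventdev_id, port_id,
                                                    &caps);
                  if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
                          return false;

                  /* Tx: can HW transmit events directly to the eth port? */
                  caps = 0;
                  rte_event_eth_tx_adapter_caps_get(eventdev_id, port_id,
                                                    &caps);
                  if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
                          return false;
          }
          return true;
  }

When the probe succeeds for all event devices, dedicated Rx/Tx lcores and
the extra atomic Tx event queue are not needed; this is what the
eh_do_capability_check() added below decides.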

Signed-off-by: Anoob Joseph <ano...@marvell.com>
Signed-off-by: Lukasz Bartosik <lbarto...@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 231 ++++++++++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  11 ++
 2 files changed, 195 insertions(+), 47 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index a67132a..6549875 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -100,6 +100,39 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id)
 
        return &(em_conf->eventdev_config[i]);
 }
+
+static inline bool
+eh_dev_has_rx_internal_port(uint8_t eventdev_id)
+{
+       int j;
+       bool flag = true;
+
+       RTE_ETH_FOREACH_DEV(j) {
+               uint32_t caps = 0;
+
+               rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps);
+               if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+                       flag = false;
+       }
+       return flag;
+}
+
+static inline bool
+eh_dev_has_tx_internal_port(uint8_t eventdev_id)
+{
+       int j;
+       bool flag = true;
+
+       RTE_ETH_FOREACH_DEV(j) {
+               uint32_t caps = 0;
+
+               rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps);
+               if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+                       flag = false;
+       }
+       return flag;
+}
+
 static inline bool
 eh_dev_has_burst_mode(uint8_t dev_id)
 {
@@ -115,7 +148,9 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 {
        struct eventdev_params *eventdev_config;
        struct rte_event_dev_info dev_info;
+       int lcore_count;
        int nb_eventdev;
+       int nb_eth_dev;
        int i, ret;
 
        /* Get the number of event devices */
@@ -126,6 +161,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
                return -EINVAL;
        }
 
+       /* Get the number of eth devs */
+       nb_eth_dev = rte_eth_dev_count_avail();
+
+       if (nb_eth_dev == 0) {
+               EH_LOG_ERR("No eth devices detected");
+               return -EINVAL;
+       }
+
+       /* Get the number of lcores */
+       lcore_count = rte_lcore_count();
+
        for (i = 0; i < nb_eventdev; i++) {
 
                /* Get the event dev conf */
@@ -152,11 +198,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
                eventdev_config->ev_queue_mode =
                                RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-               /* One port is required for eth Rx adapter */
-               eventdev_config->nb_eventport -= 1;
+               /* Check if there are more queues than required */
+               if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
+                       /* One queue is reserved for Tx */
+                       eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+               }
 
-               /* One port is reserved for eth Tx adapter */
-               eventdev_config->nb_eventport -= 1;
+               /* Check if there are more ports than required */
+               if (eventdev_config->nb_eventport > lcore_count) {
+                       /* One port per lcore is enough */
+                       eventdev_config->nb_eventport = lcore_count;
+               }
 
                /* Update the number of event devices */
                em_conf->nb_eventdev++;
@@ -165,6 +217,42 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
        return 0;
 }
 
+static void
+eh_do_capability_check(struct eventmode_conf *em_conf)
+{
+       struct eventdev_params *eventdev_config;
+       int all_internal_ports = 1;
+       uint32_t eventdev_id;
+       int i;
+
+       for (i = 0; i < em_conf->nb_eventdev; i++) {
+
+               /* Get the event dev conf */
+               eventdev_config = &(em_conf->eventdev_config[i]);
+               eventdev_id = eventdev_config->eventdev_id;
+
+               /* Check if event device has internal port for Rx & Tx */
+               if (eh_dev_has_rx_internal_port(eventdev_id) &&
+                   eh_dev_has_tx_internal_port(eventdev_id)) {
+                       eventdev_config->all_internal_ports = 1;
+               } else {
+                       all_internal_ports = 0;
+               }
+       }
+
+       /*
+        * If Rx & Tx internal ports are supported by all event devices then
+        * eth cores won't be required. Override the requested eth core mask
+        * and decrement the number of event queues by one, as a dedicated
+        * Tx queue is no longer needed.
+        */
+       if (all_internal_ports) {
+               rte_bitmap_reset(em_conf->eth_core_mask);
+               for (i = 0; i < em_conf->nb_eventdev; i++)
+                       em_conf->eventdev_config[i].nb_eventqueue--;
+       }
+}
+
 static int
 eh_set_default_conf_link(struct eventmode_conf *em_conf)
 {
@@ -239,6 +327,9 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
        struct rx_adapter_connection_info *conn;
        struct eventdev_params *eventdev_config;
        struct rx_adapter_conf *adapter;
+       bool rx_internal_port = true;
+       int nb_eventqueue;
+       uint32_t caps = 0;
        int eventdev_id;
        int nb_eth_dev;
        int adapter_id;
@@ -268,7 +359,14 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
        /* Set adapter conf */
        adapter->eventdev_id = eventdev_id;
        adapter->adapter_id = adapter_id;
-       adapter->rx_core_id = eh_get_next_eth_core(em_conf);
+
+       /*
+        * If the event device does not have internal ports for passing
+        * packets then reserve one queue for the Tx path
+        */
+       nb_eventqueue = eventdev_config->all_internal_ports ?
+                       eventdev_config->nb_eventqueue :
+                       eventdev_config->nb_eventqueue - 1;
 
        /*
         * Map all queues of one eth device (port) to one event
@@ -277,10 +375,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
         */
 
        /* Make sure there is enough event queues for 1:1 mapping */
-       if (nb_eth_dev > eventdev_config->nb_eventqueue) {
+       if (nb_eth_dev > nb_eventqueue) {
                EH_LOG_ERR("Not enough event queues for 1:1 mapping "
                        "[eth devs: %d, event queues: %d]\n",
-                       nb_eth_dev, eventdev_config->nb_eventqueue);
+                       nb_eth_dev, nb_eventqueue);
                return -EINVAL;
        }
 
@@ -303,11 +401,24 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
                /* Add all eth queues of one eth port to one event queue */
                conn->ethdev_rx_qid = -1;
 
+               /* Get Rx adapter capabilities */
+               rte_event_eth_rx_adapter_caps_get(eventdev_id, i, &caps);
+               if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+                       rx_internal_port = false;
+
                /* Update no of connections */
                adapter->nb_connections++;
 
        }
 
+       if (rx_internal_port) {
+               /* Rx core is not required */
+               adapter->rx_core_id = -1;
+       } else {
+               /* Rx core is required */
+               adapter->rx_core_id = eh_get_next_eth_core(em_conf);
+       }
+
        /* We have setup one adapter */
        em_conf->nb_rx_adapter = 1;
 
@@ -320,6 +431,8 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
        struct tx_adapter_connection_info *conn;
        struct eventdev_params *eventdev_config;
        struct tx_adapter_conf *tx_adapter;
+       bool tx_internal_port = true;
+       uint32_t caps = 0;
        int eventdev_id;
        int adapter_id;
        int nb_eth_dev;
@@ -353,22 +466,6 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
        tx_adapter->eventdev_id = eventdev_id;
        tx_adapter->adapter_id = adapter_id;
 
-       /* TODO: Tx core is required only when internal port is not present */
-
-       tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
-
-       /*
-        * Application uses one event queue per adapter for submitting
-        * packets for Tx. Reserve the last queue available and decrement
-        * the total available event queues for this
-        */
-
-       /* Queue numbers start at 0 */
-       tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
-
-       /* Update the number of event queues available in eventdev */
-       eventdev_config->nb_eventqueue--;
-
        /*
         * Map all Tx queues of the eth device (port) to the event device.
         */
@@ -398,10 +495,30 @@ eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf)
                /* Add all eth tx queues to adapter */
                conn->ethdev_tx_qid = -1;
 
+               /* Get Tx adapter capabilities */
+               rte_event_eth_tx_adapter_caps_get(eventdev_id, i, &caps);
+               if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+                       tx_internal_port = false;
+
                /* Update no of connections */
                tx_adapter->nb_connections++;
        }
 
+       if (tx_internal_port) {
+               /* Tx core is not required */
+               tx_adapter->tx_core_id = -1;
+       } else {
+               /* Tx core is required */
+               tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf);
+
+               /*
+                * Use one event queue per adapter for submitting packets
+                * for Tx. Reserve the last available queue.
+                */
+               /* Queue numbers start at 0 */
+               tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1;
+       }
+
        /* We have setup one adapter */
        em_conf->nb_tx_adapter = 1;
        return 0;
@@ -422,6 +539,9 @@ eh_validate_conf(struct eventmode_conf *em_conf)
                        return ret;
        }
 
+       /* Perform capability check for the selected event devices */
+       eh_do_capability_check(em_conf);
+
        /*
         * Check if links are specified. Else generate a default config for
         * the event ports used.
@@ -481,9 +601,6 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
                /* Get the number of queues */
                nb_eventqueue = eventdev_config->nb_eventqueue;
 
-               /* One queue is reserved for the final stage (doing eth tx) */
-               nb_eventqueue += 1;
-
                /* Reset the default conf */
                memset(&evdev_default_conf, 0,
                        sizeof(struct rte_event_dev_info));
@@ -530,11 +647,13 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
                                        eventdev_config->ev_queue_mode;
                        /*
                         * All queues need to be set with sched_type as
-                        * schedule type for the application stage. One queue
-                        * would be reserved for the final eth tx stage. This
-                        * will be an atomic queue.
+                        * schedule type for the application stage. One
+                        * queue would be reserved for the final eth tx
+                        * stage if event device does not have internal
+                        * ports. This will be an atomic queue.
                         */
-                       if (j == nb_eventqueue-1) {
+                       if (!eventdev_config->all_internal_ports &&
+                           j == nb_eventqueue-1) {
                                eventq_conf.schedule_type =
                                        RTE_SCHED_TYPE_ATOMIC;
                        } else {
@@ -650,10 +769,6 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,
        }
 
        /* Setup various connections in the adapter */
-
-       queue_conf.rx_queue_flags =
-                       RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
-
        for (j = 0; j < adapter->nb_connections; j++) {
                /* Get connection */
                conn = &(adapter->conn[j]);
@@ -661,9 +776,7 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,
                /* Setup queue conf */
                queue_conf.ev.queue_id = conn->eventq_id;
                queue_conf.ev.sched_type = em_conf->ext_params.sched_type;
-
-               /* Set flow ID as ethdev ID */
-               queue_conf.ev.flow_id = conn->ethdev_id;
+               queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;
 
                /* Add queue to the adapter */
                ret = rte_event_eth_rx_adapter_queue_add(
@@ -859,6 +972,12 @@ eh_find_worker(uint32_t lcore_id, struct eh_conf *conf,
 
        /* Populate the curr_conf with the capabilities */
 
+       /* Check for Tx internal port */
+       if (eh_dev_has_tx_internal_port(eventdev_id))
+               curr_conf.cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
+       else
+               curr_conf.cap.tx_internal_port = EH_TX_TYPE_NO_INTERNAL_PORT;
+
        /* Check for burst mode */
        if (eh_dev_has_burst_mode(eventdev_id))
                curr_conf.cap.burst = EH_RX_TYPE_BURST;
@@ -1034,6 +1153,18 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
                }
        }
 
+       /*
+        * Check if a Tx core is assigned. If no Tx core is assigned, the
+        * adapter has an internal port for submitting packets for Tx,
+        * so Tx event queue & port setup is not required
+        */
+       if (adapter->tx_core_id == (uint32_t) (-1)) {
+               /* Internal port is present */
+               goto skip_tx_queue_port_setup;
+       }
+
+       /* Setup Tx queue & port */
+
        /* Get event port used by the adapter */
        ret = rte_event_eth_tx_adapter_event_port_get(
                        adapter->adapter_id,
@@ -1044,11 +1175,6 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
        }
 
        /*
-        * TODO: event queue for Tx adapter is required only if the
-        * INTERNAL PORT is not present.
-        */
-
-       /*
         * Tx event queue is reserved for Tx adapter. Unlink this queue
         * from all other ports
         *
@@ -1058,6 +1184,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
                                      &(adapter->tx_ev_queue), 1);
        }
 
+       /* Link Tx event queue to Tx port */
        ret = rte_event_port_link(
                        eventdev_id,
                        tx_port_id,
@@ -1079,6 +1206,7 @@ eh_tx_adapter_configure(struct eventmode_conf *em_conf,
 
        rte_service_set_runstate_mapped_check(service_id, 0);
 
+skip_tx_queue_port_setup:
        /* Start adapter */
        ret = rte_event_eth_tx_adapter_start(adapter->adapter_id);
        if (ret < 0) {
@@ -1163,13 +1291,22 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
 
        for (i = 0; i < nb_rx_adapter; i++) {
                adapter = &(em_conf->rx_adapter[i]);
-               EH_LOG_INFO(
-                       "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d"
-                       "\tRx core: %-2d",
+               sprintf(print_buf,
+                       "\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
                        adapter->adapter_id,
                        adapter->nb_connections,
-                       adapter->eventdev_id,
-                       adapter->rx_core_id);
+                       adapter->eventdev_id);
+               if (adapter->rx_core_id == (uint32_t)-1)
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tRx core: %-2s", "[INTERNAL PORT]");
+               else if (adapter->rx_core_id == RTE_MAX_LCORE)
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tRx core: %-2s", "[NONE]");
+               else
+                       sprintf(print_buf + strlen(print_buf),
+                               "\tRx core: %-2d", adapter->rx_core_id);
+
+               EH_LOG_INFO("%s", print_buf);
 
                for (j = 0; j < adapter->nb_connections; j++) {
                        conn = &(adapter->conn[j]);
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index aad87f7..2895dfa 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -66,12 +66,21 @@ enum eh_rx_types {
        EH_RX_TYPE_BURST
 };
 
+/**
+ * Event mode packet tx types
+ */
+enum eh_tx_types {
+       EH_TX_TYPE_INTERNAL_PORT = 0,
+       EH_TX_TYPE_NO_INTERNAL_PORT
+};
+
 /* Event dev params */
 struct eventdev_params {
        uint8_t eventdev_id;
        uint8_t nb_eventqueue;
        uint8_t nb_eventport;
        uint8_t ev_queue_mode;
+       uint8_t all_internal_ports;
 };
 
 /**
@@ -183,6 +192,8 @@ struct eh_app_worker_params {
                struct {
                        uint64_t burst : 1;
                        /**< Specify status of rx type burst */
+                       uint64_t tx_internal_port : 1;
+                       /**< Specify whether tx internal port is available */
                };
                uint64_t u64;
        } cap;
-- 
2.7.4
