Modify ethernet dev setup to support packet transmission and probe
the ethdev Tx offload capabilities to detect whether lock-free
(multi-thread safe) Tx is supported.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@caviumnetworks.com>
---
 This patch set depends on: http://dpdk.org/dev/patchwork/patch/30520
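
 Illustrative only (not part of the diff below): a minimal sketch of how a
 worker could use the per-port Tx buffer and spinlock that this patch adds
 to struct test_perf. The helper name perf_tx_pkt() and the assumption that
 t->tx_buf[port] has been initialized with rte_eth_tx_buffer_init() are
 hypothetical; the lock is taken only when the PMD does not advertise
 DEV_TX_OFFLOAD_MT_LOCKFREE (i.e. when t->mt_unsafe is set).

 /* Sketch, not part of this patch. */
 static inline void
 perf_tx_pkt(struct test_perf *t, uint8_t port, struct rte_mbuf *m)
 {
 	/* Serialize Tx only on ports whose PMD lacks MT lock-free Tx. */
 	if (t->mt_unsafe)
 		rte_spinlock_lock(&t->tx_lk[port]);

 	/*
 	 * Buffer the packet on queue 0; the buffer is flushed once it
 	 * fills up (assuming rte_eth_tx_buffer_init() was called with
 	 * BURST_SIZE during setup).
 	 */
 	rte_eth_tx_buffer(port, 0, t->tx_buf[port], m);

 	if (t->mt_unsafe)
 		rte_spinlock_unlock(&t->tx_lk[port]);
 }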

 app/test-eventdev/test_perf_common.c | 56 ++++++++++++++++++++----------------
 app/test-eventdev/test_perf_common.h |  6 ++++
 2 files changed, 38 insertions(+), 24 deletions(-)

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 873eab9..60965dc 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -92,7 +92,7 @@ static int
 perf_producer_wrapper(void *arg)
 {
        RTE_SET_USED(arg);
-       struct prod_data *p  = arg;
+       struct prod_data *p = arg;
        struct test_perf *t = p->t;
        /* Launch the producer function only in case of synthetic producer. */
        if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
@@ -126,7 +126,6 @@ total_latency(struct test_perf *t)
        return total;
 }

-
 int
 perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                int (*worker)(void *))
@@ -232,7 +231,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
        return 0;
 }

-static int
+int
 perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                struct rte_event_port_conf prod_conf)
 {
@@ -494,9 +493,9 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
 int
 perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 {
-       uint16_t nb_rx_queues = 1;
        int i;
-       int j;
+       uint8_t nb_queues = 1;
+       uint8_t mt_state = 0;
        struct test_perf *t = evt_test_priv(test);
        struct rte_eth_conf port_conf = {
                .rxmode = {
@@ -528,33 +527,42 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
        }

        for (i = 0; i < rte_eth_dev_count(); i++) {
+               struct rte_eth_dev_info dev_info;
+
+               memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
+               rte_eth_dev_info_get(i, &dev_info);
+               mt_state = !(dev_info.tx_offload_capa &
+                               DEV_TX_OFFLOAD_MT_LOCKFREE);

-               if (rte_eth_dev_configure(i, nb_rx_queues, nb_rx_queues,
+               if (rte_eth_dev_configure(i, nb_queues, nb_queues,
                                        &port_conf)
                                < 0) {
                        evt_err("Failed to configure eth port [%d]\n", i);
                        return -EINVAL;
                }

-               for (j = 0; j < nb_rx_queues; j++) {
-                       if (rte_eth_rx_queue_setup(i, j, NB_RX_DESC,
-                                       rte_socket_id(), NULL, t->pool) < 0) {
-                               evt_err("Failed to setup eth port [%d]"
-                                               " rx_queue: %d."
-                                               " Using synthetic producer\n",
-                                               i, j);
-                               return -EINVAL;
-                       }
-                       if (rte_eth_tx_queue_setup(i, j, NB_TX_DESC,
-                                               rte_socket_id(), NULL) < 0) {
-                               evt_err("Failed to setup eth port [%d]"
-                                               " tx_queue: %d."
-                                               " Using synthetic producer\n",
-                                               i, j);
-                               return -EINVAL;
-                       }
+               if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
+                               rte_socket_id(), NULL, t->pool) < 0) {
+                       evt_err("Failed to setup eth port [%d]"
+                                       " rx_queue: %d."
+                                       " Using synthetic producer\n",
+                                       i, 0);
+                       return -EINVAL;
+               }
+               if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
+                                       rte_socket_id(), NULL) < 0) {
+                       evt_err("Failed to setup eth port [%d]"
+                                       " tx_queue: %d."
+                                       " Using synthetic producer\n",
+                                       i, 0);
+                       return -EINVAL;
                }

+               t->mt_unsafe |= mt_state;
+               t->tx_buf[i] =
+                       rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0);
+               if (t->tx_buf[i] == NULL)
+                       rte_panic("Unable to allocate Tx buffer memory.");
                rte_eth_promiscuous_enable(i);
        }

@@ -591,7 +599,7 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
        } else {
                t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
                                opt->pool_sz, /* number of elements*/
-                               0, /* cache size*/
+                               512, /* cache size*/
                                0,
                                RTE_MBUF_DEFAULT_BUF_SIZE,
                                opt->socket_id); /* flags */
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 95a2174..a847a75 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -45,6 +45,7 @@
 #include <rte_malloc.h>
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
+#include <rte_spinlock.h>

 #include "evt_common.h"
 #include "evt_options.h"
@@ -74,12 +75,15 @@ struct test_perf {
        int done;
        uint64_t outstand_pkts;
        uint8_t nb_workers;
+       uint8_t mt_unsafe;
        enum evt_test_result result;
        uint32_t nb_flows;
        uint64_t nb_pkts;
        struct rte_mempool *pool;
        struct prod_data prod[EVT_MAX_PORTS];
        struct worker_data worker[EVT_MAX_PORTS];
+       struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
+       rte_spinlock_t tx_lk[RTE_MAX_ETHPORTS];
        struct evt_options *opt;
        uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
 } __rte_cache_aligned;
@@ -159,6 +163,8 @@ int perf_test_result(struct evt_test *test, struct evt_options *opt);
 int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
 int perf_test_setup(struct evt_test *test, struct evt_options *opt);
 int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
+int perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+               struct rte_event_port_conf prod_conf);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
 int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                                uint8_t stride, uint8_t nb_queues);
--
2.7.4
