Signed-off-by: Gagandeep Singh <g.si...@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 14 ++++++++++++++
drivers/net/dpaa2/dpaa2_ethdev.h | 9 +++++++++
drivers/net/dpaa2/dpaa2_rxtx.c | 13 ++++++-------
3 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 37a8b43114..c7aae70300 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -78,6 +78,8 @@ bool dpaa2_enable_err_queue;
#define MAX_NB_RX_DESC 11264
int total_nb_rx_desc;
+struct rte_mempool *dpaa2_tx_sg_pool;
+
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -2907,6 +2909,18 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
/* Invoke PMD device initialization function */
diag = dpaa2_dev_init(eth_dev);
if (diag == 0) {
+ if (!dpaa2_tx_sg_pool) {
+ dpaa2_tx_sg_pool =
+ rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool",
+ DPAA2_POOL_SIZE,
+ DPAA2_POOL_CACHE_SIZE, 0,
+ DPAA2_MAX_SGS * sizeof(struct qbman_sge),
+ rte_socket_id());
+ if (dpaa2_tx_sg_pool == NULL) {
+ DPAA2_PMD_ERR("SG pool creation failed\n");
+ return -ENOMEM;
+ }
+ }
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 32ae762e4a..872dced517 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -121,6 +121,15 @@
#define DPAA2_PKT_TYPE_VLAN_1 0x0160
#define DPAA2_PKT_TYPE_VLAN_2 0x0260
+/* Global pool used by driver for SG list TX */
+extern struct rte_mempool *dpaa2_tx_sg_pool;
+/* Maximum SG segments */
+#define DPAA2_MAX_SGS 128
+/* SG pool size */
+#define DPAA2_POOL_SIZE 2048
+/* SG pool cache size */
+#define DPAA2_POOL_CACHE_SIZE 256
+
/* enable timestamp in mbuf*/
extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index bc0e49b0d4..dcd86c4056 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -403,7 +403,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd,
- struct rte_mempool *mp, uint16_t bpid)
+ uint16_t bpid)
{
struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
struct qbman_sge *sgt, *sge = NULL;
@@ -433,12 +433,12 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
}
DPAA2_SET_FD_OFFSET(fd, offset);
} else {
- temp = rte_pktmbuf_alloc(mp);
+ temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
if (temp == NULL) {
DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
return -ENOMEM;
}
- DPAA2_SET_ONLY_FD_BPID(fd, bpid);
+ DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
DPAA2_SET_FD_OFFSET(fd, temp->data_off);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
@@ -1321,9 +1321,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
if (unlikely((*bufs)->nb_segs > 1)) {
+ mp = (*bufs)->pool;
 				if (eth_mbuf_to_sg_fd(*bufs,
 						      &fd_arr[loop],
-						      mp, 0))
+						      mempool_to_bpid(mp)))
 					goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
@@ -1372,7 +1373,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
- mp, bpid))
+ bpid))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
@@ -1646,7 +1647,6 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
- mp,
bpid))
goto send_frames;
} else {
@@ -1810,7 +1810,6 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
- mp,
bpid))
goto send_n_return;
} else {