[dpdk-dev] [PATCH 5/5] net/sfc: handle already flushed Tx queue gracefully

2017-05-27 Thread Andrew Rybchenko
Tx queue may be already flushed because of previous Tx error or
MC reboot.

Signed-off-by: Andrew Rybchenko 
---
 drivers/net/sfc/sfc_tx.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 9e426ca..fc439cb 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -479,6 +479,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_txq *txq;
unsigned int retry_count;
unsigned int wait_count;
+   int rc;
 
sfc_log_init(sa, "TxQ = %u", sw_index);
 
@@ -502,8 +503,10 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
 (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
 ++retry_count) {
-   if (efx_tx_qflush(txq->common) != 0) {
-   txq->state |= SFC_TXQ_FLUSH_FAILED;
+   rc = efx_tx_qflush(txq->common);
+   if (rc != 0) {
+   txq->state |= (rc == EALREADY) ?
+   SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
break;
}
 
-- 
2.9.4



[dpdk-dev] [PATCH 2/5] net/sfc/base: let caller know that queue is already flushed

2017-05-27 Thread Andrew Rybchenko
From: Andy Moreton 

Tx/Rx queue may be already flushed due to Tx/Rx error on the queue or
MC reboot. Caller needs to know that the queue is already flushed to
avoid waiting for flush done event.

Signed-off-by: Andy Moreton 
Signed-off-by: Andrew Rybchenko 
---
 drivers/net/sfc/base/ef10_ev.c |  7 ++-
 drivers/net/sfc/base/ef10_rx.c | 18 +++---
 drivers/net/sfc/base/ef10_tx.c | 18 +++---
 3 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index 3522674..d9389da 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -431,7 +431,12 @@ efx_mcdi_fini_evq(
return (0);
 
 fail1:
-   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   /*
+* EALREADY is not an error, but indicates that the MC has rebooted and
+* that the EVQ has already been destroyed.
+*/
+   if (rc != EALREADY)
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
return (rc);
 }
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 9d6756c..661caa8 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -137,7 +137,7 @@ efx_mcdi_fini_rxq(
 
efx_mcdi_execute_quiet(enp, &req);
 
-   if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+   if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
@@ -145,7 +145,12 @@ efx_mcdi_fini_rxq(
return (0);
 
 fail1:
-   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   /*
+* EALREADY is not an error, but indicates that the MC has rebooted and
+* that the RXQ has already been destroyed.
+*/
+   if (rc != EALREADY)
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
return (rc);
 }
@@ -802,7 +807,14 @@ ef10_rx_qflush(
return (0);
 
 fail1:
-   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   /*
+* EALREADY is not an error, but indicates that the MC has rebooted and
+* that the RXQ has already been destroyed. Callers need to know that
+* the RXQ flush has completed to avoid waiting until timeout for a
+* flush done event that will not be delivered.
+*/
+   if (rc != EALREADY)
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
return (rc);
 }
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index dfa9e0b..211d265 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -148,7 +148,7 @@ efx_mcdi_fini_txq(
 
efx_mcdi_execute_quiet(enp, &req);
 
-   if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+   if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
@@ -156,7 +156,12 @@ efx_mcdi_fini_txq(
return (0);
 
 fail1:
-   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   /*
+* EALREADY is not an error, but indicates that the MC has rebooted and
+* that the TXQ has already been destroyed.
+*/
+   if (rc != EALREADY)
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
return (rc);
 }
@@ -675,7 +680,14 @@ ef10_tx_qflush(
return (0);
 
 fail1:
-   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   /*
+* EALREADY is not an error, but indicates that the MC has rebooted and
+* that the TXQ has already been destroyed. Callers need to know that
+* the TXQ flush has completed to avoid waiting until timeout for a
+* flush done event that will not be delivered.
+*/
+   if (rc != EALREADY)
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
 
return (rc);
 }
-- 
2.9.4



[dpdk-dev] [PATCH 4/5] net/sfc: add Tx queue flush failed flag for sanity

2017-05-27 Thread Andrew Rybchenko
Avoid usage of flushing state when Tx queue flush init failed.

Fixes: fed9aeb46c19 ("net/sfc: implement transmit path start / stop")
Cc: stable@dpdk.org

Signed-off-by: Andrew Rybchenko 
---
 drivers/net/sfc/sfc_tx.c | 2 +-
 drivers/net/sfc/sfc_tx.h | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index d75fb84..9e426ca 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -503,7 +503,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
 ++retry_count) {
if (efx_tx_qflush(txq->common) != 0) {
-   txq->state |= SFC_TXQ_FLUSHING;
+   txq->state |= SFC_TXQ_FLUSH_FAILED;
break;
}
 
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index 6c3ac3b..0c1c708 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -64,6 +64,8 @@ enum sfc_txq_state_bit {
 #define SFC_TXQ_FLUSHING   (1 << SFC_TXQ_FLUSHING_BIT)
SFC_TXQ_FLUSHED_BIT,
 #define SFC_TXQ_FLUSHED(1 << SFC_TXQ_FLUSHED_BIT)
+   SFC_TXQ_FLUSH_FAILED_BIT,
+#define SFC_TXQ_FLUSH_FAILED   (1 << SFC_TXQ_FLUSH_FAILED_BIT)
 };
 
 /**
-- 
2.9.4



[dpdk-dev] [PATCH 3/5] net/sfc: handle already flushed Rx queue gracefully

2017-05-27 Thread Andrew Rybchenko
Rx queue may be already flushed because of previous Rx error or
MC reboot.

Signed-off-by: Andrew Rybchenko 
---
 drivers/net/sfc/sfc_rx.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 122b657..325f32a 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -529,6 +529,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
+   int rc;
 
rxq = sa->rxq_info[sw_index].rxq;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
@@ -541,8 +542,10 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
 ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
 (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
 ++retry_count) {
-   if (efx_rx_qflush(rxq->common) != 0) {
-   rxq->state |= SFC_RXQ_FLUSH_FAILED;
+   rc = efx_rx_qflush(rxq->common);
+   if (rc != 0) {
+   rxq->state |= (rc == EALREADY) ?
+   SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
break;
}
rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
-- 
2.9.4



[dpdk-dev] [PATCH 1/5] net/sfc/base: fix incorrect error code usage in common code

2017-05-27 Thread Andrew Rybchenko
From: Andy Moreton 

MCDI results returned in req.emr_rc have already been translated
from MC_CMD_ERR_* to errno names, so using an MC_CMD_ERR_* value
is incorrect.

Fixes: e7cd430c864f ("net/sfc/base: import SFN7xxx family support")
Cc: stable@dpdk.org

Signed-off-by: Andy Moreton 
Signed-off-by: Andrew Rybchenko 
---
 drivers/net/sfc/base/ef10_rx.c | 2 +-
 drivers/net/sfc/base/ef10_tx.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index b65faed..9d6756c 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -137,7 +137,7 @@ efx_mcdi_fini_rxq(
 
efx_mcdi_execute_quiet(enp, &req);
 
-   if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+   if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
rc = req.emr_rc;
goto fail1;
}
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 0f8e9b1..dfa9e0b 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -148,7 +148,7 @@ efx_mcdi_fini_txq(
 
efx_mcdi_execute_quiet(enp, &req);
 
-   if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+   if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
rc = req.emr_rc;
goto fail1;
}
-- 
2.9.4



[dpdk-dev] [PATCH 00/20] traffic manager on i40e and ixgbe

2017-05-27 Thread Wenzhuo Lu
Implement the traffic manager APIs on i40e and ixgbe.
This patch set is based on the patch set,
"ethdev: abstraction layer for QoS traffic management"
http://dpdk.org/dev/patchwork/patch/24411/
http://dpdk.org/dev/patchwork/patch/24412/

Wenzhuo Lu (20):
  net/i40e: support getting TM ops
  net/i40e: support getting TM capability
  net/i40e: support adding TM shaper profile
  net/i40e: support deleting TM shaper profile
  net/i40e: support adding TM node
  net/i40e: support deleting TM node
  net/i40e: support getting TM node type
  net/i40e: support getting TM level capability
  net/i40e: support getting TM node capability
  net/i40e: support committing TM hierarchy
  net/ixgbe: support getting TM ops
  net/ixgbe: support getting TM capability
  net/ixgbe: support adding TM shaper profile
  net/ixgbe: support deleting TM shaper profile
  net/ixgbe: support adding TM node
  net/ixgbe: support deleting TM node
  net/ixgbe: support getting TM node type
  net/ixgbe: support getting TM level capability
  net/ixgbe: support getting TM node capability
  net/ixgbe: support committing TM hierarchy

 drivers/net/i40e/Makefile|   1 +
 drivers/net/i40e/i40e_ethdev.c   |   7 +
 drivers/net/i40e/i40e_ethdev.h   |  57 +++
 drivers/net/i40e/i40e_tm.c   | 815 +
 drivers/net/i40e/rte_pmd_i40e.c  |   9 -
 drivers/net/ixgbe/Makefile   |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.c |  15 +-
 drivers/net/ixgbe/ixgbe_ethdev.h |  60 +++
 drivers/net/ixgbe/ixgbe_tm.c | 850 +++
 9 files changed, 1801 insertions(+), 14 deletions(-)
 create mode 100644 drivers/net/i40e/i40e_tm.c
 create mode 100644 drivers/net/ixgbe/ixgbe_tm.c

-- 
1.9.3



[dpdk-dev] [PATCH 02/20] net/i40e: support getting TM capability

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_capabilities_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_tm.c | 82 +-
 1 file changed, 81 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 2f4c866..86a2f74 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -34,8 +34,12 @@
 #include "base/i40e_prototype.h"
 #include "i40e_ethdev.h"
 
+static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+   struct rte_tm_capabilities *cap,
+   struct rte_tm_error *error);
+
 const struct rte_tm_ops i40e_tm_ops = {
-   NULL,
+   .capabilities_get = i40e_tm_capabilities_get,
 };
 
 int
@@ -49,3 +53,79 @@
 
return 0;
 }
+
+static inline uint16_t
+i40e_tc_nb_get(struct rte_eth_dev *dev)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_vsi *main_vsi = pf->main_vsi;
+   uint16_t sum = 0;
+   int i;
+
+   for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+   if (main_vsi->enabled_tc & BIT_ULL(i))
+   sum++;
+   }
+
+   return sum;
+}
+
+static int
+i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+struct rte_tm_capabilities *cap,
+struct rte_tm_error *error)
+{
+   uint16_t tc_nb = 0;
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   error->type = RTE_TM_ERROR_TYPE_NONE;
+
+   /* set all the parameters to 0 first. */
+   memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+   /* only support port + TCs */
+   tc_nb = i40e_tc_nb_get(dev);
+   cap->n_nodes_max = tc_nb + 1;
+   cap->n_levels_max = 2;
+   cap->non_leaf_nodes_identical = 0;
+   cap->leaf_nodes_identical = 0;
+   cap->shaper_n_max = cap->n_nodes_max;
+   cap->shaper_private_n_max = cap->n_nodes_max;
+   cap->shaper_private_dual_rate_n_max = 0;
+   cap->shaper_private_rate_min = 0;
+   /* 40Gbps -> 5GBps */
+   cap->shaper_private_rate_max = 5000000000ull;
+   cap->shaper_shared_n_max = 0;
+   cap->shaper_shared_n_nodes_per_shaper_max = 0;
+   cap->shaper_shared_n_shapers_per_node_max = 0;
+   cap->shaper_shared_dual_rate_n_max = 0;
+   cap->shaper_shared_rate_min = 0;
+   cap->shaper_shared_rate_max = 0;
+   cap->sched_n_children_max = tc_nb;
+   cap->sched_sp_n_priorities_max = 0;
+   cap->sched_wfq_n_children_per_group_max = 0;
+   cap->sched_wfq_n_groups_max = 0;
+   cap->sched_wfq_weight_max = 0;
+   cap->cman_head_drop_supported = 0;
+   cap->dynamic_update_mask = 0;
+
+   /**
+* not supported parameters are 0, below,
+* shaper_pkt_length_adjust_min
+* shaper_pkt_length_adjust_max
+* cman_wred_context_n_max
+* cman_wred_context_private_n_max
+* cman_wred_context_shared_n_max
+* cman_wred_context_shared_n_nodes_per_context_max
+* cman_wred_context_shared_n_contexts_per_node_max
+* mark_vlan_dei_supported
+* mark_ip_ecn_tcp_supported
+* mark_ip_ecn_sctp_supported
+* mark_ip_dscp_supported
+* stats_mask
+*/
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 01/20] net/i40e: support getting TM ops

2017-05-27 Thread Wenzhuo Lu
To support QoS scheduler APIs, create a new C file for
the TM (Traffic Management) ops but without any function
implemented.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/Makefile  |  1 +
 drivers/net/i40e/i40e_ethdev.c |  1 +
 drivers/net/i40e/i40e_ethdev.h |  2 ++
 drivers/net/i40e/i40e_tm.c | 51 ++
 4 files changed, 55 insertions(+)
 create mode 100644 drivers/net/i40e/i40e_tm.c

diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 56f210d..33be5f9 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -109,6 +109,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
 
 # vector PMD driver needs SSE4.1 support
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4c49673..fcc958d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -515,6 +515,7 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
.get_eeprom   = i40e_get_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set  = i40e_dev_mtu_set,
+   .tm_ops_get   = i40e_tm_ops_get,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 2ff8282..e5301ee 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define I40E_VLAN_TAG_SIZE4
 
@@ -892,6 +893,7 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
 struct i40e_macvlan_filter *filter,
 int total);
 bool is_i40e_supported(struct rte_eth_dev *dev);
+int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
new file mode 100644
index 000..2f4c866
--- /dev/null
+++ b/drivers/net/i40e/i40e_tm.c
@@ -0,0 +1,51 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+const struct rte_tm_ops i40e_tm_ops = {
+   NULL,
+};
+
+int
+i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+   void *arg)
+{
+   if (!arg)
+   return -EINVAL;
+
+   *(const void **)arg = &i40e_tm_ops;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 03/20] net/i40e: support adding TM shaper profile

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_shaper_profile_add.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_ethdev.c |   6 +++
 drivers/net/i40e/i40e_ethdev.h |  18 +++
 drivers/net/i40e/i40e_tm.c | 107 +
 3 files changed, 131 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fcc958d..721d192 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1299,6 +1299,9 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
 
+   /* initialize Traffic Manager configuration */
+   i40e_tm_conf_init(dev);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
@@ -1462,6 +1465,9 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
rte_free(p_flow);
}
 
+   /* Remove all Traffic Manager configuration */
+   i40e_tm_conf_uninit(dev);
+
return 0;
 }
 
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index e5301ee..da73d64 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -626,6 +626,21 @@ struct rte_flow {
 
 TAILQ_HEAD(i40e_flow_list, rte_flow);
 
+/* Struct to store Traffic Manager shaper profile. */
+struct i40e_tm_shaper_profile {
+   TAILQ_ENTRY(i40e_tm_shaper_profile) node;
+   uint32_t shaper_profile_id;
+   uint32_t reference_count;
+   struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile);
+
+/* Struct to store all the Traffic Manager configuration. */
+struct i40e_tm_conf {
+   struct i40e_shaper_profile_list shaper_profile_list;
+};
+
 /*
  * Structure to store private data specific for PF instance.
  */
@@ -686,6 +701,7 @@ struct i40e_pf {
struct i40e_flow_list flow_list;
bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
bool qinq_replace_flag;  /* QINQ filter replace is done */
+   struct i40e_tm_conf tm_conf;
 };
 
 enum pending_msg {
@@ -894,6 +910,8 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
 int total);
 bool is_i40e_supported(struct rte_eth_dev *dev);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void i40e_tm_conf_init(struct rte_eth_dev *dev);
+void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 86a2f74..a71ff45 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -31,15 +31,22 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include 
+
 #include "base/i40e_prototype.h"
 #include "i40e_ethdev.h"
 
 static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error);
+static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
+  uint32_t shaper_profile_id,
+  struct rte_tm_shaper_params *profile,
+  struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
+   .shaper_profile_add = i40e_shaper_profile_add,
 };
 
 int
@@ -54,6 +61,30 @@ static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
return 0;
 }
 
+void
+i40e_tm_conf_init(struct rte_eth_dev *dev)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+   /* initialize shaper profile list */
+   TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+}
+
+void
+i40e_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_tm_shaper_profile *shaper_profile;
+
+   /* Remove all shaper profiles */
+   while ((shaper_profile =
+  TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+   TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+shaper_profile, node);
+   rte_free(shaper_profile);
+   }
+}
+
 static inline uint16_t
 i40e_tc_nb_get(struct rte_eth_dev *dev)
 {
@@ -129,3 +160,79 @@ static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
 
return 0;
 }
+
+static inline struct i40e_tm_shaper_profile *
+i40e_shaper_profile_search(struct rte_eth_dev *dev,
+  uint32_t shaper_profile_id)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_shaper_profile_list *shaper_profile_list =
+   &pf->tm_conf.shaper_profile_list;
+   struct i40e_tm_shaper_profile *shaper_profile;
+
+   TAILQ_FOREACH

[dpdk-dev] [PATCH 06/20] net/i40e: support deleting TM node

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_delete.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_tm.c | 54 ++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 6ebce77..20172d5 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -50,12 +50,15 @@ static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 uint32_t parent_node_id, uint32_t priority,
 uint32_t weight, struct rte_tm_node_params *params,
 struct rte_tm_error *error);
+static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+   struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
.shaper_profile_add = i40e_shaper_profile_add,
.shaper_profile_delete = i40e_shaper_profile_del,
.node_add = i40e_node_add,
+   .node_delete = i40e_node_delete,
 };
 
 int
@@ -495,3 +498,54 @@ static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 
return 0;
 }
+
+static int
+i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+struct rte_tm_error *error)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   enum i40e_tm_node_type node_type;
+   struct i40e_tm_node *tm_node;
+
+   if (!error)
+   return -EINVAL;
+
+   if (node_id == RTE_TM_NODE_ID_NULL) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "invalid node id";
+   return -EINVAL;
+   }
+
+   /* check if the node id exists */
+   tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+   if (!tm_node) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "no such node";
+   return -EINVAL;
+   }
+
+   /* the node should have no child */
+   if (tm_node->reference_count) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message =
+   "cannot delete a node which has children";
+   return -EINVAL;
+   }
+
+   /* root node */
+   if (node_type == I40E_TM_NODE_TYPE_PORT) {
+   tm_node->shaper_profile->reference_count--;
+   rte_free(tm_node);
+   pf->tm_conf.root = NULL;
+   return 0;
+   }
+
+   /* TC node */
+   tm_node->shaper_profile->reference_count--;
+   tm_node->parent->reference_count--;
+   TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+   rte_free(tm_node);
+   pf->tm_conf.nb_tc_node--;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 05/20] net/i40e: support adding TM node

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_add.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_ethdev.h |  28 ++
 drivers/net/i40e/i40e_tm.c | 223 +
 2 files changed, 251 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index da73d64..34ba3e5 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -636,9 +636,37 @@ struct i40e_tm_shaper_profile {
 
 TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile);
 
+/* node type of Traffic Manager */
+enum i40e_tm_node_type {
+   I40E_TM_NODE_TYPE_PORT,
+   I40E_TM_NODE_TYPE_TC,
+   I40E_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct i40e_tm_node {
+   TAILQ_ENTRY(i40e_tm_node) node;
+   uint32_t id;
+   uint32_t priority;
+   uint32_t weight;
+   uint32_t reference_count;
+   struct i40e_tm_node *parent;
+   struct i40e_tm_shaper_profile *shaper_profile;
+   struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(i40e_tm_node_list, i40e_tm_node);
+
 /* Struct to store all the Traffic Manager configuration. */
 struct i40e_tm_conf {
struct i40e_shaper_profile_list shaper_profile_list;
+   struct i40e_tm_node *root; /* root node - port */
+   struct i40e_tm_node_list tc_list; /* node list for all the TCs */
+   /**
+* The number of added TC nodes.
+* It should be no more than the TC number of this port.
+*/
+   uint32_t nb_tc_node;
 };
 
 /*
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 233adcf..6ebce77 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -46,11 +46,16 @@ static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
 static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
   uint32_t shaper_profile_id,
   struct rte_tm_error *error);
+static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+uint32_t parent_node_id, uint32_t priority,
+uint32_t weight, struct rte_tm_node_params *params,
+struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
.shaper_profile_add = i40e_shaper_profile_add,
.shaper_profile_delete = i40e_shaper_profile_del,
+   .node_add = i40e_node_add,
 };
 
 int
@@ -72,6 +77,11 @@ static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
 
/* initialize shaper profile list */
TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+
+   /* initialize node configuration */
+   pf->tm_conf.root = NULL;
+   TAILQ_INIT(&pf->tm_conf.tc_list);
+   pf->tm_conf.nb_tc_node = 0;
 }
 
 void
@@ -79,6 +89,18 @@ static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
 {
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_tm_shaper_profile *shaper_profile;
+   struct i40e_tm_node *tc;
+
+   /* clear node configuration */
+   while ((tc = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+   TAILQ_REMOVE(&pf->tm_conf.tc_list, tc, node);
+   rte_free(tc);
+   }
+   pf->tm_conf.nb_tc_node = 0;
+   if (pf->tm_conf.root) {
+   rte_free(pf->tm_conf.root);
+   pf->tm_conf.root = NULL;
+   }
 
/* Remove all shaper profiles */
while ((shaper_profile =
@@ -272,3 +294,204 @@ static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
 
return 0;
 }
+
+static inline struct i40e_tm_node *
+i40e_tm_node_search(struct rte_eth_dev *dev,
+   uint32_t node_id, enum i40e_tm_node_type *node_type)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+   struct i40e_tm_node *tm_node;
+
+   if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+   *node_type = I40E_TM_NODE_TYPE_PORT;
+   return pf->tm_conf.root;
+   }
+
+   TAILQ_FOREACH(tm_node, tc_list, node) {
+   if (tm_node->id == node_id) {
+   *node_type = I40E_TM_NODE_TYPE_TC;
+   return tm_node;
+   }
+   }
+
+   return NULL;
+}
+
+static int
+i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+   struct i40e_tm_shaper_profile *shaper_profile;
+   struct i40e_tm_node *tm_node;
+   uint16_t tc_nb = 0;
+
+   if (!params || !error)
+

[dpdk-dev] [PATCH 04/20] net/i40e: support deleting TM shaper profile

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_shaper_profile_delete.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_tm.c | 36 
 1 file changed, 36 insertions(+)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index a71ff45..233adcf 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -43,10 +43,14 @@ static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
   uint32_t shaper_profile_id,
   struct rte_tm_shaper_params *profile,
   struct rte_tm_error *error);
+static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
+  uint32_t shaper_profile_id,
+  struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
.shaper_profile_add = i40e_shaper_profile_add,
+   .shaper_profile_delete = i40e_shaper_profile_del,
 };
 
 int
@@ -236,3 +240,35 @@ static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
 
return 0;
 }
+
+static int
+i40e_shaper_profile_del(struct rte_eth_dev *dev,
+   uint32_t shaper_profile_id,
+   struct rte_tm_error *error)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_tm_shaper_profile *shaper_profile;
+
+   if (!error)
+   return -EINVAL;
+
+   shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+   if (!shaper_profile) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+   error->message = "profile ID not exist";
+   return -EINVAL;
+   }
+
+   /* don't delete a profile if it's used by one or several nodes */
+   if (shaper_profile->reference_count) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "profile in use";
+   return -EINVAL;
+   }
+
+   TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+   rte_free(shaper_profile);
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 07/20] net/i40e: support getting TM node type

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_type_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_tm.c | 35 +++
 1 file changed, 35 insertions(+)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 20172d5..899e88e 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -52,6 +52,8 @@ static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 struct rte_tm_error *error);
 static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
struct rte_tm_error *error);
+static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
@@ -59,6 +61,7 @@ static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
.shaper_profile_delete = i40e_shaper_profile_del,
.node_add = i40e_node_add,
.node_delete = i40e_node_delete,
+   .node_type_get = i40e_node_type_get,
 };
 
 int
@@ -549,3 +552,35 @@ static int i40e_node_delete(struct rte_eth_dev *dev, 
uint32_t node_id,
 
return 0;
 }
+
+static int
+i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+  int *is_leaf, struct rte_tm_error *error)
+{
+   enum i40e_tm_node_type node_type;
+   struct i40e_tm_node *tm_node;
+
+   if (!is_leaf || !error)
+   return -EINVAL;
+
+   if (node_id == RTE_TM_NODE_ID_NULL) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "invalid node id";
+   return -EINVAL;
+   }
+
+   /* check if the node id exists */
+   tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+   if (!tm_node) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "no such node";
+   return -EINVAL;
+   }
+
+   if (tm_node->reference_count)
+   *is_leaf = false;
+   else
+   *is_leaf = true;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 08/20] net/i40e: support getting TM level capability

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_level_capabilities_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_tm.c | 67 ++
 1 file changed, 67 insertions(+)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 899e88e..70e9b78 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -54,6 +54,10 @@ static int i40e_node_delete(struct rte_eth_dev *dev, 
uint32_t node_id,
struct rte_tm_error *error);
 static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
  int *is_leaf, struct rte_tm_error *error);
+static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
+  uint32_t level_id,
+  struct rte_tm_level_capabilities *cap,
+  struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
@@ -62,6 +66,7 @@ static int i40e_node_type_get(struct rte_eth_dev *dev, 
uint32_t node_id,
.node_add = i40e_node_add,
.node_delete = i40e_node_delete,
.node_type_get = i40e_node_type_get,
+   .level_capabilities_get = i40e_level_capabilities_get,
 };
 
 int
@@ -584,3 +589,65 @@ static int i40e_node_type_get(struct rte_eth_dev *dev, 
uint32_t node_id,
 
return 0;
 }
+
+static int
+i40e_level_capabilities_get(struct rte_eth_dev *dev,
+   uint32_t level_id,
+   struct rte_tm_level_capabilities *cap,
+   struct rte_tm_error *error)
+{
+   uint16_t nb_tc = 0;
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   if (level_id >= I40E_TM_NODE_TYPE_MAX) {
+   error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+   error->message = "too deep level";
+   return -EINVAL;
+   }
+
+   nb_tc = i40e_tc_nb_get(dev);
+
+   /* root node */
+   if (level_id == I40E_TM_NODE_TYPE_PORT) {
+   cap->n_nodes_max = 1;
+   cap->n_nodes_nonleaf_max = 1;
+   cap->n_nodes_leaf_max = 0;
+   cap->non_leaf_nodes_identical = false;
+   cap->leaf_nodes_identical = false;
+   cap->nonleaf.shaper_private_supported = true;
+   cap->nonleaf.shaper_private_dual_rate_supported = false;
+   cap->nonleaf.shaper_private_rate_min = 0;
+   /* 40Gbps -> 5GBps */
+   cap->nonleaf.shaper_private_rate_max = 50ull;
+   cap->nonleaf.shaper_shared_n_max = 0;
+   cap->nonleaf.sched_n_children_max = nb_tc;
+   cap->nonleaf.sched_sp_n_priorities_max = 0;
+   cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+   cap->nonleaf.sched_wfq_n_groups_max = 0;
+   cap->nonleaf.sched_wfq_weight_max = 0;
+   cap->nonleaf.stats_mask = 0;
+
+   return 0;
+   }
+
+   /* TC node */
+   cap->n_nodes_max = nb_tc;
+   cap->n_nodes_nonleaf_max = 0;
+   cap->n_nodes_leaf_max = nb_tc;
+   cap->non_leaf_nodes_identical = false;
+   cap->leaf_nodes_identical = true;
+   cap->leaf.shaper_private_supported = true;
+   cap->leaf.shaper_private_dual_rate_supported = false;
+   cap->leaf.shaper_private_rate_min = 0;
+   /* 40Gbps -> 5GBps */
+   cap->leaf.shaper_private_rate_max = 50ull;
+   cap->leaf.shaper_shared_n_max = 0;
+   cap->leaf.cman_head_drop_supported = false;
+   cap->leaf.cman_wred_context_private_supported = false;
+   cap->leaf.cman_wred_context_shared_n_max = 0;
+   cap->leaf.stats_mask = 0;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 09/20] net/i40e: support getting TM node capability

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_capabilities_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_tm.c | 57 ++
 1 file changed, 57 insertions(+)

diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 70e9b78..2d8217c 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -58,6 +58,10 @@ static int i40e_level_capabilities_get(struct rte_eth_dev 
*dev,
   uint32_t level_id,
   struct rte_tm_level_capabilities *cap,
   struct rte_tm_error *error);
+static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
@@ -67,6 +71,7 @@ static int i40e_level_capabilities_get(struct rte_eth_dev 
*dev,
.node_delete = i40e_node_delete,
.node_type_get = i40e_node_type_get,
.level_capabilities_get = i40e_level_capabilities_get,
+   .node_capabilities_get = i40e_node_capabilities_get,
 };
 
 int
@@ -651,3 +656,55 @@ static int i40e_level_capabilities_get(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static int
+i40e_node_capabilities_get(struct rte_eth_dev *dev,
+  uint32_t node_id,
+  struct rte_tm_node_capabilities *cap,
+  struct rte_tm_error *error)
+{
+   enum i40e_tm_node_type node_type;
+   struct i40e_tm_node *tm_node;
+   uint16_t nb_tc = 0;
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   if (node_id == RTE_TM_NODE_ID_NULL) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "invalid node id";
+   return -EINVAL;
+   }
+
+   /* check if the node id exists */
+   tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+   if (!tm_node) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "no such node";
+   return -EINVAL;
+   }
+
+   cap->shaper_private_supported = true;
+   cap->shaper_private_dual_rate_supported = false;
+   cap->shaper_private_rate_min = 0;
+   /* 40Gbps -> 5GBps */
+   cap->shaper_private_rate_max = 50ull;
+   cap->shaper_shared_n_max = 0;
+
+   if (node_type == I40E_TM_NODE_TYPE_PORT) {
+   nb_tc = i40e_tc_nb_get(dev);
+   cap->nonleaf.sched_n_children_max = nb_tc;
+   cap->nonleaf.sched_sp_n_priorities_max = 0;
+   cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+   cap->nonleaf.sched_wfq_n_groups_max = 0;
+   cap->nonleaf.sched_wfq_weight_max = 0;
+   } else {
+   cap->leaf.cman_head_drop_supported = false;
+   cap->leaf.cman_wred_context_private_supported = false;
+   cap->leaf.cman_wred_context_shared_n_max = 0;
+   }
+
+   cap->stats_mask = 0;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 11/20] net/ixgbe: support getting TM ops

2017-05-27 Thread Wenzhuo Lu
To support QoS scheduler APIs, create a new C file for
the TM (Traffic Management) ops but without any function
implemented.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/Makefile   |  1 +
 drivers/net/ixgbe/ixgbe_ethdev.c |  1 +
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 drivers/net/ixgbe/ixgbe_tm.c | 50 
 4 files changed, 54 insertions(+)
 create mode 100644 drivers/net/ixgbe/ixgbe_tm.c

diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 5529d81..0595dcf 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -124,6 +124,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
 
 # install this header file
 SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2083cde..4433590 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -608,6 +608,7 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev 
*dev,
.l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
+   .tm_ops_get   = ixgbe_tm_ops_get,
 };
 
 /*
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index b576a6f..7e99fd3 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -39,6 +39,7 @@
 #include "ixgbe_bypass.h"
 #include 
 #include 
+#include 
 
 /* need update link, bit flag */
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -671,6 +672,7 @@ int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
 bool is_ixgbe_supported(struct rte_eth_dev *dev);
+int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
new file mode 100644
index 000..0a222a1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -0,0 +1,50 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ixgbe_ethdev.h"
+
+const struct rte_tm_ops ixgbe_tm_ops = {
+   NULL,
+};
+
+int
+ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+void *arg)
+{
+   if (!arg)
+   return -EINVAL;
+
+   *(const void **)arg = &ixgbe_tm_ops;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 10/20] net/i40e: support committing TM hierarchy

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_hierarchy_commit.
When calling this API, the driver tries to enable
the TM configuration on HW.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/i40e/i40e_ethdev.h  |   9 
 drivers/net/i40e/i40e_tm.c  | 105 
 drivers/net/i40e/rte_pmd_i40e.c |   9 
 3 files changed, 114 insertions(+), 9 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 34ba3e5..741cf92 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -252,6 +252,15 @@ enum i40e_flxpld_layer_idx {
I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
 
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 4
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
 /**
  * The overhead from MTU to max frame size.
  * Considering QinQ packet, the VLAN tag needs to be counted twice.
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 2d8217c..a9c5900 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -62,6 +62,9 @@ static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
  uint32_t node_id,
  struct rte_tm_node_capabilities *cap,
  struct rte_tm_error *error);
+static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
+int clear_on_fail,
+struct rte_tm_error *error);
 
 const struct rte_tm_ops i40e_tm_ops = {
.capabilities_get = i40e_tm_capabilities_get,
@@ -72,6 +75,7 @@ static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
.node_type_get = i40e_node_type_get,
.level_capabilities_get = i40e_level_capabilities_get,
.node_capabilities_get = i40e_node_capabilities_get,
+   .hierarchy_commit = i40e_hierarchy_commit,
 };
 
 int
@@ -708,3 +712,104 @@ static int i40e_node_capabilities_get(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static int
+i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+   struct i40e_tm_node *tm_node;
+   struct i40e_vsi *vsi;
+   struct i40e_hw *hw;
+   struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+   uint64_t bw;
+   uint8_t tc_map;
+   int ret;
+   int i;
+
+   if (!error)
+   return -EINVAL;
+
+   /* check the setting */
+   if (!pf->tm_conf.root)
+   return 0;
+
+   vsi = pf->main_vsi;
+   hw = I40E_VSI_TO_HW(vsi);
+
+   /**
+* Don't support bandwidth control for port and TCs in parallel.
+* If the port has a max bandwidth, the TCs should have none.
+*/
+   /* port */
+   bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+   if (bw) {
+   /* check if any TC has a max bandwidth */
+   TAILQ_FOREACH(tm_node, tc_list, node) {
+   if (tm_node->shaper_profile->profile.peak.rate) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "no port and TC max bandwidth"
+" in parallel";
+   goto fail_clear;
+   }
+   }
+
+   /* interpret Bps to 50Mbps */
+   bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+   /* set the max bandwidth */
+   ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
+ (uint16_t)bw, 0, NULL);
+   if (ret) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "fail to set port max bandwidth";
+   goto fail_clear;
+   }
+
+   return 0;
+   }
+
+   /* TC */
+   memset(&tc_bw, 0, sizeof(tc_bw));
+   tc_bw.tc_valid_bits = vsi->enabled_tc;
+   tc_map = vsi->enabled_tc;
+   TAILQ_FOREACH(tm_node, tc_list, node) {
+   i = 0;
+   while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
+   i++;
+   if (i >= I40E_MAX_TRAFFIC_CLASS) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+   error->message = "cannot find the TC";
+   goto fail_clear;
+   }
+

[dpdk-dev] [PATCH 12/20] net/ixgbe: support getting TM capability

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_capabilities_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_tm.c | 90 +++-
 1 file changed, 89 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 0a222a1..77066b7 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -33,8 +33,12 @@
 
 #include "ixgbe_ethdev.h"
 
+static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+struct rte_tm_capabilities *cap,
+struct rte_tm_error *error);
+
 const struct rte_tm_ops ixgbe_tm_ops = {
-   NULL,
+   .capabilities_get = ixgbe_tm_capabilities_get,
 };
 
 int
@@ -48,3 +52,87 @@
 
return 0;
 }
+
+static inline uint8_t
+ixgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+   struct rte_eth_conf *eth_conf;
+   uint8_t nb_tcs = 0;
+
+   eth_conf = &dev->data->dev_conf;
+   if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+   nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+   } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+   if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+   ETH_32_POOLS)
+   nb_tcs = ETH_4_TCS;
+   else
+   nb_tcs = ETH_8_TCS;
+   } else {
+   nb_tcs = 1;
+   }
+
+   return nb_tcs;
+}
+
+static int
+ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+   uint8_t nb_tcs;
+   uint8_t nb_queues;
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   error->type = RTE_TM_ERROR_TYPE_NONE;
+
+   /* set all the parameters to 0 first. */
+   memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+   nb_tcs = ixgbe_tc_nb_get(dev);
+   nb_queues = dev->data->nb_tx_queues;
+   /* port + TCs + queues */
+   cap->n_nodes_max = 1 + nb_tcs + nb_queues;
+   cap->n_levels_max = 3;
+   cap->non_leaf_nodes_identical = 0;
+   cap->leaf_nodes_identical = 0;
+   cap->shaper_n_max = cap->n_nodes_max;
+   cap->shaper_private_n_max = cap->n_nodes_max;
+   cap->shaper_private_dual_rate_n_max = 0;
+   cap->shaper_private_rate_min = 0;
+   /* 10Gbps -> 1.25GBps */
+   cap->shaper_private_rate_max = 125000ull;
+   cap->shaper_shared_n_max = 0;
+   cap->shaper_shared_n_nodes_per_shaper_max = 0;
+   cap->shaper_shared_n_shapers_per_node_max = 0;
+   cap->shaper_shared_dual_rate_n_max = 0;
+   cap->shaper_shared_rate_min = 0;
+   cap->shaper_shared_rate_max = 0;
+   cap->sched_n_children_max = (nb_tcs > nb_queues) ? nb_tcs : nb_queues;
+   cap->sched_sp_n_priorities_max = 0;
+   cap->sched_wfq_n_children_per_group_max = 0;
+   cap->sched_wfq_n_groups_max = 0;
+   cap->sched_wfq_weight_max = 0;
+   cap->cman_head_drop_supported = 0;
+   cap->dynamic_update_mask = 0;
+
+   /**
+* not supported parameters are 0, below,
+* shaper_pkt_length_adjust_min
+* shaper_pkt_length_adjust_max
+* cman_wred_context_n_max
+* cman_wred_context_private_n_max
+* cman_wred_context_shared_n_max
+* cman_wred_context_shared_n_nodes_per_context_max
+* cman_wred_context_shared_n_contexts_per_node_max
+* mark_vlan_dei_supported
+* mark_ip_ecn_tcp_supported
+* mark_ip_ecn_sctp_supported
+* mark_ip_dscp_supported
+* stats_mask
+*/
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 14/20] net/ixgbe: support deleting TM shaper profile

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_shaper_profile_delete.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_tm.c | 37 +
 1 file changed, 37 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 89e795a..b3b1acf 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -42,10 +42,14 @@ static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
uint32_t shaper_profile_id,
struct rte_tm_shaper_params *profile,
struct rte_tm_error *error);
+static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+   uint32_t shaper_profile_id,
+   struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
.shaper_profile_add = ixgbe_shaper_profile_add,
+   .shaper_profile_delete = ixgbe_shaper_profile_del,
 };
 
 int
@@ -247,3 +251,36 @@ static int ixgbe_shaper_profile_add(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static int
+ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+uint32_t shaper_profile_id,
+struct rte_tm_error *error)
+{
+   struct ixgbe_tm_conf *tm_conf =
+   IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+   struct ixgbe_tm_shaper_profile *shaper_profile;
+
+   if (!error)
+   return -EINVAL;
+
+   shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+   if (!shaper_profile) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+   error->message = "profile ID not exist";
+   return -EINVAL;
+   }
+
+   /* don't delete a profile if it's used by one or several nodes */
+   if (shaper_profile->reference_count) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "profile in use";
+   return -EINVAL;
+   }
+
+   TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
+   rte_free(shaper_profile);
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 15/20] net/ixgbe: support adding TM node

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_add.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_ethdev.h |  35 ++
 drivers/net/ixgbe/ixgbe_tm.c | 259 +++
 2 files changed, 294 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index b647702..ccde335 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -445,9 +445,44 @@ struct ixgbe_tm_shaper_profile {
 
 TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);
 
+/* node type of Traffic Manager */
+enum ixgbe_tm_node_type {
+   IXGBE_TM_NODE_TYPE_PORT,
+   IXGBE_TM_NODE_TYPE_TC,
+   IXGBE_TM_NODE_TYPE_QUEUE,
+   IXGBE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct ixgbe_tm_node {
+   TAILQ_ENTRY(ixgbe_tm_node) node;
+   uint32_t id;
+   uint32_t priority;
+   uint32_t weight;
+   uint32_t reference_count;
+   struct ixgbe_tm_node *parent;
+   struct ixgbe_tm_shaper_profile *shaper_profile;
+   struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);
+
 /* The configuration of Traffic Manager */
 struct ixgbe_tm_conf {
struct ixgbe_shaper_profile_list shaper_profile_list;
+   struct ixgbe_tm_node *root; /* root node - port */
+   struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
+   struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
+   /**
+* The number of added TC nodes.
+* It should be no more than the TC number of this port.
+*/
+   uint32_t nb_tc_node;
+   /**
+* The number of added queue nodes.
+* It should be no more than the queue number of this port.
+*/
+   uint32_t nb_queue_node;
 };
 
 /*
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index b3b1acf..16e8f89 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -45,11 +45,16 @@ static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
 static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
+static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
.shaper_profile_add = ixgbe_shaper_profile_add,
.shaper_profile_delete = ixgbe_shaper_profile_del,
+   .node_add = ixgbe_node_add,
 };
 
 int
@@ -72,6 +77,13 @@ static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
 
/* initialize shaper profile list */
TAILQ_INIT(&tm_conf->shaper_profile_list);
+
+   /* initialize node configuration */
+   tm_conf->root = NULL;
+   TAILQ_INIT(&tm_conf->queue_list);
+   TAILQ_INIT(&tm_conf->tc_list);
+   tm_conf->nb_tc_node = 0;
+   tm_conf->nb_queue_node = 0;
 }
 
 void
@@ -80,6 +92,23 @@ static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
struct ixgbe_tm_conf *tm_conf =
IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
struct ixgbe_tm_shaper_profile *shaper_profile;
+   struct ixgbe_tm_node *tm_node;
+
+   /* clear node configuration */
+   while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
+   TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+   rte_free(tm_node);
+   }
+   tm_conf->nb_queue_node = 0;
+   while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
+   TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+   rte_free(tm_node);
+   }
+   tm_conf->nb_tc_node = 0;
+   if (tm_conf->root) {
+   rte_free(tm_conf->root);
+   tm_conf->root = NULL;
+   }
 
/* Remove all shaper profiles */
while ((shaper_profile =
@@ -284,3 +313,233 @@ static int ixgbe_shaper_profile_del(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static inline struct ixgbe_tm_node *
+ixgbe_tm_node_search(struct rte_eth_dev *dev,
+uint32_t node_id, enum ixgbe_tm_node_type *node_type)
+{
+   struct ixgbe_tm_conf *tm_conf =
+   IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+   struct ixgbe_tm_node *tm_node;
+
+   if (tm_conf->root && tm_conf->root->id == node_id) {
+   *node_type = IXGBE_TM_NODE_TYPE_PORT;
+   return tm_conf->root;
+   }
+
+   TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+   if (tm_node->id == node_id) {
+   *node_type = IXGBE_TM_NODE_TYPE_TC;
+   return tm_node;
+ 

[dpdk-dev] [PATCH 13/20] net/ixgbe: support adding TM shaper profile

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_shaper_profile_add.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |   6 +++
 drivers/net/ixgbe/ixgbe_ethdev.h |  21 
 drivers/net/ixgbe/ixgbe_tm.c | 111 +++
 3 files changed, 138 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4433590..d339fc4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1360,6 +1360,9 @@ struct rte_ixgbe_xstats_name_off {
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
 
+   /* initialize Traffic Manager configuration */
+   ixgbe_tm_conf_init(eth_dev);
+
return 0;
 }
 
@@ -1413,6 +1416,9 @@ struct rte_ixgbe_xstats_name_off {
/* clear all the filters list */
ixgbe_filterlist_flush();
 
+   /* Remove all Traffic Manager configuration */
+   ixgbe_tm_conf_uninit(eth_dev);
+
return 0;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 7e99fd3..b647702 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -435,6 +435,21 @@ struct ixgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
 };
 
+/* Struct to store Traffic Manager shaper profile. */
+struct ixgbe_tm_shaper_profile {
+   TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
+   uint32_t shaper_profile_id;
+   uint32_t reference_count;
+   struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);
+
+/* The configuration of Traffic Manager */
+struct ixgbe_tm_conf {
+   struct ixgbe_shaper_profile_list shaper_profile_list;
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
@@ -463,6 +478,7 @@ struct ixgbe_adapter {
struct rte_timecounter  systime_tc;
struct rte_timecounter  rx_tstamp_tc;
struct rte_timecounter  tx_tstamp_tc;
+   struct ixgbe_tm_conftm_conf;
 };
 
 #define IXGBE_DEV_TO_PCI(eth_dev) \
@@ -513,6 +529,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
(&((struct ixgbe_adapter *)adapter)->bw_conf)
 
+#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
+   (&((struct ixgbe_adapter *)adapter)->tm_conf)
+
 /*
  * RX/TX function prototypes
  */
@@ -673,6 +692,8 @@ int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, 
uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
 bool is_ixgbe_supported(struct rte_eth_dev *dev);
 int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
+void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 77066b7..89e795a 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -31,14 +31,21 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include 
+
 #include "ixgbe_ethdev.h"
 
 static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
 struct rte_tm_capabilities *cap,
 struct rte_tm_error *error);
+static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+   uint32_t shaper_profile_id,
+   struct rte_tm_shaper_params *profile,
+   struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
+   .shaper_profile_add = ixgbe_shaper_profile_add,
 };
 
 int
@@ -53,6 +60,32 @@ static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
return 0;
 }
 
+void
+ixgbe_tm_conf_init(struct rte_eth_dev *dev)
+{
+   struct ixgbe_tm_conf *tm_conf =
+   IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+
+   /* initialize shaper profile list */
+   TAILQ_INIT(&tm_conf->shaper_profile_list);
+}
+
+void
+ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+   struct ixgbe_tm_conf *tm_conf =
+   IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+   struct ixgbe_tm_shaper_profile *shaper_profile;
+
+   /* Remove all shaper profiles */
+   while ((shaper_profile =
+  TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
+   TAILQ_REMOVE(&tm_conf->shaper_profile_list,
+shaper_profile, node);
+   rte_free(shaper_profile);
+   }
+}
+
 static inline uint8_t
 ixgbe_tc_nb_get(struct rte_eth_dev *dev)
 {
@@ -136,3 +169,81 @@ static int ixgbe_tm_capabilities_get(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static inline struct ixgbe_tm_shaper_profile *
+ixgbe_shaper_profil

[dpdk-dev] [PATCH 16/20] net/ixgbe: support deleting TM node

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_delete.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_tm.c | 60 
 1 file changed, 60 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 16e8f89..39ec272 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -49,12 +49,15 @@ static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t 
node_id,
  uint32_t parent_node_id, uint32_t priority,
  uint32_t weight, struct rte_tm_node_params *params,
  struct rte_tm_error *error);
+static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
.shaper_profile_add = ixgbe_shaper_profile_add,
.shaper_profile_delete = ixgbe_shaper_profile_del,
.node_add = ixgbe_node_add,
+   .node_delete = ixgbe_node_delete,
 };
 
 int
@@ -543,3 +546,60 @@ static int ixgbe_node_add(struct rte_eth_dev *dev, 
uint32_t node_id,
 
return 0;
 }
+
+static int
+ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+   struct ixgbe_tm_conf *tm_conf =
+   IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+   enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+   struct ixgbe_tm_node *tm_node;
+
+   if (!error)
+   return -EINVAL;
+
+   if (node_id == RTE_TM_NODE_ID_NULL) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "invalid node id";
+   return -EINVAL;
+   }
+
+   /* check if the node id exists */
+   tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+   if (!tm_node) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "no such node";
+   return -EINVAL;
+   }
+
+   /* the node should have no child */
+   if (tm_node->reference_count) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message =
+   "cannot delete a node which has children";
+   return -EINVAL;
+   }
+
+   /* root node */
+   if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
+   tm_node->shaper_profile->reference_count--;
+   rte_free(tm_node);
+   tm_conf->root = NULL;
+   return 0;
+   }
+
+   /* TC or queue node */
+   tm_node->shaper_profile->reference_count--;
+   tm_node->parent->reference_count--;
+   if (node_type == IXGBE_TM_NODE_TYPE_TC) {
+   TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+   tm_conf->nb_tc_node--;
+   } else {
+   TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+   tm_conf->nb_queue_node--;
+   }
+   rte_free(tm_node);
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 18/20] net/ixgbe: support getting TM level capability

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_level_capabilities_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_tm.c | 78 
 1 file changed, 78 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 68b26cc..4a9947d 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -53,6 +53,10 @@ static int ixgbe_node_delete(struct rte_eth_dev *dev, 
uint32_t node_id,
 struct rte_tm_error *error);
 static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
   int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+   uint32_t level_id,
+   struct rte_tm_level_capabilities *cap,
+   struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
@@ -61,6 +65,7 @@ static int ixgbe_node_type_get(struct rte_eth_dev *dev, 
uint32_t node_id,
.node_add = ixgbe_node_add,
.node_delete = ixgbe_node_delete,
.node_type_get = ixgbe_node_type_get,
+   .level_capabilities_get = ixgbe_level_capabilities_get,
 };
 
 int
@@ -638,3 +643,76 @@ static int ixgbe_node_type_get(struct rte_eth_dev *dev, 
uint32_t node_id,
 
return 0;
 }
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+uint32_t level_id,
+struct rte_tm_level_capabilities *cap,
+struct rte_tm_error *error)
+{
+   uint8_t nb_tc = 0;
+   uint8_t nb_queue = 0;
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+   error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+   error->message = "too deep level";
+   return -EINVAL;
+   }
+
+   nb_tc = ixgbe_tc_nb_get(dev);
+   nb_queue = dev->data->nb_tx_queues;
+
+   /* root node */
+   if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+   cap->n_nodes_max = 1;
+   cap->n_nodes_nonleaf_max = 1;
+   cap->n_nodes_leaf_max = 0;
+   cap->non_leaf_nodes_identical = false;
+   cap->leaf_nodes_identical = false;
+   cap->nonleaf.shaper_private_supported = true;
+   cap->nonleaf.shaper_private_dual_rate_supported = false;
+   cap->nonleaf.shaper_private_rate_min = 0;
+   /* 10Gbps -> 1.25GBps */
+   cap->nonleaf.shaper_private_rate_max = 125000ull;
+   cap->nonleaf.shaper_shared_n_max = 0;
+   cap->nonleaf.sched_n_children_max = nb_tc;
+   cap->nonleaf.sched_sp_n_priorities_max = 0;
+   cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+   cap->nonleaf.sched_wfq_n_groups_max = 0;
+   cap->nonleaf.sched_wfq_weight_max = 0;
+   cap->nonleaf.stats_mask = 0;
+
+   return 0;
+   }
+
+   /* TC or queue node */
+   if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+   /* TC */
+   cap->n_nodes_max = nb_tc;
+   cap->n_nodes_nonleaf_max = nb_tc;
+   cap->n_nodes_leaf_max = nb_tc;
+   cap->non_leaf_nodes_identical = true;
+   } else {
+   /* queue */
+   cap->n_nodes_max = nb_queue;
+   cap->n_nodes_nonleaf_max = 0;
+   cap->n_nodes_leaf_max = nb_queue;
+   cap->non_leaf_nodes_identical = false;
+   }
+   cap->leaf_nodes_identical = true;
+   cap->leaf.shaper_private_supported = true;
+   cap->leaf.shaper_private_dual_rate_supported = false;
+   cap->leaf.shaper_private_rate_min = 0;
+   /* 10Gbps -> 1.25GBps */
+   cap->leaf.shaper_private_rate_max = 125000ull;
+   cap->leaf.shaper_shared_n_max = 0;
+   cap->leaf.cman_head_drop_supported = false;
+   cap->leaf.cman_wred_context_private_supported = false;
+   cap->leaf.cman_wred_context_shared_n_max = 0;
+   cap->leaf.stats_mask = 0;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 17/20] net/ixgbe: support getting TM node type

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_type_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_tm.c | 35 +++
 1 file changed, 35 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 39ec272..68b26cc 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -51,6 +51,8 @@ static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t 
node_id,
  struct rte_tm_error *error);
 static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
 struct rte_tm_error *error);
+static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+  int *is_leaf, struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
@@ -58,6 +60,7 @@ static int ixgbe_node_delete(struct rte_eth_dev *dev, 
uint32_t node_id,
.shaper_profile_delete = ixgbe_shaper_profile_del,
.node_add = ixgbe_node_add,
.node_delete = ixgbe_node_delete,
+   .node_type_get = ixgbe_node_type_get,
 };
 
 int
@@ -603,3 +606,35 @@ static int ixgbe_node_delete(struct rte_eth_dev *dev, 
uint32_t node_id,
 
return 0;
 }
+
+static int
+ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+   int *is_leaf, struct rte_tm_error *error)
+{
+   enum ixgbe_tm_node_type node_type;
+   struct ixgbe_tm_node *tm_node;
+
+   if (!is_leaf || !error)
+   return -EINVAL;
+
+   if (node_id == RTE_TM_NODE_ID_NULL) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "invalid node id";
+   return -EINVAL;
+   }
+
+   /* check if the node id exists */
+   tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+   if (!tm_node) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "no such node";
+   return -EINVAL;
+   }
+
+   if (tm_node->reference_count)
+   *is_leaf = false;
+   else
+   *is_leaf = true;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 20/20] net/ixgbe: support committing TM hierarchy

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_hierarchy_commit.
When calling this API, the driver tries to enable
the TM configuration on HW.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  8 ++---
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 drivers/net/ixgbe/ixgbe_tm.c | 69 
 3 files changed, 74 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d339fc4..e234177 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -302,9 +302,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t 
direction,
   uint8_t queue, uint8_t msix_vector);
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-   uint16_t queue_idx, uint16_t tx_rate);
-
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
@@ -5605,8 +5602,9 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, 
bool on)
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 }
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-   uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+  uint16_t queue_idx, uint16_t tx_rate)
 {
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rf_dec, rf_int;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index ccde335..48cf5b6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -729,6 +729,8 @@ int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, 
uint16_t vf,
 int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
 void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+  uint16_t tx_rate);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index abb4643..c52f591 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -61,6 +61,9 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev 
*dev,
   uint32_t node_id,
   struct rte_tm_node_capabilities *cap,
   struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
@@ -71,6 +74,7 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev 
*dev,
.node_type_get = ixgbe_node_type_get,
.level_capabilities_get = ixgbe_level_capabilities_get,
.node_capabilities_get = ixgbe_node_capabilities_get,
+   .hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
@@ -779,3 +783,68 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+  int clear_on_fail,
+  struct rte_tm_error *error)
+{
+   struct ixgbe_tm_conf *tm_conf =
+   IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+   struct ixgbe_tm_node *tm_node;
+   uint64_t bw;
+   int ret;
+   int i;
+
+   if (!error)
+   return -EINVAL;
+
+   /* check the setting */
+   if (tm_conf->root)
+   return 0;
+
+   /* not support port max bandwidth yet */
+   if (tm_conf->root->shaper_profile->profile.peak.rate) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "no port max bandwidth";
+   goto fail_clear;
+   }
+
+   /* HW not support TC max bandwidth */
+   TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+   if (tm_node->shaper_profile->profile.peak.rate) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "no TC max bandwidth";
+   goto fail_clear;
+   }
+   }
+
+   /* queue max bandwidth */
+   i = 0;
+   TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+   bw = tm_node->shaper_profile->profile.peak.rate;
+   if (bw) {
+   /* interpret Bps to Mbps */
+   bw = bw * 8 / 1000 / 1000;
+   ret = ixgbe_set_queue_rate_limit(dev, i, bw);
+   if (ret) {
+  

[dpdk-dev] [PATCH 19/20] net/ixgbe: support getting TM node capability

2017-05-27 Thread Wenzhuo Lu
Add the support of the Traffic Management API,
rte_tm_node_capabilities_get.

Signed-off-by: Wenzhuo Lu 
---
 drivers/net/ixgbe/ixgbe_tm.c | 63 
 1 file changed, 63 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index 4a9947d..abb4643 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -57,6 +57,10 @@ static int ixgbe_level_capabilities_get(struct rte_eth_dev 
*dev,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error);
+static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+  uint32_t node_id,
+  struct rte_tm_node_capabilities *cap,
+  struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
.capabilities_get = ixgbe_tm_capabilities_get,
@@ -66,6 +70,7 @@ static int ixgbe_level_capabilities_get(struct rte_eth_dev 
*dev,
.node_delete = ixgbe_node_delete,
.node_type_get = ixgbe_node_type_get,
.level_capabilities_get = ixgbe_level_capabilities_get,
+   .node_capabilities_get = ixgbe_node_capabilities_get,
 };
 
 int
@@ -716,3 +721,61 @@ static int ixgbe_level_capabilities_get(struct rte_eth_dev 
*dev,
 
return 0;
 }
+
+static int
+ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+   uint32_t node_id,
+   struct rte_tm_node_capabilities *cap,
+   struct rte_tm_error *error)
+{
+   enum ixgbe_tm_node_type node_type;
+   struct ixgbe_tm_node *tm_node;
+   uint8_t nb_tc = 0;
+   uint8_t nb_queue = 0;
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   if (node_id == RTE_TM_NODE_ID_NULL) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "invalid node id";
+   return -EINVAL;
+   }
+
+   nb_tc = ixgbe_tc_nb_get(dev);
+   nb_queue = dev->data->nb_tx_queues;
+
+   /* check if the node id exists */
+   tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+   if (!tm_node) {
+   error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+   error->message = "no such node";
+   return -EINVAL;
+   }
+
+   cap->shaper_private_supported = true;
+   cap->shaper_private_dual_rate_supported = false;
+   cap->shaper_private_rate_min = 0;
+   /* 10Gbps -> 1.25GBps */
+   cap->shaper_private_rate_max = 125000ull;
+   cap->shaper_shared_n_max = 0;
+
+   if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
+   cap->leaf.cman_head_drop_supported = false;
+   cap->leaf.cman_wred_context_private_supported = false;
+   cap->leaf.cman_wred_context_shared_n_max = 0;
+   } else {
+   if (node_type == IXGBE_TM_NODE_TYPE_PORT)
+   cap->nonleaf.sched_n_children_max = nb_tc;
+   else
+   cap->nonleaf.sched_n_children_max = nb_queue;
+   cap->nonleaf.sched_sp_n_priorities_max = 0;
+   cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+   cap->nonleaf.sched_wfq_n_groups_max = 0;
+   cap->nonleaf.sched_wfq_weight_max = 0;
+   }
+
+   cap->stats_mask = 0;
+
+   return 0;
+}
-- 
1.9.3



[dpdk-dev] [PATCH 2/7] ethdev: add support of restoration of queue state

2017-05-27 Thread Wei Dai
As dev->dev_ops->dev_start may change dev->data->rx_queue_state[]
and dev->data->tx_queue_state[], this patch adds rxq_restore_state[]
and txq_restore_state[ ] for restoration.
In the restoration process, PMD should start or stop each Rx or Tx
queue according to dev->data->rx_restore_state[] or
dev->data->tx_restore_state[].

Signed-off-by: Wei Dai 
---
 lib/librte_ether/rte_ethdev.c | 87 +++
 lib/librte_ether/rte_ethdev.h |  5 ++-
 2 files changed, 83 insertions(+), 9 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index a5a9519..97c0044 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -504,6 +504,7 @@ int
 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -522,14 +523,18 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t 
rx_queue_id)
return 0;
}
 
-   return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
-
+   ret = dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+   if (!ret)
+   dev->data->rxq_restore_state[rx_queue_id] =
+   RTE_ETH_QUEUE_STATE_STARTED;
+   return ret;
 }
 
 int
 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -548,14 +553,18 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t 
rx_queue_id)
return 0;
}
 
-   return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
-
+   ret = dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+   if (!ret)
+   dev->data->rxq_restore_state[rx_queue_id] =
+   RTE_ETH_QUEUE_STATE_STOPPED;
+   return ret;
 }
 
 int
 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -574,14 +583,18 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t 
tx_queue_id)
return 0;
}
 
-   return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
-
+   ret = dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+   if (!ret)
+   dev->data->txq_restore_state[tx_queue_id] =
+   RTE_ETH_QUEUE_STATE_STARTED;
+   return ret;
 }
 
 int
 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -600,8 +613,11 @@ rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t 
tx_queue_id)
return 0;
}
 
-   return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
-
+   ret = dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+   if (!ret)
+   dev->data->txq_restore_state[tx_queue_id] =
+   RTE_ETH_QUEUE_STATE_STOPPED;
+   return ret;
 }
 
 static int
@@ -863,6 +879,50 @@ _rte_eth_dev_reset(struct rte_eth_dev *dev)
 }
 
 static void
+rte_eth_dev_rx_queue_restore(uint8_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_dev *dev;
+   uint16_t q = queue_id;
+
+   dev = &rte_eth_devices[port_id];
+
+   if (dev->data->in_restoration == 0) {
+   dev->data->rxq_restore_state[q] = dev->data->rx_queue_state[q];
+   return;
+   }
+
+   if (dev->data->rxq_restore_state[q] != dev->data->rx_queue_state[q]) {
+   if (dev->data->rxq_restore_state[q]
+   == RTE_ETH_QUEUE_STATE_STARTED)
+   rte_eth_dev_rx_queue_start(port_id, q);
+   else
+   rte_eth_dev_rx_queue_stop(port_id, q);
+   }
+}
+
+static void
+rte_eth_dev_tx_queue_restore(uint8_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_dev *dev;
+   uint16_t q = queue_id;
+
+   dev = &rte_eth_devices[port_id];
+
+   if (dev->data->in_restoration == 0) {
+   dev->data->txq_restore_state[q] = dev->data->tx_queue_state[q];
+   return;
+   }
+
+   if (dev->data->txq_restore_state[q] != dev->data->tx_queue_state[q]) {
+   if (dev->data->txq_restore_state[q]
+   == RTE_ETH_QUEUE_STATE_STARTED)
+   rte_eth_dev_tx_queue_start(port_id, q);
+   else
+   rte_eth_dev_tx_queue_stop(port_id, q);
+   }
+}
+
+static void
 rte_eth_dev_config_restore(uint8_t port_id)
 {
struct rte_eth_dev *dev;
@@ -871,6 +931,7 @@ rte_eth_dev_config_restore(uint8_t port_id)
uint16_t i;
uint32_t pool = 0;
uint64_t pool_mask;
+   uint16_t q;
 
dev = &rte_eth_devices[port_id];
 
@@ -915,6 +976,12 @@ rte_eth_dev_config_restore(uint8_t port_id)
rte_eth_allmulticast_enable(port_

[dpdk-dev] [PATCH 0/7] NIC port restoration

2017-05-27 Thread Wei Dai
Sometimes configuration and run time environment in PMD layer of a
running port is changed and the port has to stop all bi-directional
traffic and initialize the port device and restore its configurations
and traffic again. Such this procedure can be regarded as restoration. 
Some customers found that in some VFN scenario a running port with
i40e VF DPDK PMD has to be restored when the host PF with kernel driver
need to reset the whole PF due to some reasons. For example, i40e HW
may need a reset after reconfiguring a few key registers. Then PF host
needs to inform all VFs generated by that PF with the event of 'PF
reset'. After VF driver see this event, VF driver needs to restore
the VF port.

In order to make restoration as a common functionality of all PMD,
based on current implementation of rte_ethdev, most of restoration work
can be done in rte_ethdev layer with the settings stored in data 
structures in rte_ethdev layer. If some settings is not stored in
rte_ethdev after they are configured before, they should be stored for
restoration by adding data structure in rte_ethdev layer. Ethdev should
also add a API like dev_restore( ) for PMD possible specific work in
restoration process.
   The outline of restoration procedure is as follows.
1. rte_eth_dev_stop(port_id);   
2. (*dev->dev_ops->dev_uninit)(dev);
3. (*dev->dev_ops->dev_init)(dev);
4. rte_eth_dev_configure(...);
5. rte_eth_dev_rx_queue_config(...) invoked for all Rx queues.
6. rte_eth_dev_tx_queue_config(...) invoked for all Tx queues;
7. rte_eth_start(port) 
7.1  rte_eth_dev_config_restore(port); // need to be enhanced
8.  (*dev->dev_ops->dev_restore)(dev); // doing PMD specific restoration


Wei Dai (7):
  ethdev: add support of NIC restoration
  ethdev: add support of restoration of queue state
  ethdev: add support of restoration of port status
  ethdev: add support of MTU restoration
  ethdev: add support of restoration of multicast addr
  net/ixgbe: add support of restoration
  net/i40e: add support of restoration

 drivers/net/i40e/i40e_ethdev.c |   2 +
 drivers/net/i40e/i40e_ethdev_vf.c  |   5 +
 drivers/net/ixgbe/ixgbe_ethdev.c   |   4 +
 lib/librte_ether/rte_ethdev.c  | 280 +++--
 lib/librte_ether/rte_ethdev.h  |  45 +-
 lib/librte_ether/rte_ether_version.map |   6 +
 6 files changed, 326 insertions(+), 16 deletions(-)

-- 
2.7.4



[dpdk-dev] [PATCH 1/7] ethdev: add support of NIC restoration

2017-05-27 Thread Wei Dai
The steps of NIC restoration process include following items in order:
dev_stop, dev_uninit, dev_init, dev_configure with stored configuration,
setup each Rx and Tx queue with previous configurations and dev_start.

Signed-off-by: Wei Dai 
---
 lib/librte_ether/rte_ethdev.c  | 102 -
 lib/librte_ether/rte_ethdev.h  |  36 
 lib/librte_ether/rte_ether_version.map |   6 ++
 3 files changed, 142 insertions(+), 2 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 83898a8..a5a9519 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -795,17 +795,39 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, 
uint16_t nb_tx_q,
/*
 * Setup new number of RX/TX queues and reconfigure device.
 */
+   if (dev->data->rxq_conf == NULL) {
+   dev->data->rxq_conf = rte_zmalloc("ethdev->rxq_conf",
+   sizeof(struct rte_eth_rx_queue_conf) * nb_rx_q,
+   RTE_CACHE_LINE_SIZE);
+   if (dev->data->rxq_conf == NULL)
+   return -ENOMEM;
+   }
+
diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
if (diag != 0) {
RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
port_id, diag);
+   rte_free(dev->data->rxq_conf);
return diag;
}
 
+   if (dev->data->txq_conf == NULL) {
+   dev->data->txq_conf = rte_zmalloc("ethdev->txq_conf",
+   sizeof(struct rte_eth_tx_queue_conf) * nb_tx_q,
+   RTE_CACHE_LINE_SIZE);
+   if (dev->data->txq_conf == NULL) {
+   rte_free(dev->data->rxq_conf);
+   rte_eth_dev_rx_queue_config(dev, 0);
+   return -ENOMEM;
+   }
+   }
+
diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
if (diag != 0) {
RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
port_id, diag);
+   rte_free(dev->data->rxq_conf);
+   rte_free(dev->data->txq_conf);
rte_eth_dev_rx_queue_config(dev, 0);
return diag;
}
@@ -814,6 +836,8 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, 
uint16_t nb_tx_q,
if (diag != 0) {
RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
port_id, diag);
+   rte_free(dev->data->rxq_conf);
+   rte_free(dev->data->txq_conf);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
return diag;
@@ -1005,6 +1029,7 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t 
rx_queue_id,
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
void **rxq;
+   struct rte_eth_rx_queue_conf *rxq_conf;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1080,6 +1105,12 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t 
rx_queue_id,
if (!dev->data->min_rx_buf_size ||
dev->data->min_rx_buf_size > mbp_buf_size)
dev->data->min_rx_buf_size = mbp_buf_size;
+
+   rxq_conf = &dev->data->rxq_conf[rx_queue_id];
+   rxq_conf->nb_rx_desc = nb_rx_desc;
+   rxq_conf->socket_id = socket_id;
+   rxq_conf->rx_conf = *rx_conf;
+   rxq_conf->mp = mp;
}
 
return ret;
@@ -1093,6 +1124,8 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t 
tx_queue_id,
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
void **txq;
+   int ret;
+   struct rte_eth_tx_queue_conf *txq_conf;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1136,8 +1169,16 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t 
tx_queue_id,
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
 
-   return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
-  socket_id, tx_conf);
+   ret = (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
+ socket_id, tx_conf);
+   if (!ret) {
+   txq_conf = &dev->data->txq_conf[tx_queue_id];
+   txq_conf->nb_tx_desc = nb_tx_desc;
+   txq_conf->socket_id = socket_id;
+   txq_conf->tx_conf = *tx_conf;
+   }
+
+   return ret;
 }
 
 void
@@ -3472,3 +3513,60 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
-ENOTSUP);
return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
 }
+
+int
+rte_eth_dev_restore(uint8_t port_id)
+{
+   struct rte_eth_dev *dev;

[dpdk-dev] [PATCH 3/7] ethdev: add support of restoration of port status

2017-05-27 Thread Wei Dai
As dev->data->dev_link.link_status may change when the port is
initialized again, this patch adds dev->data->restore_link for
restoration.
In the restoration process, ethdev layer can restore link status
as up or down by comparing dev->data->restore_link.link_status
and dev->data->dev_link.link_status.

Signed-off-by: Wei Dai 
---
 lib/librte_ether/rte_ethdev.c | 37 +++--
 lib/librte_ether/rte_ethdev.h |  1 +
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 97c0044..af8ccf6 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -923,6 +923,28 @@ rte_eth_dev_tx_queue_restore(uint8_t port_id, uint16_t 
queue_id)
 }
 
 static void
+rte_eth_dev_link_status_restore(uint8_t port_id)
+{
+   struct rte_eth_dev *dev;
+
+   dev = &rte_eth_devices[port_id];
+
+   if (dev->data->in_restoration == 0) {
+   dev->data->restore_link.link_status =
+   dev->data->dev_link.link_status;
+   return;
+   }
+
+   if (dev->data->restore_link.link_status
+   != dev->data->dev_link.link_status) {
+   if (dev->data->restore_link.link_status != 0)
+   rte_eth_dev_set_link_up(port_id);
+   else
+   rte_eth_dev_set_link_down(port_id);
+   }
+}
+
+static void
 rte_eth_dev_config_restore(uint8_t port_id)
 {
struct rte_eth_dev *dev;
@@ -982,6 +1004,7 @@ rte_eth_dev_config_restore(uint8_t port_id)
for (q = 0; q < dev->data->nb_tx_queues; q++)
rte_eth_dev_tx_queue_restore(port_id, q);
 
+   rte_eth_dev_link_status_restore(port_id);
 }
 
 int
@@ -1014,6 +1037,8 @@ rte_eth_dev_start(uint8_t port_id)
if (dev->data->dev_conf.intr_conf.lsc == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
(*dev->dev_ops->link_update)(dev, 0);
+   dev->data->restore_link.link_status =
+   dev->data->dev_link.link_status;
}
return 0;
 }
@@ -1043,26 +1068,34 @@ int
 rte_eth_dev_set_link_up(uint8_t port_id)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
dev = &rte_eth_devices[port_id];
 
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
-   return (*dev->dev_ops->dev_set_link_up)(dev);
+   ret = (*dev->dev_ops->dev_set_link_up)(dev);
+   if (!ret)
+   dev->data->restore_link.link_status = 1;
+   return ret;
 }
 
 int
 rte_eth_dev_set_link_down(uint8_t port_id)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
dev = &rte_eth_devices[port_id];
 
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
-   return (*dev->dev_ops->dev_set_link_down)(dev);
+   ret = (*dev->dev_ops->dev_set_link_down)(dev);
+   if (!ret)
+   dev->data->restore_link.link_status = 0;
+   return ret;
 }
 
 void
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 7a2ce07..9428f57 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1734,6 +1734,7 @@ struct rte_eth_dev_data {
void *dev_private;  /**< PMD-specific private data */
 
struct rte_eth_link dev_link;
+   struct rte_eth_link restore_link;
/**< Link-level information & status */
 
struct rte_eth_conf dev_conf;   /**< Configuration applied to device. */
-- 
2.7.4



[dpdk-dev] [PATCH 6/7] net/ixgbe: add support of restoration

2017-05-27 Thread Wei Dai
export dev_uninit and dev_init for restoration to
reset port but keep same port id.

Signed-off-by: Wei Dai 
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2083cde..64b8a78 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -525,6 +525,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_set_link_up= ixgbe_dev_set_link_up,
.dev_set_link_down  = ixgbe_dev_set_link_down,
.dev_close= ixgbe_dev_close,
+   .dev_init = eth_ixgbe_dev_init,
+   .dev_uninit   = eth_ixgbe_dev_uninit,
.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
@@ -625,6 +627,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.xstats_reset = ixgbevf_dev_stats_reset,
.xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close= ixgbevf_dev_close,
+   .dev_init = eth_ixgbevf_dev_init,
+   .dev_uninit   = eth_ixgbevf_dev_uninit,
.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.dev_infos_get= ixgbevf_dev_info_get,
-- 
2.7.4



[dpdk-dev] [PATCH 4/7] ethdev: add support of MTU restoration

2017-05-27 Thread Wei Dai
Signed-off-by: Wei Dai 
---
 lib/librte_ether/rte_ethdev.c | 23 +--
 lib/librte_ether/rte_ethdev.h |  1 +
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index af8ccf6..0d9544c 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -945,6 +945,22 @@ rte_eth_dev_link_status_restore(uint8_t port_id)
 }
 
 static void
+rte_eth_dev_mtu_restore(uint8_t port_id)
+{
+   struct rte_eth_dev *dev;
+
+   dev = &rte_eth_devices[port_id];
+
+   if (dev->data->in_restoration == 0) {
+   dev->data->restore_mtu = dev->data->mtu;
+   return;
+   }
+
+   if (dev->data->restore_mtu != dev->data->mtu)
+   rte_eth_dev_set_mtu(port_id, dev->data->restore_mtu);
+}
+
+static void
 rte_eth_dev_config_restore(uint8_t port_id)
 {
struct rte_eth_dev *dev;
@@ -1005,6 +1021,8 @@ rte_eth_dev_config_restore(uint8_t port_id)
rte_eth_dev_tx_queue_restore(port_id, q);
 
rte_eth_dev_link_status_restore(port_id);
+
+   rte_eth_dev_mtu_restore(port_id);
 }
 
 int
@@ -2106,9 +2124,10 @@ rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
 
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
-   if (!ret)
+   if (!ret) {
dev->data->mtu = mtu;
-
+   dev->data->restore_mtu = mtu;
+   }
return ret;
 }
 
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 9428f57..aca8510 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1739,6 +1739,7 @@ struct rte_eth_dev_data {
 
struct rte_eth_conf dev_conf;   /**< Configuration applied to device. */
uint16_t mtu;   /**< Maximum Transmission Unit. */
+   uint16_t restore_mtu;
 
uint32_t min_rx_buf_size;
/**< Common rx buffer size handled by all queues */
-- 
2.7.4



[dpdk-dev] [PATCH 7/7] net/i40e: add support of restoration

2017-05-27 Thread Wei Dai
export dev_uninit and dev_init for restoration to
reset port but keep same port id.

Signed-off-by: Wei Dai 
---
 drivers/net/i40e/i40e_ethdev.c| 2 ++
 drivers/net/i40e/i40e_ethdev_vf.c | 5 +
 2 files changed, 7 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4c49673..9512ca4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -449,6 +449,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.dev_start= i40e_dev_start,
.dev_stop = i40e_dev_stop,
.dev_close= i40e_dev_close,
+   .dev_init = eth_i40e_dev_init,
+   .dev_uninit   = eth_i40e_dev_uninit,
.promiscuous_enable   = i40e_dev_promiscuous_enable,
.promiscuous_disable  = i40e_dev_promiscuous_disable,
.allmulticast_enable  = i40e_dev_allmulticast_enable,
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c 
b/drivers/net/i40e/i40e_ethdev_vf.c
index 859b5e8..9405888 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -163,6 +163,9 @@ static void i40evf_handle_pf_event(__rte_unused struct 
rte_eth_dev *dev,
   uint8_t *msg,
   uint16_t msglen);
 
+static int i40evf_dev_init(struct rte_eth_dev *eth_dev);
+static int i40evf_dev_uninit(struct rte_eth_dev *eth_dev);
+
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
 
@@ -194,6 +197,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.dev_configure= i40evf_dev_configure,
.dev_start= i40evf_dev_start,
.dev_stop = i40evf_dev_stop,
+   .dev_init = i40evf_dev_init,
+   .dev_uninit   = i40evf_dev_uninit,
.promiscuous_enable   = i40evf_dev_promiscuous_enable,
.promiscuous_disable  = i40evf_dev_promiscuous_disable,
.allmulticast_enable  = i40evf_dev_allmulticast_enable,
-- 
2.7.4



[dpdk-dev] [PATCH 5/7] ethdev: add support of restoration of multicast addr

2017-05-27 Thread Wei Dai
Signed-off-by: Wei Dai 
---
 lib/librte_ether/rte_ethdev.c | 31 ++-
 lib/librte_ether/rte_ethdev.h |  2 ++
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 0d9544c..78609f5 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -961,6 +961,20 @@ rte_eth_dev_mtu_restore(uint8_t port_id)
 }
 
 static void
+rte_eth_dev_mc_addr_list_restore(uint8_t port_id)
+{
+   struct rte_eth_dev *dev;
+
+   dev = &rte_eth_devices[port_id];
+
+   if (dev->data->mc_addr_count == 0)
+   return;
+
+   rte_eth_dev_set_mc_addr_list(port_id, dev->data->mc_addr_list,
+   dev->data->mc_addr_count);
+}
+
+static void
 rte_eth_dev_config_restore(uint8_t port_id)
 {
struct rte_eth_dev *dev;
@@ -1023,6 +1037,8 @@ rte_eth_dev_config_restore(uint8_t port_id)
rte_eth_dev_link_status_restore(port_id);
 
rte_eth_dev_mtu_restore(port_id);
+
+   rte_eth_dev_mc_addr_list_restore(port_id);
 }
 
 int
@@ -3423,12 +3439,25 @@ rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 uint32_t nb_mc_addr)
 {
struct rte_eth_dev *dev;
+   int ret;
 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
-   return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
+   ret = dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
+   if (!ret) {
+   if (dev->data->mc_addr_list != NULL)
+   rte_free(dev->data->mc_addr_list);
+   dev->data->mc_addr_list = rte_zmalloc("dev->mc_addr_list",
+   sizeof(struct ether_addr) * nb_mc_addr, 0);
+   if (dev->data->mc_addr_list != NULL) {
+   rte_memcpy(dev->data->mc_addr_list, mc_addr_set,
+  sizeof(struct ether_addr) * nb_mc_addr);
+   dev->data->mc_addr_count = nb_mc_addr;
+   }
+   }
+   return ret;
 }
 
 int
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index aca8510..8f7e772 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1771,6 +1771,8 @@ struct rte_eth_dev_data {
 
struct rte_eth_rx_queue_conf *rxq_conf;
struct rte_eth_tx_queue_conf *txq_conf;
+   uint32_t mc_addr_count;
+   struct ether_addr *mc_addr_list;
 };
 
 /** Device supports hotplug detach */
-- 
2.7.4



[dpdk-dev] [PATCH 2/3] net/ixgbe/base: disable X550EM-x 1GBASE-T led switch support

2017-05-27 Thread Wei Dai
This patch disables X550EM_X 1GBASE-T led_[on|off] support since
the LEDs are wired to the PHY and the driver cannot access the
PHY. led_[on|off] support is disabled by setting the function
pointer to NULL. init_led_link_act is also set to NULL.

Signed-off-by: Wei Dai 
---
 drivers/net/ixgbe/base/ixgbe_common.c | 3 ++-
 drivers/net/ixgbe/base/ixgbe_x550.c   | 6 ++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_common.c 
b/drivers/net/ixgbe/base/ixgbe_common.c
index 4dabb43..7f85713 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -504,7 +504,8 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
}
 
/* Initialize the LED link active for LED blink support */
-   hw->mac.ops.init_led_link_act(hw);
+   if (hw->mac.ops.init_led_link_act)
+   hw->mac.ops.init_led_link_act(hw);
 
if (status != IXGBE_SUCCESS)
DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c 
b/drivers/net/ixgbe/base/ixgbe_x550.c
index dd8be75..9862391 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -86,6 +86,10 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
/* Manageability interface */
mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
switch (hw->device_id) {
+   case IXGBE_DEV_ID_X550EM_X_1G_T:
+   hw->mac.ops.led_on = NULL;
+   hw->mac.ops.led_off = NULL;
+   break;
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
@@ -957,6 +961,7 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
mac->ops.setup_fc = NULL;
mac->ops.setup_eee = NULL;
+   mac->ops.init_led_link_act = NULL;
}
 
return ret_val;
@@ -2425,6 +2430,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_phy_ext_1g_t:
/* link is managed by FW */
phy->ops.setup_link = NULL;
+   phy->ops.reset = NULL;
break;
case ixgbe_phy_x550em_xfi:
/* link is managed by HW */
-- 
2.7.4



[dpdk-dev] [PATCH 3/3] net/ixgbe/base: update version to 2017.05.16

2017-05-27 Thread Wei Dai
* Remove PHY access for some 1G ports
* Disable X550EM-x 1GBASE-T led switch support

Signed-off-by: Wei Dai 
---
 drivers/net/ixgbe/base/README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index a61617b..8c833b4 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
 ===
 
 This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2017.03.29 released by the team which develop
+cid-10g-shared-code.2017.05.16 released by the team which develop
 basic drivers for any ixgbe NIC. The sub-directory of base/
 contains the original source package.
 This driver is valid for the product(s) listed below
-- 
2.7.4



[dpdk-dev] [PATCH 1/3] net/ixgbe/base: remove PHY access for some 1G ports

2017-05-27 Thread Wei Dai
This patch removes some 1GBASE-T PHY access since the FW
configures the PHY. SW shall not configure or initialize link.
Accessing the PHY would require the use of MDI clause 22 which
should be avoided in high layer driver code.

Signed-off-by: Wei Dai 
---
 drivers/net/ixgbe/base/ixgbe_x550.c | 24 +---
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c 
b/drivers/net/ixgbe/base/ixgbe_x550.c
index 674dc14..dd8be75 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -459,9 +459,13 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_A_10G_T:
-   case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+   case IXGBE_DEV_ID_X550EM_X_1G_T:
+   hw->phy.type = ixgbe_phy_ext_1g_t;
+   hw->phy.ops.read_reg = NULL;
+   hw->phy.ops.write_reg = NULL;
+   break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
hw->phy.type = ixgbe_phy_fw;
@@ -751,6 +755,11 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
phy->ops.set_phy_power = NULL;
phy->ops.get_firmware_version = NULL;
break;
+   case IXGBE_DEV_ID_X550EM_X_1G_T:
+   mac->ops.setup_fc = NULL;
+   phy->ops.identify = ixgbe_identify_phy_x550em;
+   phy->ops.set_phy_power = NULL;
+   break;
default:
phy->ops.identify = ixgbe_identify_phy_x550em;
}
@@ -945,6 +954,10 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
  ixgbe_write_i2c_combined_generic_unlocked;
link->addr = IXGBE_CS4227;
 
+   if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
+   mac->ops.setup_fc = NULL;
+   mac->ops.setup_eee = NULL;
+   }
 
return ret_val;
 }
@@ -1915,6 +1928,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
+   if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+   break;
if (hw->mac.type == ixgbe_mac_X550EM_a) {
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
@@ -2380,10 +2395,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
-   case IXGBE_DEV_ID_X550EM_X_1G_T:
-   phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
-   phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
-   break;
default:
break;
}
@@ -2565,10 +2576,9 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
ERROR_REPORT2(IXGBE_ERROR_CAUTION,
-   "semaphore failed with %d", status);
+   "semaphore failed with %d", status);
return IXGBE_ERR_SWFW_SYNC;
}
-
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
-- 
2.7.4



Re: [dpdk-dev] [RFC] Add Membership Library

2017-05-27 Thread Vincent Jardin
Why duplicating Jyri's libbloom - https://github.com/jvirkki/libbloom - for 
this DPDK capability? Why not showing that you can contribute to libbloom 
and make it linkable with the DPDK?


There is so much duplicated code...

Thank you,
 Vincent




[dpdk-dev] [RFC Patch 01/39] eal: add Bus log type

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Shreyansh Jain 
---
 lib/librte_eal/common/include/rte_log.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_eal/common/include/rte_log.h 
b/lib/librte_eal/common/include/rte_log.h
index 3419138..4d001f5 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -87,6 +87,7 @@ extern struct rte_logs rte_logs;
 #define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */
 #define RTE_LOGTYPE_EFD   18 /**< Log related to EFD. */
 #define RTE_LOGTYPE_EVENTDEV  19 /**< Log related to eventdev. */
+#define RTE_LOGTYPE_BUS   20 /**< Log related to Bus drivers. */
 
 /* these log types can be used in an application */
 #define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */
-- 
2.7.4



[dpdk-dev] [RFC Patch 00/39] Introduce NXP DPAA Bus, Mempool and PMD

2017-05-27 Thread Shreyansh Jain
Series based on net-next/master (8c79ce3)

:: This is an RFC! ::

Introduction


This patch series adds NXP's QorIQ-Layerscape DPAA Architecture based
bus driver, mempool driver and PMD. This version of driver supports NXP
LS1043A/LS1023A, LS1046A/LS1026A family of network SoCs. [R1]

DPAA, or Datapath Acceleration Architecture [R2], is a set of hardware
components designed for high-speed network packet processing. This
architecture provides the infrastructure to support simplified sharing of
networking interfaces and accelerators by multiple CPU cores, and the
accelerators themselves.

This patchset introduces the following:
1. DPAA Bus (drivers/bus/dpaa)
 The core of DPAA bus is implemented using 3 main hardware blocks: QMan,
 or Queue Manager; BMan, or Buffer Manager and FMan, or Frame Manager.
 The patches introduce necessary layers to expose the DPAA hardware
 blocks for interfacing with RTE framework.

2. DPAA Mempool (drivers/mempool/dpaa)
 BMan, or Buffer Manager, block of DPAA features a hardware offloaded
 mempool. These patches add support for a driver to manage the BMan
 block. This driver allows for mempool creation, deletion, buffer
 acquire and release, as per the RTE APIs.

3. DPAA PMD (drivers/net/dpaa)
 The Poll Mode Driver for DPAA NIC Interfaces.

Patch Layout


01: A dependency patch [D1]
02: Add EAL support for 24, 40 and 48 bit operations
03~18: Add DPAA Bus support and features, incrementally
19: Add Documentation
20~22: Add DPAA Mempool support
23~39: Add PMD and its various features, incrementally

Pending
===
1. Some patches have potential for further breakup
2. There are some checkpatch errors - some which can be removed, and others
   which might have to be ignored.
3. There may be some signoff/authorship changes in subsequent series.

Dependency
==

This patch is dependent on:

[D1] Patch: http://dpdk.org/dev/patchwork/patch/24478/
 This patch adds macro for Bus logging to RTE logging framework

References
==

[R1] 
http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-layerscape-arm-processors:QORIQ-ARM
[R2] http://www.nxp.com/assets/documents/data/en/white-papers/QORIQDPAAWP.pdf

Hemant Agrawal (2):
  eal: add support for 24 40 and 48 bit operations
  bus/dpaa: add compatibility and helper macros

Shreyansh Jain (37):
  eal: add Bus log type
  config: add NXP DPAA SoC build configuration
  bus/dpaa: introduce NXP DPAA Bus driver skeleton
  bus/dpaa: add OF parser for device scanning
  bus/dpaa: introducing FMan configurations
  bus/dpaa: add FMan hardware operations
  bus/dpaa: enable DPAA IOCTL portal driver
  bus/dpaa: add layer for interrupt emulation using pthread
  bus/dpaa: add routines for managing a RB tree
  bus/dpaa: add QMAN interface driver
  bus/dpaa: add QMan driver core routines
  bus/dpaa: add BMAN driver core
  bus/dpaa: add support for FMAN frame queue lookup
  bus/dpaa: add BMan hardware interfaces
  bus/dpaa: add fman flow control threshold setting
  bus/dpaa: integrate DPAA Bus driver with hardware drivers
  doc: add NXP DPAA PMD documentation
  mempool/dpaa: add support for NXP DPAA Mempool
  drivers: enable compilation of NXP DPAA Mempool driver
  maintainers: claim ownership of NXP DPAA Mempool driver
  net/dpaa: add NXP DPAA PMD driver skeleton
  config: enable NXP DPAA PMD compilation
  net/dpaa: add support for Tx and Rx queue setup
  net/dpaa: add support for MTU update
  net/dpaa: add support for link status update
  net/dpaa: add support for jumbo frames
  net/dpaa: add support for promiscuous toggle
  net/dpaa: add support for multicast toggle
  net/dpaa: add support for basic stats
  net/dpaa: add support for device info
  net/dpaa: support for checksum offload
  net/dpaa: add support for hashed RSS
  net/dpaa: add support for MAC address update
  net/dpaa: add support for packet type parsing
  net/dpaa: add support for Scattered Rx
  net/dpaa: add support for flow control
  net/dpaa: add packet dump for debugging

 MAINTAINERS|9 +
 config/common_base |3 +
 config/defconfig_arm64-dpaa-linuxapp-gcc   |   63 +
 doc/guides/nics/dpaa.rst   |  360 +++
 doc/guides/nics/features/dpaa.ini  |   22 +
 doc/guides/nics/index.rst  |1 +
 drivers/bus/Makefile   |3 +
 drivers/bus/dpaa/Makefile  |   84 +
 drivers/bus/dpaa/base/fman/fman.c  |  537 +
 drivers/bus/dpaa/base/fman/fman_hw.c   |  634 +
 drivers/bus/dpaa/base/fman/netcfg_layer.c  |  205 ++
 drivers/bus/dpaa/base/fman/of.c|  576 +
 drivers/bus/dpaa/base/qbman/bman.c |  394 +++
 drivers/bus/dpaa/base/qbman/bman.h |  550 +
 drivers/bus/dpaa/base/qbman/bman_driver.c  |  323 +++
 drivers/bus/dpaa/base/qbman/bman_p

[dpdk-dev] [RFC Patch 03/39] config: add NXP DPAA SoC build configuration

2017-05-27 Thread Shreyansh Jain
This patch adds skeleton build configuration for DPAA platform.

Signed-off-by: Shreyansh Jain 
---
 config/defconfig_arm64-dpaa-linuxapp-gcc | 39 
 mk/machine/dpaa/rte.vars.mk  | 61 
 2 files changed, 100 insertions(+)
 create mode 100644 config/defconfig_arm64-dpaa-linuxapp-gcc
 create mode 100644 mk/machine/dpaa/rte.vars.mk

diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc 
b/config/defconfig_arm64-dpaa-linuxapp-gcc
new file mode 100644
index 000..34e5d42
--- /dev/null
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -0,0 +1,39 @@
+#   BSD LICENSE
+#
+#   Copyright 2016 Freescale Semiconductor, Inc.
+#   Copyright 2017 NXP.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+# * Neither the name of NXP nor the names of its
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include "defconfig_arm64-armv8a-linuxapp-gcc"
+
+# NXP (Freescale) - Soc Architecture with FMAN, QMAN & BMAN support
+CONFIG_RTE_MACHINE="dpaa"
+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
+
diff --git a/mk/machine/dpaa/rte.vars.mk b/mk/machine/dpaa/rte.vars.mk
new file mode 100644
index 000..b24cedf
--- /dev/null
+++ b/mk/machine/dpaa/rte.vars.mk
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+#
+#   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+#   Copyright 2017 NXP. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+# * Neither the name of NXP nor the names of its
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# machine:
+#
+#   - can define ARCH variable (overridden by cmdline value)
+#   - can define CROSS variable (overridden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overridden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+#   - can define CPU_CFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overridden by c

[dpdk-dev] [RFC Patch 02/39] eal: add support for 24 40 and 48 bit operations

2017-05-27 Thread Shreyansh Jain
From: Hemant Agrawal 

Bit swap and LE<=>BE conversions for 24, 40 and 48 bit widths

Signed-off-by: Hemant Agrawal 
---
 .../common/include/generic/rte_byteorder.h | 78 ++
 1 file changed, 78 insertions(+)

diff --git a/lib/librte_eal/common/include/generic/rte_byteorder.h 
b/lib/librte_eal/common/include/generic/rte_byteorder.h
index e00bccb..8903ff6 100644
--- a/lib/librte_eal/common/include/generic/rte_byteorder.h
+++ b/lib/librte_eal/common/include/generic/rte_byteorder.h
@@ -122,6 +122,84 @@ rte_constant_bswap64(uint64_t x)
((x & 0xff00ULL) >> 56);
 }
 
+/*
+ * An internal function to swap bytes of a 48-bit value.
+ */
+static inline uint64_t
+rte_constant_bswap48(uint64_t x)
+{
+   return  ((x & 0x00ffULL) << 40) |
+   ((x & 0xff00ULL) << 24) |
+   ((x & 0x00ffULL) <<  8) |
+   ((x & 0xff00ULL) >>  8) |
+   ((x & 0x00ffULL) >> 24) |
+   ((x & 0xff00ULL) >> 40);
+}
+
+/*
+ * An internal function to swap bytes of a 40-bit value.
+ */
+static inline uint64_t
+rte_constant_bswap40(uint64_t x)
+{
+   return  ((x & 0xffULL) << 32) |
+   ((x & 0x00ff00ULL) << 16) |
+   ((x & 0xffULL)) |
+   ((x & 0x00ff00ULL) >> 16) |
+   ((x & 0xffULL) >> 32);
+}
+
+/*
+ * An internal function to swap bytes of a 24-bit value.
+ */
+static inline uint32_t
+rte_constant_bswap24(uint32_t x)
+{
+   return  ((x & 0xffULL) << 16) |
+   ((x & 0x00ff00ULL)) |
+   ((x & 0xffULL) >> 16);
+}
+
+#define rte_bswap24 rte_constant_bswap24
+#define rte_bswap40 rte_constant_bswap40
+#define rte_bswap48 rte_constant_bswap48
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define rte_cpu_to_le_24(x) (x)
+#define rte_cpu_to_le_40(x) (x)
+#define rte_cpu_to_le_48(x) (x)
+
+#define rte_cpu_to_be_24(x) rte_bswap24(x)
+#define rte_cpu_to_be_40(x) rte_bswap40(x)
+#define rte_cpu_to_be_48(x) rte_bswap48(x)
+
+#define rte_le_to_cpu_24(x) (x)
+#define rte_le_to_cpu_40(x) (x)
+#define rte_le_to_cpu_48(x) (x)
+
+#define rte_be_to_cpu_24(x) rte_bswap24(x)
+#define rte_be_to_cpu_40(x) rte_bswap40(x)
+#define rte_be_to_cpu_48(x) rte_bswap48(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define rte_cpu_to_le_24(x) rte_bswap24(x)
+#define rte_cpu_to_le_40(x) rte_bswap40(x)
+#define rte_cpu_to_le_48(x) rte_bswap48(x)
+
+#define rte_cpu_to_be_24(x) (x)
+#define rte_cpu_to_be_40(x) (x)
+#define rte_cpu_to_be_48(x) (x)
+
+#define rte_le_to_cpu_24(x) rte_bswap24(x)
+#define rte_le_to_cpu_40(x) rte_bswap40(x)
+#define rte_le_to_cpu_48(x) rte_bswap48(x)
+
+#define rte_be_to_cpu_24(x) (x)
+#define rte_be_to_cpu_40(x) (x)
+#define rte_be_to_cpu_48(x) (x)
+#endif
 
 #ifdef __DOXYGEN__
 
-- 
2.7.4



[dpdk-dev] [RFC Patch 04/39] bus/dpaa: introduce NXP DPAA Bus driver skeleton

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Shreyansh Jain 
Signed-off-by: Hemant Agrawal 
---
 MAINTAINERS   |   5 +
 config/common_base|   3 +
 config/defconfig_arm64-dpaa-linuxapp-gcc  |   7 ++
 drivers/bus/Makefile  |   3 +
 drivers/bus/dpaa/Makefile |  63 +++
 drivers/bus/dpaa/dpaa_bus.c   | 172 ++
 drivers/bus/dpaa/rte_bus_dpaa_version.map |   7 ++
 drivers/bus/dpaa/rte_dpaa_bus.h   | 169 +
 drivers/bus/dpaa/rte_dpaa_logs.h  |  95 +
 9 files changed, 524 insertions(+)
 create mode 100644 drivers/bus/dpaa/Makefile
 create mode 100644 drivers/bus/dpaa/dpaa_bus.c
 create mode 100644 drivers/bus/dpaa/rte_bus_dpaa_version.map
 create mode 100644 drivers/bus/dpaa/rte_dpaa_bus.h
 create mode 100644 drivers/bus/dpaa/rte_dpaa_logs.h

diff --git a/MAINTAINERS b/MAINTAINERS
index afb4cab..e39044e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -388,6 +388,11 @@ F: drivers/net/nfp/
 F: doc/guides/nics/nfp.rst
 F: doc/guides/nics/features/nfp.ini
 
+NXP dpaa
+M: Hemant Agrawal 
+M: Shreyansh Jain 
+F: drivers/bus/dpaa/
+
 NXP dpaa2
 M: Hemant Agrawal 
 M: Shreyansh Jain 
diff --git a/config/common_base b/config/common_base
index 67ef2ec..e8aacb6 100644
--- a/config/common_base
+++ b/config/common_base
@@ -308,6 +308,9 @@ CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
 CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
 
+# NXP DPAA Bus
+CONFIG_RTE_LIBRTE_DPAA_BUS=n
+
 #
 # Compile NXP DPAA2 FSL-MC Bus
 #
diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc 
b/config/defconfig_arm64-dpaa-linuxapp-gcc
index 34e5d42..f1b6d86 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -37,3 +37,10 @@
 CONFIG_RTE_MACHINE="dpaa"
 CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
 
+
+# DPAA Bus
+CONFIG_RTE_LIBRTE_DPAA_BUS=y
+CONFIG_RTE_LIBRTE_DPAA_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_DPAA_DEBUG_TX=n
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1e5b281..2dad392 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -33,6 +33,9 @@ include $(RTE_SDK)/mk/rte.vars.mk
 
 core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_ether
 
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa
+DEPDIRS-dpaa = $(core-libs)
+
 DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
 DEPDIRS-fslmc = $(core-libs)
 
diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
new file mode 100644
index 000..ae48bf2
--- /dev/null
+++ b/drivers/bus/dpaa/Makefile
@@ -0,0 +1,63 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 NXP. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+# * Neither the name of NXP nor the names of its
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
+
+#
+# library name
+#
+LIB = librte_bus_dpaa.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_BUS_DPAA)/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+
+# versioning export map
+EXPORT_MAP := rte_bus_dpaa_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+   dpaa_bus.c
+
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers

[dpdk-dev] [RFC Patch 05/39] bus/dpaa: add compatibility and helper macros

2017-05-27 Thread Shreyansh Jain
From: Hemant Agrawal 

Linked list, bit operations and compatibility macros.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Hemant Agrawal 
---
 drivers/bus/dpaa/include/compat.h| 330 +++
 drivers/bus/dpaa/include/dpaa_bits.h |  65 +++
 drivers/bus/dpaa/include/dpaa_list.h | 101 +++
 3 files changed, 496 insertions(+)
 create mode 100644 drivers/bus/dpaa/include/compat.h
 create mode 100644 drivers/bus/dpaa/include/dpaa_bits.h
 create mode 100644 drivers/bus/dpaa/include/dpaa_list.h

diff --git a/drivers/bus/dpaa/include/compat.h 
b/drivers/bus/dpaa/include/compat.h
new file mode 100644
index 000..ce6136e
--- /dev/null
+++ b/drivers/bus/dpaa/include/compat.h
@@ -0,0 +1,330 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __COMPAT_H
+#define __COMPAT_H
+
+#include 
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. Ie. for interfaces that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#define __maybe_unused __rte_unused
+#define __always_unused__rte_unused
+#define __packed   __rte_packed
+#define noinline   __attribute__((noinline))
+
+#define L1_CACHE_BYTES 64
+#define cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Debugging */
+#define prflush(fmt, args...) \
+   do { \
+   printf(fmt, ##args); \
+   fflush(stdout); \
+   } while (0)
+
+#define pr_crit(fmt, args...)   prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...)prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...)   prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...)   prflush(fmt, ##args)
+
+#define ASSERT(x) do {\
+   if (!(x)) \
+   rte_panic("DPAA: x"); \
+} while (0)
+#define BUG_ON(x) ASSERT(!(x))
+
+/* Required types */
+typedef uint8_tu8;
+typedef uint16_t   u16;
+typedef uint32_t   u32;
+typedef uint64_t   u64;
+typedef uint64_t   dma_addr_t;
+typedef cpu_set_t  cpumask_t;
+typedef uint32_t   phandle;
+typedef uint32_t   gfp_t;
+typedef uint32_t   irqreturn_t;
+
+#define IRQ_HANDLED0
+#define request_irqqbman_request_irq
+#define free_irq   qbman_free_irq
+
+#define __iomem
+#define GFP_KERNEL 0
+#define __raw_readb(p) (*(

[dpdk-dev] [RFC Patch 07/39] bus/dpaa: introducing FMan configurations

2017-05-27 Thread Shreyansh Jain
FMan, or Frame Manager, inspects traffic and splits it into queues on ingress.
It is also responsible for directing traffic on queues on egress.

This patch introduces FMan configuration interfaces. This layer is
used by the bus driver for configuring the hardware block.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |   2 +
 drivers/bus/dpaa/base/fman/fman.c | 537 ++
 drivers/bus/dpaa/base/fman/netcfg_layer.c | 205 
 drivers/bus/dpaa/include/fman.h   | 473 ++
 drivers/bus/dpaa/include/netcfg.h |  96 ++
 5 files changed, 1313 insertions(+)
 create mode 100644 drivers/bus/dpaa/base/fman/fman.c
 create mode 100644 drivers/bus/dpaa/base/fman/netcfg_layer.c
 create mode 100644 drivers/bus/dpaa/include/fman.h
 create mode 100644 drivers/bus/dpaa/include/netcfg.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 9069a2b..14bbb14 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -65,6 +65,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
dpaa_bus.c
 
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+   base/fman/fman.c \
base/fman/of.c \
+   base/fman/netcfg_layer.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/fman/fman.c 
b/drivers/bus/dpaa/base/fman/fman.c
new file mode 100644
index 000..0be4ea4
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -0,0 +1,537 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+#include 
+
+#include 
+
+/* This header declares the driver interface we implement */
+#include 
+#include 
+
+#define QMI_PORT_REGS_OFFSET   0x400
+
+/* CCSR map address to access ccsr based register */
+void *fman_ccsr_map;
+/* fman version info */
+u16 fman_ip_rev;
+static int get_once;
+u32 fman_dealloc_bufs_mask_hi;
+u32 fman_dealloc_bufs_mask_lo;
+
+int fman_ccsr_map_fd = -1;
+static COMPAT_LIST_HEAD(__ifs);
+
+/* This is the (const) global variable that callers have read-only access to.
+ * Internally, we have read-write access directly to __ifs.
+ */
+const struct list_head *fman_if_list = &__ifs;
+
+static void
+if_destructor(struct __fman_if *__if)
+{
+   struct fman_if_bpool *bp, *tmpbp;
+
+   if (__if->__if.mac_type == fman_offline)
+   goto cleanup;
+
+   list_for_each_entry_safe(bp, tmpbp, &__if->__if.bpool_list, node) {
+   list_del(&bp->node);
+   rte_free(bp);
+   }
+cleanup:
+   rte_free(__if);
+}
+
+static int
+fman_get_ip_rev(const struct device_node *fman_node)
+{
+   const uint32_t *fman_addr;
+   uint64_t phys_addr;
+   uint64_t regs_size;
+   uint32_t ip_rev_1;
+   int _errno;
+
+   fman_addr = of_get_address(fman_node, 0, ®s_size, NULL);
+   if (!fman_addr) {
+   pr_err("of_get_address cannot return fman address\n");
+ 

[dpdk-dev] [RFC Patch 11/39] bus/dpaa: add routines for managing a RB tree

2017-05-27 Thread Shreyansh Jain
QMAN frames are managed using an RB-tree data structure.
This patch introduces the routines necessary for implementing an RB-tree.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/include/dpaa_rbtree.h | 143 +
 1 file changed, 143 insertions(+)
 create mode 100644 drivers/bus/dpaa/include/dpaa_rbtree.h

diff --git a/drivers/bus/dpaa/include/dpaa_rbtree.h 
b/drivers/bus/dpaa/include/dpaa_rbtree.h
new file mode 100644
index 000..fff2110
--- /dev/null
+++ b/drivers/bus/dpaa/include/dpaa_rbtree.h
@@ -0,0 +1,143 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2017 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of NXP nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_RBTREE_H
+#define __DPAA_RBTREE_H
+
+#include 
+//
+/* RB-trees */
+//
+
+/* Linux has a good RB-tree implementation, that we can't use (GPL). It also 
has
+ * a flat/hooked-in interface that virtually requires license-contamination in
+ * order to write a caller-compatible implementation. Instead, I've created an
+ * RB-tree encapsulation on top of linux's primitives (it does some of the work
+ * the client logic would normally do), and this gives us something we can
+ * reimplement on LWE. Unfortunately there's no good+free RB-tree
+ * implementations out there that are license-compatible and "flat" (ie. no
+ * dynamic allocation). I did find a malloc-based one that I could convert, but
+ * that will be a task for later on. For now, LWE's RB-tree is implemented 
using
+ * an ordered linked-list.
+ *
+ * Note, the only linux-esque type is "struct rb_node", because it's used
+ * statically in the exported header, so it can't be opaque. Our version 
doesn't
+ * include a "rb_parent_color" field because we're doing linked-list instead of
+ * a true rb-tree.
+ */
+
+struct rb_node {
+   struct rb_node *prev, *next;
+};
+
+struct dpa_rbtree {
+   struct rb_node *head, *tail;
+};
+
+#define DPAA_RBTREE { NULL, NULL }
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+   tree->head = tree->tail = NULL;
+}
+
+#define QMAN_NODE2OBJ(ptr, type, node_field) \
+   (type *)((char *)ptr - offsetof(type, node_field))
+
+#define IMPLEMENT_DPAA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+   struct rb_node *node = tree->head; \
+   if (!node) { \
+   tree->head = tree->tail = &obj->node_field; \
+   obj->node_field.prev = obj->node_field.next = NULL; \
+   return 0; \
+   } \
+   while (node) { \
+   type *item = QMAN_NODE2OBJ(node, type, node_field); \
+   if (obj->val_field == item->val_field) \
+   return -EBUSY; \
+   if (obj->val_field < item->val_field) { \
+   if (tree->head == node) \
+   tree->head = &obj->node_field; \
+   else \
+   node->prev->next = &obj->node_field; \
+   obj->node_field.prev = node->prev; \
+   obj->node_field.next = node; \
+   node->prev = &obj->node_field; \
+   return 0; \
+   } \
+   node = node->next; \
+   } \
+   obj->node_field.prev = tree->tail; \
+   obj->node_field.next = NULL; \
+   tree->t

[dpdk-dev] [RFC Patch 08/39] bus/dpaa: add FMan hardware operations

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Geoff Thorpe 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |   1 +
 drivers/bus/dpaa/base/fman/fman_hw.c  | 606 ++
 drivers/bus/dpaa/include/fman.h   |   2 +
 drivers/bus/dpaa/include/fsl_fman.h   | 182 +
 drivers/bus/dpaa/include/fsl_fman_crc64.h | 263 +
 5 files changed, 1054 insertions(+)
 create mode 100644 drivers/bus/dpaa/base/fman/fman_hw.c
 create mode 100644 drivers/bus/dpaa/include/fsl_fman.h
 create mode 100644 drivers/bus/dpaa/include/fsl_fman_crc64.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 14bbb14..66b6f2f 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -66,6 +66,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
 
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/fman.c \
+   base/fman/fman_hw.c \
base/fman/of.c \
base/fman/netcfg_layer.c
 
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c 
b/drivers/bus/dpaa/base/fman/fman_hw.c
new file mode 100644
index 000..77908ec
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -0,0 +1,606 @@
+/*-
+ *   BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+#include 
+#include 
+/* This header declares things about Fman hardware itself (the format of status
+ * words and an inline implementation of CRC64). We include it only in order to
+ * instantiate the one global variable it depends on.
+ */
+#include 
+#include 
+
+/* Instantiate the global variable that the inline CRC64 implementation (in
+ * ) depends on.
+ */
+DECLARE_FMAN_CRC64_TABLE();
+
+#define ETH_ADDR_TO_UINT64(eth_addr)  \
+   (uint64_t)(((uint64_t)(eth_addr)[0] << 40) |   \
+   ((uint64_t)(eth_addr)[1] << 32) |   \
+   ((uint64_t)(eth_addr)[2] << 24) |   \
+   ((uint64_t)(eth_addr)[3] << 16) |   \
+   ((uint64_t)(eth_addr)[4] << 8) |\
+   ((uint64_t)(eth_addr)[5]))
+
+void
+fman_if_set_mcast_filter_table(struct fman_if *p)
+{
+   struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+   void *hashtable_ctrl;
+   uint32_t i;
+
+   hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+   for (i = 0; i < 64; i++)
+   out_be32(hashtable_ctrl, i|HASH_CTRL_MCAST_EN);
+}
+
+void
+fman_if_reset_mcast_filter_table(struct fman_if *p)
+{
+   struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+   void *hashtable_ctrl;
+   uint32_t i;
+
+   hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+   for (i = 0; i < 64; i++)
+   out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
+}
+
+static
+uint32_t get_mac_hash_code(uint64_t eth_addr)
+{
+   uint64_tmask1, mask2;
+   uint32_txorVal = 0;
+   uint8_t i, j;
+
+   for (i = 0; i < 6; i++) {
+   mask1 = eth_addr & (uint64_t)0x01;
+   eth_addr >>= 1;
+
+   for (j = 0; j < 7; j++) {
+   mask2 = eth_addr & (uint64_t)0x01;
+   mask1 ^= mask2;
+   eth_addr >>= 1;
+   }
+
+   xorVal |= (mask1 << (5 - i));
+   }
+
+   return xorVal;
+}
+
+int
+fman_memac_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+   uint64_t eth_addr;
+   void *hashtable_ctrl;
+   uint32_t

[dpdk-dev] [RFC Patch 13/39] bus/dpaa: add QMan driver core routines

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Geoff Thorpe 
Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |2 +
 drivers/bus/dpaa/base/qbman/dpaa_alloc.c  |   88 ++
 drivers/bus/dpaa/base/qbman/qman.c| 2402 +
 drivers/bus/dpaa/base/qbman/qman.h|  888 +++
 drivers/bus/dpaa/base/qbman/qman_driver.c |   12 +
 drivers/bus/dpaa/base/qbman/qman_priv.h   |   11 -
 drivers/bus/dpaa/include/fsl_qman.h   |  767 -
 drivers/bus/dpaa/include/fsl_usd.h|1 +
 8 files changed, 4148 insertions(+), 23 deletions(-)
 create mode 100644 drivers/bus/dpaa/base/qbman/dpaa_alloc.c
 create mode 100644 drivers/bus/dpaa/base/qbman/qman.c
 create mode 100644 drivers/bus/dpaa/base/qbman/qman.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 5dc8a4d..06a6467 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -71,7 +71,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/of.c \
base/fman/netcfg_layer.c \
base/qbman/process.c \
+   base/qbman/qman.c \
base/qbman/qman_driver.c \
+   base/qbman/dpaa_alloc.c \
base/qbman/dpaa_sys.c
 
 # Link Pthread
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_alloc.c 
b/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
new file mode 100644
index 000..690576a
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
@@ -0,0 +1,88 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2009-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+#include 
+#include 
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+   return process_alloc(dpaa_id_fqid, result, count, align, partial);
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+   process_release(dpaa_id_fqid, fqid, count);
+}
+
+int qman_reserve_fqid_range(u32 fqid, unsigned int count)
+{
+   return process_reserve(dpaa_id_fqid, fqid, count);
+}
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+   return process_alloc(dpaa_id_qpool, result, count, align, partial);
+}
+
+void qman_release_pool_range(u32 pool, u32 count)
+{
+   process_release(dpaa_id_qpool, pool, count);
+}
+
+int qman_reserve_pool_range(u32 pool, u32 count)
+{
+   return process_reserve(dpaa_id_qpool, pool, count);
+}
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+   return process_alloc(dpaa_id_cgrid, result, count, align, partial);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+   process_release(dpaa_id_cgrid, cgrid, count);
+}
+
+int qman_reserve_cgrid_range(u32 cgrid, u32 count)
+{
+   return process_reserve(dpaa_id_cgrid, cgrid, count);
+}
diff --git a/drivers/bus/dpaa/base/qbman/qman.c 
b/drivers/bus/dpaa/base/qbman/qman.c
new file mode 100644
index 000..8da7acb
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -0,0 +1,2402 @@
+/*-
+ * This file is provide

[dpdk-dev] [RFC Patch 10/39] bus/dpaa: add layer for interrupt emulation using pthread

2017-05-27 Thread Shreyansh Jain
An interrupt manager is implemented by emulation over pthreads.
Handlers are registered by the QBMAN layer so that it is notified about
any interrupt request from DPAA blocks in userspace.

Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile  |   3 +-
 drivers/bus/dpaa/base/qbman/dpaa_sys.c | 136 +
 drivers/bus/dpaa/base/qbman/dpaa_sys.h |  65 
 3 files changed, 203 insertions(+), 1 deletion(-)
 create mode 100644 drivers/bus/dpaa/base/qbman/dpaa_sys.c
 create mode 100644 drivers/bus/dpaa/base/qbman/dpaa_sys.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 07c9880..591b65e 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -70,6 +70,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/fman_hw.c \
base/fman/of.c \
base/fman/netcfg_layer.c \
-   base/qbman/process.c
+   base/qbman/process.c \
+   base/qbman/dpaa_sys.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.c 
b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
new file mode 100644
index 000..0017da5
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -0,0 +1,136 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include "dpaa_sys.h"
+
+struct process_interrupt {
+   int irq;
+   irqreturn_t (*isr)(int irq, void *arg);
+   unsigned long flags;
+   const char *name;
+   void *arg;
+   struct list_head node;
+};
+
+static COMPAT_LIST_HEAD(process_irq_list);
+static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void process_interrupt_install(struct process_interrupt *irq)
+{
+   int ret;
+   /* Add the irq to the end of the list */
+   ret = pthread_mutex_lock(&process_irq_lock);
+   assert(!ret);
+   list_add_tail(&irq->node, &process_irq_list);
+   ret = pthread_mutex_unlock(&process_irq_lock);
+   assert(!ret);
+}
+
+static void process_interrupt_remove(struct process_interrupt *irq)
+{
+   int ret;
+
+   ret = pthread_mutex_lock(&process_irq_lock);
+   assert(!ret);
+   list_del(&irq->node);
+   ret = pthread_mutex_unlock(&process_irq_lock);
+   assert(!ret);
+}
+
+static struct process_interrupt *process_interrupt_find(int irq_num)
+{
+   int ret;
+   struct process_interrupt *i = NULL;
+
+   ret = pthread_mutex_lock(&process_irq_lock);
+   assert(!ret);
+   list_for_each_entry(i, &process_irq_list, node) {
+   if (i->irq == irq_num)
+   goto done;
+   }
+done:
+   ret = pthread_mutex_unlock(&process_irq_lock);
+   assert(!ret);
+   return i;
+}
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int 

[dpdk-dev] [RFC Patch 15/39] bus/dpaa: add support for FMAN frame queue lookup

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Geoff Thorpe 
Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/base/qbman/qman.c| 99 ++-
 drivers/bus/dpaa/base/qbman/qman_driver.c |  7 ++-
 drivers/bus/dpaa/base/qbman/qman_priv.h   | 11 
 drivers/bus/dpaa/include/fsl_qman.h   | 12 
 4 files changed, 126 insertions(+), 3 deletions(-)

diff --git a/drivers/bus/dpaa/base/qbman/qman.c 
b/drivers/bus/dpaa/base/qbman/qman.c
index 8da7acb..2a1882a 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -176,6 +176,65 @@ static inline struct qman_fq *table_find_fq(struct 
qman_portal *p, u32 fqid)
return fqtree_find(&p->retire_table, fqid);
 }
 
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+   num_entries++;
+   /* Allocate 1 more entry since the first entry is not used */
+   qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
+   if (!qman_fq_lookup_table) {
+   pr_err("QMan: Could not allocate fq lookup table\n");
+   return -ENOMEM;
+   }
+   memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
+   qman_fq_lookup_table_size = num_entries;
+   pr_info("QMan: Allocated lookup table at %p, entry count %lu\n",
+   qman_fq_lookup_table,
+   (unsigned long)qman_fq_lookup_table_size);
+   return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+   u32 i;
+
+   spin_lock(&fq_hash_table_lock);
+   /* Can't use index zero because this has special meaning
+* in context_b field.
+*/
+   for (i = 1; i < qman_fq_lookup_table_size; i++) {
+   if (qman_fq_lookup_table[i] == NULL) {
+   *entry = i;
+   qman_fq_lookup_table[i] = fq;
+   spin_unlock(&fq_hash_table_lock);
+   return 0;
+   }
+   }
+   spin_unlock(&fq_hash_table_lock);
+   return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+   spin_lock(&fq_hash_table_lock);
+   BUG_ON(entry >= qman_fq_lookup_table_size);
+   qman_fq_lookup_table[entry] = NULL;
+   spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+   BUG_ON(entry >= qman_fq_lookup_table_size);
+   return qman_fq_lookup_table[entry];
+}
+#endif
+
 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
 {
/* Byteswap the FQD to HW format */
@@ -766,8 +825,13 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 
is)
break;
case QM_MR_VERB_FQPN:
/* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+   fq = get_fq_table_entry(
+   be32_to_cpu(msg->fq.contextB));
+#else
fq = (void *)(uintptr_t)
be32_to_cpu(msg->fq.contextB);
+#endif
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
fq->cb.fqs(p, fq, &swapped_msg);
@@ -792,7 +856,11 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 
is)
}
} else {
/* Its a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+   fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
+#else
fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
+#endif
fq->cb.ern(p, fq, &swapped_msg);
}
num++;
@@ -907,7 +975,11 @@ static inline unsigned int __poll_portal_fast(struct 
qman_portal *p,
clear_vdqcr(p, fq);
} else {
/* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+   fq = get_fq_table_entry(dq->contextB);
+#else
fq = (void *)(uintptr_t)dq->contextB;
+#endif
/* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq);
/*
@@ -1119,7 +1191,12 @@ int qman_create_fq(u32 fqid, u32 flags, struct qman_fq 
*fq)
fq->flags = flags;
fq->state = qman_fq_state_oos;
fq->cgr_groupid = 0;
-
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+   if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
+   pr_info("Find empty table entry failed\n");
+   return -ENOMEM;
+   }
+#endif
if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMA

[dpdk-dev] [RFC Patch 18/39] bus/dpaa: integrate DPAA Bus with hardware blocks

2017-05-27 Thread Shreyansh Jain
Now that the QBMAN (QMAN, BMAN) and FMAN drivers are available, this patch
integrates them with the DPAA bus driver, which uses them to scan for
devices and to call the probe callbacks registered by the PMDs.

Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/dpaa_bus.c   | 259 ++
 drivers/bus/dpaa/rte_bus_dpaa_version.map |  39 +
 drivers/bus/dpaa/rte_dpaa_bus.h   |   6 +
 3 files changed, 304 insertions(+)

diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index e9eb093..f1e7d0d 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -64,9 +64,19 @@
 #include 
 #include 
 
+#include 
+#include 
+#include 
+#include 
+#include 
 
 struct rte_dpaa_bus rte_dpaa_bus;
+struct netcfg_info *dpaa_netcfg;
 
+/* define a variable to hold the portal_key, once created.*/
+pthread_key_t dpaa_portal_key;
+
+RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
 
 static inline void
 dpaa_add_to_device_list(struct rte_dpaa_device *dev)
@@ -79,11 +89,226 @@ dpaa_remove_from_device_list(struct rte_dpaa_device *dev)
 {
TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
 }
+
+static int
+dpaa_create_device_list(void)
+{
+   int dev_id;
+   struct rte_dpaa_device *dev;
+   struct fm_eth_port_cfg *cfg;
+   struct fman_if *fman_intf;
+
+   for (dev_id = 0; dev_id < dpaa_netcfg->num_ethports; dev_id++) {
+   dev = rte_zmalloc(NULL, sizeof(struct rte_dpaa_device),
+ RTE_CACHE_LINE_SIZE);
+   if (!dev)
+   return -ENOMEM;
+
+   cfg = &dpaa_netcfg->port_cfg[dev_id];
+   fman_intf = cfg->fman_if;
+
+   /* Device identifiers */
+   dev->id.vendor_id = FSL_VENDOR_ID;
+   dev->id.class_id = FSL_DEVICE_ID;
+   dev->id.fman_id = fman_intf->fman_idx + 1;
+   dev->id.mac_id = fman_intf->mac_idx;
+   dev->id.dev_id = dev_id;
+
+   /* Create device name */
+   memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+   sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
+   fman_intf->mac_idx);
+
+   dpaa_add_to_device_list(dev);
+   }
+
+   rte_dpaa_bus.device_count = dev_id;
+
+   return 0;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+   struct rte_dpaa_device *dev = NULL;
+   struct rte_dpaa_device *tdev = NULL;
+
+   TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+   TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+   rte_free(dev);
+   dev = NULL;
+   }
+}
+
+/** XXX move this function into a separate file */
+static int
+_dpaa_portal_init(void *arg)
+{
+   cpu_set_t cpuset;
+   pthread_t id;
+   uint32_t cpu = rte_lcore_id();
+   int ret;
+   struct dpaa_portal *dpaa_io_portal;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
+   cpu = rte_get_master_lcore();
+   /* if the core id is not supported */
+   else
+   if (cpu >= RTE_MAX_LCORE)
+   return -1;
+
+   /* Set CPU affinity for this thread */
+   CPU_ZERO(&cpuset);
+   CPU_SET(cpu, &cpuset);
+   id = pthread_self();
+   ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "pthread_setaffinity_np failed on "
+   "core :%d with ret: %d", cpu, ret);
+   return ret;
+   }
+
+   /* Initialise bman thread portals */
+   ret = bman_thread_init();
+   if (ret) {
+   PMD_DRV_LOG(ERR, "bman_thread_init failed on "
+   "core %d with ret: %d", cpu, ret);
+   return ret;
+   }
+
+   PMD_DRV_LOG(DEBUG, "BMAN thread initialized");
+
+   /* Initialise qman thread portals */
+   ret = qman_thread_init();
+   if (ret) {
+   PMD_DRV_LOG(ERR, "bman_thread_init failed on "
+   "core %d with ret: %d", cpu, ret);
+   bman_thread_finish();
+   return ret;
+   }
+
+   PMD_DRV_LOG(DEBUG, "QMAN thread initialized");
+
+   dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+   RTE_CACHE_LINE_SIZE);
+   if (!dpaa_io_portal) {
+   PMD_DRV_LOG(ERR, "Unable to allocate memory");
+   bman_thread_finish();
+   qman_thread_finish();
+   return -ENOMEM;
+   }
+
+   dpaa_io_portal->qman_idx = qman_get_portal_index();
+   dpaa_io_portal->bman_idx = bman_get_portal_index();
+   dpaa_io_portal->tid = syscall(SYS_gettid);
+
+   ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "pthread_setspecific failed on "
+ 

[dpdk-dev] [RFC Patch 12/39] bus/dpaa: add QMAN interface driver

2017-05-27 Thread Shreyansh Jain
The Queue Manager (QMan) is a hardware queue management block that
allows software and accelerators on the datapath to enqueue and dequeue
frames in order to communicate.

This part of QBMAN DPAA Block.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |4 +
 drivers/bus/dpaa/base/qbman/qman_driver.c |  271 ++
 drivers/bus/dpaa/base/qbman/qman_priv.h   |  314 +++
 drivers/bus/dpaa/include/fsl_qman.h   | 1283 +
 drivers/bus/dpaa/include/fsl_usd.h|   13 +
 5 files changed, 1885 insertions(+)
 create mode 100644 drivers/bus/dpaa/base/qbman/qman_driver.c
 create mode 100644 drivers/bus/dpaa/base/qbman/qman_priv.h
 create mode 100644 drivers/bus/dpaa/include/fsl_qman.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 591b65e..5dc8a4d 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -71,6 +71,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/of.c \
base/fman/netcfg_layer.c \
base/qbman/process.c \
+   base/qbman/qman_driver.c \
base/qbman/dpaa_sys.c
 
+# Link Pthread
+LDLIBS += -lpthread
+
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c 
b/drivers/bus/dpaa/base/qbman/qman_driver.c
new file mode 100644
index 000..80dde20
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -0,0 +1,271 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+#include "qman_priv.h"
+#include 
+#include 
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available).
+ */
+u16 qman_ip_rev;
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+
+/* CCSR map address to access CCSR-based registers */
+void *qman_ccsr_map;
+/* The qman clock frequency */
+u32 qman_clk;
+
+static __thread int fd = -1;
+static __thread struct qm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+   .type = dpaa_portal_qman
+};
+
+static int fsl_qman_portal_init(uint32_t index, int is_shared)
+{
+   cpu_set_t cpuset;
+   int loop, ret;
+   struct dpaa_ioctl_irq_map irq_map;
+
+   /* Verify the thread's cpu-affinity */
+   ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+&cpuset);
+   if (ret) {
+   error(0, ret, "pthread_getaffinity_np()");
+   return ret;
+   }
+   pcfg.cpu = -1;
+   for (loop = 0; loop < CPU_SETSIZE; loop++)
+   if (CPU_ISSET(loop, &cpuset)) {
+   if (pcfg.cpu != -1) {
+   pr_err("Thread is not affine to 1 cpu\n");
+   return -EINVAL;
+   }
+   pcfg.cpu = loop;
+

[dpdk-dev] [RFC Patch 14/39] bus/dpaa: add BMAN driver core

2017-05-27 Thread Shreyansh Jain
The Buffer Manager (BMan) is a hardware buffer pool management block that
allows software and accelerators on the datapath to acquire and release
buffers in order to build frames.

This patch adds the core routines.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |   1 +
 drivers/bus/dpaa/base/qbman/bman_driver.c | 311 +
 drivers/bus/dpaa/base/qbman/bman_priv.h   | 125 ++
 drivers/bus/dpaa/include/fsl_bman.h   | 375 ++
 drivers/bus/dpaa/include/fsl_usd.h|   5 +
 5 files changed, 817 insertions(+)
 create mode 100644 drivers/bus/dpaa/base/qbman/bman_driver.c
 create mode 100644 drivers/bus/dpaa/base/qbman/bman_priv.h
 create mode 100644 drivers/bus/dpaa/include/fsl_bman.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 06a6467..f11af6d 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -71,6 +71,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/of.c \
base/fman/netcfg_layer.c \
base/qbman/process.c \
+   base/qbman/bman_driver.c \
base/qbman/qman.c \
base/qbman/qman_driver.c \
base/qbman/dpaa_alloc.c \
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c 
b/drivers/bus/dpaa/base/qbman/bman_driver.c
new file mode 100644
index 000..fb3c50e
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -0,0 +1,311 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+
+#include 
+#include 
+#include "bman_priv.h"
+#include 
+
+/*
+ * Global variables of the max portal/pool number this bman version supported
+ */
+u16 bman_ip_rev;
+u16 bman_pool_max;
+void *bman_ccsr_map;
+
+/**************************/
+/* Portal driver */
+/**************************/
+
+static __thread int fd = -1;
+static __thread struct bm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+   .type = dpaa_portal_bman
+};
+
+static int fsl_bman_portal_init(uint32_t idx, int is_shared)
+{
+   cpu_set_t cpuset;
+   int loop, ret;
+   struct dpaa_ioctl_irq_map irq_map;
+
+   /* Verify the thread's cpu-affinity */
+   ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+&cpuset);
+   if (ret) {
+   error(0, ret, "pthread_getaffinity_np()");
+   return ret;
+   }
+   pcfg.cpu = -1;
+   for (loop = 0; loop < CPU_SETSIZE; loop++)
+   if (CPU_ISSET(loop, &cpuset)) {
+   if (pcfg.cpu != -1) {
+   pr_err("Thread is not affine to 1 cpu");
+   return -EINVAL;
+   }
+   pcfg.cpu = loop;
+   }
+   if (pcfg.cpu == -1) {
+   pr_err("Bug in getaffinity handling!");
+   return -EINVAL;
+   }
+  

[dpdk-dev] [RFC Patch 23/39] net/dpaa: add NXP DPAA PMD driver skeleton

2017-05-27 Thread Shreyansh Jain
A skeleton which would be called after bus device scan. It currently
fails to identify the device.

Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 MAINTAINERS   |   1 +
 drivers/net/dpaa/Makefile |  64 +
 drivers/net/dpaa/dpaa_ethdev.c| 220 ++
 drivers/net/dpaa/dpaa_ethdev.h| 141 +++
 drivers/net/dpaa/rte_pmd_dpaa_version.map |   4 +
 5 files changed, 430 insertions(+)
 create mode 100644 drivers/net/dpaa/Makefile
 create mode 100644 drivers/net/dpaa/dpaa_ethdev.c
 create mode 100644 drivers/net/dpaa/dpaa_ethdev.h
 create mode 100644 drivers/net/dpaa/rte_pmd_dpaa_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index b50bd33..2992e07 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -393,6 +393,7 @@ M: Hemant Agrawal 
 M: Shreyansh Jain 
 F: drivers/bus/dpaa/
 F: drivers/mempool/dpaa/
+F: drivers/net/dpaa/
 F: doc/guides/nics/dpaa.rst
 F: doc/guides/nics/features/dpaa.ini
 
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
new file mode 100644
index 000..8fcde26
--- /dev/null
+++ b/drivers/net/dpaa/Makefile
@@ -0,0 +1,64 @@
+#   BSD LICENSE
+#
+#   Copyright 2017 NXP.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_version.map
+
+LIBABIVER := 1
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+
+LDLIBS += -lrte_bus_dpaa
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
new file mode 100644
index 000..53c8277
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -0,0 +1,220 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright 2017 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO 

[dpdk-dev] [RFC Patch 24/39] config: enable NXP DPAA PMD compilation

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Shreyansh Jain 
---
 config/defconfig_arm64-dpaa-linuxapp-gcc | 11 +++
 drivers/net/Makefile |  2 ++
 mk/rte.app.mk|  5 +
 3 files changed, 18 insertions(+)

diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc 
b/config/defconfig_arm64-dpaa-linuxapp-gcc
index 2a7d8d9..fcc0231 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -37,6 +37,12 @@
 CONFIG_RTE_MACHINE="dpaa"
 CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
 
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_MAX_LCORE=4
+CONFIG_RTE_MAX_NUMA_NODES=1
+CONFIG_RTE_PKTMBUF_HEADROOM=128
 
 # DPAA Bus
 CONFIG_RTE_LIBRTE_DPAA_BUS=y
@@ -48,3 +54,8 @@ CONFIG_RTE_LIBRTE_DPAA_DEBUG_TX=n
 # DPAA Mempool
 CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y
 CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa"
+
+# Compile software NXP DPAA PMD
+#
+CONFIG_RTE_LIBRTE_DPAA_PMD=y
+
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 35ed813..efd1a34 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -51,6 +51,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
 DEPDIRS-bonding = $(core-libs) librte_cmdline
 DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
 DEPDIRS-cxgbe = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
+DEPDIRS-dpaa = $(core-libs)
 DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
 DEPDIRS-dpaa2 = $(core-libs)
 DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..80e5530 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -115,6 +115,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD)  += 
-lrte_pmd_bnx2x -lz
 _LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD)   += -lrte_pmd_bnxt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND)   += -lrte_pmd_bond
 _LDLIBS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD)  += -lrte_pmd_cxgbe
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD)   += -lrte_pmd_dpaa
 _LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD)  += -lrte_pmd_dpaa2
 _LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD)  += -lrte_pmd_e1000
 _LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD)+= -lrte_pmd_ena
@@ -178,6 +179,10 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD)  += 
-lrte_bus_fslmc
 _LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD)  += -lrte_mempool_dpaa2
 endif # CONFIG_RTE_LIBRTE_DPAA2_PMD
 
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD)   += -lrte_bus_dpaa
+endif
+
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
 
 _LDLIBS-y += --no-whole-archive
-- 
2.7.4



[dpdk-dev] [RFC Patch 25/39] net/dpaa: add support for Tx and Rx queue setup

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |   1 +
 drivers/net/dpaa/Makefile |   4 +
 drivers/net/dpaa/dpaa_ethdev.c| 271 -
 drivers/net/dpaa/dpaa_ethdev.h|   7 +
 drivers/net/dpaa/dpaa_rxtx.c  | 312 ++
 drivers/net/dpaa/dpaa_rxtx.h  | 216 ++
 mk/rte.app.mk |   1 +
 7 files changed, 807 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/dpaa/dpaa_rxtx.c
 create mode 100644 drivers/net/dpaa/dpaa_rxtx.h

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 9e8befc..29ba47e 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -4,5 +4,6 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Queue start/stop = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index 8fcde26..06b63fc 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -44,11 +44,13 @@ else
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 endif
+CFLAGS +=-Wno-pointer-arith
 
 CFLAGS += -I$(RTE_SDK_DPAA)/
 CFLAGS += -I$(RTE_SDK_DPAA)/include
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
 
@@ -58,7 +60,9 @@ LIBABIVER := 1
 
 # Interfaces with DPDK
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
 
 LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 53c8277..b93f781 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -62,8 +62,16 @@
 
 #include 
 #include 
+#include 
 
 #include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -79,20 +87,104 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev 
__rte_unused)
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
PMD_INIT_FUNC_TRACE();
 
/* Change tx callback to the real one */
-   dev->tx_pkt_burst = NULL;
+   dev->tx_pkt_burst = dpaa_eth_queue_tx;
+   fman_if_enable_rx(dpaa_intf->fif);
 
return 0;
 }
 
 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 {
-   dev->tx_pkt_burst = NULL;
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_disable_rx(dpaa_intf->fif);
+   dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+}
+
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+   PMD_INIT_FUNC_TRACE();
+
+   dpaa_eth_dev_stop(dev);
 }
 
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+static
+int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+   uint16_t nb_desc __rte_unused,
+   unsigned int socket_id __rte_unused,
+   const struct rte_eth_rxconf *rx_conf __rte_unused,
+   struct rte_mempool *mp)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   PMD_DRV_LOG(INFO, "Rx queue setup for queue index: %d", queue_idx);
+
+   if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+   struct fman_if_ic_params icp;
+   uint32_t fd_offset;
+   uint32_t bp_size;
+
+   if (!mp->pool_data) {
+   PMD_DRV_LOG(ERR, "not an offloaded buffer pool");
+   return -1;
+   }
+   dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+   memset(&icp, 0, sizeof(icp));
+   /* set ICEOF to the default value, which is 0 */
+   icp.iciof = DEFAULT_ICIOF;
+   icp.iceof = DEFAULT_RX_ICEOF;
+   icp.icsz = DEFAULT_ICSZ;
+   fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
+   fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+   fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+   /* Buffer pool size should be equal to Dataroom Size*/
+   bp_size = rte_pktmbuf_data_room_size(mp);
+   fman_if_set_bp(dpaa_intf->fif, mp->size,
+  dpaa_intf->bp_info->bpid, bp_size);
+   dpaa_intf->valid = 1;
+   PMD_DRV_LOG(INFO, "if =%s - fd_offset = %d offset = %d",
+   dpaa_intf->name, fd_offset,
+   fman_if_get_fdoff

[dpdk-dev] [RFC Patch 21/39] drivers: enable compilation of DPAA Mempool driver

2017-05-27 Thread Shreyansh Jain
This patch also adds configuration necessary for compilation of DPAA
Mempool driver into the DPAA specific config file.
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS=dpaa is also configured to allow
applications to use DPAA mempool as default.

Signed-off-by: Shreyansh Jain 
---
 config/defconfig_arm64-dpaa-linuxapp-gcc | 4 
 drivers/mempool/Makefile | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc 
b/config/defconfig_arm64-dpaa-linuxapp-gcc
index f1b6d86..2a7d8d9 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -44,3 +44,7 @@ CONFIG_RTE_LIBRTE_DPAA_DEBUG_INIT=n
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_RX=n
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_TX=n
+
+# DPAA Mempool
+CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y
+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa"
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index 8fd40e1..595f717 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -33,6 +33,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
 
 core-libs := librte_eal librte_mempool librte_ring
 
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
+DEPDIRS-dpaa = $(core-libs)
 DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
 DEPDIRS-dpaa2 = $(core-libs)
 DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
-- 
2.7.4



[dpdk-dev] [RFC Patch 28/39] net/dpaa: add support for jumbo frames

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c| 16 
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 86ab0a8..aaad818 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -6,6 +6,7 @@
 [Features]
 Link status  = Y
 Queue start/stop = Y
+Jumbo frame  = Y
 MTU update   = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 64ff32d..df4f2e4 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -72,7 +72,6 @@
 #include 
 #include 
 
-
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
 
@@ -89,18 +88,27 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
fman_if_set_maxfrm(dpaa_intf->fif, mtu);
 
if (mtu > ETHER_MAX_LEN)
-   return -1
-   dev->data->dev_conf.rxmode.jumbo_frame = 0;
+   dev->data->dev_conf.rxmode.jumbo_frame = 1;
+   else
+   dev->data->dev_conf.rxmode.jumbo_frame = 0;
 
dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
return 0;
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
PMD_INIT_FUNC_TRACE();
 
+   if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+   if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+   DPAA_MAX_RX_PKT_LEN)
+   return dpaa_mtu_set(dev,
+   dev->data->dev_conf.rxmode.max_rx_pkt_len);
+   else
+   return -1;
+   }
return 0;
 }
 
-- 
2.7.4



[dpdk-dev] [RFC Patch 26/39] net/dpaa: add support for MTU update

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c| 21 +
 2 files changed, 22 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 29ba47e..0b992fd 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -5,5 +5,6 @@
 ;
 [Features]
 Queue start/stop = Y
+MTU update   = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index b93f781..122e45b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -77,6 +77,26 @@
 static int is_global_init;
 
 static int
+dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (mtu < ETHER_MIN_MTU)
+   return -EINVAL;
+
+   fman_if_set_maxfrm(dpaa_intf->fif, mtu);
+
+   if (mtu > ETHER_MAX_LEN)
+   return -1
+   dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+   dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
+   return 0;
+}
+
+static int
 dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 {
PMD_INIT_FUNC_TRACE();
@@ -199,6 +219,7 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_setup   = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
+   .mtu_set  = dpaa_mtu_set,
 };
 
 /* Initialise an Rx FQ */
-- 
2.7.4



[dpdk-dev] [RFC Patch 27/39] net/dpaa: add support for link status update

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c| 42 +++
 2 files changed, 43 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 0b992fd..86ab0a8 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -4,6 +4,7 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Link status  = Y
 Queue start/stop = Y
 MTU update   = Y
 ARMv8= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 122e45b..64ff32d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -135,6 +135,28 @@ static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
dpaa_eth_dev_stop(dev);
 }
 
+static int dpaa_eth_link_update(struct rte_eth_dev *dev,
+   int wait_to_complete __rte_unused)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+   struct rte_eth_link *link = &dev->data->dev_link;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (dpaa_intf->fif->mac_type == fman_mac_1g)
+   link->link_speed = 1000;
+   else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+   link->link_speed = 10000;
+   else
+   PMD_DRV_LOG(ERR, "invalid link_speed: %s, %d",
+   dpaa_intf->name, dpaa_intf->fif->mac_type);
+
+   link->link_status = dpaa_intf->valid;
+   link->link_duplex = ETH_LINK_FULL_DUPLEX;
+   link->link_autoneg = ETH_LINK_AUTONEG;
+   return 0;
+}
+
 static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc __rte_unused,
@@ -209,6 +231,22 @@ static void dpaa_eth_tx_queue_release(void *txq 
__rte_unused)
PMD_INIT_FUNC_TRACE();
 }
 
+static int dpaa_link_down(struct rte_eth_dev *dev)
+{
+   PMD_INIT_FUNC_TRACE();
+
+   dpaa_eth_dev_stop(dev);
+   return 0;
+}
+
+static int dpaa_link_up(struct rte_eth_dev *dev)
+{
+   PMD_INIT_FUNC_TRACE();
+
+   dpaa_eth_dev_start(dev);
+   return 0;
+}
+
 static struct eth_dev_ops dpaa_devops = {
.dev_configure= dpaa_eth_dev_configure,
.dev_start= dpaa_eth_dev_start,
@@ -219,7 +257,11 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_setup   = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
+
+   .link_update  = dpaa_eth_link_update,
.mtu_set  = dpaa_mtu_set,
+   .dev_set_link_down= dpaa_link_down,
+   .dev_set_link_up  = dpaa_link_up,
 };
 
 /* Initialise an Rx FQ */
-- 
2.7.4



[dpdk-dev] [RFC Patch 30/39] net/dpaa: add support for multicast toggle

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  2 ++
 drivers/net/dpaa/dpaa_ethdev.c| 21 +
 2 files changed, 23 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 23945a6..26443d9 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -9,5 +9,7 @@ Queue start/stop = Y
 Jumbo frame  = Y
 MTU update   = Y
 Promiscuous mode = Y
+Allmulticast mode= Y
+Unicast MAC filter   = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9a4a1d0..1d4af49 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -184,6 +184,25 @@ static void dpaa_eth_promiscuous_disable(struct 
rte_eth_dev *dev)
fman_if_promiscuous_disable(dpaa_intf->fif);
 }
 
+static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_set_mcast_filter_table(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_reset_mcast_filter_table(dpaa_intf->fif);
+
+}
+
 static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc __rte_unused,
@@ -288,6 +307,8 @@ static struct eth_dev_ops dpaa_devops = {
.link_update  = dpaa_eth_link_update,
.promiscuous_enable   = dpaa_eth_promiscuous_enable,
.promiscuous_disable  = dpaa_eth_promiscuous_disable,
+   .allmulticast_enable  = dpaa_eth_multicast_enable,
+   .allmulticast_disable = dpaa_eth_multicast_disable,
.mtu_set  = dpaa_mtu_set,
.dev_set_link_down= dpaa_link_down,
.dev_set_link_up  = dpaa_link_up,
-- 
2.7.4



[dpdk-dev] [RFC Patch 09/39] bus/dpaa: enable DPAA IOCTL portal driver

2017-05-27 Thread Shreyansh Jain
Userspace applications interact with DPAA blocks using this IOCTL driver.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |   4 +-
 drivers/bus/dpaa/base/qbman/process.c | 331 ++
 drivers/bus/dpaa/include/fsl_usd.h|  88 +
 drivers/bus/dpaa/include/process.h| 107 +++
 4 files changed, 529 insertions(+), 1 deletion(-)
 create mode 100644 drivers/bus/dpaa/base/qbman/process.c
 create mode 100644 drivers/bus/dpaa/include/fsl_usd.h
 create mode 100644 drivers/bus/dpaa/include/process.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index 66b6f2f..07c9880 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -51,6 +51,7 @@ CFLAGS += -D _GNU_SOURCE
 
 CFLAGS += -I$(RTE_BUS_DPAA)/
 CFLAGS += -I$(RTE_BUS_DPAA)/include
+CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
 
@@ -68,6 +69,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/fman.c \
base/fman/fman_hw.c \
base/fman/of.c \
-   base/fman/netcfg_layer.c
+   base/fman/netcfg_layer.c \
+   base/qbman/process.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/qbman/process.c 
b/drivers/bus/dpaa/base/qbman/process.c
new file mode 100644
index 000..b8ec539
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/process.c
@@ -0,0 +1,331 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include 
+#include 
+#include 
+#include 
+
+#include "process.h"
+
+#include 
+
+/* As higher-level drivers will be built on top of this (dma_mem, qbman, ...),
+ * it's preferable that the process driver itself not provide any exported API.
+ * As such, combined with the fact that none of these operations are
+ * performance critical, it is justified to use lazy initialisation, so that's
+ * what the lock is for.
+ */
+static int fd = -1;
+static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int check_fd(void)
+{
+   int ret;
+
+   if (fd >= 0)
+   return 0;
+   ret = pthread_mutex_lock(&fd_init_lock);
+   assert(!ret);
+   /* check again with the lock held */
+   if (fd < 0)
+   fd = open(PROCESS_PATH, O_RDWR);
+   ret = pthread_mutex_unlock(&fd_init_lock);
+   assert(!ret);
+   return (fd >= 0) ? 0 : -ENODEV;
+}
+
+#define DPAA_IOCTL_MAGIC 'u'
+struct dpaa_ioctl_id_alloc {
+   uint32_t base; /* Return value, the start of the allocated range */
+   enum dpaa_id_type id_type; /* what kind of resource(s) to allocate */
+   uint32_t num; /* how many IDs to allocate (and return value) */
+   uint32_t align; /* must be a power of 2, 0 is treated like 1 */
+   int partial; /* whether to allow less than 'num' */
+};
+
+struct dpaa_ioctl_id_release {
+   /* Input; */
+   enum dpaa_id

[dpdk-dev] [RFC Patch 29/39] net/dpaa: add support for promiscuous toggle

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c| 21 +
 2 files changed, 22 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index aaad818..23945a6 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -8,5 +8,6 @@ Link status  = Y
 Queue start/stop = Y
 Jumbo frame  = Y
 MTU update   = Y
+Promiscuous mode = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index df4f2e4..9a4a1d0 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -165,6 +165,25 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
return 0;
 }
 
+
+static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_promiscuous_enable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_promiscuous_disable(dpaa_intf->fif);
+}
+
 static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc __rte_unused,
@@ -267,6 +286,8 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_release = dpaa_eth_tx_queue_release,
 
.link_update  = dpaa_eth_link_update,
+   .promiscuous_enable   = dpaa_eth_promiscuous_enable,
+   .promiscuous_disable  = dpaa_eth_promiscuous_disable,
.mtu_set  = dpaa_mtu_set,
.dev_set_link_down= dpaa_link_down,
.dev_set_link_up  = dpaa_link_up,
-- 
2.7.4



[dpdk-dev] [RFC Patch 06/39] bus/dpaa: add OF parser for device scanning

2017-05-27 Thread Shreyansh Jain
This layer is used by Bus driver's scan function. Devices are parsed
using OF parser and added to DPAA device list.

Signed-off-by: Geoff Thorpe 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile   |   7 +
 drivers/bus/dpaa/base/fman/of.c | 576 
 drivers/bus/dpaa/include/of.h   | 191 +
 3 files changed, 774 insertions(+)
 create mode 100644 drivers/bus/dpaa/base/fman/of.c
 create mode 100644 drivers/bus/dpaa/include/of.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index ae48bf2..9069a2b 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -45,7 +45,12 @@ CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 endif
 
+CFLAGS +=-Wno-pointer-arith
+CFLAGS +=-Wno-cast-qual
+CFLAGS += -D _GNU_SOURCE
+
 CFLAGS += -I$(RTE_BUS_DPAA)/
+CFLAGS += -I$(RTE_BUS_DPAA)/include
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
 
@@ -59,5 +64,7 @@ LIBABIVER := 1
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
dpaa_bus.c
 
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+   base/fman/of.c \
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/fman/of.c b/drivers/bus/dpaa/base/fman/of.c
new file mode 100644
index 000..6cc3987
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/of.c
@@ -0,0 +1,576 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+
+static int alive;
+static struct dt_dir root_dir;
+static const char *base_dir;
+static COMPAT_LIST_HEAD(linear);
+
+static int
+of_open_dir(const char *relative_path, struct dirent ***d)
+{
+   int ret;
+   char full_path[PATH_MAX];
+
+   snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+   ret = scandir(full_path, d, 0, versionsort);
+   if (ret < 0)
+   DPAA_BUS_LOG(ERR, "Failed to open directory %s",
+full_path);
+   return ret;
+}
+
+static void
+of_close_dir(struct dirent **d, int num)
+{
+   while (num--)
+   free(d[num]);
+   free(d);
+}
+
+static int
+of_open_file(const char *relative_path)
+{
+   int ret;
+   char full_path[PATH_MAX];
+
+   snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+   ret = open(full_path, O_RDONLY);
+   if (ret < 0)
+   DPAA_BUS_LOG(ERR, "Failed to open directory %s",
+full_path);
+   return ret;
+}
+
+static void
+process_file(struct dirent *dent, struct dt_dir *parent)
+{
+   int fd;
+   struct dt_file *f = malloc(sizeof(*f));
+
+   if (!f) {
+   DPAA_BUS_LOG(DEBUG, "Unable to allocate memory for file node");
+   return;
+   }
+   f->node.is_file = 1;
+   snprintf(f->node.node.name, NAME_MAX, "%s", dent->d_name);
+   snprintf(f->node.node.full_name, PATH_MAX, "%s/%s",
+parent->node.node.full_name, dent->d_name);
+   f->parent = parent;
+ 

[dpdk-dev] [RFC Patch 31/39] net/dpaa: add support for basic stats

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c| 20 
 2 files changed, 21 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 26443d9..7165e47 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -11,5 +11,6 @@ MTU update   = Y
 Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
+Basic stats  = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 1d4af49..d076da5 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -165,6 +165,24 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
return 0;
 }
 
+static void dpaa_eth_stats_get(struct rte_eth_dev *dev,
+  struct rte_eth_stats *stats)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_stats_get(dpaa_intf->fif, stats);
+}
+
+static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   fman_if_stats_reset(dpaa_intf->fif);
+}
 
 static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
 {
@@ -305,6 +323,8 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_release = dpaa_eth_tx_queue_release,
 
.link_update  = dpaa_eth_link_update,
+   .stats_get= dpaa_eth_stats_get,
+   .stats_reset  = dpaa_eth_stats_reset,
.promiscuous_enable   = dpaa_eth_promiscuous_enable,
.promiscuous_disable  = dpaa_eth_promiscuous_disable,
.allmulticast_enable  = dpaa_eth_multicast_enable,
-- 
2.7.4



[dpdk-dev] [RFC Patch 34/39] net/dpaa: add support for hashed RSS

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c|  1 +
 drivers/net/dpaa/dpaa_ethdev.h| 10 ++
 drivers/net/dpaa/dpaa_rxtx.c  |  2 +-
 4 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index cb220e4..916e5dc 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -11,6 +11,7 @@ MTU update   = Y
 Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
+RSS hash = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
 Basic stats  = Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index c3f9eb5..b9669ef 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -158,6 +158,7 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
+   dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
dev_info->rx_offload_capa =
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM  |
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 076faf5..45fd14b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -105,6 +105,16 @@
 #define DPAA_DEBUG_FQ_RX_ERROR   0
 #define DPAA_DEBUG_FQ_TX_ERROR   1
 
+#define DPAA_RSS_OFFLOAD_ALL ( \
+   ETH_RSS_FRAG_IPV4 | \
+   ETH_RSS_NONFRAG_IPV4_TCP | \
+   ETH_RSS_NONFRAG_IPV4_UDP | \
+   ETH_RSS_NONFRAG_IPV4_SCTP | \
+   ETH_RSS_FRAG_IPV6 | \
+   ETH_RSS_NONFRAG_IPV6_TCP | \
+   ETH_RSS_NONFRAG_IPV6_UDP | \
+   ETH_RSS_NONFRAG_IPV6_SCTP)
+
 #define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM |\
PKT_TX_TCP_CKSUM |   \
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index b51d66c..b1d39b6 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -330,7 +330,7 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t 
nb_bufs)
if (mbuf->ol_flags & 
DPAA_TX_CKSUM_OFFLOAD_MASK) {
if (mbuf->data_off < 
DEFAULT_TX_ICEOF +
sizeof(struct 
dpaa_eth_parse_results_t)) {
-   PMD_DRV_LOG(DEBUG, 
"Checksum offload Err: "
+   PMD_DRV_LOG(ERR, 
"Checksum offload Err: "
"Not enough 
Headroom "
"space for 
correct Checksum offload."
"So Calculating 
checksum in Software.");
-- 
2.7.4



[dpdk-dev] [RFC Patch 32/39] net/dpaa: add support for device info

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/net/dpaa/dpaa_ethdev.c | 18 ++
 1 file changed, 18 insertions(+)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index d076da5..5d406be 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -143,6 +143,23 @@ static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
dpaa_eth_dev_stop(dev);
 }
 
+static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
+   dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
+   dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
+   dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
+   dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
+   dev_info->max_hash_mac_addrs = 0;
+   dev_info->max_vfs = 0;
+   dev_info->max_vmdq_pools = ETH_16_POOLS;
+}
+
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
int wait_to_complete __rte_unused)
 {
@@ -316,6 +333,7 @@ static struct eth_dev_ops dpaa_devops = {
.dev_start= dpaa_eth_dev_start,
.dev_stop = dpaa_eth_dev_stop,
.dev_close= dpaa_eth_dev_close,
+   .dev_infos_get= dpaa_eth_dev_info,
 
.rx_queue_setup   = dpaa_eth_rx_queue_setup,
.tx_queue_setup   = dpaa_eth_tx_queue_setup,
-- 
2.7.4



[dpdk-dev] [RFC Patch 33/39] net/dpaa: support for checksum offload

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  2 +
 drivers/net/dpaa/dpaa_ethdev.c|  8 
 drivers/net/dpaa/dpaa_rxtx.c  | 88 +++
 3 files changed, 98 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 7165e47..cb220e4 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -11,6 +11,8 @@ MTU update   = Y
 Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Basic stats  = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 5d406be..c3f9eb5 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -158,6 +158,14 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
+   dev_info->rx_offload_capa =
+   (DEV_RX_OFFLOAD_IPV4_CKSUM |
+   DEV_RX_OFFLOAD_UDP_CKSUM  |
+   DEV_RX_OFFLOAD_TCP_CKSUM);
+   dev_info->tx_offload_capa =
+   (DEV_TX_OFFLOAD_IPV4_CKSUM  |
+   DEV_TX_OFFLOAD_UDP_CKSUM   |
+   DEV_TX_OFFLOAD_TCP_CKSUM);
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 5978090..b51d66c 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -85,6 +85,82 @@
(_fd)->bpid = _bpid; \
} while (0)
 
+static inline void dpaa_checksum(struct rte_mbuf *mbuf)
+{
+   struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+   char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
+   struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+   struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+   PMD_TX_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
+
+   if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+   RTE_PTYPE_L3_IPV4_EXT)) {
+   ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+   ipv4_hdr->hdr_checksum = 0;
+   ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+   } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+  RTE_PTYPE_L3_IPV6) ||
+  ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+  RTE_PTYPE_L3_IPV6_EXT))
+   ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+   if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+   struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+   tcp_hdr->cksum = 0;
+   if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+   tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+  tcp_hdr);
+   else /* assume ethertype == ETHER_TYPE_IPv6 */
+   tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+  tcp_hdr);
+   } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
+  RTE_PTYPE_L4_UDP) {
+   struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+mbuf->l3_len);
+   udp_hdr->dgram_cksum = 0;
+   if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+   udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+udp_hdr);
+   else /* assume ethertype == ETHER_TYPE_IPv6 */
+   udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+udp_hdr);
+   }
+}
+
+static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
+struct qm_fd *fd, char *prs_buf)
+{
+   struct dpaa_eth_parse_results_t *prs;
+
+   PMD_TX_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
+
+   prs = GET_TX_PRS(prs_buf);
+   prs->l3r = 0;
+   prs->l4r = 0;
+   if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+  ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+  RTE_PTYPE_L3_IPV4_EXT))
+   prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+   else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+  RTE_PTYPE_L3_IPV6) ||
+((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+   RTE_PTYPE_L3_IPV6_EXT))
+   prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
+
+   if ((mbuf->pac

[dpdk-dev] [RFC Patch 35/39] net/dpaa: add support for MAC address update

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/net/dpaa/dpaa_ethdev.c | 55 ++
 1 file changed, 55 insertions(+)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index b9669ef..856b229 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -337,6 +337,57 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
+struct ether_addr *addr,
+uint32_t index,
+__rte_unused uint32_t pool)
+{
+   int ret;
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   ret = fm_mac_add_exact_match_mac_addr(dpaa_intf->fif,
+ addr->addr_bytes, index);
+
+   if (ret)
+   RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
+   " err = %d", ret);
+   return 0;
+}
+
+static void
+dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+   int ret;
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   ret = fm_mac_rem_exact_match_mac_addr(dpaa_intf->fif, index);
+
+   if (ret)
+   RTE_LOG(ERR, PMD, "error: Removing the MAC ADDR failed:"
+   " err = %d", ret);
+}
+
+static void
+dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
+  struct ether_addr *addr)
+{
+   int ret;
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+   PMD_INIT_FUNC_TRACE();
+
+   ret = fm_mac_add_exact_match_mac_addr(dpaa_intf->fif,
+ addr->addr_bytes, 0);
+   if (ret)
+   RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+}
+
 static struct eth_dev_ops dpaa_devops = {
.dev_configure= dpaa_eth_dev_configure,
.dev_start= dpaa_eth_dev_start,
@@ -359,6 +410,10 @@ static struct eth_dev_ops dpaa_devops = {
.mtu_set  = dpaa_mtu_set,
.dev_set_link_down= dpaa_link_down,
.dev_set_link_up  = dpaa_link_up,
+   .mac_addr_add = dpaa_dev_add_mac_addr,
+   .mac_addr_remove  = dpaa_dev_remove_mac_addr,
+   .mac_addr_set = dpaa_dev_set_mac_addr,
+
 };
 
 /* Initialise an Rx FQ */
-- 
2.7.4



[dpdk-dev] [RFC Patch 36/39] net/dpaa: add support for packet type parsing

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |  1 +
 drivers/net/dpaa/dpaa_ethdev.c| 22 
 drivers/net/dpaa/dpaa_rxtx.c  | 75 +++
 3 files changed, 98 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 916e5dc..4c81294 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -14,6 +14,7 @@ Unicast MAC filter   = Y
 RSS hash = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
+Packet type parsing  = Y
 Basic stats  = Y
 ARMv8= Y
 Usage doc= Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 856b229..8204ab7 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -112,6 +112,27 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static const uint32_t *
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+   static const uint32_t ptypes[] = {
+   /*todo -= add more types */
+   RTE_PTYPE_L2_ETHER,
+   RTE_PTYPE_L3_IPV4,
+   RTE_PTYPE_L3_IPV4_EXT,
+   RTE_PTYPE_L3_IPV6,
+   RTE_PTYPE_L3_IPV6_EXT,
+   RTE_PTYPE_L4_TCP,
+   RTE_PTYPE_L4_UDP,
+   RTE_PTYPE_L4_SCTP
+   };
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+   return ptypes;
+   return NULL;
+}
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
@@ -394,6 +415,7 @@ static struct eth_dev_ops dpaa_devops = {
.dev_stop = dpaa_eth_dev_stop,
.dev_close= dpaa_eth_dev_close,
.dev_infos_get= dpaa_eth_dev_info,
+   .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
 
.rx_queue_setup   = dpaa_eth_rx_queue_setup,
.tx_queue_setup   = dpaa_eth_tx_queue_setup,
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index b1d39b6..7bb1077 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -85,6 +85,80 @@
(_fd)->bpid = _bpid; \
} while (0)
 
+
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
+   uint64_t fd_virt_addr)
+{
+   struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
+   uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+
+   PMD_RX_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
+
+   switch (prs) {
+   case DPAA_PKT_TYPE_NONE:
+   m->packet_type = 0;
+   break;
+   case DPAA_PKT_TYPE_ETHER:
+   m->packet_type = RTE_PTYPE_L2_ETHER;
+   break;
+   case DPAA_PKT_TYPE_IPV4:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV4;
+   break;
+   case DPAA_PKT_TYPE_IPV6:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV6;
+   break;
+   case DPAA_PKT_TYPE_IPV4_EXT:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV4_EXT;
+   break;
+   case DPAA_PKT_TYPE_IPV6_EXT:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV6_EXT;
+   break;
+   case DPAA_PKT_TYPE_IPV4_TCP:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+   break;
+   case DPAA_PKT_TYPE_IPV6_TCP:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+   break;
+   case DPAA_PKT_TYPE_IPV4_UDP:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+   break;
+   case DPAA_PKT_TYPE_IPV6_UDP:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+   break;
+   case DPAA_PKT_TYPE_IPV4_SCTP:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+   break;
+   case DPAA_PKT_TYPE_IPV6_SCTP:
+   m->packet_type = RTE_PTYPE_L2_ETHER |
+   RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+   break;
+   /* More switch cases can be added */
+   default:
+   break;
+   }
+
+   m->tx_offload = annot->parse.ip_off[0];
+   m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
+   << DPAA_PKT_L3_LEN_SHIFT;
+
+   /* Set the hash values */
+   m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
+   m->ol_flags = PKT_RX_RSS_HASH;
+
+  

[dpdk-dev] [RFC Patch 38/39] net/dpaa: add support for flow control

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |   1 +
 drivers/net/dpaa/dpaa_ethdev.c| 112 ++
 2 files changed, 113 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index c2f787d..2d91303 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -13,6 +13,7 @@ Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
 RSS hash = Y
+Flow control = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
 Packet type parsing  = Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 8204ab7..30ebc2b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -359,6 +359,85 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
 }
 
 static int
+dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
+  struct rte_eth_fc_conf *fc_conf)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+   struct rte_eth_fc_conf *net_fc;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (!(dpaa_intf->fc_conf)) {
+   dpaa_intf->fc_conf = rte_zmalloc(NULL,
+   sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+   if (!dpaa_intf->fc_conf) {
+   PMD_DRV_LOG(ERR, "unable to save flow control info");
+   return -ENOMEM;
+   }
+   }
+   net_fc = dpaa_intf->fc_conf;
+
+   if (fc_conf->high_water < fc_conf->low_water) {
+   PMD_DRV_LOG(ERR, "Incorrect Flow Control Configuration");
+   return -EINVAL;
+   }
+
+   if (fc_conf->mode == RTE_FC_NONE) {
+   return 0;
+   } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
+fc_conf->mode == RTE_FC_FULL) {
+   fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
+fc_conf->low_water,
+   dpaa_intf->bp_info->bpid);
+   if (fc_conf->pause_time)
+   fman_if_set_fc_quanta(dpaa_intf->fif,
+ fc_conf->pause_time);
+   }
+
+   /* Save the information in dpaa device */
+   net_fc->pause_time = fc_conf->pause_time;
+   net_fc->high_water = fc_conf->high_water;
+   net_fc->low_water = fc_conf->low_water;
+   net_fc->send_xon = fc_conf->send_xon;
+   net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
+   net_fc->mode = fc_conf->mode;
+   net_fc->autoneg = fc_conf->autoneg;
+
+   return 0;
+}
+
+static int
+dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
+  struct rte_eth_fc_conf *fc_conf)
+{
+   struct dpaa_if *dpaa_intf = dev->data->dev_private;
+   struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
+   int ret;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (net_fc) {
+   fc_conf->pause_time = net_fc->pause_time;
+   fc_conf->high_water = net_fc->high_water;
+   fc_conf->low_water = net_fc->low_water;
+   fc_conf->send_xon = net_fc->send_xon;
+   fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
+   fc_conf->mode = net_fc->mode;
+   fc_conf->autoneg = net_fc->autoneg;
+   return 0;
+   }
+   ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+   if (ret) {
+   fc_conf->mode = RTE_FC_TX_PAUSE;
+   fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+   } else {
+   fc_conf->mode = RTE_FC_NONE;
+   }
+
+   return 0;
+}
+
+static int
 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
 struct ether_addr *addr,
 uint32_t index,
@@ -422,6 +501,9 @@ static struct eth_dev_ops dpaa_devops = {
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
 
+   .flow_ctrl_get= dpaa_flow_ctrl_get,
+   .flow_ctrl_set= dpaa_flow_ctrl_set,
+
.link_update  = dpaa_eth_link_update,
.stats_get= dpaa_eth_stats_get,
.stats_reset  = dpaa_eth_stats_reset,
@@ -438,6 +520,33 @@ static struct eth_dev_ops dpaa_devops = {
 
 };
 
+static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
+{
+   struct rte_eth_fc_conf *fc_conf;
+   int ret;
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (!(dpaa_intf->fc_conf)) {
+   dpaa_intf->fc_conf = rte_zmalloc(NULL,
+   sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+   if (!dpaa_intf->fc_conf) {
+   PMD_DRV_LOG(ERR, "unable to save flow control info");
+   return -ENOMEM;
+   }
+   }
+   fc_conf = dpaa_intf->fc_conf;
+   ret = fman_if_

[dpdk-dev] [RFC Patch 17/39] bus/dpaa: add fman flow control threshold setting

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Geoff Thorpe 
Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 28 
 drivers/bus/dpaa/include/fsl_fman.h  |  7 +++
 2 files changed, 35 insertions(+)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c 
b/drivers/bus/dpaa/base/fman/fman_hw.c
index 77908ec..7618fc1 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -37,6 +37,7 @@
  */
 #include 
 #include 
+#include 
 
 /* Instantiate the global variable that the inline CRC64 implementation (in
  * ) depends on.
@@ -437,6 +438,33 @@ fman_if_set_bp(struct fman_if *fm_if, unsigned num 
__always_unused,
 }
 
 int
+fman_if_get_fc_threshold(struct fman_if *fm_if)
+{
+   struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+   unsigned int *fmbm_mpd;
+
+   assert(fman_ccsr_map_fd != -1);
+
+   fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+   return in_be32(fmbm_mpd);
+}
+
+int
+fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
+u32 low_water, u32 bpid)
+{
+   struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+   unsigned int *fmbm_mpd;
+
+   assert(fman_ccsr_map_fd != -1);
+
+   fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+   out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
+   return bm_pool_set_hw_threshold(bpid, low_water, high_water);
+
+}
+
+int
 fman_if_get_fc_quanta(struct fman_if *fm_if)
 {
struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
diff --git a/drivers/bus/dpaa/include/fsl_fman.h 
b/drivers/bus/dpaa/include/fsl_fman.h
index 0aff22c..b94bc56 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -120,6 +120,13 @@ void fman_if_loopback_disable(struct fman_if *);
 void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
size_t bufsize);
 
+/* Get Flow Control threshold parameters on specific interface */
+int fman_if_get_fc_threshold(struct fman_if *fm_if);
+
+/* Enable and Set Flow Control threshold parameters on specific interface */
+int fman_if_set_fc_threshold(struct fman_if *fm_if,
+   u32 high_water, u32 low_water, u32 bpid);
+
 /* Get Flow Control pause quanta on specific interface */
 int fman_if_get_fc_quanta(struct fman_if *fm_if);
 
-- 
2.7.4



[dpdk-dev] [RFC Patch 37/39] net/dpaa: add support for Scattered Rx

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 doc/guides/nics/features/dpaa.ini |   1 +
 drivers/net/dpaa/dpaa_rxtx.c  | 157 ++
 2 files changed, 158 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini 
b/doc/guides/nics/features/dpaa.ini
index 4c81294..c2f787d 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -8,6 +8,7 @@ Link status  = Y
 Queue start/stop = Y
 Jumbo frame  = Y
 MTU update   = Y
+Scattered Rx = Y
 Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 7bb1077..9dc059e 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -235,6 +235,64 @@ static inline void dpaa_checksum_offload(struct rte_mbuf 
*mbuf,
fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
 }
 
+struct rte_mbuf *dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
+{
+   struct pool_info_entry *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+   struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
+   struct qm_sg_entry *sgt, *sg_temp;
+   void *vaddr, *sg_vaddr;
+   int i = 0;
+   uint8_t fd_offset = fd->offset;
+
+   PMD_RX_LOG(DEBUG, "Received an SG frame");
+
+   vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
+   if (!vaddr) {
+   PMD_DRV_LOG(ERR, "unable to convert physical address");
+   return NULL;
+   }
+   sgt = vaddr + fd_offset;
+   sg_temp = &sgt[i++];
+   hw_sg_to_cpu(sg_temp);
+   temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
+   sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+
+   first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+   bp_info->meta_data_size);
+   first_seg->data_off = sg_temp->offset;
+   first_seg->data_len = sg_temp->length;
+   first_seg->pkt_len = sg_temp->length;
+   rte_mbuf_refcnt_set(first_seg, 1);
+
+   first_seg->port = ifid;
+   first_seg->nb_segs = 1;
+   first_seg->ol_flags = 0;
+   prev_seg = first_seg;
+   while (i < DPAA_SGT_MAX_ENTRIES) {
+   sg_temp = &sgt[i++];
+   hw_sg_to_cpu(sg_temp);
+   sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+   cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+   cur_seg->data_off = sg_temp->offset;
+   cur_seg->data_len = sg_temp->length;
+   first_seg->pkt_len += sg_temp->length;
+   first_seg->nb_segs += 1;
+   rte_mbuf_refcnt_set(cur_seg, 1);
+   prev_seg->next = cur_seg;
+   if (sg_temp->final) {
+   cur_seg->next = NULL;
+   break;
+   }
+   prev_seg = cur_seg;
+   }
+
+   dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+   rte_pktmbuf_free_seg(temp);
+
+   return first_seg;
+}
+
 static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
uint32_t ifid)
 {
@@ -247,6 +305,9 @@ static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct 
qm_fd *fd,
 
PMD_RX_LOG(DEBUG, " FD--->MBUF");
 
+   if (unlikely(format == qm_fd_sg))
+   return dpaa_eth_sg_to_mbuf(fd, ifid);
+
/* Ignoring case when format != qm_fd_contig */
ptr = rte_dpaa_mem_ptov(fd->addr);
/* Ignoring case when ptr would be NULL. That is only possible incase
@@ -348,6 +409,94 @@ static struct rte_mbuf *dpaa_get_dmable_mbuf(struct 
rte_mbuf *mbuf,
return dpaa_mbuf;
 }
 
+int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+   struct qm_fd *fd,
+   uint32_t bpid)
+{
+   struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
+   struct pool_info_entry *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
+   struct rte_mbuf *temp, *mi;
+   struct qm_sg_entry *sg_temp, *sgt;
+   int i = 0;
+
+   PMD_TX_LOG(DEBUG, "Creating SG FD to transmit");
+
+   temp = rte_pktmbuf_alloc(bp_info->mp);
+   if (!temp) {
+   PMD_DRV_LOG(ERR, "Failure in allocation mbuf");
+   return -1;
+   }
+   if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
+   + temp->data_off)) {
+   PMD_DRV_LOG(ERR, "Insufficient space in mbuf for SG entries");
+   return -1;
+   }
+
+   fd->cmd = 0;
+   fd->opaque_addr = 0;
+
+   if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+   if (temp->data_off < DEFAULT_TX_ICEOF
+   + sizeof(struct dpaa_eth_parse_results_t))
+   temp->data_off = DEFAULT_TX_ICEOF
+   + sizeof(

Re: [dpdk-dev] [RFC Patch 01/39] eal: add Bus log type

2017-05-27 Thread Shreyansh Jain

On Saturday 27 May 2017 03:54 PM, Shreyansh Jain wrote:

Signed-off-by: Shreyansh Jain 
---
  lib/librte_eal/common/include/rte_log.h | 1 +
  1 file changed, 1 insertion(+)

diff --git a/lib/librte_eal/common/include/rte_log.h 
b/lib/librte_eal/common/include/rte_log.h
index 3419138..4d001f5 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -87,6 +87,7 @@ extern struct rte_logs rte_logs;
  #define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */
  #define RTE_LOGTYPE_EFD   18 /**< Log related to EFD. */
  #define RTE_LOGTYPE_EVENTDEV  19 /**< Log related to eventdev. */
+#define RTE_LOGTYPE_BUS   20 /**< Log related to Bus drivers. */
  
  /* these log types can be used in an application */

  #define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */



This patch has been sent only for quick reference. This has already been
submitted here [1].

[1] http://dpdk.org/dev/patchwork/patch/24478/


[dpdk-dev] [RFC Patch 19/39] doc: add NXP DPAA PMD documentation

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 MAINTAINERS   |   2 +
 doc/guides/nics/dpaa.rst  | 360 ++
 doc/guides/nics/features/dpaa.ini |   8 +
 doc/guides/nics/index.rst |   1 +
 4 files changed, 371 insertions(+)
 create mode 100644 doc/guides/nics/dpaa.rst
 create mode 100644 doc/guides/nics/features/dpaa.ini

diff --git a/MAINTAINERS b/MAINTAINERS
index e39044e..e2b0415 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -392,6 +392,8 @@ NXP dpaa
 M: Hemant Agrawal 
 M: Shreyansh Jain 
 F: drivers/bus/dpaa/
+F: doc/guides/nics/dpaa.rst
+F: doc/guides/nics/features/dpaa.ini
 
 NXP dpaa2
 M: Hemant Agrawal 
diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
new file mode 100644
index 000..3548922
--- /dev/null
+++ b/doc/guides/nics/dpaa.rst
@@ -0,0 +1,360 @@
+..  BSD LICENSE
+Copyright 2017 NXP.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+* Neither the name of NXP nor the names of its
+contributors may be used to endorse or promote products derived
+from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+DPAA Poll Mode Driver
+=
+
+The DPAA NIC PMD (**librte_pmd_dpaa**) provides poll mode driver
+support for the inbuilt NIC found in the **NXP DPAA** SoC family.
+
+More information can be found at `NXP Official Website
+`_.
+
+NXP DPAA (Data Path Acceleration Architecture - Gen 1)
+--
+
+This section provides an overview of the NXP DPAA architecture
+and how it is integrated into the DPDK.
+
+Contents summary
+
+- DPAA overview
+- DPAA driver architecture overview
+
+.. _dpaa_overview:
+
+DPAA Overview
+~
+
+Reference: `FSL DPAA Architecture 
`_.
+
+The QorIQ Data Path Acceleration Architecture (DPAA) is a set of hardware
+components on specific QorIQ series multicore processors. This architecture
+provides the infrastructure to support simplified sharing of networking
+interfaces and accelerators by multiple CPU cores, and the accelerators
+themselves.
+
+DPAA includes:
+
+- Cores
+- Network and packet I/O
+- Hardware offload accelerators
+- Infrastructure required to facilitate flow of packets between the components 
above
+
+Infrastructure components are:
+
+- The Queue Manager (QMan) is a hardware accelerator that manages frame queues.
+  It allows CPUs and other accelerators connected to the SoC datapath to
+  enqueue and dequeue ethernet frames, thus providing the infrastructure for
+  data exchange among CPUs and datapath accelerators.
+- The Buffer Manager (BMan) is a hardware buffer pool management block that
+  allows software and accelerators on the datapath to acquire and release
+  buffers in order to build frames.
+
+Hardware accelerators are:
+
+- SEC - Cryptographic accelerator
+- PME - Pattern matching engine
+
+The Network and packet I/O component:
+
+- The Frame Manager (FMan) is a key component in the DPAA and makes use of the
+  DPAA infrastructure (QMan and BMan). FMan  is responsible for packet
+  distribution and policing. Each frame can be parsed, classified and results
+  may be attached to the frame. This meta data can be used to select
+  particular QMan queue, which the packet is forwarded to.
+
+
+DPAA DPDK - Poll Mode Driver Overview
+-
+
+This section provides an overview of the drivers for DPAA:
+
+* Bus driver and associated "DPAA infrastructure" drive

[dpdk-dev] [RFC Patch 20/39] mempool/dpaa: add support for NXP DPAA Mempool

2017-05-27 Thread Shreyansh Jain
This Mempool driver works with DPAA BMan hardware block. This block
manages data buffers in memory, and provides efficient interface with
other hardware and software components for buffer requests.

This patch adds support for BMan. Compilation would be enabled in
subsequent patches.

Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/mempool/dpaa/Makefile |  65 ++
 drivers/mempool/dpaa/dpaa_mempool.c   | 265 ++
 drivers/mempool/dpaa/dpaa_mempool.h   |  78 +++
 drivers/mempool/dpaa/rte_mempool_dpaa_version.map |   6 +
 4 files changed, 414 insertions(+)
 create mode 100644 drivers/mempool/dpaa/Makefile
 create mode 100644 drivers/mempool/dpaa/dpaa_mempool.c
 create mode 100644 drivers/mempool/dpaa/dpaa_mempool.h
 create mode 100644 drivers/mempool/dpaa/rte_mempool_dpaa_version.map

diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
new file mode 100644
index 000..45a1f7b
--- /dev/null
+++ b/drivers/mempool/dpaa/Makefile
@@ -0,0 +1,65 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 NXP. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+# * Neither the name of NXP nor the names of its
+#   contributors may be used to endorse or promote products derived
+#   from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+CFLAGS += -D _GNU_SOURCE
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa_version.map
+
# Library version
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
+
+LDLIBS += -lrte_bus_dpaa
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c 
b/drivers/mempool/dpaa/dpaa_mempool.c
new file mode 100644
index 000..67f4cdd
--- /dev/null
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -0,0 +1,265 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2017 NXP.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of NXP nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS

[dpdk-dev] [RFC Patch 16/39] bus/dpaa: add BMan hardware interfaces

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Geoff Thorpe 
Signed-off-by: Roy Pledge 
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 drivers/bus/dpaa/Makefile |   1 +
 drivers/bus/dpaa/base/qbman/bman.c| 394 +
 drivers/bus/dpaa/base/qbman/bman.h| 550 ++
 drivers/bus/dpaa/base/qbman/bman_driver.c |  12 +
 drivers/bus/dpaa/base/qbman/dpaa_alloc.c  |  16 +
 5 files changed, 973 insertions(+)
 create mode 100644 drivers/bus/dpaa/base/qbman/bman.c
 create mode 100644 drivers/bus/dpaa/base/qbman/bman.h

diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index f11af6d..29a065a 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -71,6 +71,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/of.c \
base/fman/netcfg_layer.c \
base/qbman/process.c \
+   base/qbman/bman.c \
base/qbman/bman_driver.c \
base/qbman/qman.c \
base/qbman/qman_driver.c \
diff --git a/drivers/bus/dpaa/base/qbman/bman.c 
b/drivers/bus/dpaa/base/qbman/bman.c
new file mode 100644
index 000..a0bea62
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/bman.c
@@ -0,0 +1,394 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ *   BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman.h"
+#include 
+
+/* Compilation constants */
+#define RCR_THRESH 2   /* reread h/w CI when running out of space */
+#define IRQNAME"BMan portal %d"
+#define MAX_IRQNAME16  /* big enough for "BMan portal %d" */
+
+struct bman_portal {
+   struct bm_portal p;
+   /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+   struct bman_depletion *pools;
+   int thresh_set;
+   unsigned long irq_sources;
+   u32 slowpoll;   /* only used when interrupts are off */
+   /* When the cpu-affine portal is activated, this is non-NULL */
+   const struct bm_portal_config *config;
+   char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+   return &get_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+   struct bman_pool_params params;
+   /* Used for hash-table admin when using depletion notifications. */
+   struct bman_portal *portal;
+   struct bman_pool *next;
+#ifdef RTE_LIBRTE_DPAA_CHECKING
+   atomic_t in_use;
+#endif
+};
+
+static inline
+struct bman_portal *bman_create_portal(struct bman_portal *portal,
+  const struct bm_portal_config *c)
+{
+   struct bm_portal *p;
+   const struct bman_depletion *pools = &c->mask;
+   int ret;
+   u8 bpid = 0;
+
+

[dpdk-dev] [RFC Patch 39/39] net/dpaa: add packet dump for debugging

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Hemant Agrawal 
Signed-off-by: Shreyansh Jain 
---
 config/defconfig_arm64-dpaa-linuxapp-gcc |  2 ++
 drivers/net/dpaa/dpaa_ethdev.c   | 42 
 drivers/net/dpaa/dpaa_rxtx.c | 36 +++
 3 files changed, 80 insertions(+)

diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc 
b/config/defconfig_arm64-dpaa-linuxapp-gcc
index fcc0231..ce1a10e 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -50,6 +50,8 @@ CONFIG_RTE_LIBRTE_DPAA_DEBUG_INIT=n
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_RX=n
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER_DISPLAY=n
+CONFIG_RTE_LIBRTE_DPAA_CHECKING=y
 
 # DPAA Mempool
 CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 30ebc2b..92fac72 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -621,6 +621,39 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
return ret;
 }
 
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
+static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
+{
+   struct qm_mcc_initfq opts;
+   int ret;
+
+   PMD_INIT_FUNC_TRACE();
+
+   ret = qman_reserve_fqid(fqid);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "reserve debug fqid %d failed with ret: %d",
+   fqid, ret);
+   return -EINVAL;
+   }
+   /* "map" this Rx FQ to one of the interfaces Tx FQID */
+   PMD_DRV_LOG(DEBUG, "creating debug fq %p, fqid %d", fq, fqid);
+   ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "create debug fqid %d failed with ret: %d",
+   fqid, ret);
+   return ret;
+   }
+   opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+   opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
+   ret = qman_init_fq(fq, 0, &opts);
+   if (ret)
+   PMD_DRV_LOG(ERR, "init debug fqid %d failed with ret: %d",
+   fqid, ret);
+   return ret;
+}
+#endif
+
 /* Initialise a network interface */
 static int dpaa_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -682,6 +715,15 @@ static int dpaa_eth_dev_init(struct rte_eth_dev *eth_dev)
}
dpaa_intf->nb_tx_queues = num_cores;
 
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+   dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+   DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+   dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
+   dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+   DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+   dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
+#endif
+
PMD_DRV_LOG(DEBUG, "all fqs created");
 
/* Get the initial configuration for flow control */
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 9dc059e..d4396b1 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -85,6 +85,38 @@
(_fd)->bpid = _bpid; \
} while (0)
 
+#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER_DISPLAY)
+void dpaa_display_frame(const struct qm_fd *fd)
+{
+   int ii;
+   char *ptr;
+
+   printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
+  __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
+   fd->offset, fd->length20, fd->status);
+
+   ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
+   ptr += fd->offset;
+   printf("%02x ", *ptr);
+   for (ii = 1; ii < fd->length20; ii++) {
+   printf("%02x ", *ptr);
+   if ((ii % 16) == 0)
+   printf("\n");
+   ptr++;
+   }
+   printf("\n");
+}
+#else
+#define dpaa_display_frame(a)
+#endif
+
+static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
+uint64_t prs __rte_unused)
+{
+   PMD_RX_LOG(DEBUG, " Slow parsing");
+
+   /*TBD:XXX: to be implemented*/
+}
 
 static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
uint64_t fd_virt_addr)
@@ -143,6 +175,7 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
break;
/* More switch cases can be added */
default:
+   dpaa_slow_parsing(m, prs);
break;
}
 
@@ -299,6 +332,8 @@ static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct 
qm_fd *fd,
struct pool_info_entry *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
struct rte_mbuf *mbuf;
void *ptr;
+   uint8_t format =
+   (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
uint16_t offset =
(fd->op

[dpdk-dev] [RFC Patch 22/39] maintainers: claim ownership of DPAA Mempool driver

2017-05-27 Thread Shreyansh Jain
Signed-off-by: Shreyansh Jain 
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index e2b0415..b50bd33 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -392,6 +392,7 @@ NXP dpaa
 M: Hemant Agrawal 
 M: Shreyansh Jain 
 F: drivers/bus/dpaa/
+F: drivers/mempool/dpaa/
 F: doc/guides/nics/dpaa.rst
 F: doc/guides/nics/features/dpaa.ini
 
-- 
2.7.4



Re: [dpdk-dev] [PATCH v3 1/3] lib: add Generic Receive Offload API framework

2017-05-27 Thread Ananyev, Konstantin


> -Original Message-
> From: Hu, Jiayu
> Sent: Saturday, May 27, 2017 4:42 AM
> To: Ananyev, Konstantin 
> Cc: dev@dpdk.org; Wiles, Keith ; 
> yuanhan@linux.intel.com
> Subject: Re: [PATCH v3 1/3] lib: add Generic Receive Offload API framework
> 
> On Sat, May 27, 2017 at 07:10:21AM +0800, Ananyev, Konstantin wrote:
> > Hi Jiayu,
> >
> > > -Original Message-
> > > From: Hu, Jiayu
> > > Sent: Friday, May 26, 2017 8:26 AM
> > > To: Ananyev, Konstantin 
> > > Cc: dev@dpdk.org; Wiles, Keith ; 
> > > yuanhan@linux.intel.com
> > > Subject: Re: [PATCH v3 1/3] lib: add Generic Receive Offload API framework
> > >
> > > Hi Konstantin,
> > >
> > > On Wed, May 24, 2017 at 08:38:25PM +0800, Ananyev, Konstantin wrote:
> > > >
> > > > Hi Jiayu,
> > > >
> > > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > Thanks for your comments. My replies/questions are below.
> > > > >
> > > > > BRs,
> > > > > Jiayu
> > > > >
> > > > > On Mon, May 22, 2017 at 05:19:19PM +0800, Ananyev, Konstantin wrote:
> > > > > > Hi Jiayu,
> > > > > > My comments/questions below.
> > > > > > Konstantin
> > > > > >
> > > > > > >
> > > > > > > For applications, DPDK GRO provides three external functions to
> > > > > > > enable/disable GRO:
> > > > > > > - rte_gro_init: initialize GRO environment;
> > > > > > > - rte_gro_enable: enable GRO for a given port;
> > > > > > > - rte_gro_disable: disable GRO for a given port.
> > > > > > > Before using GRO, applications should explicitly call 
> > > > > > > rte_gro_init to
> > > > > > > initizalize GRO environment. After that, applications can call
> > > > > > > rte_gro_enable to enable GRO and call rte_gro_disable to disable 
> > > > > > > GRO for
> > > > > > > specific ports.
> > > > > >
> > > > > > I think this is too restrictive and wouldn't meet various user's 
> > > > > > needs.
> > > > > > User might want to:
> > > > > > - enable/disable GRO for particular RX queue
> > > > > > - or even setup different GRO types for different RX queues,
> > > > > >i.e, - GRO over IPV4/TCP for queue 0, and  GRO over IPV6/TCP for 
> > > > > > queue 1, etc.
> > > > >
> > > > > The reason for enabling/disabling GRO per-port instead of per-queue 
> > > > > is that LINUX
> > > > > controls GRO per-port. To control GRO per-queue indeed can provide 
> > > > > more flexibility
> > > > > to applications. But are there any scenarios that different queues of 
> > > > > a port may
> > > > > require different GRO control (i.e. GRO types and enable/disable GRO)?
> > > >
> > > > I think yes.
> > > >
> > > > >
> > > > > > - For various reasons, user might prefer not to use RX callbacks 
> > > > > > for various reasons,
> > > > > >   But invoke gro() manually at somepoint in his code.
> > > > >
> > > > > An application-used GRO library can enable more flexibility to 
> > > > > applications. Besides,
> > > > > when perform GRO in ethdev layer or inside PMD drivers, it is an 
> > > > > issue that
> > > > > rte_eth_rx_burst returns actually received packet number or GROed 
> > > > > packet number. And
> > > > > the same issue happens in GSO, and even more seriously. This is 
> > > > > because applications
> > > > > , like VPP, always rely on the return value of rte_eth_tx_burst to 
> > > > > decide further
> > > > > operations. If applications can direcly call GRO/GSO libraries, this 
> > > > > issue won't exist.
> > > > > And DPDK is a library, which is not a holistic system like LINUX. We 
> > > > > don't need to do
> > > > > the same as LINUX. Therefore, maybe it's a better idea to directly 
> > > > > provide SW
> > > > > segmentation/reassembling libraries to applications.
> > > > >
> > > > > > - Many users would like to control size (number of flows/items per 
> > > > > > flow),
> > > > > >   max allowed packet size, max timeout, etc., for different GRO 
> > > > > > tables.
> > > > > > - User would need a way to flush all or only timeout packets from 
> > > > > > particular GRO tables.
> > > > > >
> > > > > > So I think that API needs to extended to become be much more 
> > > > > > fine-grained.
> > > > > > Something like that:
> > > > > >
> > > > > > struct rte_gro_tbl_param {
> > > > > >int32_t socket_id;
> > > > > >size_t max_flows;
> > > > > >size_t max_items_per_flow;
> > > > > >size_t max_pkt_size;
> > > > > >uint64_t packet_timeout_cycles;
> > > > > >
> > > > > >   
> > > > > >   ...
> > > > > > };
> > > > > >
> > > > > > struct rte_gro_tbl;
> > > > > > strct rte_gro_tbl *rte_gro_tbl_create(const struct 
> > > > > > rte_gro_tbl_param *param);
> > > > > >
> > > > > > void rte_gro_tbl_destroy(struct rte_gro_tbl *tbl);
> > > > >
> > > > > Yes, I agree with you. It's necessary to provide more fine-grained 
> > > > > control APIs to
> > > > > applications. But what's 'packet_timeout_cycles' used for? Is it for 
> > > > > TCP packets?
> > > >
> > > > For any packets that sits in the gro_table for too long.
> > > >
> > > > >
> > > > > >
> > > > > > /*
> > > > > >  * process packet

[dpdk-dev] stable release 17.02.1 patches review and test

2017-05-27 Thread Yuanhan Liu
Hi all,

Here is a list of patches targeted for stable release 17.02.1. Please
help review and test. The planned date for the final release is 2th,
Jun. Before that, please shout if anyone has objections with these
patches being applied.

These patches are located at branch 17.02 of dpdk-stable repo:
http://dpdk.org/browse/dpdk-stable/

Thanks.

--yliu

---
Adrien Mazarguil (4):
  mbuf: fix missing includes in exported header
  efd: fix missing include in exported header
  app/testpmd: fix stack overwriting by flow command
  app/testpmd: fix MAC endian in flow command

Alejandro Lucero (4):
  net/nfp: clean Tx descriptor flags
  net/nfp: fix packet/data length conversion
  net/nfp: fix Rx interrupt
  net/nfp: fix releasing muti-segment mbufs

Andrew Rybchenko (6):
  net/sfc: destroy event queue when Rx queue is released
  net/sfc: destroy event queue when Tx queue is released
  net/sfc: fix leak if EvQ DMA space allocation fails
  net/sfc: use correct function to free scattered packet on Rx
  net/sfc: reset RSS channels back to 0 on close
  net/sfc: fix LSC interrupt support for UIO cases

Andriy Berestovskyy (1):
  examples/load_balancer: fix Tx flush

Arek Kusztal (4):
  crypto/qat: fix AES-GCM authentication length
  crypto/qat: fix IV zero physical address
  crypto/openssl: fix AAD capabilities for AES-GCM
  cryptodev: fix API AAD comments

Beilei Xing (2):
  net/i40e: fix memory allocation for hash table
  net/i40e: fix setup when bulk is disabled

Ben Walker (1):
  pci: fix device registration on FreeBSD

Bernard Iremonger (1):
  net/i40e: ensure vector mode is not used with QinQ

Bruce Richardson (8):
  crypto/scheduler: fix include of local headers
  nic_uio: fix device binding at boot
  examples: fix build clean on FreeBSD
  examples/performance-thread: fix build on FreeBSD
  examples/performance-thread: fix compilation on Suse 11 SP2
  examples/performance-thread: fix build on FreeBSD 10.0
  net/vmxnet3: fix build with gcc 7
  test/cmdline: fix missing break in switch

Charles Myers (1):
  net/mlx4: fix Rx after mbuf alloc failure

Chas Williams (2):
  net/bnx2x: fix transmit queue free threshold
  net/vmxnet3: fix queue size changes

Fan Zhang (2):
  crypto/scheduler: fix capability update
  app/crypto-perf: fix crypto operation resubmission

Ferruh Yigit (5):
  kni: fix build with kernel 4.11
  net/ixgbe: fix duplicated check
  kni: fix ethtool support
  kni: fix possible memory leak
  kni: fix crash caused by freeing mempool

Fiona Trahe (1):
  cryptodev: fix API digest length comments

Gaetan Rivet (3):
  net/mlx4: update link status upon probing with LSC
  net/mlx4: fix returned values upon failed probing
  net/mlx5: fix returned values upon failed probing

Gage Eads (1):
  crypto/qat: fix dequeue statistics

Harish Patil (5):
  net/qede: fix missing UDP protocol in RSS offload types
  net/qede: fix VF RSS configuration
  net/qede: prevent crash while changing MTU dynamically
  net/qede: fix default MAC address handling
  net/qede: fix fastpath rings reset phase

Henry Cai (2):
  net/cxgbe: fix possible null pointer dereference
  net/i40e: fix allocation check

Huanle Han (1):
  net/virtio: fix crash when closing twice

Ido Barnea (1):
  net/ixgbevf: set xstats id values

Ilya Maximets (2):
  vhost: change log levels in client mode
  net/bonding: allow configuring jumbo frames without slaves

Jeff Guo (3):
  lib: fix IPv6 tunnel checksum
  app/testpmd: fix IPv6 tunnel checksum
  net/i40e: fix hash input set on X722

Jerin Jacob (5):
  net/i40e: fix incorrect packet index reference
  net/thunderx: fix 32-bit build
  net/thunderx: fix build on FreeBSD
  net/thunderx: use internal byte ordering macros
  net/thunderx: fix deadlock in Rx path

Jia Yu (1):
  net/ixgbe: fix setting MTU on stopped device

Jianfeng Tan (4):
  vfio: fix secondary process start
  net/virtio-user: fix address on 32-bit system
  net/virtio: fix MSI-X for modern devices
  net/virtio: fix LSC setting

Jiayu Hu (1):
  app/testpmd: fix exit without freeing resources

Jingjing Wu (4):
  net/ixgbe: fix multi-queue mode check in SRIOV mode
  app/testpmd: fix init config for multi-queue mode
  app/testpmd: fix TC mapping in DCB init config
  examples/l3fwd-power: fix handling no Rx queue

Johan Samuelsson (1):
  net/pcap: fix using mbuf after freeing it

Keith Wiles (1):
  net/tap: fix possibly unterminated string

Kevin Traynor (1):
  vhost: fix false sharing

Marcin Wilk (1):
  net/thunderx: fix stats access out of bounds

Markos Chandras (1):
  examples/ethtool: fix link with ixgbe shared lib

Matt Peters (1):
  net/virtio: disable LSC interrupt if MSIX not enabled

Matthias Gatto (1):
  vhos

[dpdk-dev] [PATCH 0/2] LACP control packet filtering offload

2017-05-27 Thread Tomasz Kulasek
1. Overview

  Packet processing in the current path for bonding in mode 4 requires
  parsing all packets in the fast path, to classify and process LACP
  packets.

  The idea of performance improvement is to use hardware offloads to
  improve packet classification.

2. Scope of work

   a) Optimization of software LACP packet classification by using
  packet_type metadata to eliminate the requirement of parsing each
  packet in the received burst.

   b) Implementation of classification mechanism using flow director to
  redirect LACP packets to the dedicated queue (not visible by
  application).

  - Filter pattern choosing (not all filters are supported by all
devices),
  - Changing processing path to speed up non-LACP packets
processing,
  - Handle LACP packets from dedicated Rx queue and send to the
dedicated Tx queue,

   c) Creation of fallback mechanism allowing to select the most
  preferable method of processing:

  - Flow director,
  - Packet type metadata,
  - Software parsing,

3. Implementation

3.1. Packet type

   The packet_type approach would result in a performance improvement
   as packets data would no longer be required to be read, but with this
   approach the bonded driver would still need to look at the mbuf of
   each packet thereby having an impact on the achievable Rx
   performance.

   There's no packet_type value describing LACP packets directly.
   However, it can be used to limit the number of packets required to be
   parsed, e.g. if packet_type indicates >L2 packets.

   It should improve performance while well-known non-LACP packets can
   be skipped without the need to look up into its data.

3.2. Flow director

   Using rte_flow API and pattern on ethernet type of packet (0x8809),
   we can configure flow director to redirect slow packets to separated
   queue.

   An independent Rx queues for LACP would remove the requirement to
   filter all ingress traffic in sw which should result in a performance
   increase. Other queues stay untouched and processing of packets on
   the fast path will be reduced to simple packet collecting from
   slaves.

   Separated Tx queue for LACP daemon allows to send LACP responses
   immediately, without interfering into Tx fast path.

   RECEIVE

 .---.
 | Slave 0   |
 |  .--. |
 |  Fd  | Rxq  | |
   Rx ==o==>|  |==.
 |  |   +==+ ||  .---.
 |  `-->| LACP |. |  | Bonding   |
 |  `--' |  | |  |  .--. |
 `---'  | |  |  |  | |
| >>|  |===> Rx
 .---.  | |  |  +==+ |
 | Slave 1   |  | |  |  |  | |
 |  .--. |  | |  |  `--' |
 |  Fd  | Rxq  | |  | |  `---'
   Rx ==o==>|  |=='.---.
 |  |   +==+ |  | / \
 |  `-->| LACP |+--->+  LACP DAEMON  |
 |  `--' | Tx <---\ /
 `---' `---'

   All slow packets received by slaves in bonding are redirected to the
   separated queue using flow director. Other packets are collected from
   slaves and exposed to the application with Rx burst on bonded device.

   TRANSMIT

 .---.
 | Slave 0   |
 |  .--. |
 |  |  | |
   Tx <=+===|  |<=.
 |  |   |--| ||  .---.
 |  `---| LACP |<---. |  | Bonding   |
 |  `--' |  | |  |  .--. |
 `---'  | |  |  |  | |
| +<|  |<== Tx
 .---.  | |  |  +==+ |
 | Slave 1   |  | |  |  |  | |
 |  .--. |  | |  |  `--' |
 |  |  | |  | |  `---'
   Tx <=+===|  |<='  Rx.---.
 |  |   |--| |  | `-->/ \
 |  `---| LACP |<---++  LACP DAEMON  |
 |  `--' |\ /
 `---' `---'

   On transmit, packets are propagated on the slaves. While we have
   separated Tx queue for LACP responses, it can be sent regardless of
   the fast path.

   LACP DAEMON

   In this mode whole slow packets are handled in LACP DAEMON.


Tomasz Kulasek (2):
  LACP control packet filtering offload
  test-pmd: add set bonding slow_queue hw/sw

 app/test-pmd/cmdline.c   

[dpdk-dev] [PATCH 1/2] LACP control packet filtering offload

2017-05-27 Thread Tomasz Kulasek
New API functions implemented:

   rte_eth_bond_8023ad_slow_queue_enable(uint8_t port_id);
   rte_eth_bond_8023ad_slow_queue_disable(uint8_t port_id);

rte_eth_bond_8023ad_slow_queue_enable should be called before bonding port
start to enable new path.

When this option is enabled all slaves must support flow director's
filtering by ethernet type and support one additional queue on slaves
tx/rx.

Signed-off-by: Tomasz Kulasek 
---
 drivers/net/bonding/rte_eth_bond_8023ad.c | 141 +++--
 drivers/net/bonding/rte_eth_bond_8023ad.h |   6 +
 drivers/net/bonding/rte_eth_bond_8023ad_private.h |  15 +
 drivers/net/bonding/rte_eth_bond_pmd.c| 345 +-
 drivers/net/bonding/rte_eth_bond_version.map  |   9 +
 5 files changed, 481 insertions(+), 35 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c 
b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 7b863d6..125eb45 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -632,12 +632,20 @@
lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
lacpdu->terminator_length = 0;
 
-   if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
-   /* If TX ring full, drop packet and free message. Retransmission
-* will happen in next function call. */
-   rte_pktmbuf_free(lacp_pkt);
-   set_warning_flags(port, WRN_TX_QUEUE_FULL);
-   return;
+   if (internals->mode4.slow_rx_queue == 0) {
+   if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
+   /* If TX ring full, drop packet and free message. 
Retransmission
+* will happen in next function call. */
+   rte_pktmbuf_free(lacp_pkt);
+   set_warning_flags(port, WRN_TX_QUEUE_FULL);
+   return;
+   }
+   } else {
+   if (rte_eth_tx_burst(slave_id, internals->mode4.slow_tx_queue, 
&lacp_pkt, 1) == 0) {
+   rte_pktmbuf_free(lacp_pkt);
+   set_warning_flags(port, WRN_TX_QUEUE_FULL);
+   return;
+   }
}
 
MODE4_DEBUG("sending LACP frame\n");
@@ -741,6 +749,25 @@
 }
 
 static void
+rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+   struct rte_mbuf *lacp_pkt) {
+
+   /* Find LACP packet to this port. Do not check subtype, it is done in
+* function that queued packet */
+   if (lacp_pkt != NULL) {
+   struct lacpdu_header *lacp;
+
+   lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+   RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+   /* This is LACP frame so pass it to rx_machine */
+   rx_machine(internals, slave_id, &lacp->lacpdu);
+   rte_pktmbuf_free(lacp_pkt);
+   } else
+   rx_machine(internals, slave_id, NULL);
+}
+
+static void
 bond_mode_8023ad_periodic_cb(void *arg)
 {
struct rte_eth_dev *bond_dev = arg;
@@ -809,20 +836,21 @@
 
SM_FLAG_SET(port, LACP_ENABLED);
 
-   /* Find LACP packet to this port. Do not check subtype, it is 
done in
-* function that queued packet */
-   if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
-   struct rte_mbuf *lacp_pkt = pkt;
-   struct lacpdu_header *lacp;
+   struct rte_mbuf *lacp_pkt = NULL;
 
-   lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header 
*);
-   RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+   if (internals->mode4.slow_rx_queue == 0) {
+   /* Find LACP packet to this port. Do not check subtype, 
it is done in
+* function that queued packet */
+   if (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
+   lacp_pkt = pkt;
 
-   /* This is LACP frame so pass it to rx_machine */
-   rx_machine(internals, slave_id, &lacp->lacpdu);
-   rte_pktmbuf_free(lacp_pkt);
-   } else
-   rx_machine(internals, slave_id, NULL);
+   rx_machine_update(internals, slave_id, lacp_pkt);
+   } else {
+   if (rte_eth_rx_burst(slave_id, 
internals->mode4.slow_rx_queue, &lacp_pkt, 1) == 1)
+   bond_mode_8023ad_handle_slow_pkt(internals, 
slave_id, lacp_pkt);
+   else
+   rx_machine_update(internals, slave_id, NULL);
+   }
 
periodic_machine(internals, slave_id);
mux_machine(internals, slave_id);
@@ -1188,18 +1216,36 @@
m_hdr->marker.tlv_type_marker = MARKER_TL

[dpdk-dev] [PATCH 2/2] test-pmd: add set bonding slow_queue hw/sw

2017-05-27 Thread Tomasz Kulasek
This patch adds new command:

set bonding slow_queue  sw|hw

"set bonding slow_queue  hw" sets hardware management
of slow packets and chooses simplified paths for tx/rx bursts.

"set bonding slow_queue  sw" turns back to the software
handling of slow packets. This option is default.

Signed-off-by: Tomasz Kulasek 
---
 app/test-pmd/cmdline.c | 58 ++
 1 file changed, 58 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0afac68..11fa4a5 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -87,6 +87,7 @@
 #include 
 #ifdef RTE_LIBRTE_PMD_BOND
 #include 
+#include 
 #endif
 #ifdef RTE_LIBRTE_IXGBE_PMD
 #include 
@@ -4279,6 +4280,62 @@ static void cmd_set_bonding_mode_parsed(void 
*parsed_result,
}
 };
 
+/* *** SET BONDING SLOW_QUEUE SW/HW *** */
+struct cmd_set_bonding_slow_queue_result {
+   cmdline_fixed_string_t set;
+   cmdline_fixed_string_t bonding;
+   cmdline_fixed_string_t slow_queue;
+   uint8_t port_id;
+   cmdline_fixed_string_t mode;
+};
+
+static void cmd_set_bonding_slow_queue_parsed(void *parsed_result,
+   __attribute__((unused))  struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_set_bonding_slow_queue_result *res = parsed_result;
+   portid_t port_id = res->port_id;
+
+   if (!strcmp(res->mode, "hw")) {
+   rte_eth_bond_8023ad_slow_queue_enable(port_id);
+   printf("Hardware slow queue enabled\n");
+   } else if (!strcmp(res->mode, "sw")) {
+   rte_eth_bond_8023ad_slow_queue_disable(port_id);
+   }
+}
+
+cmdline_parse_token_string_t cmd_setbonding_slow_queue_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_slow_queue_result,
+   set, "set");
+cmdline_parse_token_string_t cmd_setbonding_slow_queue_bonding =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_slow_queue_result,
+   bonding, "bonding");
+cmdline_parse_token_string_t cmd_setbonding_slow_queue_slow_queue =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_slow_queue_result,
+   slow_queue, "slow_queue");
+cmdline_parse_token_num_t cmd_setbonding_slow_queue_port =
+TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_slow_queue_result,
+   port_id, UINT8);
+cmdline_parse_token_string_t cmd_setbonding_slow_queue_mode =
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_slow_queue_result,
+   mode, "sw#hw");
+
+cmdline_parse_inst_t cmd_set_slow_queue = {
+   .f = cmd_set_bonding_slow_queue_parsed,
+   .help_str = "set bonding slow_queue  "
+   "sw|hw: "
+   "Set the bonding slow queue acceleration for port_id",
+   .data = NULL,
+   .tokens = {
+   (void *)&cmd_setbonding_slow_queue_set,
+   (void *)&cmd_setbonding_slow_queue_bonding,
+   (void *)&cmd_setbonding_slow_queue_slow_queue,
+   (void *)&cmd_setbonding_slow_queue_port,
+   (void *)&cmd_setbonding_slow_queue_mode,
+   NULL
+   }
+};
+
 /* *** SET BALANCE XMIT POLICY *** */
 struct cmd_set_bonding_balance_xmit_policy_result {
cmdline_fixed_string_t set;
@@ -13613,6 +13670,7 @@ struct cmd_cmdfile_result {
(cmdline_parse_inst_t *) &cmd_set_bond_mac_addr,
(cmdline_parse_inst_t *) &cmd_set_balance_xmit_policy,
(cmdline_parse_inst_t *) &cmd_set_bond_mon_period,
+   (cmdline_parse_inst_t *) &cmd_set_slow_queue,
 #endif
(cmdline_parse_inst_t *)&cmd_vlan_offload,
(cmdline_parse_inst_t *)&cmd_vlan_tpid,
-- 
1.9.1



Re: [dpdk-dev] [PATCH v3 1/3] lib: add Generic Receive Offload API framework

2017-05-27 Thread Jiayu Hu
Hi Konstantin,

On Sat, May 27, 2017 at 07:12:16PM +0800, Ananyev, Konstantin wrote:
> 
> 
> > -Original Message-
> > From: Hu, Jiayu
> > Sent: Saturday, May 27, 2017 4:42 AM
> > To: Ananyev, Konstantin 
> > Cc: dev@dpdk.org; Wiles, Keith ; 
> > yuanhan@linux.intel.com
> > Subject: Re: [PATCH v3 1/3] lib: add Generic Receive Offload API framework
> > 
> > On Sat, May 27, 2017 at 07:10:21AM +0800, Ananyev, Konstantin wrote:
> > > Hi Jiayu,
> > >
> > > > -Original Message-
> > > > From: Hu, Jiayu
> > > > Sent: Friday, May 26, 2017 8:26 AM
> > > > To: Ananyev, Konstantin 
> > > > Cc: dev@dpdk.org; Wiles, Keith ; 
> > > > yuanhan@linux.intel.com
> > > > Subject: Re: [PATCH v3 1/3] lib: add Generic Receive Offload API 
> > > > framework
> > > >
> > > > Hi Konstantin,
> > > >
> > > > On Wed, May 24, 2017 at 08:38:25PM +0800, Ananyev, Konstantin wrote:
> > > > >
> > > > > Hi Jiayu,
> > > > >
> > > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > Thanks for your comments. My replies/questions are below.
> > > > > >
> > > > > > BRs,
> > > > > > Jiayu
> > > > > >
> > > > > > On Mon, May 22, 2017 at 05:19:19PM +0800, Ananyev, Konstantin wrote:
> > > > > > > Hi Jiayu,
> > > > > > > My comments/questions below.
> > > > > > > Konstantin
> > > > > > >
> > > > > > > >
> > > > > > > > For applications, DPDK GRO provides three external functions to
> > > > > > > > enable/disable GRO:
> > > > > > > > - rte_gro_init: initialize GRO environment;
> > > > > > > > - rte_gro_enable: enable GRO for a given port;
> > > > > > > > - rte_gro_disable: disable GRO for a given port.
> > > > > > > > Before using GRO, applications should explicitly call 
> > > > > > > > rte_gro_init to
> > > > > > > > initialize GRO environment. After that, applications can call
> > > > > > > > rte_gro_enable to enable GRO and call rte_gro_disable to 
> > > > > > > > disable GRO for
> > > > > > > > specific ports.
> > > > > > >
> > > > > > > I think this is too restrictive and wouldn't meet various user's 
> > > > > > > needs.
> > > > > > > User might want to:
> > > > > > > - enable/disable GRO for particular RX queue
> > > > > > > - or even setup different GRO types for different RX queues,
> > > > > > >i.e, - GRO over IPV4/TCP for queue 0, and  GRO over IPV6/TCP 
> > > > > > > for queue 1, etc.
> > > > > >
> > > > > > The reason for enabling/disabling GRO per-port instead of per-queue 
> > > > > > is that LINUX
> > > > > > controls GRO per-port. To control GRO per-queue indeed can provide 
> > > > > > more flexibility
> > > > > > to applications. But are there any scenarios that different queues 
> > > > > > of a port may
> > > > > > require different GRO control (i.e. GRO types and enable/disable 
> > > > > > GRO)?
> > > > >
> > > > > I think yes.
> > > > >
> > > > > >
> > > > > > > - For various reasons, user might prefer not to use RX callbacks 
> > > > > > > for various reasons,
> > > > > > >   But invoke gro() manually at somepoint in his code.
> > > > > >
> > > > > > An application-used GRO library can enable more flexibility to 
> > > > > > applications. Besides,
> > > > > > when perform GRO in ethdev layer or inside PMD drivers, it is an 
> > > > > > issue that
> > > > > > rte_eth_rx_burst returns actually received packet number or GROed 
> > > > > > packet number. And
> > > > > > the same issue happens in GSO, and even more seriously. This is 
> > > > > > because applications
> > > > > > , like VPP, always rely on the return value of rte_eth_tx_burst to 
> > > > > > decide further
> > > > > > operations. If applications can directly call GRO/GSO libraries, 
> > > > > > this issue won't exist.
> > > > > > And DPDK is a library, which is not a holistic system like LINUX. 
> > > > > > We don't need to do
> > > > > > the same as LINUX. Therefore, maybe it's a better idea to directly 
> > > > > > provide SW
> > > > > > segmentation/reassembling libraries to applications.
> > > > > >
> > > > > > > - Many users would like to control size (number of flows/items 
> > > > > > > per flow),
> > > > > > >   max allowed packet size, max timeout, etc., for different GRO 
> > > > > > > tables.
> > > > > > > - User would need a way to flush all or only timeout packets from 
> > > > > > > particular GRO tables.
> > > > > > >
> > > > > > > So I think that API needs to extended to become be much more 
> > > > > > > fine-grained.
> > > > > > > Something like that:
> > > > > > >
> > > > > > > struct rte_gro_tbl_param {
> > > > > > >int32_t socket_id;
> > > > > > >size_t max_flows;
> > > > > > >size_t max_items_per_flow;
> > > > > > >size_t max_pkt_size;
> > > > > > >uint64_t packet_timeout_cycles;
> > > > > > >
> > > > > > >   
> > > > > > >   ...
> > > > > > > };
> > > > > > >
> > > > > > > struct rte_gro_tbl;
> > > > > > > strct rte_gro_tbl *rte_gro_tbl_create(const struct 
> > > > > > > rte_gro_tbl_param *param);
> > > > > > >
> > > > > > > void rte_gro_tbl_destroy(struct rte_gro_tbl *tbl);
> > > > > >
> >

[dpdk-dev] [PATCH] net/i40e: extended list of operations for ddp processing

2017-05-27 Thread Andrey Chilikin
This patch adds ability to remove already loaded profile
or write profile without registering it

Signed-off-by: Andrey Chilikin 
---
 drivers/net/i40e/rte_pmd_i40e.c |  165 ---
 drivers/net/i40e/rte_pmd_i40e.h |6 +-
 2 files changed, 141 insertions(+), 30 deletions(-)

diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f7ce62b..5ebd7cf 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1523,6 +1523,9 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, 
uint16_t vlan_id,
 #define I40E_PROFILE_INFO_SIZE 48
 #define I40E_MAX_PROFILE_NUM 16
 
+#define I40E_DDP_TRACKID_INVALID 0x
+#define SECTION_TYPE_RB_MMIO 0x1800
+
 /* Check if the profile info exists */
 static int
 i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
@@ -1557,11 +1560,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, 
uint16_t vlan_id,
 sizeof(struct i40e_profile_section_header));
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
-   if ((pinfo->track_id == p->track_id) &&
-   !memcmp(&pinfo->version, &p->version,
-   sizeof(struct i40e_ddp_version)) &&
-   !memcmp(&pinfo->name, &p->name,
-   I40E_DDP_NAME_SIZE)) {
+   if (pinfo->track_id == p->track_id) {
PMD_DRV_LOG(INFO, "Profile exists.");
rte_free(buff);
return 1;
@@ -1572,6 +1571,88 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, 
uint16_t vlan_id,
return 0;
 }
 
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+static enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  u32 track_id)
+{
+   enum i40e_status_code status = I40E_SUCCESS;
+   struct i40e_section_table *sec_tbl;
+   struct i40e_profile_section_header *sec = NULL;
+   u32 dev_cnt;
+   u32 vendor_dev_id;
+   u32 *nvm;
+   u32 section_size = 0;
+   u32 offset = 0, info = 0;
+   u32 i, n;
+
+   if (track_id == I40E_DDP_TRACKID_INVALID) {
+   PMD_DRV_LOG(ERR, "Invalid track_id");
+   return I40E_NOT_SUPPORTED;
+   }
+
+   dev_cnt = profile->device_table_count;
+
+   for (i = 0; i < dev_cnt; i++) {
+   vendor_dev_id = profile->device_table[i].vendor_dev_id;
+   if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
+   if (hw->device_id == (vendor_dev_id & 0x))
+   break;
+   }
+   if (dev_cnt && (i == dev_cnt)) {
+   PMD_DRV_LOG(ERR, "Device doesn't support DDP");
+   return I40E_ERR_DEVICE_NOT_SUPPORTED;
+   }
+
+   nvm = (u32 *)&profile->device_table[dev_cnt];
+   sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+   for (i = 0; i < sec_tbl->section_count; i++) {
+   sec = (struct i40e_profile_section_header *)((u8 *)profile +
+   sec_tbl->section_offset[i]);
+   if (sec->section.type == SECTION_TYPE_AQ) {
+   PMD_DRV_LOG(ERR, "Rollback not supported for AQ 
sections");
+   return I40E_NOT_SUPPORTED;
+   }
+   if (sec->section.type == SECTION_TYPE_MMIO) {
+   PMD_DRV_LOG(ERR, "Not a roll-back package");
+   return I40E_NOT_SUPPORTED;
+   }
+   }
+
+   for (i = 0; i < sec_tbl->section_count; i++) {
+   /* For rollback write sections in reverse */
+   n = sec_tbl->section_count - i - 1;
+   sec = (struct i40e_profile_section_header *)((u8 *)profile +
+sec_tbl->section_offset[n]);
+
+   /* Skip any non-rollback sections */
+   if (sec->section.type != SECTION_TYPE_RB_MMIO)
+   continue;
+
+   section_size = sec->section.size +
+   sizeof(struct i40e_profile_section_header);
+
+   /* Write roll-back MMIO section */
+   status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+  track_id, &offset, &info, NULL);
+   if (status) {
+   PMD_DRV_LOG(ERR,
+  "Failed to write profile: section %d, offset 
%d, info %d",
+  n, offset, info);
+   break;
+   }
+   }
+   return status;
+}
+
 int
 rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
  

Re: [dpdk-dev] [PATCH v3 1/3] lib: add Generic Receive Offload API framework

2017-05-27 Thread Ananyev, Konstantin

Hi Jiayu,

> 
> Hi Konstantin,
> 
> On Sat, May 27, 2017 at 07:12:16PM +0800, Ananyev, Konstantin wrote:
> >
> >
> > > -Original Message-
> > > From: Hu, Jiayu
> > > Sent: Saturday, May 27, 2017 4:42 AM
> > > To: Ananyev, Konstantin 
> > > Cc: dev@dpdk.org; Wiles, Keith ; 
> > > yuanhan@linux.intel.com
> > > Subject: Re: [PATCH v3 1/3] lib: add Generic Receive Offload API framework
> > >
> > > On Sat, May 27, 2017 at 07:10:21AM +0800, Ananyev, Konstantin wrote:
> > > > Hi Jiayu,
> > > >
> > > > > -Original Message-
> > > > > From: Hu, Jiayu
> > > > > Sent: Friday, May 26, 2017 8:26 AM
> > > > > To: Ananyev, Konstantin 
> > > > > Cc: dev@dpdk.org; Wiles, Keith ; 
> > > > > yuanhan@linux.intel.com
> > > > > Subject: Re: [PATCH v3 1/3] lib: add Generic Receive Offload API 
> > > > > framework
> > > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > On Wed, May 24, 2017 at 08:38:25PM +0800, Ananyev, Konstantin wrote:
> > > > > >
> > > > > > Hi Jiayu,
> > > > > >
> > > > > > >
> > > > > > > Hi Konstantin,
> > > > > > >
> > > > > > > Thanks for your comments. My replies/questions are below.
> > > > > > >
> > > > > > > BRs,
> > > > > > > Jiayu
> > > > > > >
> > > > > > > On Mon, May 22, 2017 at 05:19:19PM +0800, Ananyev, Konstantin 
> > > > > > > wrote:
> > > > > > > > Hi Jiayu,
> > > > > > > > My comments/questions below.
> > > > > > > > Konstantin
> > > > > > > >
> > > > > > > > >
> > > > > > > > > For applications, DPDK GRO provides three external functions 
> > > > > > > > > to
> > > > > > > > > enable/disable GRO:
> > > > > > > > > - rte_gro_init: initialize GRO environment;
> > > > > > > > > - rte_gro_enable: enable GRO for a given port;
> > > > > > > > > - rte_gro_disable: disable GRO for a given port.
> > > > > > > > > Before using GRO, applications should explicitly call 
> > > > > > > > > rte_gro_init to
> > > > > > > > > initialize GRO environment. After that, applications can call
> > > > > > > > > rte_gro_enable to enable GRO and call rte_gro_disable to 
> > > > > > > > > disable GRO for
> > > > > > > > > specific ports.
> > > > > > > >
> > > > > > > > I think this is too restrictive and wouldn't meet various 
> > > > > > > > user's needs.
> > > > > > > > User might want to:
> > > > > > > > - enable/disable GRO for particular RX queue
> > > > > > > > - or even setup different GRO types for different RX queues,
> > > > > > > >i.e, - GRO over IPV4/TCP for queue 0, and  GRO over IPV6/TCP 
> > > > > > > > for queue 1, etc.
> > > > > > >
> > > > > > > The reason for enabling/disabling GRO per-port instead of 
> > > > > > > per-queue is that LINUX
> > > > > > > controls GRO per-port. To control GRO per-queue indeed can 
> > > > > > > provide more flexibility
> > > > > > > to applications. But are there any scenarios that different 
> > > > > > > queues of a port may
> > > > > > > require different GRO control (i.e. GRO types and enable/disable 
> > > > > > > GRO)?
> > > > > >
> > > > > > I think yes.
> > > > > >
> > > > > > >
> > > > > > > > - For various reasons, user might prefer not to use RX 
> > > > > > > > callbacks for various reasons,
> > > > > > > >   But invoke gro() manually at somepoint in his code.
> > > > > > >
> > > > > > > An application-used GRO library can enable more flexibility to 
> > > > > > > applications. Besides,
> > > > > > > when perform GRO in ethdev layer or inside PMD drivers, it is an 
> > > > > > > issue that
> > > > > > > rte_eth_rx_burst returns actually received packet number or GROed 
> > > > > > > packet number. And
> > > > > > > the same issue happens in GSO, and even more seriously. This is 
> > > > > > > because applications
> > > > > > > , like VPP, always rely on the return value of rte_eth_tx_burst 
> > > > > > > to decide further
> > > > > > > operations. If applications can directly call GRO/GSO libraries, 
> > > > > > > this issue won't exist.
> > > > > > > And DPDK is a library, which is not a holistic system like LINUX. 
> > > > > > > We don't need to do
> > > > > > > the same as LINUX. Therefore, maybe it's a better idea to 
> > > > > > > directly provide SW
> > > > > > > segmentation/reassembling libraries to applications.
> > > > > > >
> > > > > > > > - Many users would like to control size (number of flows/items 
> > > > > > > > per flow),
> > > > > > > >   max allowed packet size, max timeout, etc., for different GRO 
> > > > > > > > tables.
> > > > > > > > - User would need a way to flush all or only timeout packets 
> > > > > > > > from particular GRO tables.
> > > > > > > >
> > > > > > > > So I think that API needs to extended to become be much more 
> > > > > > > > fine-grained.
> > > > > > > > Something like that:
> > > > > > > >
> > > > > > > > struct rte_gro_tbl_param {
> > > > > > > >int32_t socket_id;
> > > > > > > >size_t max_flows;
> > > > > > > >size_t max_items_per_flow;
> > > > > > > >size_t max_pkt_size;
> > > > > > > >uint64_t packet_timeout_cycles;
> > > > > > > >
> > > > > > > >   
> > 

[dpdk-dev] [PATCH] net/mlx5: implement drop action in hardware classifier

2017-05-27 Thread Shachar Beiser
The current drop action is implemented as a queue tail drop,
requiring to instantiate multiple WQs to maintain high drop rate.
This commit, implements the drop action in hardware classifier.
This enables to reduce the amount of contexts needed for the drop,
without affecting the drop rate.

Signed-off-by: Shachar Beiser 
---
 drivers/net/mlx5/Makefile|  5 +
 drivers/net/mlx5/mlx5_flow.c | 43 +++
 2 files changed, 48 insertions(+)

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index c079959..daf8013 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -101,6 +101,11 @@ mlx5_autoconf.h.new: FORCE
 mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
+   HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP \
+   infiniband/verbs_exp.h \
+   enum IBV_EXP_FLOW_SPEC_ACTION_DROP \
+   $(AUTOCONF_OUTPUT)
+   $Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
infiniband/verbs_exp.h \
enum IBV_EXP_CQ_COMPRESSED_CQE \
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index adcbe3f..e243d39 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -994,6 +994,11 @@ struct mlx5_flow_action {
 {
struct rte_flow *rte_flow;
 
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+   struct ibv_exp_flow_spec_action_drop *drop;
+   unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
+
assert(priv->pd);
assert(priv->ctx);
rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
@@ -1007,6 +1012,15 @@ struct mlx5_flow_action {
rte_flow->qp = priv->flow_drop_queue->qp;
if (!priv->started)
return rte_flow;
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+   drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+   *drop = (struct ibv_exp_flow_spec_action_drop){
+   .type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
+   .size = size,
+   };
+   ++flow->ibv_attr->num_of_specs;
+   flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
 rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
@@ -1370,7 +1384,9 @@ struct rte_flow *
 priv_flow_create_drop_queue(struct priv *priv)
 {
struct rte_flow_drop *fdq = NULL;
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
unsigned int i;
+#endif
 
assert(priv->pd);
assert(priv->ctx);
@@ -1387,6 +1403,7 @@ struct rte_flow *
WARN("cannot allocate CQ for drop queue");
goto error;
}
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
&(struct ibv_exp_wq_init_attr){
@@ -1401,6 +1418,20 @@ struct rte_flow *
goto error;
}
}
+#else
+   fdq->wqs[0] = ibv_exp_create_wq(priv->ctx,
+   &(struct ibv_exp_wq_init_attr){
+   .wq_type = IBV_EXP_WQT_RQ,
+   .max_recv_wr = 1,
+   .max_recv_sge = 1,
+   .pd = priv->pd,
+   .cq = fdq->cq,
+   });
+   if (!fdq->wqs[0]) {
+   WARN("cannot allocate WQ for drop queue");
+   goto error;
+   }
+#endif
fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
&(struct ibv_exp_rwq_ind_table_init_attr){
.pd = priv->pd,
@@ -1441,10 +1472,15 @@ struct rte_flow *
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
if (fdq->wqs[i])
claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
}
+#else
+   if (fdq->wqs[0])
+   claim_zero(ibv_exp_destroy_wq(fdq->wqs[0]));
+#endif
if (fdq->cq)
claim_zero(ibv_destroy_cq(fdq->cq));
if (fdq)
@@ -1463,7 +1499,9 @@ struct rte_flow *
 priv_flow_delete_drop_queue(struct priv *priv)
 {
struct rte_flow_drop *fdq = priv->flow_drop_queue;
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
unsigned int i;
+#endif
 
if (!fdq)
return;
@@ -1471,10 +1509,15 @@ struct rte_flow *
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
for (i

[dpdk-dev] [PATCH] net/mlx5: implement drop action in hardware classifier

2017-05-27 Thread Shachar Beiser
The current drop action is implemented as a queue tail drop,
requiring the instantiation of multiple WQs to maintain a high drop rate.
This commit implements the drop action in the hardware classifier.
This reduces the number of contexts needed for the drop,
without affecting the drop rate.

Signed-off-by: Shachar Beiser 
---
 drivers/net/mlx5/Makefile|  5 +
 drivers/net/mlx5/mlx5_flow.c | 43 +++
 2 files changed, 48 insertions(+)

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index c079959..daf8013 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -101,6 +101,11 @@ mlx5_autoconf.h.new: FORCE
 mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
+   HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP \
+   infiniband/verbs_exp.h \
+   enum IBV_EXP_FLOW_SPEC_ACTION_DROP \
+   $(AUTOCONF_OUTPUT)
+   $Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
infiniband/verbs_exp.h \
enum IBV_EXP_CQ_COMPRESSED_CQE \
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index adcbe3f..e243d39 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -994,6 +994,11 @@ struct mlx5_flow_action {
 {
struct rte_flow *rte_flow;
 
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+   struct ibv_exp_flow_spec_action_drop *drop;
+   unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
+
assert(priv->pd);
assert(priv->ctx);
rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
@@ -1007,6 +1012,15 @@ struct mlx5_flow_action {
rte_flow->qp = priv->flow_drop_queue->qp;
if (!priv->started)
return rte_flow;
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+   drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+   *drop = (struct ibv_exp_flow_spec_action_drop){
+   .type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
+   .size = size,
+   };
+   ++flow->ibv_attr->num_of_specs;
+   flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
 rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
@@ -1370,7 +1384,9 @@ struct rte_flow *
 priv_flow_create_drop_queue(struct priv *priv)
 {
struct rte_flow_drop *fdq = NULL;
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
unsigned int i;
+#endif
 
assert(priv->pd);
assert(priv->ctx);
@@ -1387,6 +1403,7 @@ struct rte_flow *
WARN("cannot allocate CQ for drop queue");
goto error;
}
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
&(struct ibv_exp_wq_init_attr){
@@ -1401,6 +1418,20 @@ struct rte_flow *
goto error;
}
}
+#else
+   fdq->wqs[0] = ibv_exp_create_wq(priv->ctx,
+   &(struct ibv_exp_wq_init_attr){
+   .wq_type = IBV_EXP_WQT_RQ,
+   .max_recv_wr = 1,
+   .max_recv_sge = 1,
+   .pd = priv->pd,
+   .cq = fdq->cq,
+   });
+   if (!fdq->wqs[0]) {
+   WARN("cannot allocate WQ for drop queue");
+   goto error;
+   }
+#endif
fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
&(struct ibv_exp_rwq_ind_table_init_attr){
.pd = priv->pd,
@@ -1441,10 +1472,15 @@ struct rte_flow *
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
if (fdq->wqs[i])
claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
}
+#else
+   if (fdq->wqs[0])
+   claim_zero(ibv_exp_destroy_wq(fdq->wqs[0]));
+#endif
if (fdq->cq)
claim_zero(ibv_destroy_cq(fdq->cq));
if (fdq)
@@ -1463,7 +1499,9 @@ struct rte_flow *
 priv_flow_delete_drop_queue(struct priv *priv)
 {
struct rte_flow_drop *fdq = priv->flow_drop_queue;
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
unsigned int i;
+#endif
 
if (!fdq)
return;
@@ -1471,10 +1509,15 @@ struct rte_flow *
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
for (i