[dpdk-dev] [PATCH] net/ice: fix core dumped issue in switch filter

2020-04-29 Thread Junyu Jiang
The number of queues in a queue group should be checked before
using it, to avoid dereferencing a NULL pointer. This patch fixes the issue.

Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_switch_filter.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/ice/ice_switch_filter.c 
b/drivers/net/ice/ice_switch_filter.c
index 179430136..c2762e331 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1296,6 +1296,8 @@ ice_switch_parse_action(struct ice_pf *pf,
switch (action_type) {
case RTE_FLOW_ACTION_TYPE_RSS:
act_qgrop = action->conf;
+   if (act_qgrop->queue_num <= 1)
+   goto error;
rule_info->sw_act.fltr_act =
ICE_FWD_TO_QGRP;
rule_info->sw_act.fwd_id.q_id =
-- 
2.17.1



[dpdk-dev] [PATCH v2] net/ice: fix core dumped issue in switch filter

2020-05-05 Thread Junyu Jiang
The number of queues in a queue group should be checked before
using it. This patch fixes the issue.

Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
Tested-by: Qimai Xiao 
---
 drivers/net/ice/ice_switch_filter.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/ice/ice_switch_filter.c 
b/drivers/net/ice/ice_switch_filter.c
index 179430136..c2762e331 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1296,6 +1296,8 @@ ice_switch_parse_action(struct ice_pf *pf,
switch (action_type) {
case RTE_FLOW_ACTION_TYPE_RSS:
act_qgrop = action->conf;
+   if (act_qgrop->queue_num <= 1)
+   goto error;
rule_info->sw_act.fltr_act =
ICE_FWD_TO_QGRP;
rule_info->sw_act.fwd_id.q_id =
-- 
2.17.1



[dpdk-dev] [PATCH v3] net/ice: fix RSS advanced rule invalid issue

2020-03-31 Thread Junyu Jiang
This patch moves ice_init_rss into ice_dev_configure to fix the RSS
advanced rule becoming invalid after running port stop and port start.

Fixes: 5ad3db8d4bdd ("net/ice: enable advanced RSS")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 85ef83e92..1d94d2a41 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -155,6 +155,7 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev 
*dev,
struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_init_rss(struct ice_pf *pf);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -2451,6 +2452,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
 {
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   int ret;
 
/* Initialize to TRUE. If any of Rx queues doesn't meet the
 * bulk allocation or vector Rx preconditions we will reset it.
@@ -2461,6 +2464,10 @@ ice_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 
+   ret = ice_init_rss(pf);
+   if (ret)
+   PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+
return 0;
 }
 
@@ -2797,12 +2804,6 @@ ice_dev_start(struct rte_eth_dev *dev)
}
}
 
-   ret = ice_init_rss(pf);
-   if (ret) {
-   PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
-   goto rx_err;
-   }
-
ice_set_rx_function(dev);
ice_set_tx_function(dev);
 
-- 
2.17.1



[dpdk-dev] [PATCH v4] net/ice: fix RSS advanced rule invalid issue

2020-04-01 Thread Junyu Jiang
This patch moves the RSS initialization from dev start to dev configure
to fix the RSS advanced rule becoming invalid after running port stop and port start.

Fixes: 5ad3db8d4bdd ("net/ice: enable advanced RSS")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 48 ++--
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 85ef83e92..4cfdbd838 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2446,24 +2446,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
 }
 
-static int
-ice_dev_configure(struct rte_eth_dev *dev)
-{
-   struct ice_adapter *ad =
-   ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-   /* Initialize to TRUE. If any of Rx queues doesn't meet the
-* bulk allocation or vector Rx preconditions we will reset it.
-*/
-   ad->rx_bulk_alloc_allowed = true;
-   ad->tx_simple_allowed = true;
-
-   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-
-   return 0;
-}
-
 static int ice_init_rss(struct ice_pf *pf)
 {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2594,6 +2576,30 @@ static int ice_init_rss(struct ice_pf *pf)
return 0;
 }
 
+static int
+ice_dev_configure(struct rte_eth_dev *dev)
+{
+   struct ice_adapter *ad =
+   ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   int ret;
+
+   /* Initialize to TRUE. If any of Rx queues doesn't meet the
+* bulk allocation or vector Rx preconditions we will reset it.
+*/
+   ad->rx_bulk_alloc_allowed = true;
+   ad->tx_simple_allowed = true;
+
+   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+   ret = ice_init_rss(pf);
+   if (ret)
+   PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+
+   return ret;
+}
+
 static void
 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
   int base_queue, int nb_queue)
@@ -2797,12 +2803,6 @@ ice_dev_start(struct rte_eth_dev *dev)
}
}
 
-   ret = ice_init_rss(pf);
-   if (ret) {
-   PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
-   goto rx_err;
-   }
-
ice_set_rx_function(dev);
ice_set_tx_function(dev);
 
-- 
2.17.1



[dpdk-dev] [PATCH v5] net/ice: fix RSS advanced rule invalid issue

2020-04-06 Thread Junyu Jiang
This patch moves the RSS initialization from dev start to dev configure
to fix the RSS advanced rule becoming invalid after running port stop and port start.

Fixes: 5ad3db8d4bdd ("net/ice: enable advanced RSS")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 50 +++-
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 85ef83e92..90a91c9c1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2446,24 +2446,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
 }
 
-static int
-ice_dev_configure(struct rte_eth_dev *dev)
-{
-   struct ice_adapter *ad =
-   ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-   /* Initialize to TRUE. If any of Rx queues doesn't meet the
-* bulk allocation or vector Rx preconditions we will reset it.
-*/
-   ad->rx_bulk_alloc_allowed = true;
-   ad->tx_simple_allowed = true;
-
-   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-
-   return 0;
-}
-
 static int ice_init_rss(struct ice_pf *pf)
 {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2594,6 +2576,32 @@ static int ice_init_rss(struct ice_pf *pf)
return 0;
 }
 
+static int
+ice_dev_configure(struct rte_eth_dev *dev)
+{
+   struct ice_adapter *ad =
+   ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   int ret;
+
+   /* Initialize to TRUE. If any of Rx queues doesn't meet the
+* bulk allocation or vector Rx preconditions we will reset it.
+*/
+   ad->rx_bulk_alloc_allowed = true;
+   ad->tx_simple_allowed = true;
+
+   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+   ret = ice_init_rss(pf);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+   return ret;
+   }
+
+   return 0;
+}
+
 static void
 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
   int base_queue, int nb_queue)
@@ -2797,12 +2805,6 @@ ice_dev_start(struct rte_eth_dev *dev)
}
}
 
-   ret = ice_init_rss(pf);
-   if (ret) {
-   PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
-   goto rx_err;
-   }
-
ice_set_rx_function(dev);
ice_set_tx_function(dev);
 
-- 
2.17.1



[dpdk-dev] [PATCH] examples/vmdq: fix the output of pools/queues

2020-02-26 Thread Junyu Jiang
To match the pools/queues configuration, the pools/queues output
should start from the VMDQ base queue. This patch fixes the issue.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 examples/vmdq/main.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 00920..592f9bc62 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -441,10 +441,11 @@ update_mac_address(struct rte_mbuf *m, unsigned dst_port)
 static void
 sighup_handler(int signum)
 {
-   unsigned q;
-   for (q = 0; q < num_queues; q++) {
-   if (q % (num_queues/num_pools) == 0)
-   printf("\nPool %u: ", q/(num_queues/num_pools));
+   unsigned q = vmdq_queue_base;;
+   for (; q < num_queues; q++) {
+   if (q % (num_vmdq_queues / num_pools) == 0)
+   printf("\nPool %u: ", (q - vmdq_queue_base) /
+  (num_vmdq_queues / num_pools));
printf("%lu ", rxPackets[q]);
}
printf("\nFinished handling signal %d\n", signum);
-- 
2.17.1



[dpdk-dev] [PATCH v2] examples/vmdq: fix the output of pools/queues

2020-02-26 Thread Junyu Jiang
To match the pools/queues configuration, the pools/queues output
should start from the VMDQ base queue. This patch fixes the issue.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 examples/vmdq/main.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 00920..dcba3a708 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -441,10 +441,11 @@ update_mac_address(struct rte_mbuf *m, unsigned dst_port)
 static void
 sighup_handler(int signum)
 {
-   unsigned q;
-   for (q = 0; q < num_queues; q++) {
-   if (q % (num_queues/num_pools) == 0)
-   printf("\nPool %u: ", q/(num_queues/num_pools));
+   unsigned int q = vmdq_queue_base;
+   for (; q < num_queues; q++) {
+   if (q % (num_vmdq_queues / num_pools) == 0)
+   printf("\nPool %u: ", (q - vmdq_queue_base) /
+  (num_vmdq_queues / num_pools));
printf("%lu ", rxPackets[q]);
}
printf("\nFinished handling signal %d\n", signum);
-- 
2.17.1



[dpdk-dev] [PATCH v3] examples/vmdq: fix the output of pools/queues

2020-03-01 Thread Junyu Jiang
To match the pools/queues configuration, the pools/queues output
should start from the VMDQ base queue. This patch fixes the issue.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 examples/vmdq/main.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 00920..5ab3427c0 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -441,10 +441,11 @@ update_mac_address(struct rte_mbuf *m, unsigned dst_port)
 static void
 sighup_handler(int signum)
 {
-   unsigned q;
-   for (q = 0; q < num_queues; q++) {
-   if (q % (num_queues/num_pools) == 0)
-   printf("\nPool %u: ", q/(num_queues/num_pools));
+   unsigned int q = vmdq_queue_base;
+   for (; q < num_queues; q++) {
+   if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0)
+   printf("\nPool %u: ", (q - vmdq_queue_base) /
+  (num_vmdq_queues / num_pools));
printf("%lu ", rxPackets[q]);
}
printf("\nFinished handling signal %d\n", signum);
-- 
2.17.1



[dpdk-dev] [PATCH] examples/vmdq: fix RSS configuration

2020-03-03 Thread Junyu Jiang
So that all queues of the pools can receive packets,
add an enable-rss argument to change the RSS configuration.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 examples/vmdq/main.c | 39 ++-
 1 file changed, 34 insertions(+), 5 deletions(-)

diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 00920..98032e6a3 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -59,6 +59,7 @@ static uint32_t enabled_port_mask;
 /* number of pools (if user does not specify any, 8 by default */
 static uint32_t num_queues = 8;
 static uint32_t num_pools = 8;
+static uint8_t rss_enable;
 
 /* empty vmdq configuration structure. Filled in programatically */
 static const struct rte_eth_conf vmdq_conf_default = {
@@ -143,6 +144,13 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t 
num_pools)
(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
(void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
+   if (rss_enable) {
+   eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+   eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
+   ETH_RSS_UDP |
+   ETH_RSS_TCP |
+   ETH_RSS_SCTP;
+   }
return 0;
 }
 
@@ -164,6 +172,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
uint16_t q;
uint16_t queues_per_pool;
uint32_t max_nb_pools;
+   uint64_t rss_hf_tmp;
 
/*
 * The max pool number from dev_info will be used to validate the pool
@@ -209,6 +218,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
if (!rte_eth_dev_is_valid_port(port))
return -1;
 
+   rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
+   port_conf.rx_adv_conf.rss_conf.rss_hf &=
+   dev_info.flow_type_rss_offloads;
+   if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
+   printf("Port %u modified RSS hash function based on hardware 
support,"
+   "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+   port,
+   rss_hf_tmp,
+   port_conf.rx_adv_conf.rss_conf.rss_hf);
+   }
+
/*
 * Though in this example, we only receive packets from the first queue
 * of each pool and send packets through first rte_lcore_count() tx
@@ -363,7 +383,8 @@ static void
 vmdq_usage(const char *prgname)
 {
printf("%s [EAL options] -- -p PORTMASK]\n"
-   "  --nb-pools NP: number of pools\n",
+   "  --nb-pools NP: number of pools\n"
+   "  --enable-rss: enable RSS (disabled by default)\n",
   prgname);
 }
 
@@ -377,6 +398,7 @@ vmdq_parse_args(int argc, char **argv)
const char *prgname = argv[0];
static struct option long_option[] = {
{"nb-pools", required_argument, NULL, 0},
+   {"enable-rss", 0, NULL, 0},
{NULL, 0, 0, 0}
};
 
@@ -394,11 +416,18 @@ vmdq_parse_args(int argc, char **argv)
}
break;
case 0:
-   if (vmdq_parse_num_pools(optarg) == -1) {
-   printf("invalid number of pools\n");
-   vmdq_usage(prgname);
-   return -1;
+   if (!strcmp(long_option[option_index].name,
+   "nb-pools")) {
+   if (vmdq_parse_num_pools(optarg) == -1) {
+   printf("invalid number of pools\n");
+   vmdq_usage(prgname);
+   return -1;
+   }
}
+
+   if (!strcmp(long_option[option_index].name,
+   "enable-rss"))
+   rss_enable = 1;
break;
 
default:
-- 
2.17.1



[dpdk-dev] [PATCH] net/ice: support based RSS configure

2020-06-09 Thread Junyu Jiang
Enable/disable RSS for the corresponding flow
based on the user's requirement.

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 181 ++-
 1 file changed, 115 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index d5110c439..ee88cb49e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2436,6 +2436,103 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
 }
 
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
+{
+   struct ice_hw *hw = ICE_PF_TO_HW(pf);
+   struct ice_vsi *vsi = pf->main_vsi;
+   int ret;
+
+   /**
+* configure RSS for IPv4 with input set IPv4 src/dst
+* configure RSS for IPv6 with input set IPv6 src/dst
+*/
+   if (rss_hf & ETH_RSS_IP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+   __func__, ret);
+
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+   /**
+*configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst
+*configure RSS for udp4 with input set IP src/dst, UDP src/dst
+*/
+   if (rss_hf & ETH_RSS_UDP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
+   __func__, ret);
+
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+   /**
+* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst
+* configure RSS for tcp4 with input set IP src/dst, TCP src/dst
+*/
+   if (rss_hf & ETH_RSS_TCP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
+   __func__, ret);
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+   /**
+* configure RSS for sctp6 with input set IPv6 src/dst
+* configure RSS for sctp4 with input set IP src/dst
+*/
+   if (rss_hf & ETH_RSS_SCTP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
+   __func__, ret);
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* configure RSS for gtpu with input set TEID */
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
+ ICE_FLOW_SEG_HDR_GTPU_IP, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d",
+   __func__, ret);
+   /**
+* configure RSS for pppoe/pppod with input set
+* Source MAC and Session ID
+*/
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_PPPOE_SESS_ID_ETH,
+ ICE_FLOW_SEG_HDR_PPPOE, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
+ 

[dpdk-dev] [PATCH] net/iavf: fix RSS RETA settings invalid

2020-06-19 Thread Junyu Jiang
This patch moves the RSS initialization from dev start to
dev configure, to fix the issue that the RSS redirection table
cannot be kept after restarting the port.

Fixes: 69dd4c3d0898 ("net/avf: enable queue and device")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/iavf/iavf_ethdev.c | 71 +-
 1 file changed, 35 insertions(+), 36 deletions(-)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 2b1066b0a..5e79a2d03 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -136,34 +136,6 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
.filter_ctrl= iavf_dev_filter_ctrl,
 };
 
-static int
-iavf_dev_configure(struct rte_eth_dev *dev)
-{
-   struct iavf_adapter *ad =
-   IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-   struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(ad);
-   struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
-
-   ad->rx_bulk_alloc_allowed = true;
-   /* Initialize to TRUE. If any of Rx queues doesn't meet the
-* vector Rx/Tx preconditions, it will be reset.
-*/
-   ad->rx_vec_allowed = true;
-   ad->tx_vec_allowed = true;
-
-   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-
-   /* Vlan stripping setting */
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
-   if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-   iavf_enable_vlan_strip(ad);
-   else
-   iavf_disable_vlan_strip(ad);
-   }
-   return 0;
-}
-
 static int
 iavf_init_rss(struct iavf_adapter *adapter)
 {
@@ -220,6 +192,41 @@ iavf_init_rss(struct iavf_adapter *adapter)
return 0;
 }
 
+static int
+iavf_dev_configure(struct rte_eth_dev *dev)
+{
+   struct iavf_adapter *ad =
+   IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+   struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(ad);
+   struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+   ad->rx_bulk_alloc_allowed = true;
+   /* Initialize to TRUE. If any of Rx queues doesn't meet the
+* vector Rx/Tx preconditions, it will be reset.
+*/
+   ad->rx_vec_allowed = true;
+   ad->tx_vec_allowed = true;
+
+   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+   /* Vlan stripping setting */
+   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+   if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+   iavf_enable_vlan_strip(ad);
+   else
+   iavf_disable_vlan_strip(ad);
+   }
+
+   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+   if (iavf_init_rss(ad) != 0) {
+   PMD_DRV_LOG(ERR, "configure rss failed");
+   return -1;
+   }
+   }
+   return 0;
+}
+
 static int
 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 {
@@ -440,13 +447,6 @@ iavf_dev_start(struct rte_eth_dev *dev)
return -1;
}
 
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
-   if (iavf_init_rss(adapter) != 0) {
-   PMD_DRV_LOG(ERR, "configure rss failed");
-   goto err_rss;
-   }
-   }
-
if (iavf_configure_queues(adapter) != 0) {
PMD_DRV_LOG(ERR, "configure queues failed");
goto err_queue;
@@ -475,7 +475,6 @@ iavf_dev_start(struct rte_eth_dev *dev)
 err_mac:
iavf_add_del_all_mac_addr(adapter, false);
 err_queue:
-err_rss:
return -1;
 }
 
-- 
2.17.1



[dpdk-dev] [PATCH v2] net/ice: support based RSS configure

2020-06-21 Thread Junyu Jiang
Enable/disable RSS for the corresponding flow
based on the user's requirement.

Signed-off-by: Junyu Jiang 

---
v1->v2:
remove gtpu and pppoe/pppod configuration from rss init
---
 drivers/net/ice/ice_ethdev.c | 162 +--
 1 file changed, 96 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 5a89a1955..cbe59a40e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2441,6 +2441,87 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
 }
 
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
+{
+   struct ice_hw *hw = ICE_PF_TO_HW(pf);
+   struct ice_vsi *vsi = pf->main_vsi;
+   int ret;
+
+   /**
+* configure RSS for IPv4 with input set IPv4 src/dst
+* configure RSS for IPv6 with input set IPv6 src/dst
+*/
+   if (rss_hf & ETH_RSS_IP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+   __func__, ret);
+
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+   /**
+*configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst
+*configure RSS for udp4 with input set IP src/dst, UDP src/dst
+*/
+   if (rss_hf & ETH_RSS_UDP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
+   __func__, ret);
+
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+   /**
+* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst
+* configure RSS for tcp4 with input set IP src/dst, TCP src/dst
+*/
+   if (rss_hf & ETH_RSS_TCP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
+   __func__, ret);
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+   /**
+* configure RSS for sctp6 with input set IPv6 src/dst
+* configure RSS for sctp4 with input set IP src/dst
+*/
+   if (rss_hf & ETH_RSS_SCTP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
+   __func__, ret);
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+}
+
 static int ice_init_rss(struct ice_pf *pf)
 {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2501,72 +2582,9 @@ static int ice_init_rss(struct ice_pf *pf)
(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
 
-   /* configure RSS for IPv4 with input set IPv4 src/dst */
-   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_IPV4, 0);
-   if (ret)
-   PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
-
-   /* configure RSS for IPv6 with input set

[dpdk-dev] [PATCH v3] net/ice: initialize and update RSS based on user request

2020-06-23 Thread Junyu Jiang
Initialize and update the RSS configuration based on the user request
(rte_eth_rss_conf) from dev_configure and the .rss_hash_update ops.
All previous default configuration has been removed.

Signed-off-by: Junyu Jiang 

---
v2->v3:
change the commit log
Separate ipv4 and ipv6
Remove the call of ice_rem_vsi_rss_cfg()

v1->v2:
remove gtpu and pppoe/pppod configuration from rss init
---
 drivers/net/ice/ice_ethdev.c | 169 +--
 drivers/net/ice/ice_ethdev.h |   2 +
 2 files changed, 104 insertions(+), 67 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 5a89a1955..75e2dcc72 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2441,6 +2441,100 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
 }
 
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
+{
+   struct ice_hw *hw = ICE_PF_TO_HW(pf);
+   struct ice_vsi *vsi = pf->main_vsi;
+   int ret;
+
+   /* Configure RSS for IPv4 with src/dst addr as input set */
+   if (rss_hf & ETH_RSS_IPV4) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for IPv6 with src/dst addr as input set */
+   if (rss_hf & ETH_RSS_IPV6) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for udp4 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for udp6 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for tcp4 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for tcp6 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for sctp4 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for sctp6 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FL

[dpdk-dev] [PATCH v4] net/ice: initialize and update RSS based on user request

2020-06-23 Thread Junyu Jiang
Initialize and update the RSS configuration based on the user request
(rte_eth_rss_conf) from dev_configure and the .rss_hash_update ops.
All previous default configuration has been removed.

Signed-off-by: Junyu Jiang 

---
v3->v4:
change the return value to success when the rss_hf value is updated to 0.

v2->v3:
change the commit log
Separate ipv4 and ipv6
Remove the call of ice_rem_vsi_rss_cfg()

v1->v2:
remove gtpu and pppoe/pppod configuration from rss init
---
 drivers/net/ice/ice_ethdev.c | 169 +--
 drivers/net/ice/ice_ethdev.h |   2 +
 2 files changed, 104 insertions(+), 67 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 5a89a1955..7db8b35fd 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2441,6 +2441,100 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
 }
 
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
+{
+   struct ice_hw *hw = ICE_PF_TO_HW(pf);
+   struct ice_vsi *vsi = pf->main_vsi;
+   int ret;
+
+   /* Configure RSS for IPv4 with src/dst addr as input set */
+   if (rss_hf & ETH_RSS_IPV4) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for IPv6 with src/dst addr as input set */
+   if (rss_hf & ETH_RSS_IPV6) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for udp4 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for udp6 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for tcp4 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for tcp6 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for sctp4 with src/dst addr and port as input set */
+   if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+   ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+   if (ret)
+   PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+   __func__, ret);
+   }
+
+   /* Configure RSS for sctp6 with src/dst addr and port as in

[dpdk-dev] [PATCH] net/ice: fix RSS advanced rule invalid issue

2020-03-19 Thread Junyu Jiang
This patch adds a restore function for RSS advanced rules to fix
the rules becoming invalid after running port stop and port start.

Fixes: 5ad3db8d4bdd ("net/ice: enable advanced RSS")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 47 
 1 file changed, 47 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 85ef83e92..2dd8120f1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -47,6 +47,11 @@ struct proto_xtr_ol_flag {
bool required;
 };
 
+struct ice_hash_flow_cfg {
+   bool simple_xor;
+   struct ice_rss_cfg rss_cfg;
+};
+
 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
[PROTO_XTR_VLAN] = {
.param = { .name = "ice_dynflag_proto_xtr_vlan" },
@@ -2464,6 +2469,45 @@ ice_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+ice_rss_restore(struct rte_eth_dev *dev)
+{
+   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct ice_hw *hw = ICE_PF_TO_HW(pf);
+   struct ice_vsi *vsi = pf->main_vsi;
+   struct rte_flow *p_flow;
+   struct ice_hash_flow_cfg *filter_ptr;
+   struct ice_flow_engine *engine;
+   uint32_t reg;
+   int ret;
+
+   TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+   engine = p_flow->engine;
+   if (engine->type == ICE_FLOW_ENGINE_HASH) {
+   filter_ptr = (struct ice_hash_flow_cfg *)p_flow->rule;
+   /* Enable registers for simple_xor hash function. */
+   if (filter_ptr->simple_xor == 1) {
+   reg = ICE_READ_REG(hw,
+   VSIQF_HASH_CTL(vsi->vsi_id));
+   reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
+   (2 << VSIQF_HASH_CTL_HASH_SCHEME_S);
+   ICE_WRITE_REG(hw,
+   VSIQF_HASH_CTL(vsi->vsi_id), reg);
+   } else {
+   ret = ice_add_rss_cfg(hw, vsi->idx,
+   filter_ptr->rss_cfg.hashed_flds,
+   filter_ptr->rss_cfg.packet_hdr,
+   filter_ptr->rss_cfg.symm);
+   if (ret)
+   PMD_DRV_LOG(ERR,
+   "%s restore rss fail %d",
+   __func__, ret);
+   }
+   }
+   }
+   return 0;
+}
+
 static int ice_init_rss(struct ice_pf *pf)
 {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2591,6 +2635,9 @@ static int ice_init_rss(struct ice_pf *pf)
PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
__func__, ret);
 
+   /* restore RSS configuration */
+   ice_rss_restore(dev);
+
return 0;
 }
 
-- 
2.17.1



[dpdk-dev] [PATCH v2 0/2] examples/vmdq: fix RSS configuration

2020-03-24 Thread Junyu Jiang
This patch set fixed a bug in the vmdq example
and added documentation for it.

*** BLURB HERE ***

Junyu Jiang (2):
  doc: add user guide for VMDq
  examples/vmdq: fix RSS configuration

 MAINTAINERS  |   1 +
 doc/guides/sample_app_ug/index.rst   |   1 +
 doc/guides/sample_app_ug/vmdq_forwarding.rst | 208 +++
 examples/vmdq/main.c |  39 +++-
 4 files changed, 244 insertions(+), 5 deletions(-)
 create mode 100644 doc/guides/sample_app_ug/vmdq_forwarding.rst

-- 
2.17.1



[dpdk-dev] [PATCH v2 1/2] doc: add user guide for VMDq

2020-03-24 Thread Junyu Jiang
Currently, there is no documentation for the vmdq example;
this patch added the user guide for vmdq.

Signed-off-by: Junyu Jiang 
---
 MAINTAINERS  |   1 +
 doc/guides/sample_app_ug/index.rst   |   1 +
 doc/guides/sample_app_ug/vmdq_forwarding.rst | 208 +++
 3 files changed, 210 insertions(+)
 create mode 100644 doc/guides/sample_app_ug/vmdq_forwarding.rst

diff --git a/MAINTAINERS b/MAINTAINERS
index c3785554f..1802356b0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1599,5 +1599,6 @@ M: Xiaoyun Li 
 F: examples/tep_termination/
 
 F: examples/vmdq/
+F: doc/guides/sample_app_ug/vmdq_forwarding.rst
 F: examples/vmdq_dcb/
 F: doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
diff --git a/doc/guides/sample_app_ug/index.rst 
b/doc/guides/sample_app_ug/index.rst
index ac3445147..4b16dd161 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -40,6 +40,7 @@ Sample Applications User Guides
 timer
 packet_ordering
 vmdq_dcb_forwarding
+vmdq_forwarding
 vhost
 vhost_blk
 vhost_crypto
diff --git a/doc/guides/sample_app_ug/vmdq_forwarding.rst 
b/doc/guides/sample_app_ug/vmdq_forwarding.rst
new file mode 100644
index 0..df23043d6
--- /dev/null
+++ b/doc/guides/sample_app_ug/vmdq_forwarding.rst
@@ -0,0 +1,208 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+Copyright(c) 2020 Intel Corporation.
+
+VMDQ Forwarding Sample Application
+==
+
+The VMDQ Forwarding sample application is a simple example of packet 
processing using the DPDK.
+The application performs L2 forwarding using VMDQ to divide the incoming 
traffic into queues.
+The traffic splitting is performed in hardware by the VMDQ feature of the 
Intel® 82599 and X710/XL710 Ethernet Controllers.
+
+Overview
+
+
+This sample application can be used as a starting point for developing a new 
application that is based on the DPDK and
+uses VMDQ for traffic partitioning.
+
+VMDQ filters split the incoming packets up into different "pools" - each with 
its own set of RX queues - based upon
+the MAC address and VLAN ID within the VLAN tag of the packet.
+
+All traffic is read from a single incoming port and output on another port, 
without any processing being performed.
+With Intel® 82599 NIC, for example, the traffic is split into 128 queues on 
input, where each thread of the application reads from
+multiple queues. When run with 8 threads, that is, with the -c FF option, each 
thread receives and forwards packets from 16 queues.
+
+As supplied, the sample application configures the VMDQ feature to have 32 
pools with 4 queues each.
+The Intel® 82599 10 Gigabit Ethernet Controller NIC also supports the 
splitting of traffic into 16 pools of 2 queues.
+While the Intel® X710 or XL710 Ethernet Controller NICs support many 
configurations of VMDQ pools of 4 or 8 queues each.
+And queues numbers for each VMDQ pool can be changed by setting 
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
+in config/common_* file.
+The nb-pools parameter can be passed on the command line, after the EAL 
parameters:
+
+.. code-block:: console
+
+./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
+
+where, NP can be 8, 16 or 32.
+
+In Linux* user space, the application can display statistics with the number 
of packets received on each queue.
+To have the application display the statistics, send a SIGHUP signal to the 
running application process.
+
+The VMDQ Forwarding sample application is in many ways simpler than the L2 
Forwarding application
+(see :doc:`l2_forward_real_virtual`)
+as it performs unidirectional L2 forwarding of packets from one port to a 
second port.
+No command-line options are taken by this application apart from the standard 
EAL command-line options.
+
+Compiling the Application
+-
+
+To compile the sample application see :doc:`compiling`.
+
+The application is located in the ``vmdq`` sub-directory.
+
+Running the Application
+---
+
+To run the example in a linux environment:
+
+.. code-block:: console
+
+user@target:~$ ./build/vmdq_app -l 0-3 -n 4 -- -p 0x3 --nb-pools 16
+
+Refer to the *DPDK Getting Started Guide* for general information on running 
applications and
+the Environment Abstraction Layer (EAL) options.
+
+Explanation
+---
+
+The following sections provide some explanation of the code.
+
+Initialization
+~~
+
+The EAL, driver and PCI configuration is performed largely as in the L2 
Forwarding sample application,
+as is the creation of the mbuf pool.
+See :doc:`l2_forward_real_virtual`.
+Where this example application differs is in the configuration of the NIC port 
for RX.
+
+The VMDQ hardware feature is configured at port initialization time by setting 
the appropriate values in the
+rte_eth_conf structure passed to the rte_eth_dev_configure() API.
+Initially in the application,
+a default st

[dpdk-dev] [PATCH v2 2/2] examples/vmdq: fix RSS configuration

2020-03-24 Thread Junyu Jiang
So that all queues of the pools can receive packets,
add an enable-rss argument to change the RSS configuration.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
Acked-by: Xiaoyun Li 
---
 doc/guides/sample_app_ug/vmdq_forwarding.rst |  6 +--
 examples/vmdq/main.c | 39 +---
 2 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/doc/guides/sample_app_ug/vmdq_forwarding.rst 
b/doc/guides/sample_app_ug/vmdq_forwarding.rst
index df23043d6..658d6742d 100644
--- a/doc/guides/sample_app_ug/vmdq_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_forwarding.rst
@@ -26,13 +26,13 @@ The Intel® 82599 10 Gigabit Ethernet Controller NIC also 
supports the splitting
 While the Intel® X710 or XL710 Ethernet Controller NICs support many 
configurations of VMDQ pools of 4 or 8 queues each.
 And queues numbers for each VMDQ pool can be changed by setting 
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
 in config/common_* file.
-The nb-pools parameter can be passed on the command line, after the EAL 
parameters:
+The nb-pools and enable-rss parameters can be passed on the command line, 
after the EAL parameters:
 
 .. code-block:: console
 
-./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
+./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP --enable-rss
 
-where, NP can be 8, 16 or 32.
+where, NP can be 8, 16 or 32, rss is disabled by default.
 
 In Linux* user space, the application can display statistics with the number 
of packets received on each queue.
 To have the application display the statistics, send a SIGHUP signal to the 
running application process.
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 00920..98032e6a3 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -59,6 +59,7 @@ static uint32_t enabled_port_mask;
 /* number of pools (if user does not specify any, 8 by default */
 static uint32_t num_queues = 8;
 static uint32_t num_pools = 8;
+static uint8_t rss_enable;
 
 /* empty vmdq configuration structure. Filled in programatically */
 static const struct rte_eth_conf vmdq_conf_default = {
@@ -143,6 +144,13 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t 
num_pools)
(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
(void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
+   if (rss_enable) {
+   eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+   eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
+   ETH_RSS_UDP |
+   ETH_RSS_TCP |
+   ETH_RSS_SCTP;
+   }
return 0;
 }
 
@@ -164,6 +172,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
uint16_t q;
uint16_t queues_per_pool;
uint32_t max_nb_pools;
+   uint64_t rss_hf_tmp;
 
/*
 * The max pool number from dev_info will be used to validate the pool
@@ -209,6 +218,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
if (!rte_eth_dev_is_valid_port(port))
return -1;
 
+   rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
+   port_conf.rx_adv_conf.rss_conf.rss_hf &=
+   dev_info.flow_type_rss_offloads;
+   if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
+   printf("Port %u modified RSS hash function based on hardware 
support,"
+   "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+   port,
+   rss_hf_tmp,
+   port_conf.rx_adv_conf.rss_conf.rss_hf);
+   }
+
/*
 * Though in this example, we only receive packets from the first queue
 * of each pool and send packets through first rte_lcore_count() tx
@@ -363,7 +383,8 @@ static void
 vmdq_usage(const char *prgname)
 {
printf("%s [EAL options] -- -p PORTMASK]\n"
-   "  --nb-pools NP: number of pools\n",
+   "  --nb-pools NP: number of pools\n"
+   "  --enable-rss: enable RSS (disabled by default)\n",
   prgname);
 }
 
@@ -377,6 +398,7 @@ vmdq_parse_args(int argc, char **argv)
const char *prgname = argv[0];
static struct option long_option[] = {
{"nb-pools", required_argument, NULL, 0},
+   {"enable-rss", 0, NULL, 0},
{NULL, 0, 0, 0}
};
 
@@ -394,11 +416,18 @@ vmdq_parse_args(int argc, char **argv)
}
break;
case 0:
-   if (vmdq_parse_num_pools(optarg) == -1) {
-   printf("invalid number of poo

[dpdk-dev] [PATCH v2] net/ice: fix RSS advanced rule invalid issue

2020-03-25 Thread Junyu Jiang
This patch added a restore function for RSS advanced rules to fix
the rules becoming invalid after running port stop and port start.

Fixes: 5ad3db8d4bdd ("net/ice: enable advanced RSS")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 47 
 1 file changed, 47 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 85ef83e92..2dd8120f1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -47,6 +47,11 @@ struct proto_xtr_ol_flag {
bool required;
 };
 
+struct ice_hash_flow_cfg {
+   bool simple_xor;
+   struct ice_rss_cfg rss_cfg;
+};
+
 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
[PROTO_XTR_VLAN] = {
.param = { .name = "ice_dynflag_proto_xtr_vlan" },
@@ -2464,6 +2469,45 @@ ice_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+ice_rss_restore(struct rte_eth_dev *dev)
+{
+   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct ice_hw *hw = ICE_PF_TO_HW(pf);
+   struct ice_vsi *vsi = pf->main_vsi;
+   struct rte_flow *p_flow;
+   struct ice_hash_flow_cfg *filter_ptr;
+   struct ice_flow_engine *engine;
+   uint32_t reg;
+   int ret;
+
+   TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+   engine = p_flow->engine;
+   if (engine->type == ICE_FLOW_ENGINE_HASH) {
+   filter_ptr = (struct ice_hash_flow_cfg *)p_flow->rule;
+   /* Enable registers for simple_xor hash function. */
+   if (filter_ptr->simple_xor == 1) {
+   reg = ICE_READ_REG(hw,
+   VSIQF_HASH_CTL(vsi->vsi_id));
+   reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
+   (2 << VSIQF_HASH_CTL_HASH_SCHEME_S);
+   ICE_WRITE_REG(hw,
+   VSIQF_HASH_CTL(vsi->vsi_id), reg);
+   } else {
+   ret = ice_add_rss_cfg(hw, vsi->idx,
+   filter_ptr->rss_cfg.hashed_flds,
+   filter_ptr->rss_cfg.packet_hdr,
+   filter_ptr->rss_cfg.symm);
+   if (ret)
+   PMD_DRV_LOG(ERR,
+   "%s restore rss fail %d",
+   __func__, ret);
+   }
+   }
+   }
+   return 0;
+}
+
 static int ice_init_rss(struct ice_pf *pf)
 {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2591,6 +2635,9 @@ static int ice_init_rss(struct ice_pf *pf)
PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
__func__, ret);
 
+   /* restore RSS configuration */
+   ice_rss_restore(dev);
+
return 0;
 }
 
-- 
2.17.1



[dpdk-dev] [PATCH v2] net/ice: fix RSS advanced rule invalid issue

2020-03-26 Thread Junyu Jiang
This patch added a restore function for RSS advanced rules to fix
the rules becoming invalid after running port stop and port start.

Fixes: 5ad3db8d4bdd ("net/ice: enable advanced RSS")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 47 
 1 file changed, 47 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 85ef83e92..c2ee37c59 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -47,6 +47,11 @@ struct proto_xtr_ol_flag {
bool required;
 };
 
+struct ice_hash_flow_cfg {
+   bool simple_xor;
+   struct ice_rss_cfg rss_cfg;
+};
+
 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
[PROTO_XTR_VLAN] = {
.param = { .name = "ice_dynflag_proto_xtr_vlan" },
@@ -2464,6 +2469,45 @@ ice_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+ice_rss_restore(struct rte_eth_dev *dev)
+{
+   struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+   struct ice_vsi *vsi = pf->main_vsi;
+   struct rte_flow *p_flow;
+   struct ice_hash_flow_cfg *filter_ptr;
+   struct ice_flow_engine *engine;
+   uint32_t reg;
+   int ret;
+
+   TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+   engine = p_flow->engine;
+   if (engine->type == ICE_FLOW_ENGINE_HASH) {
+   filter_ptr = (struct ice_hash_flow_cfg *)p_flow->rule;
+   /* Enable registers for simple_xor hash function. */
+   if (filter_ptr->simple_xor == 1) {
+   reg = ICE_READ_REG(hw,
+   VSIQF_HASH_CTL(vsi->vsi_id));
+   reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
+   (2 << VSIQF_HASH_CTL_HASH_SCHEME_S);
+   ICE_WRITE_REG(hw,
+   VSIQF_HASH_CTL(vsi->vsi_id), reg);
+   } else {
+   ret = ice_add_rss_cfg(hw, vsi->idx,
+   filter_ptr->rss_cfg.hashed_flds,
+   filter_ptr->rss_cfg.packet_hdr,
+   filter_ptr->rss_cfg.symm);
+   if (ret)
+   PMD_DRV_LOG(ERR,
+   "%s restore rss fail %d",
+   __func__, ret);
+   }
+   }
+   }
+   return 0;
+}
+
 static int ice_init_rss(struct ice_pf *pf)
 {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
@@ -2591,6 +2635,9 @@ static int ice_init_rss(struct ice_pf *pf)
PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
__func__, ret);
 
+   /* restore RSS configuration */
+   ice_rss_restore(dev);
+
return 0;
 }
 
-- 
2.17.1



[dpdk-dev] [PATCH] net/ixgbe: enable jumbo frame for VF

2019-12-02 Thread Junyu Jiang
Enable jumbo frame for VF by configuring DPDK PF.

Signed-off-by: Junyu Jiang 
---
 drivers/net/ixgbe/ixgbe_pf.c | 13 +
 1 file changed, 13 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index d0d85e138..66b856e11 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -544,6 +544,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused 
uint32_t vf, uint32_t *ms
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t new_mtu = msgbuf[1];
uint32_t max_frs;
+   uint32_t hlreg0;
int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 
/* X540 and X550 support jumbo frames in IOV mode */
@@ -560,6 +561,18 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused 
uint32_t vf, uint32_t *ms
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < new_mtu) {
+   hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+   if (new_mtu > RTE_ETHER_MAX_LEN) {
+   dev->data->dev_conf.rxmode.offloads |=
+   DEV_RX_OFFLOAD_JUMBO_FRAME;
+   hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+   } else {
+   dev->data->dev_conf.rxmode.offloads &=
+   ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+   hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+   }
+   IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
-- 
2.17.1



[dpdk-dev] [PATCH v2] app/testpmd: fix flow flush with invalid port

2020-10-15 Thread Junyu Jiang
There is no error info displayed when running flow flush
command with invalid port. This patch fixed the issue.

Fixes: 2a449871a12d ("app/testpmd: align behaviour of multi-port detach")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
Reviewed-by: Suanming Mou 
---
 app/test-pmd/config.c | 13 -
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index fe31a9d52..541952ab8 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1772,19 +1772,22 @@ int
 port_flow_flush(portid_t port_id)
 {
struct rte_flow_error error;
-   struct rte_port *port = &ports[port_id];
+   struct rte_port *port;
int ret = 0;
 
+   if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+   port_id == (portid_t)RTE_PORT_ALL)
+   return -EINVAL;
+
+   port = &ports[port_id];
+
if (port->flow_list == NULL)
return ret;
 
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x44, sizeof(error));
if (rte_flow_flush(port_id, &error)) {
-   ret = port_flow_complain(&error);
-   if (port_id_is_invalid(port_id, DISABLED_WARN) ||
-   port_id == (portid_t)RTE_PORT_ALL)
-   return ret;
+   port_flow_complain(&error);
}
 
while (port->flow_list) {
-- 
2.17.1



[dpdk-dev] [PATCH] net/ice: fix SCTP RSS configuration

2020-10-20 Thread Junyu Jiang
This patch configured RSS for sctp with IP address
and port as input set.

Fixes: 4717a12cfaf1 ("net/ice: initialize and update RSS based on user config")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 0056da78a..63138d3b9 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2987,7 +2987,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 
/* Configure RSS for sctp4 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
-   ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+   ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
  ICE_FLOW_SEG_HDR_SCTP |
  ICE_FLOW_SEG_HDR_IPV4 |
  ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
@@ -2998,7 +2998,7 @@ ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
 
/* Configure RSS for sctp6 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
-   ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+   ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
  ICE_FLOW_SEG_HDR_SCTP |
  ICE_FLOW_SEG_HDR_IPV6 |
  ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
-- 
2.17.1



[dpdk-dev] [PATCH 0/2] fix incorrect statistics data

2020-07-15 Thread Junyu Jiang
This patchset fixed the issue that rx_bytes and tx_bytes
overflowed past the 40-bit hardware counter limit, by extending
the accumulated counters beyond 40 bits in software.

Junyu Jiang (2):
  net/ice: fix incorrect Rx bytes statistics
  net/ice: fix incorrect Tx bytes statistics

 drivers/net/ice/ice_ethdev.c | 24 
 1 file changed, 24 insertions(+)

-- 
2.17.1



[dpdk-dev] [PATCH 2/2] net/ice: fix incorrect Tx bytes statistics

2020-07-15 Thread Junyu Jiang
This patch fixed the issue that tx_bytes overflowed
past the 40-bit hardware counter limit, by extending
the accumulated counter beyond 40 bits in software.

Fixes: a37bde56314d ("net/ice: support statistics")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index d92b6ffa1..b6b45e274 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4144,6 +4144,7 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
int idx = rte_le_to_cpu_16(vsi->vsi_id);
uint64_t old_rx_bytes = nes->rx_bytes;
+   uint64_t old_tx_bytes = nes->tx_bytes;
 
old_rx_bytes += (nes->rx_unicast + nes->rx_multicast +
 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -4189,6 +4190,9 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
/* GLV_TDPC not supported */
ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
   &oes->tx_errors, &nes->tx_errors);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (old_tx_bytes > nes->tx_bytes && vsi->offset_loaded)
+   nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
vsi->offset_loaded = true;
 
PMD_DRV_LOG(DEBUG, "** VSI[%u] stats start **",
@@ -4216,9 +4220,12 @@ ice_read_stats_registers(struct ice_pf *pf, struct 
ice_hw *hw)
struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
uint64_t old_rx_bytes = ns->eth.rx_bytes;
+   uint64_t old_tx_bytes = ns->eth.tx_bytes;
 
old_rx_bytes += (ns->eth.rx_unicast + ns->eth.rx_multicast +
 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
+   old_tx_bytes += (ns->eth.tx_unicast + ns->eth.tx_multicast +
+ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
 
/* Get statistics of struct ice_eth_stats */
ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
@@ -4273,6 +4280,9 @@ ice_read_stats_registers(struct ice_pf *pf, struct ice_hw 
*hw)
   GLPRT_BPTCL(hw->port_info->lport),
   pf->offset_loaded, &os->eth.tx_broadcast,
   &ns->eth.tx_broadcast);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (old_tx_bytes > ns->eth.tx_bytes && pf->offset_loaded)
+   ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
 
-- 
2.17.1



[dpdk-dev] [PATCH 1/2] net/ice: fix incorrect Rx bytes statistics

2020-07-15 Thread Junyu Jiang
This patch fixed the issue that rx_bytes overflowed
past the 40-bit hardware counter limit, by extending
the accumulated counter beyond 40 bits in software.

Fixes: a37bde56314d ("net/ice: support statistics")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 14 ++
 1 file changed, 14 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 3534d18ca..d92b6ffa1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4143,6 +4143,10 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
struct ice_eth_stats *nes = &vsi->eth_stats;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
int idx = rte_le_to_cpu_16(vsi->vsi_id);
+   uint64_t old_rx_bytes = nes->rx_bytes;
+
+   old_rx_bytes += (nes->rx_unicast + nes->rx_multicast +
+nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
 
ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
   vsi->offset_loaded, &oes->rx_bytes,
@@ -4156,6 +4160,9 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
   vsi->offset_loaded, &oes->rx_broadcast,
   &nes->rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (old_rx_bytes > nes->rx_bytes && vsi->offset_loaded)
+   nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -4208,6 +4215,10 @@ ice_read_stats_registers(struct ice_pf *pf, struct 
ice_hw *hw)
 {
struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
+   uint64_t old_rx_bytes = ns->eth.rx_bytes;
+
+   old_rx_bytes += (ns->eth.rx_unicast + ns->eth.rx_multicast +
+ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
 
/* Get statistics of struct ice_eth_stats */
ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
@@ -4229,6 +4240,9 @@ ice_read_stats_registers(struct ice_pf *pf, struct ice_hw 
*hw)
ice_stat_update_32(hw, PRTRPB_RDPC,
   pf->offset_loaded, &os->eth.rx_discards,
   &ns->eth.rx_discards);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (old_rx_bytes > ns->eth.rx_bytes && pf->offset_loaded)
+   ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
 
/* Workaround: CRC size should not be included in byte statistics,
 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
-- 
2.17.1



[dpdk-dev] [PATCH v2] net/ice: fix incorrect Rx/Tx bytes statistics

2020-07-16 Thread Junyu Jiang
This patch fixed the issue that rx/tx bytes overflowed
past the 40-bit hardware counter limit, by extending
the accumulated counters beyond 40 bits in software.

Fixes: a37bde56314d ("net/ice: support statistics")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 36 
 drivers/net/ice/ice_ethdev.h |  4 
 2 files changed, 40 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 3534d18ca..85aa6cfe6 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4139,6 +4139,10 @@ ice_stat_update_40(struct ice_hw *hw,
 static void
 ice_update_vsi_stats(struct ice_vsi *vsi)
 {
+   uint64_t old_rx_bytes_h = vsi->old_rx_bytes & ~ICE_40_BIT_MASK;
+   uint64_t old_rx_bytes_l = vsi->old_rx_bytes & ICE_40_BIT_MASK;
+   uint64_t old_tx_bytes_h = vsi->old_tx_bytes & ~ICE_40_BIT_MASK;
+   uint64_t old_tx_bytes_l = vsi->old_tx_bytes & ICE_40_BIT_MASK;
struct ice_eth_stats *oes = &vsi->eth_stats_offset;
struct ice_eth_stats *nes = &vsi->eth_stats;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
@@ -4156,6 +4160,13 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
   vsi->offset_loaded, &oes->rx_broadcast,
   &nes->rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (old_rx_bytes_l > nes->rx_bytes)
+   old_rx_bytes_h += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   nes->rx_bytes += old_rx_bytes_h;
+   }
+   vsi->old_rx_bytes = nes->rx_bytes;
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -4182,6 +4193,13 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
/* GLV_TDPC not supported */
ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
   &oes->tx_errors, &nes->tx_errors);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (old_tx_bytes_l > nes->tx_bytes)
+   old_tx_bytes_h += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   nes->tx_bytes += old_tx_bytes_h;
+   }
+   vsi->old_tx_bytes = nes->tx_bytes;
vsi->offset_loaded = true;
 
PMD_DRV_LOG(DEBUG, "** VSI[%u] stats start **",
@@ -4206,6 +4224,10 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
 static void
 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
 {
+   uint64_t old_rx_bytes_h = pf->old_rx_bytes & ~ICE_40_BIT_MASK;
+   uint64_t old_rx_bytes_l = pf->old_rx_bytes & ICE_40_BIT_MASK;
+   uint64_t old_tx_bytes_h = pf->old_tx_bytes & ~ICE_40_BIT_MASK;
+   uint64_t old_tx_bytes_l = pf->old_tx_bytes & ICE_40_BIT_MASK;
struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
 
@@ -4229,6 +4251,13 @@ ice_read_stats_registers(struct ice_pf *pf, struct 
ice_hw *hw)
ice_stat_update_32(hw, PRTRPB_RDPC,
   pf->offset_loaded, &os->eth.rx_discards,
   &ns->eth.rx_discards);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (old_rx_bytes_l > ns->eth.rx_bytes)
+   old_rx_bytes_h += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   ns->eth.rx_bytes += old_rx_bytes_h;
+   }
+   pf->old_rx_bytes = ns->eth.rx_bytes;
 
/* Workaround: CRC size should not be included in byte statistics,
 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
@@ -4259,6 +4288,13 @@ ice_read_stats_registers(struct ice_pf *pf, struct 
ice_hw *hw)
   GLPRT_BPTCL(hw->port_info->lport),
   pf->offset_loaded, &os->eth.tx_broadcast,
   &ns->eth.tx_broadcast);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (old_tx_bytes_l > ns->eth.tx_bytes)
+   old_tx_bytes_h += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   ns->eth.tx_bytes += old_tx_bytes_h;
+   }
+   pf->old_tx_bytes = ns->eth.tx_bytes;
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
 
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 2bff735ca..69fd35b47 100644
--- a/drivers/net/ice/ice_et

[dpdk-dev] [PATCH v3] net/ice: fix incorrect Rx/Tx bytes statistics

2020-07-21 Thread Junyu Jiang
This patch fixed the issue that rx/tx bytes overflowed
past the 40-bit hardware counter limit, by extending
the accumulated counters beyond 40 bits in software.

Fixes: a37bde56314d ("net/ice: support statistics")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c | 28 
 drivers/net/ice/ice_ethdev.h |  7 +++
 2 files changed, 35 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 3534d18ca..950c27094 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4156,6 +4156,13 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
   vsi->offset_loaded, &oes->rx_broadcast,
   &nes->rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
+   nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
+   }
+   vsi->old_rx_bytes = nes->rx_bytes;
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -4182,6 +4189,13 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
/* GLV_TDPC not supported */
ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
   &oes->tx_errors, &nes->tx_errors);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
+   nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
+   }
+   vsi->old_tx_bytes = nes->tx_bytes;
vsi->offset_loaded = true;
 
PMD_DRV_LOG(DEBUG, "** VSI[%u] stats start **",
@@ -4229,6 +4243,13 @@ ice_read_stats_registers(struct ice_pf *pf, struct 
ice_hw *hw)
ice_stat_update_32(hw, PRTRPB_RDPC,
   pf->offset_loaded, &os->eth.rx_discards,
   &ns->eth.rx_discards);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
+   ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
+   }
+   pf->old_rx_bytes = ns->eth.rx_bytes;
 
/* Workaround: CRC size should not be included in byte statistics,
 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
@@ -4259,6 +4280,13 @@ ice_read_stats_registers(struct ice_pf *pf, struct 
ice_hw *hw)
   GLPRT_BPTCL(hw->port_info->lport),
   pf->offset_loaded, &os->eth.tx_broadcast,
   &ns->eth.tx_broadcast);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
+   ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+   ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
+   }
+   pf->old_tx_bytes = ns->eth.tx_bytes;
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
 
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 2bff735ca..87984ef9e 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -133,6 +133,9 @@
 #define ICE_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
 
+#define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
+#define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK)
+
 /* DDP package type */
 enum ice_pkg_type {
ICE_PKG_TYPE_UNKNOWN,
@@ -248,6 +251,8 @@ struct ice_vsi {
struct ice_eth_stats eth_stats_offset;
struct ice_eth_stats eth_stats;
bool offset_loaded;
+   uint64_t old_rx_bytes;
+   uint64_t old_tx_bytes;
 };
 
 enum proto_xtr_type {
@@ -391,6 +396,8 @@ struct ice_pf {
struct ice_parser_list perm_parser_list;
struct ice_parser_list dist_parser_list;
bool init_link_up;
+   uint64_t old_rx_bytes;
+   uint64_t old_tx_bytes;
 };
 
 #define ICE_MAX_QUEUE_NUM  2048
-- 
2.17.1



[dpdk-dev] [PATCH] app/testpmd: fix flow flush with invalid port

2020-10-14 Thread Junyu Jiang
There is no error info displayed when running flow flush
command with invalid port. This patch fixed the issue.

Fixes: 2a449871a12d ("app/testpmd: align behaviour of multi-port detach")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang 
---
 app/test-pmd/config.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index fe31a9d52..cc4527386 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1775,16 +1775,17 @@ port_flow_flush(portid_t port_id)
struct rte_port *port = &ports[port_id];
int ret = 0;
 
+   if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+   port_id == (portid_t)RTE_PORT_ALL)
+   return -EINVAL;
+
if (port->flow_list == NULL)
return ret;
 
/* Poisoning to make sure PMDs update it in case of error. */
memset(&error, 0x44, sizeof(error));
if (rte_flow_flush(port_id, &error)) {
-   ret = port_flow_complain(&error);
-   if (port_id_is_invalid(port_id, DISABLED_WARN) ||
-   port_id == (portid_t)RTE_PORT_ALL)
-   return ret;
+   port_flow_complain(&error);
}
 
while (port->flow_list) {
-- 
2.17.1



[dpdk-dev] [PATCH] net/iavf: fix mismatch command

2020-09-01 Thread Junyu Jiang
The "command mismatch" warning shouldn't be triggered by
the VIRTCHNL_OP_EVENT opcode, because the VIRTCHNL_OP_EVENT
opcode is used by the PF to notify the VF of status change
events. This patch fixes the issue.

Fixes: 837c2ed86e4c ("net/iavf: return error if opcode is mismatched")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/iavf/iavf_vchnl.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 33acea54a..331018f14 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -53,8 +53,11 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t 
buf_len,
opcode, vf->cmd_retval);
 
if (opcode != vf->pend_cmd) {
-   PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
-   vf->pend_cmd, opcode);
+   if (opcode != VIRTCHNL_OP_EVENT) {
+   PMD_DRV_LOG(WARNING,
+   "command mismatch, expect %u, get %u",
+   vf->pend_cmd, opcode);
+   }
return IAVF_ERR_OPCODE_MISMATCH;
}
 
-- 
2.17.1



[dpdk-dev] [PATCH v2 0/5] supports RxDID #22 and FDID

2020-09-07 Thread Junyu Jiang
This patchset supports flex Rx descriptor RxDID #22 and
FDID offload in vPMD. Remove devargs "flow-mark-support".

v2:
-Reorder patches.

Guinan Sun (4):
  net/ice: add flow director enabled switch value
  net/ice: support flow mark in AVX path
  net/ice: support flow mark in SSE path
  net/ice: remove devargs flow-mark-support

Junyu Jiang (1):
  net/ice: support flex Rx descriptor RxDID #22

 doc/guides/nics/ice.rst   |  12 --
 drivers/net/ice/ice_ethdev.c  |  10 +-
 drivers/net/ice/ice_ethdev.h  |   3 +-
 drivers/net/ice/ice_fdir_filter.c |   9 +-
 drivers/net/ice/ice_rxtx.c|  16 +--
 drivers/net/ice/ice_rxtx.h|  72 
 drivers/net/ice/ice_rxtx_vec_avx2.c   | 162 +-
 drivers/net/ice/ice_rxtx_vec_common.h |   6 -
 drivers/net/ice/ice_rxtx_vec_sse.c| 138 +++---
 9 files changed, 371 insertions(+), 57 deletions(-)

-- 
2.17.1



[dpdk-dev] [PATCH v2 2/5] net/ice: add flow director enabled switch value

2020-09-07 Thread Junyu Jiang
From: Guinan Sun 

The commit adds an fdir_enabled flag into the ice_adapter structure
to identify if the fdir id is active. The Rx data path can benefit if
fdir id parsing is not needed, especially in the vector path.

Signed-off-by: Guinan Sun 
---
 drivers/net/ice/ice_ethdev.h  |  2 ++
 drivers/net/ice/ice_fdir_filter.c |  9 -
 drivers/net/ice/ice_rxtx.h| 30 ++
 3 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 393dfeab1..df0d65d8d 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -285,6 +285,7 @@ struct ice_fdir_filter_conf {
struct rte_flow_action_count act_count;
 
uint64_t input_set;
+   uint32_t mark_flag;
 };
 
 #define ICE_MAX_FDIR_FILTER_NUM	(1024 * 16)
@@ -464,6 +465,7 @@ struct ice_adapter {
bool is_safe_mode;
struct ice_devargs devargs;
enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
+   uint16_t fdir_ref_cnt;
 };
 
 struct ice_vsi_vlan_pvid_info {
diff --git a/drivers/net/ice/ice_fdir_filter.c 
b/drivers/net/ice/ice_fdir_filter.c
index 745d7291a..e496c4d0a 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1329,6 +1329,9 @@ ice_fdir_create_filter(struct ice_adapter *ad,
goto free_counter;
}
 
+   if (filter->mark_flag == 1)
+   ice_fdir_rx_proc_enable(ad, 1);
+
rte_memcpy(entry, filter, sizeof(*entry));
ret = ice_fdir_entry_insert(pf, entry, &key);
if (ret) {
@@ -1401,6 +1404,10 @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
}
 
ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
+
+   if (filter->mark_flag == 1)
+   ice_fdir_rx_proc_enable(ad, 0);
+
flow->rule = NULL;
 
rte_free(filter);
@@ -1573,7 +1580,7 @@ ice_fdir_parse_action(struct ice_adapter *ad,
break;
case RTE_FLOW_ACTION_TYPE_MARK:
mark_num++;
-
+   filter->mark_flag = 1;
mark_spec = actions->conf;
filter->input.fltr_id = mark_spec->id;
filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e21ba152d..69d6e0b8b 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -70,6 +70,7 @@ struct ice_rx_queue {
 
uint8_t port_id; /* device port ID */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+   uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */
uint16_t queue_id; /* RX queue index */
uint16_t reg_idx; /* RX queue register index */
uint8_t drop_en; /* if not 0, set register bit */
@@ -245,4 +246,33 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct 
rte_mbuf **tx_pkts,
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 
+#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
+   int i; \
+   for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+   struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+   if (!rxq) \
+   continue; \
+   rxq->fdir_enabled = on; \
+   } \
+   PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director Rx processing in data path. */
+static inline
+void ice_fdir_rx_proc_enable(struct ice_adapter *ad, bool on)
+{
+   if (on) {
+   /* enable flow director processing */
+   FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+   ad->fdir_ref_cnt++;
+   } else {
+   if (ad->fdir_ref_cnt >= 1) {
+   ad->fdir_ref_cnt--;
+
+   if (ad->fdir_ref_cnt == 0)
+   FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+   }
+   }
+}
+
 #endif /* _ICE_RXTX_H_ */
-- 
2.17.1



[dpdk-dev] [PATCH v2 1/5] net/ice: support flex Rx descriptor RxDID #22

2020-09-07 Thread Junyu Jiang
This patch supports RxDID #22 by the following changes:
-add structure and macro definition for RxDID #22,
-support RxDID #22 format in normal path,
-change RSS hash parsing from RxDID #22 in AVX/SSE data path.

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_rxtx.c  | 16 ++---
 drivers/net/ice/ice_rxtx.h  | 42 +
 drivers/net/ice/ice_rxtx_vec_avx2.c | 98 +++--
 drivers/net/ice/ice_rxtx_vec_sse.c  | 89 +-
 4 files changed, 218 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 2e1f06d2c..a31a976a1 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -50,7 +50,7 @@ static inline uint8_t
 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 {
static uint8_t rxdid_map[] = {
-   [PROTO_XTR_NONE]  = ICE_RXDID_COMMS_GENERIC,
+   [PROTO_XTR_NONE]  = ICE_RXDID_COMMS_OVS,
[PROTO_XTR_VLAN]  = ICE_RXDID_COMMS_AUX_VLAN,
[PROTO_XTR_IPV4]  = ICE_RXDID_COMMS_AUX_IPV4,
[PROTO_XTR_IPV6]  = ICE_RXDID_COMMS_AUX_IPV6,
@@ -59,7 +59,7 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
};
 
return xtr_type < RTE_DIM(rxdid_map) ?
-   rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
+   rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
 static enum ice_status
@@ -72,7 +72,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
enum ice_status err;
uint16_t buf_size, len;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-   uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+   uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
 
/* Set buffer size as the head split is disabled. */
@@ -1309,7 +1309,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union 
ice_rx_flex_desc *rxdp)
 
 static void
 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
-volatile struct ice_32b_rx_flex_desc_comms *desc)
+volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
 {
uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
uint32_t metadata;
@@ -1338,8 +1338,9 @@ static inline void
 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
  volatile union ice_rx_flex_desc *rxdp)
 {
-   volatile struct ice_32b_rx_flex_desc_comms *desc =
-   (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+   volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
+   (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
uint16_t stat_err;
 
stat_err = rte_le_to_cpu_16(desc->status_error0);
@@ -1347,13 +1348,14 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
+#endif
 
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
ice_rxd_to_proto_xtr(mb, desc);
 #endif
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 2fdcfb7d0..e21ba152d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -38,6 +38,8 @@
 
 #define ICE_FDIR_PKT_LEN   512
 
+#define ICE_RXDID_COMMS_OVS	22
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 
@@ -135,6 +137,46 @@ union ice_tx_offload {
};
 };
 
+/* Rx Flex Descriptor for Comms Package Profile
+ * RxDID Profile ID 22 (swap Hash and FlowID)
+ * Flex-field 0: Flow ID lower 16-bits
+ * Flex-field 1: Flow ID upper 16-bits
+ * Flex-field 2: RSS hash lower 16-bits
+ * Flex-field 3: RSS hash upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct ice_32b_rx_flex_desc_comms_ovs {
+   /* Qword 0 */
+   u8 rxdid;
+   u8 mir_id_umb_cast;
+   __le16 ptype_flexi_flags0;
+   __le16 pkt_len;
+   __le16 hdr_len_sph_flex_flags1;
+
+   /* Qword 1 */
+   __le16 status_error0;
+   __le16 l2tag1;
+   __le32 flow_id;
+
+   /* Qword 2 */
+   __le16 status_error1;
+   u8 flexi_flags2;
+   u8 ts_low;
+   __le16 l2tag2_1st;
+   __le16 l2tag2_2nd;
+
+   /* Qword 3 */
+   __le32 rss_hash;
+   union {
+   struct {
+   __le16 aux0;
+   __le16 aux1;
+   } flex;
+   __le32 ts_high;
+   } flex_ts;
+};
+
 int ice_rx_queue_setup(struct rte_eth_dev *dev,
   uint16_t qu

[dpdk-dev] [PATCH v2 3/5] net/ice: support flow mark in AVX path

2020-09-07 Thread Junyu Jiang
From: Guinan Sun 

Support Flow Director mark ID parsing from Flex
Rx descriptor in AVX path.

Signed-off-by: Guinan Sun 
---
 drivers/net/ice/ice_rxtx_vec_avx2.c | 64 -
 1 file changed, 63 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c 
b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 07d129e3f..70e4b76db 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -132,6 +132,25 @@ ice_rxq_rearm(struct ice_rx_queue *rxq)
ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
 }
 
+static inline __m256i
+ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+   const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+   PKT_RX_FDIR_ID);
+   /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+   const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+   __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+   fdir_mis_mask);
+   /* this XOR op results to bit-reverse the fdir_mask */
+   fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
+   const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
+
+   return fdir_flags;
+}
+
 static inline uint16_t
 _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf 
**rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
@@ -459,9 +478,51 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, 
struct rte_mbuf **rx_pkts,
rss_vlan_flag_bits);
 
/* merge flags */
-   const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+   __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
rss_vlan_flags);
 
+   if (rxq->fdir_enabled) {
+   const __m256i fdir_id4_7 =
+   _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+   const __m256i fdir_id0_3 =
+   _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+   const __m256i fdir_id0_7 =
+   _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+   const __m256i fdir_flags =
+   ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
+
+   /* merge with fdir_flags */
+   mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+   /* write to mbuf: have to use scalar store here */
+   rx_pkts[i + 0]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 3);
+
+   rx_pkts[i + 1]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 7);
+
+   rx_pkts[i + 2]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 2);
+
+   rx_pkts[i + 3]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 6);
+
+   rx_pkts[i + 4]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 1);
+
+   rx_pkts[i + 5]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 5);
+
+   rx_pkts[i + 6]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 0);
+
+   rx_pkts[i + 7]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 4);
+   } /* if() on fdir_enabled */
+
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
/**
 * needs to load 2nd 16B of each desc for RSS hash parsing,
@@ -551,6 +612,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, 
struct rte_mbuf **rx_pkts,
mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
} /* if() on RSS hash parsing */
 #endif
+
/**
 * At this point, we have the 8 sets of flags in the low 16-bits
 * of each 32-bit value in vlan0.
-- 
2.17.1



[dpdk-dev] [PATCH v2 5/5] net/ice: remove devargs flow-mark-support

2020-09-07 Thread Junyu Jiang
From: Guinan Sun 

Remove devargs "flow-mark-support".

Signed-off-by: Guinan Sun 
---
 doc/guides/nics/ice.rst   | 12 
 drivers/net/ice/ice_ethdev.c  | 10 +-
 drivers/net/ice/ice_ethdev.h  |  1 -
 drivers/net/ice/ice_rxtx_vec_common.h |  6 --
 4 files changed, 1 insertion(+), 28 deletions(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 9a9f4a6bb..64b1b13a6 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -75,18 +75,6 @@ Runtime Config Options
 
 -w 80:00.0,pipeline-mode-support=1
 
-- ``Flow Mark Support`` (default ``0``)
-
-  This is a hint to the driver to select the data path that supports flow mark 
extraction
-  by default.
-  NOTE: This is an experimental devarg, it will be removed when any of below 
conditions
-  is ready.
-  1) all data paths support flow mark (currently vPMD does not)
-  2) a new offload like RTE_DEV_RX_OFFLOAD_FLOW_MARK be introduced as a 
standard way to hint.
-  Example::
-
--w 80:00.0,flow-mark-support=1
-
 - ``Protocol extraction for per queue``
 
   Configure the RX queues to do protocol extraction into mbuf for protocol
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8d435e889..cb6882f70 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -23,13 +23,11 @@
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
-#define ICE_FLOW_MARK_SUPPORT_ARG  "flow-mark-support"
 #define ICE_PROTO_XTR_ARG "proto_xtr"
 
 static const char * const ice_valid_args[] = {
ICE_SAFE_MODE_SUPPORT_ARG,
ICE_PIPELINE_MODE_SUPPORT_ARG,
-   ICE_FLOW_MARK_SUPPORT_ARG,
ICE_PROTO_XTR_ARG,
NULL
 };
@@ -1985,11 +1983,6 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
if (ret)
goto bail;
 
-   ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
-&parse_bool, &ad->devargs.flow_mark_support);
-   if (ret)
-   goto bail;
-
 bail:
rte_kvargs_free(kvlist);
return ret;
@@ -5131,8 +5124,7 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | 
uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
  ICE_PROTO_XTR_ARG 
"=[queue:]"
  ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
- ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
- ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
+ ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
 
 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index df0d65d8d..d441350e0 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -444,7 +444,6 @@ struct ice_devargs {
int safe_mode_support;
uint8_t proto_xtr_dflt;
int pipe_mode_support;
-   int flow_mark_support;
uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
 };
 
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h 
b/drivers/net/ice/ice_rxtx_vec_common.h
index 46e3be98a..e2019c8d6 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -270,12 +270,6 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
int i;
struct ice_rx_queue *rxq;
-   struct ice_adapter *ad =
-   ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-   /* vPMD does not support flow mark. */
-   if (ad->devargs.flow_mark_support)
-   return -1;
 
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
-- 
2.17.1



[dpdk-dev] [PATCH v2 4/5] net/ice: support flow mark in SSE path

2020-09-07 Thread Junyu Jiang
From: Guinan Sun 

Support Flow Director mark ID parsing from Flex
Rx descriptor in SSE path.

Signed-off-by: Guinan Sun 
---
 drivers/net/ice/ice_rxtx_vec_sse.c | 49 ++
 1 file changed, 49 insertions(+)

diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c 
b/drivers/net/ice/ice_rxtx_vec_sse.c
index fffb27138..965cd8b26 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -10,6 +10,25 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
+static inline __m128i
+ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+   const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
+   PKT_RX_FDIR_ID);
+   /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+   const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+   __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+   fdir_mis_mask);
+   /* this XOR op results to bit-reverse the fdir_mask */
+   fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
+   const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+   return fdir_flags;
+}
+
 static inline void
 ice_rxq_rearm(struct ice_rx_queue *rxq)
 {
@@ -159,6 +178,36 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i 
descs[4],
/* merge the flags */
flags = _mm_or_si128(flags, rss_vlan);
 
+   if (rxq->fdir_enabled) {
+   const __m128i fdir_id0_1 =
+   _mm_unpackhi_epi32(descs[0], descs[1]);
+
+   const __m128i fdir_id2_3 =
+   _mm_unpackhi_epi32(descs[2], descs[3]);
+
+   const __m128i fdir_id0_3 =
+   _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+   const __m128i fdir_flags =
+   ice_flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+   /* merge with fdir_flags */
+   flags = _mm_or_si128(flags, fdir_flags);
+
+   /* write fdir_id to mbuf */
+   rx_pkts[0]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 0);
+
+   rx_pkts[1]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 1);
+
+   rx_pkts[2]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 2);
+
+   rx_pkts[3]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 3);
+   } /* if() on fdir_enabled */
+
/**
 * At this point, we have the 4 sets of flags in the low 16-bits
 * of each 32-bit value in flags.
-- 
2.17.1



[dpdk-dev] [PATCH] net/i40e: fix incorrect byte counters

2020-09-09 Thread Junyu Jiang
This patch fixes the issue that the rx/tx byte counters
overflowed the 48-bit limitation, by enlarging the limitation.

Fixes: 4861cde46116 ("i40e: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/i40e/i40e_ethdev.c | 47 ++
 drivers/net/i40e/i40e_ethdev.h |  9 +++
 2 files changed, 56 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 11c02b188..e3d4b7f4f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3070,6 +3070,13 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
+   nes->rx_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   nes->rx_bytes += I40E_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
+   }
+   vsi->old_rx_bytes = nes->rx_bytes;
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -3096,6 +3103,13 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
+   nes->tx_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   nes->tx_bytes += I40E_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
+   }
+   vsi->old_rx_bytes = nes->rx_bytes;
vsi->offset_loaded = true;
 
PMD_DRV_LOG(DEBUG, "* VSI[%u] stats start 
***",
@@ -3168,6 +3182,24 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
pf->offset_loaded,
&pf->internal_stats_offset.tx_broadcast,
&pf->internal_stats.tx_broadcast);
+   /* enlarge the limitation when internal rx/tx bytes overflowed */
+   if (pf->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(pf->internal_old_rx_bytes) >
+   pf->internal_stats.rx_bytes)
+   pf->internal_stats.rx_bytes +=
+   (uint64_t)1 << I40E_48_BIT_WIDTH;
+   pf->internal_stats.rx_bytes +=
+   I40E_RXTX_BYTES_HIGH(pf->internal_old_rx_bytes);
+
+   if (I40E_RXTX_BYTES_LOW(pf->internal_old_tx_bytes) >
+   pf->internal_stats.tx_bytes)
+   pf->internal_stats.tx_bytes +=
+   (uint64_t)1 << I40E_48_BIT_WIDTH;
+   pf->internal_stats.tx_bytes +=
+   I40E_RXTX_BYTES_HIGH(pf->internal_old_tx_bytes);
+   }
+   pf->internal_old_rx_bytes = pf->internal_stats.rx_bytes;
+   pf->internal_old_tx_bytes = pf->internal_stats.tx_bytes;
 
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
@@ -3191,6 +3223,14 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
I40E_GLPRT_BPRCL(hw->port),
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
+   ns->eth.rx_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   ns->eth.rx_bytes += I40E_RXTX_BYTES_HIGH(pf->old_rx_bytes);
+   }
+   pf->old_rx_bytes = ns->eth.rx_bytes;
+
/* Workaround: CRC size should not be included in byte statistics,
 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
 * packet.
@@ -3249,6 +3289,13 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
I40E_GLPRT_BPTCL(hw->port),
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
+   

[dpdk-dev] [PATCH v2] net/i40e: fix incorrect byte counters

2020-09-15 Thread Junyu Jiang
This patch fixes the issue that the rx/tx byte counters
overflowed the 48-bit limitation, by enlarging the limitation.

Fixes: 4861cde46116 ("i40e: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang 
---
 drivers/net/i40e/i40e_ethdev.c | 47 ++
 drivers/net/i40e/i40e_ethdev.h |  9 +++
 2 files changed, 56 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 563f21d9d..4d4ea9861 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3073,6 +3073,13 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
+   nes->rx_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   nes->rx_bytes += I40E_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
+   }
+   vsi->old_rx_bytes = nes->rx_bytes;
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -3099,6 +3106,13 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (vsi->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
+   nes->tx_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   nes->tx_bytes += I40E_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
+   }
+   vsi->old_tx_bytes = nes->tx_bytes;
vsi->offset_loaded = true;
 
PMD_DRV_LOG(DEBUG, "* VSI[%u] stats start 
***",
@@ -3171,6 +3185,24 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
pf->offset_loaded,
&pf->internal_stats_offset.tx_broadcast,
&pf->internal_stats.tx_broadcast);
+   /* enlarge the limitation when internal rx/tx bytes overflowed */
+   if (pf->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(pf->internal_old_rx_bytes) >
+   pf->internal_stats.rx_bytes)
+   pf->internal_stats.rx_bytes +=
+   (uint64_t)1 << I40E_48_BIT_WIDTH;
+   pf->internal_stats.rx_bytes +=
+   I40E_RXTX_BYTES_HIGH(pf->internal_old_rx_bytes);
+
+   if (I40E_RXTX_BYTES_LOW(pf->internal_old_tx_bytes) >
+   pf->internal_stats.tx_bytes)
+   pf->internal_stats.tx_bytes +=
+   (uint64_t)1 << I40E_48_BIT_WIDTH;
+   pf->internal_stats.tx_bytes +=
+   I40E_RXTX_BYTES_HIGH(pf->internal_old_tx_bytes);
+   }
+   pf->internal_old_rx_bytes = pf->internal_stats.rx_bytes;
+   pf->internal_old_tx_bytes = pf->internal_stats.tx_bytes;
 
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
@@ -3194,6 +3226,14 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
I40E_GLPRT_BPRCL(hw->port),
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
+   ns->eth.rx_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   ns->eth.rx_bytes += I40E_RXTX_BYTES_HIGH(pf->old_rx_bytes);
+   }
+   pf->old_rx_bytes = ns->eth.rx_bytes;
+
/* Workaround: CRC size should not be included in byte statistics,
 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
 * packet.
@@ -3252,6 +3292,13 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
I40E_GLPRT_BPTCL(hw->port),
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
+   /* enlarge the limitation when tx_bytes overflowed */
+   if (pf->offset_loaded) {
+   if (I40E_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
+   

[dpdk-dev] [PATCH v3 1/5] net/ice: support flex Rx descriptor RxDID #22

2020-09-15 Thread Junyu Jiang
This patch supports RxDID #22 by the following changes:
-add structure and macro definition for RxDID #22.
-support RxDID #22 format in normal path.
-change RSS hash parsing from RxDID #22 in AVX/SSE data path.

Signed-off-by: Junyu Jiang 
---
 drivers/net/ice/ice_ethdev.c| 20 ++
 drivers/net/ice/ice_ethdev.h|  4 ++
 drivers/net/ice/ice_rxtx.c  | 23 ---
 drivers/net/ice/ice_rxtx.h  | 42 +
 drivers/net/ice/ice_rxtx_vec_avx2.c | 98 +++--
 drivers/net/ice/ice_rxtx_vec_sse.c  | 89 +-
 6 files changed, 249 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index c42581ea7..097b72023 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2147,6 +2147,24 @@ ice_rss_ctx_init(struct ice_pf *pf)
ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
 }
 
+static uint64_t
+ice_get_supported_rxdid(struct ice_hw *hw)
+{
+   uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
+   uint32_t regval;
+   int i;
+
+   supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
+
+   for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+   regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
+   if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+   & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+   supported_rxdid |= BIT(i);
+   }
+   return supported_rxdid;
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -2298,6 +2316,8 @@ ice_dev_init(struct rte_eth_dev *dev)
return ret;
}
 
+   pf->supported_rxdid = ice_get_supported_rxdid(hw);
+
return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 243a023e6..e8c9971fb 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -136,6 +136,9 @@
 #define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
 #define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK)
 
+/* Max number of flexible descriptor rxdid */
+#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+
 /* DDP package type */
 enum ice_pkg_type {
ICE_PKG_TYPE_UNKNOWN,
@@ -435,6 +438,7 @@ struct ice_pf {
bool init_link_up;
uint64_t old_rx_bytes;
uint64_t old_tx_bytes;
+   uint64_t supported_rxdid; /* bitmap for supported RXDID */
 };
 
 #define ICE_MAX_QUEUE_NUM  2048
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index fecb13459..fef6ad454 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -63,7 +63,7 @@ static inline uint8_t
 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 {
static uint8_t rxdid_map[] = {
-   [PROTO_XTR_NONE]  = ICE_RXDID_COMMS_GENERIC,
+   [PROTO_XTR_NONE]  = ICE_RXDID_COMMS_OVS,
[PROTO_XTR_VLAN]  = ICE_RXDID_COMMS_AUX_VLAN,
[PROTO_XTR_IPV4]  = ICE_RXDID_COMMS_AUX_IPV4,
[PROTO_XTR_IPV6]  = ICE_RXDID_COMMS_AUX_IPV6,
@@ -73,7 +73,7 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
};
 
return xtr_type < RTE_DIM(rxdid_map) ?
-   rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
+   rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
 static enum ice_status
@@ -81,12 +81,13 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 {
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+   struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size, len;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-   uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
+   uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
 
/* Set buffer size as the head split is disabled. */
@@ -151,6 +152,12 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
rxq->port_id, rxq->queue_id, rxdid);
 
+   if (!(pf->supported_rxdid & BIT(rxdid))) {
+   PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
+   rxdid);
+   return -EINVAL;
+   }
+
/* Enable Flexible Descriptors in the queue context which
 * allows this driver to select a specific receive descriptor format
 */
@@ -1338,7 +1345,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union 
ice_rx_flex_desc *rxdp)
 
 static void
 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
-volatile struct ice_32b_rx_flex_desc_comms *desc)
+volatile struct ic

[dpdk-dev] [PATCH v3 2/5] net/ice: add flow director enabled switch value

2020-09-15 Thread Junyu Jiang
From: Guinan Sun 

The patch adds fdir_enabled flag to identify if parse flow director mark ID
from flexible Rx descriptor.

Signed-off-by: Guinan Sun 
---
 drivers/net/ice/ice_ethdev.h  |  2 ++
 drivers/net/ice/ice_fdir_filter.c |  9 -
 drivers/net/ice/ice_rxtx.h| 30 ++
 3 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index e8c9971fb..366eee3b4 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -291,6 +291,7 @@ struct ice_fdir_filter_conf {
 
uint64_t input_set;
uint64_t outer_input_set; /* only for tunnel packets outer fields */
+   uint32_t mark_flag;
 };
 
 #define ICE_MAX_FDIR_FILTER_NUM(1024 * 16)
@@ -471,6 +472,7 @@ struct ice_adapter {
bool is_safe_mode;
struct ice_devargs devargs;
enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
+   uint16_t fdir_ref_cnt;
 };
 
 struct ice_vsi_vlan_pvid_info {
diff --git a/drivers/net/ice/ice_fdir_filter.c 
b/drivers/net/ice/ice_fdir_filter.c
index e0ce1efb0..175abcdd5 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1318,6 +1318,9 @@ ice_fdir_create_filter(struct ice_adapter *ad,
goto free_counter;
}
 
+   if (filter->mark_flag == 1)
+   ice_fdir_rx_parsing_enable(ad, 1);
+
rte_memcpy(entry, filter, sizeof(*entry));
ret = ice_fdir_entry_insert(pf, entry, &key);
if (ret) {
@@ -1390,6 +1393,10 @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
}
 
ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
+
+   if (filter->mark_flag == 1)
+   ice_fdir_rx_parsing_enable(ad, 0);
+
flow->rule = NULL;
 
rte_free(filter);
@@ -1562,7 +1569,7 @@ ice_fdir_parse_action(struct ice_adapter *ad,
break;
case RTE_FLOW_ACTION_TYPE_MARK:
mark_num++;
-
+   filter->mark_flag = 1;
mark_spec = actions->conf;
filter->input.fltr_id = mark_spec->id;
filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e21ba152d..9fa57b3b2 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -70,6 +70,7 @@ struct ice_rx_queue {
 
uint8_t port_id; /* device port ID */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+   uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */
uint16_t queue_id; /* RX queue index */
uint16_t reg_idx; /* RX queue register index */
uint8_t drop_en; /* if not 0, set register bit */
@@ -245,4 +246,33 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct 
rte_mbuf **tx_pkts,
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 
+#define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
+   int i; \
+   for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+   struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+   if (!rxq) \
+   continue; \
+   rxq->fdir_enabled = on; \
+   } \
+   PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director parsing from Rx descriptor in data path. */
+static inline
+void ice_fdir_rx_parsing_enable(struct ice_adapter *ad, bool on)
+{
+   if (on) {
+   /* Enable flow director parsing from Rx descriptor */
+   FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
+   ad->fdir_ref_cnt++;
+   } else {
+   if (ad->fdir_ref_cnt >= 1) {
+   ad->fdir_ref_cnt--;
+
+   if (ad->fdir_ref_cnt == 0)
+   FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
+   }
+   }
+}
+
 #endif /* _ICE_RXTX_H_ */
-- 
2.17.1



[dpdk-dev] [PATCH v3 0/5] supports RxDID #22 and FDID

2020-09-15 Thread Junyu Jiang
This patchset supports flex Rx descriptor RxDID #22 and
FDID offload in vPMD. Remove devargs "flow-mark-support".

---
v3:
* Check if package does support RXDID.
* Modify commit message.
* Rebase the patchset.
v2:
* Reorder patches.


Guinan Sun (4):
  net/ice: add flow director enabled switch value
  net/ice: support flow mark in AVX path
  net/ice: support flow mark in SSE path
  net/ice: remove devargs flow-mark-support

Junyu Jiang (1):
  net/ice: support flex Rx descriptor RxDID #22

 doc/guides/nics/ice.rst   |  12 --
 drivers/net/ice/ice_ethdev.c  |  30 +++--
 drivers/net/ice/ice_ethdev.h  |   7 +-
 drivers/net/ice/ice_fdir_filter.c |   9 +-
 drivers/net/ice/ice_rxtx.c|  23 ++--
 drivers/net/ice/ice_rxtx.h|  72 
 drivers/net/ice/ice_rxtx_vec_avx2.c   | 162 +-
 drivers/net/ice/ice_rxtx_vec_common.h |   6 -
 drivers/net/ice/ice_rxtx_vec_sse.c| 138 +++---
 9 files changed, 402 insertions(+), 57 deletions(-)

-- 
2.17.1



[dpdk-dev] [PATCH v3 5/5] net/ice: remove devargs flow-mark-support

2020-09-15 Thread Junyu Jiang
From: Guinan Sun 

Currently, all data paths already support flow mark, so remove the devargs
"flow-mark-support". The FDIR matched ID will be displayed in verbose mode
when packets match the created rule.

Signed-off-by: Guinan Sun 
---
 doc/guides/nics/ice.rst   | 12 
 drivers/net/ice/ice_ethdev.c  | 10 +-
 drivers/net/ice/ice_ethdev.h  |  1 -
 drivers/net/ice/ice_rxtx_vec_common.h |  6 --
 4 files changed, 1 insertion(+), 28 deletions(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 314198857..25a821177 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -72,18 +72,6 @@ Runtime Config Options
 
 -w 80:00.0,pipeline-mode-support=1
 
-- ``Flow Mark Support`` (default ``0``)
-
-  This is a hint to the driver to select the data path that supports flow mark 
extraction
-  by default.
-  NOTE: This is an experimental devarg, it will be removed when any of below 
conditions
-  is ready.
-  1) all data paths support flow mark (currently vPMD does not)
-  2) a new offload like RTE_DEV_RX_OFFLOAD_FLOW_MARK be introduced as a 
standard way to hint.
-  Example::
-
--w 80:00.0,flow-mark-support=1
-
 - ``Protocol extraction for per queue``
 
   Configure the RX queues to do protocol extraction into mbuf for protocol
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 097b72023..248daf25d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -23,13 +23,11 @@
 /* devargs */
 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
-#define ICE_FLOW_MARK_SUPPORT_ARG  "flow-mark-support"
 #define ICE_PROTO_XTR_ARG "proto_xtr"
 
 static const char * const ice_valid_args[] = {
ICE_SAFE_MODE_SUPPORT_ARG,
ICE_PIPELINE_MODE_SUPPORT_ARG,
-   ICE_FLOW_MARK_SUPPORT_ARG,
ICE_PROTO_XTR_ARG,
NULL
 };
@@ -2006,11 +2004,6 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
if (ret)
goto bail;
 
-   ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
-&parse_bool, &ad->devargs.flow_mark_support);
-   if (ret)
-   goto bail;
-
 bail:
rte_kvargs_free(kvlist);
return ret;
@@ -5178,8 +5171,7 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | 
uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
  ICE_PROTO_XTR_ARG 
"=[queue:]"
  ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
- ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
- ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
+ ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
 
 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 366eee3b4..37b956e2f 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -451,7 +451,6 @@ struct ice_devargs {
int safe_mode_support;
uint8_t proto_xtr_dflt;
int pipe_mode_support;
-   int flow_mark_support;
uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
 };
 
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h 
b/drivers/net/ice/ice_rxtx_vec_common.h
index 46e3be98a..e2019c8d6 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -270,12 +270,6 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
int i;
struct ice_rx_queue *rxq;
-   struct ice_adapter *ad =
-   ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-   /* vPMD does not support flow mark. */
-   if (ad->devargs.flow_mark_support)
-   return -1;
 
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
-- 
2.17.1



[dpdk-dev] [PATCH v3 3/5] net/ice: support flow mark in AVX path

2020-09-15 Thread Junyu Jiang
From: Guinan Sun 

Support flow director mark ID parsing from flexible
Rx descriptor in AVX path.

Signed-off-by: Guinan Sun 
---
 drivers/net/ice/ice_rxtx_vec_avx2.c | 64 -
 1 file changed, 63 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c 
b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 07d129e3f..70e4b76db 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -132,6 +132,25 @@ ice_rxq_rearm(struct ice_rx_queue *rxq)
ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
 }
 
+static inline __m256i
+ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0x
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+   const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+   PKT_RX_FDIR_ID);
+   /* desc->flow_id field == 0x means fdir mismatch */
+   const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+   __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+   fdir_mis_mask);
+   /* this XOR op results to bit-reverse the fdir_mask */
+   fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
+   const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
+
+   return fdir_flags;
+}
+
 static inline uint16_t
 _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf 
**rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
@@ -459,9 +478,51 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, 
struct rte_mbuf **rx_pkts,
rss_vlan_flag_bits);
 
/* merge flags */
-   const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+   __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
rss_vlan_flags);
 
+   if (rxq->fdir_enabled) {
+   const __m256i fdir_id4_7 =
+   _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+   const __m256i fdir_id0_3 =
+   _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+   const __m256i fdir_id0_7 =
+   _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+   const __m256i fdir_flags =
+   ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
+
+   /* merge with fdir_flags */
+   mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+   /* write to mbuf: have to use scalar store here */
+   rx_pkts[i + 0]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 3);
+
+   rx_pkts[i + 1]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 7);
+
+   rx_pkts[i + 2]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 2);
+
+   rx_pkts[i + 3]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 6);
+
+   rx_pkts[i + 4]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 1);
+
+   rx_pkts[i + 5]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 5);
+
+   rx_pkts[i + 6]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 0);
+
+   rx_pkts[i + 7]->hash.fdir.hi =
+   _mm256_extract_epi32(fdir_id0_7, 4);
+   } /* if() on fdir_enabled */
+
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
/**
 * needs to load 2nd 16B of each desc for RSS hash parsing,
@@ -551,6 +612,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, 
struct rte_mbuf **rx_pkts,
mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
} /* if() on RSS hash parsing */
 #endif
+
/**
 * At this point, we have the 8 sets of flags in the low 16-bits
 * of each 32-bit value in vlan0.
-- 
2.17.1



[dpdk-dev] [PATCH v3 4/5] net/ice: support flow mark in SSE path

2020-09-15 Thread Junyu Jiang
From: Guinan Sun 

Support flow director mark ID parsing from flexible
Rx descriptor in SSE path.

Signed-off-by: Guinan Sun 
---
 drivers/net/ice/ice_rxtx_vec_sse.c | 49 ++
 1 file changed, 49 insertions(+)

diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c 
b/drivers/net/ice/ice_rxtx_vec_sse.c
index fffb27138..965cd8b26 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -10,6 +10,25 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
+static inline __m128i
+ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0x
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+   RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+   const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
+   PKT_RX_FDIR_ID);
+   /* desc->flow_id field == 0x means fdir mismatch */
+   const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+   __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+   fdir_mis_mask);
+   /* this XOR op results to bit-reverse the fdir_mask */
+   fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
+   const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+   return fdir_flags;
+}
+
 static inline void
 ice_rxq_rearm(struct ice_rx_queue *rxq)
 {
@@ -159,6 +178,36 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i 
descs[4],
/* merge the flags */
flags = _mm_or_si128(flags, rss_vlan);
 
+   if (rxq->fdir_enabled) {
+   const __m128i fdir_id0_1 =
+   _mm_unpackhi_epi32(descs[0], descs[1]);
+
+   const __m128i fdir_id2_3 =
+   _mm_unpackhi_epi32(descs[2], descs[3]);
+
+   const __m128i fdir_id0_3 =
+   _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+   const __m128i fdir_flags =
+   ice_flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+   /* merge with fdir_flags */
+   flags = _mm_or_si128(flags, fdir_flags);
+
+   /* write fdir_id to mbuf */
+   rx_pkts[0]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 0);
+
+   rx_pkts[1]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 1);
+
+   rx_pkts[2]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 2);
+
+   rx_pkts[3]->hash.fdir.hi =
+   _mm_extract_epi32(fdir_id0_3, 3);
+   } /* if() on fdir_enabled */
+
/**
 * At this point, we have the 4 sets of flags in the low 16-bits
 * of each 32-bit value in flags.
-- 
2.17.1



[dpdk-dev] [PATCH v3] net/i40e: fix incorrect byte counters

2020-09-21 Thread Junyu Jiang
This patch fixes the issue that the rx/tx bytes statistics counters
overflow past the 48-bit limitation, by enlarging the limitation to 64 bits.

Fixes: 4861cde46116 ("i40e: new poll mode driver")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
 doc/guides/nics/i40e.rst   |  7 +++
 drivers/net/i40e/i40e_ethdev.c | 32 
 drivers/net/i40e/i40e_ethdev.h |  9 +
 3 files changed, 48 insertions(+)

diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index b7430f6c4..4baa58be6 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -830,3 +830,10 @@ Tx bytes affected by the link status change
 
 For firmware versions prior to 6.01 for X710 series and 3.33 for X722 series, 
the tx_bytes statistics data is affected by
 the link down event. Each time the link status changes to down, the tx_bytes 
decreases 110 bytes.
+
+RX/TX statistics may be incorrect when register overflowed
+~~
+
+The rx_bytes/tx_bytes statistics register is 48 bits long. Although this 
limitation is enlarged to 64 bits
+on the software side, there is no way to detect if the overflow occurred 
more than once. So the rx_bytes/tx_bytes
+statistics data is correct only when statistics are updated at least once between 
two overflows.
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 563f21d9d..212338ef0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3052,6 +3052,19 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
return ret;
 }
 
+static void
+i40e_stat_update_48_in_64(uint64_t *new_bytes,
+ uint64_t *prev_bytes,
+ bool offset_loaded)
+{
+   if (offset_loaded) {
+   if (I40E_RXTX_BYTES_L_48_BIT(*prev_bytes) > *new_bytes)
+   *new_bytes += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   *new_bytes += I40E_RXTX_BYTES_H_16_BIT(*prev_bytes);
+   }
+   *prev_bytes = *new_bytes;
+}
+
 /* Get all the statistics of a VSI */
 void
 i40e_update_vsi_stats(struct i40e_vsi *vsi)
@@ -3073,6 +3086,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   i40e_stat_update_48_in_64(&nes->rx_bytes, &vsi->prev_rx_bytes,
+ vsi->offset_loaded);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
@@ -3099,6 +3115,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
+   /* enlarge the limitation when tx_bytes overflowed */
+   i40e_stat_update_48_in_64(&nes->tx_bytes, &vsi->prev_tx_bytes,
+ vsi->offset_loaded);
vsi->offset_loaded = true;
 
PMD_DRV_LOG(DEBUG, "* VSI[%u] stats start 
***",
@@ -3171,6 +3190,13 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
pf->offset_loaded,
&pf->internal_stats_offset.tx_broadcast,
&pf->internal_stats.tx_broadcast);
+   /* enlarge the limitation when internal rx/tx bytes overflowed */
+   i40e_stat_update_48_in_64(&pf->internal_stats.rx_bytes,
+ &pf->internal_prev_rx_bytes,
+ pf->offset_loaded);
+   i40e_stat_update_48_in_64(&pf->internal_stats.tx_bytes,
+ &pf->internal_prev_tx_bytes,
+ pf->offset_loaded);
 
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
@@ -3194,6 +3220,9 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
I40E_GLPRT_BPRCL(hw->port),
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
+   /* enlarge the limitation when rx_bytes overflowed */
+   i40e_stat_update_48_in_64(&ns->eth.rx_bytes, &pf->prev_rx_bytes,
+ pf->offset_loaded);
/* Workaround: CRC size should not be included in byte statistics,
 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
 * packet.
@@ -3252,6 +3281,9 @@ i40e_read_stats_registers(struct i40

[dpdk-dev] [PATCH v4] net/i40e: fix incorrect byte counters

2020-09-22 Thread Junyu Jiang
This patch fixes the issue that the rx/tx bytes statistics counters
overflow past the 48-bit limitation, by enlarging the limitation to 64 bits.

Fixes: 4861cde46116 ("i40e: new poll mode driver")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
---
v4: put reading stats and extending in same function.
v3: create a function to hide the extension inside it.
v2: modify the error code
---
---
 doc/guides/nics/i40e.rst   |  7 
 drivers/net/i40e/i40e_ethdev.c | 66 +-
 drivers/net/i40e/i40e_ethdev.h |  9 +
 3 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index b7430f6c4..4baa58be6 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -830,3 +830,10 @@ Tx bytes affected by the link status change
 
 For firmware versions prior to 6.01 for X710 series and 3.33 for X722 series, 
the tx_bytes statistics data is affected by
 the link down event. Each time the link status changes to down, the tx_bytes 
decreases 110 bytes.
+
+RX/TX statistics may be incorrect when register overflowed
+~~
+
+The rx_bytes/tx_bytes statistics register is 48 bits long. Although this 
limitation is enlarged to 64 bits
+on the software side, there is no way to detect if the overflow occurred 
more than once. So the rx_bytes/tx_bytes
+statistics data is correct only when statistics are updated at least once between 
two overflows.
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 563f21d9d..6439baf2f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3052,6 +3052,21 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
return ret;
 }
 
+static void
+i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
+ uint32_t loreg, bool offset_loaded, uint64_t *offset,
+ uint64_t *stat, uint64_t *prev_stat)
+{
+   i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
+   /* enlarge the limitation when statistics counters overflowed */
+   if (offset_loaded) {
+   if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
+   *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
+   }
+   *prev_stat = *stat;
+}
+
 /* Get all the statistics of a VSI */
 void
 i40e_update_vsi_stats(struct i40e_vsi *vsi)
@@ -3061,9 +3076,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
 
-   i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
-   vsi->offset_loaded, &oes->rx_bytes,
-   &nes->rx_bytes);
+   i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+ vsi->offset_loaded, &oes->rx_bytes,
+ &nes->rx_bytes, &vsi->prev_rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
vsi->offset_loaded, &oes->rx_unicast,
&nes->rx_unicast);
@@ -3084,9 +3099,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
&oes->rx_unknown_protocol,
&nes->rx_unknown_protocol);
-   i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
-   vsi->offset_loaded, &oes->tx_bytes,
-   &nes->tx_bytes);
+   i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+ vsi->offset_loaded, &oes->tx_bytes,
+ &nes->tx_bytes, &vsi->prev_tx_bytes);
i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
vsi->offset_loaded, &oes->tx_unicast,
&nes->tx_unicast);
@@ -3128,17 +3143,18 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
 
/* Get rx/tx bytes of internal transfer packets */
-   i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
-   I40E_GLV_GORCL(hw->port),
-   pf->offset_loaded,
-   &pf->internal_stats_offset.rx_bytes,
-   &pf->internal_stats.rx_bytes);
-
-   i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
-   I40E_GLV_GOTCL(hw->port),
-   pf->offset_loaded,
-   &pf->interna

[dpdk-dev] [PATCH v5] net/i40e: fix incorrect byte counters

2020-09-22 Thread Junyu Jiang
This patch fixes the issue that the rx/tx bytes statistics counters
overflow past the 48-bit limitation, by enlarging the limitation to 64 bits.

Fixes: 4861cde46116 ("i40e: new poll mode driver")
Cc: sta...@dpdk.org

Signed-off-by: Junyu Jiang 
Reviewed-by: Ferruh Yigit 
---
V5: move this known issue next to related to the stats
v4: put reading stats and extending in same function.
v3: create a function to hide the extension inside it.
v2: modify the error code
---
---
 doc/guides/nics/i40e.rst   |  9 +
 drivers/net/i40e/i40e_ethdev.c | 66 +-
 drivers/net/i40e/i40e_ethdev.h |  9 +
 3 files changed, 59 insertions(+), 25 deletions(-)

diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index b7430f6c4..a0b81e669 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -670,6 +670,15 @@ When a packet is over maximum frame size, the packet is 
dropped.
 However, the Rx statistics, when calling `rte_eth_stats_get` incorrectly
 shows it as received.
 
+RX/TX statistics may be incorrect when register overflowed
+~~
+
+The rx_bytes/tx_bytes statistics register is 48 bits long.
+Although this limitation is enlarged to 64 bits on the software side,
+there is no way to detect if the overflow occurred more than once.
+So the rx_bytes/tx_bytes statistics data is correct only when statistics
+are updated at least once between two overflows.
+
 VF & TC max bandwidth setting
 ~
 
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 563f21d9d..6439baf2f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3052,6 +3052,21 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
return ret;
 }
 
+static void
+i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
+ uint32_t loreg, bool offset_loaded, uint64_t *offset,
+ uint64_t *stat, uint64_t *prev_stat)
+{
+   i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
+   /* enlarge the limitation when statistics counters overflowed */
+   if (offset_loaded) {
+   if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
+   *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
+   *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
+   }
+   *prev_stat = *stat;
+}
+
 /* Get all the statistics of a VSI */
 void
 i40e_update_vsi_stats(struct i40e_vsi *vsi)
@@ -3061,9 +3076,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
 
-   i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
-   vsi->offset_loaded, &oes->rx_bytes,
-   &nes->rx_bytes);
+   i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+ vsi->offset_loaded, &oes->rx_bytes,
+ &nes->rx_bytes, &vsi->prev_rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
vsi->offset_loaded, &oes->rx_unicast,
&nes->rx_unicast);
@@ -3084,9 +3099,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
&oes->rx_unknown_protocol,
&nes->rx_unknown_protocol);
-   i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
-   vsi->offset_loaded, &oes->tx_bytes,
-   &nes->tx_bytes);
+   i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+ vsi->offset_loaded, &oes->tx_bytes,
+ &nes->tx_bytes, &vsi->prev_tx_bytes);
i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
vsi->offset_loaded, &oes->tx_unicast,
&nes->tx_unicast);
@@ -3128,17 +3143,18 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct 
i40e_hw *hw)
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
 
/* Get rx/tx bytes of internal transfer packets */
-   i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
-   I40E_GLV_GORCL(hw->port),
-   pf->offset_loaded,
-   &pf->internal_stats_offset.rx_bytes,
-   &pf->internal_stats.rx_bytes);
-
-   i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
-   I40E_GLV_GOTCL(hw->port),
-   p