[PATCH] net/iavf: revert fix VLAN insertion

2022-10-18 Thread Yiding Zhou
The patch to be reverted forces selection of the normal Tx path when the
kernel driver tells that L2TAG2 is required, which results in a large
performance loss.

We should support Tx context descriptor on vector path to handle the L2TAG2
case.

This commit reverts
commit 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx_vec_common.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 4ab22c6b2b..a59cb2ceee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,9 +253,6 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
-   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-   return -1;
-
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH v2] net/iavf: revert fix VLAN insertion

2022-10-19 Thread Yiding Zhou
When the kernel driver tells to use the L2TAG2 field for VLAN insertion,
the context descriptor needs to be used. There is an issue on the vector Tx
path, because it does not support the context descriptor.

The previous commit forces selection of the normal path to avoid the above
issue, but it results in a performance loss of around 40%. So it needs to be
reverted, and the original issue needs to be fixed by rework.

This reverts
commit 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

Fixes: 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx_vec_common.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 4ab22c6b2b..a59cb2ceee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,9 +253,6 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
-   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-   return -1;
-
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH v5] net/iavf: add thread for event callbacks

2022-10-19 Thread Yiding Zhou
All callbacks registered for ethdev events are called in eal-intr-thread,
and some of them execute virtchnl commands. Because interrupts are disabled
in the intr thread, there will be no response received for these commands.
So all callbacks should be called in a new context.

When the device is bonded, the bond pmd registers callback for LSC event to
execute virtchnl commands to reinitialize the device, it would also raise
the above issue.

This commit adds a new thread to call all event callbacks.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
Fixes: 84108425054a ("net/iavf: support asynchronous virtual channel message")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
v5: reword the title and log
v4: add 'reset' and 'ipsec' handling
v3: fix CI errors
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 153 +++--
 3 files changed, 154 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 4327c5a786..02ec1ae008 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -11,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -27,6 +29,146 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+#define MAX_EVENT_PENDING 16
+
+struct iavf_event_element {
+   TAILQ_ENTRY(iavf_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+   size_t param_alloc_size;
+   uint8_t param_alloc_data[0];
+};
+
+struct iavf_event_handler {
+   uint32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_lsit, iavf_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_event_element) pending;
+
+   while (true) {
+   char unused[MAX_EVENT_PENDING];
+   ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, 
pos->param);
+   rte_free(pos);
+   }
+   }
+
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param, size_t param_alloc_size)
+{
+   struct iavf_event_handler *handler = &event_handler;

[PATCH v2] net/iavf: fix port stats not cleared

2023-09-06 Thread Yiding Zhou
After VF reset, the kernel driver may reuse the original VSI without
resetting its stats. Call 'iavf_dev_stats_reset' during the initialization
of the VF in order to clear any statistics that may exist from the last use
of the VF and to avoid statistics errors.

Fixes: 22b123a36d07 ("net/avf: initialize PMD")
Cc: sta...@dpdk.org

Signed-off-by: Kuan Xu 
Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_ethdev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f2fc5a5621..24c6342dee 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2721,6 +2721,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
iavf_default_rss_disable(adapter);
 
+   iavf_dev_stats_reset(eth_dev);
 
/* Start device watchdog */
iavf_dev_watchdog_enable(adapter);
-- 
2.34.1



[PATCH] net/iavf: fix issue of VF resetting

2022-06-27 Thread Yiding Zhou
When the VF is in closed state, the vf_reset flag cannot be reverted
if the VF is reset asynchronously. This prevents all virtchnl commands
from executing, causing subsequent calls to iavf_dev_reset() to fail.

So the vf_reset flag needs to be reverted even when VF is in closed state.

Fixes: 676d986b4b86 ("net/iavf: fix crash after VF reset failure")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_ethdev.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7df0bf8118..506fcff6e3 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2702,8 +2702,10 @@ iavf_dev_close(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
 
-   if (adapter->closed)
-   return 0;
+   if (adapter->closed) {
+   ret = 0;
+   goto out;
+   }
 
ret = iavf_dev_stop(dev);
adapter->closed = true;
@@ -2763,6 +2765,7 @@ iavf_dev_close(struct rte_eth_dev *dev)
 * the bus master bit will not be disabled, and this call will have no
 * effect.
 */
+out:
if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
vf->vf_reset = false;
 
-- 
2.34.1



[PATCH] net/ice: add support for setting promisc by DCF

2022-01-25 Thread Yiding Zhou
Allow enabling/disabling VF promiscuous mode over VF0.
This feature needs an updated ice kernel driver (newer than v1.8.0).

Signed-off-by: Yiding Zhou 
---
 drivers/net/ice/ice_dcf_vf_representor.c | 56 +---
 1 file changed, 39 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ice/ice_dcf_vf_representor.c 
b/drivers/net/ice/ice_dcf_vf_representor.c
index b9fcfc80ad..781282f68c 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -10,6 +10,20 @@
 #include "ice_dcf_ethdev.h"
 #include "ice_rxtx.h"
 
+static __rte_always_inline struct ice_dcf_hw *
+ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
+{
+   struct ice_dcf_adapter *dcf_adapter =
+   repr->dcf_eth_dev->data->dev_private;
+
+   if (!dcf_adapter) {
+   PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
+   return NULL;
+   }
+
+   return &dcf_adapter->real_hw;
+}
+
 static uint16_t
 ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
 __rte_unused struct rte_mbuf **rx_pkts,
@@ -78,15 +92,36 @@ ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct 
rte_eth_dev *dev,
 }
 
 static int
-ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
+ice_dcf_vf_repr_promiscuous_enable(struct rte_eth_dev *ethdev)
 {
-   return 0;
+   struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
+   struct dcf_virtchnl_cmd args;
+   struct virtchnl_promisc_info promisc;
+   struct ice_dcf_hw *hw = ice_dcf_vf_repr_hw(repr);
+   memset(&args, 0, sizeof(args));
+   args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+   promisc.flags = 0;
+   promisc.vsi_id = hw->vf_vsi_map[repr->vf_id] & 
~VIRTCHNL_DCF_VF_VSI_VALID;
+   promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+   args.req_msg = (uint8_t *)&promisc;
+   args.req_msglen = sizeof(promisc);
+   return ice_dcf_execute_virtchnl_cmd(hw, &args);
 }
 
 static int
-ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
+ice_dcf_vf_repr_promiscuous_disable(struct rte_eth_dev *ethdev)
 {
-   return 0;
+   struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
+   struct dcf_virtchnl_cmd args;
+   struct virtchnl_promisc_info promisc;
+   struct ice_dcf_hw *hw = ice_dcf_vf_repr_hw(repr);
+   memset(&args, 0, sizeof(args));
+   args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+   promisc.flags = 0;
+   promisc.vsi_id = hw->vf_vsi_map[repr->vf_id] & 
~VIRTCHNL_DCF_VF_VSI_VALID;
+   args.req_msg = (uint8_t *)&promisc;
+   args.req_msglen = sizeof(promisc);
+   return ice_dcf_execute_virtchnl_cmd(hw, &args);
 }
 
 static int
@@ -108,19 +143,6 @@ ice_dcf_vf_repr_link_update(__rte_unused struct 
rte_eth_dev *ethdev,
return 0;
 }
 
-static __rte_always_inline struct ice_dcf_hw *
-ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
-{
-   struct ice_dcf_adapter *dcf_adapter =
-   repr->dcf_eth_dev->data->dev_private;
-
-   if (!dcf_adapter) {
-   PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
-   return NULL;
-   }
-
-   return &dcf_adapter->real_hw;
-}
 
 static int
 ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
-- 
2.25.1



[PATCH] net/ice: add support for setting promisc by DCF

2022-01-26 Thread Yiding Zhou
Allow enabling/disabling VF promiscuous mode over VF0.
This feature needs an updated ice kernel driver (newer than v1.8.0).

Signed-off-by: Yiding Zhou 
---
 drivers/net/ice/ice_dcf_vf_representor.c | 56 +---
 1 file changed, 39 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ice/ice_dcf_vf_representor.c 
b/drivers/net/ice/ice_dcf_vf_representor.c
index b9fcfc80ad..781282f68c 100644
--- a/drivers/net/ice/ice_dcf_vf_representor.c
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -10,6 +10,20 @@
 #include "ice_dcf_ethdev.h"
 #include "ice_rxtx.h"
 
+static __rte_always_inline struct ice_dcf_hw *
+ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
+{
+   struct ice_dcf_adapter *dcf_adapter =
+   repr->dcf_eth_dev->data->dev_private;
+
+   if (!dcf_adapter) {
+   PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
+   return NULL;
+   }
+
+   return &dcf_adapter->real_hw;
+}
+
 static uint16_t
 ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
 __rte_unused struct rte_mbuf **rx_pkts,
@@ -78,15 +92,36 @@ ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct 
rte_eth_dev *dev,
 }
 
 static int
-ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
+ice_dcf_vf_repr_promiscuous_enable(struct rte_eth_dev *ethdev)
 {
-   return 0;
+   struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
+   struct dcf_virtchnl_cmd args;
+   struct virtchnl_promisc_info promisc;
+   struct ice_dcf_hw *hw = ice_dcf_vf_repr_hw(repr);
+   memset(&args, 0, sizeof(args));
+   args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+   promisc.flags = 0;
+   promisc.vsi_id = hw->vf_vsi_map[repr->vf_id] & 
~VIRTCHNL_DCF_VF_VSI_VALID;
+   promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+   args.req_msg = (uint8_t *)&promisc;
+   args.req_msglen = sizeof(promisc);
+   return ice_dcf_execute_virtchnl_cmd(hw, &args);
 }
 
 static int
-ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
+ice_dcf_vf_repr_promiscuous_disable(struct rte_eth_dev *ethdev)
 {
-   return 0;
+   struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
+   struct dcf_virtchnl_cmd args;
+   struct virtchnl_promisc_info promisc;
+   struct ice_dcf_hw *hw = ice_dcf_vf_repr_hw(repr);
+   memset(&args, 0, sizeof(args));
+   args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+   promisc.flags = 0;
+   promisc.vsi_id = hw->vf_vsi_map[repr->vf_id] & 
~VIRTCHNL_DCF_VF_VSI_VALID;
+   args.req_msg = (uint8_t *)&promisc;
+   args.req_msglen = sizeof(promisc);
+   return ice_dcf_execute_virtchnl_cmd(hw, &args);
 }
 
 static int
@@ -108,19 +143,6 @@ ice_dcf_vf_repr_link_update(__rte_unused struct 
rte_eth_dev *ethdev,
return 0;
 }
 
-static __rte_always_inline struct ice_dcf_hw *
-ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr)
-{
-   struct ice_dcf_adapter *dcf_adapter =
-   repr->dcf_eth_dev->data->dev_private;
-
-   if (!dcf_adapter) {
-   PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n");
-   return NULL;
-   }
-
-   return &dcf_adapter->real_hw;
-}
 
 static int
 ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
-- 
2.25.1



[PATCH] net/ice: fix gcc error with -DRTE_LIBRTE_ICE_16BYTE_RX_DESC

2022-02-07 Thread Yiding Zhou
GCC will report the error "unused parameter 'rxq'" when the macro
RTE_LIBRTE_ICE_16BYTE_RX_DESC is defined. Use "(void)rxq" to avoid it.

Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/ice/ice_rxtx.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 58700f1b92..97572d5952 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -163,6 +163,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue 
*rxq,
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+   (void)rxq;
 #endif
 }
 
@@ -201,6 +203,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue 
*rxq,
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+   (void)rxq;
 #endif
 }
 
-- 
2.25.1



[PATCH v2] net/ice: fix gcc error with -DRTE_LIBRTE_ICE_16BYTE_RX_DESC

2022-02-07 Thread Yiding Zhou
GCC will report the error "unused parameter 'rxq'" when the macro
RTE_LIBRTE_ICE_16BYTE_RX_DESC is defined. Use RTE_SET_USED to avoid it.

Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/ice/ice_rxtx.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 58700f1b92..4f218bcd0d 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -163,6 +163,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue 
*rxq,
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+   RTE_SET_USED(rxq);
 #endif
 }
 
@@ -201,6 +203,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue 
*rxq,
*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
}
}
+#else
+   RTE_SET_USED(rxq);
 #endif
 }
 
-- 
2.25.1



[PATCH] net/iavf: fix mismatch between rx_pkt_burst and RX descriptor

2022-05-06 Thread Yiding Zhou
Some kernel drivers return the capability VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC
when IAVF_RXDID_COMMS_OVS_1 is not supported. This causes PMD to use
rx_pkt_burst that handles the Flex Receive Descriptor format, but actually
configures the RXDID into IAVF_RXDID_LEGACY_1, then the fields of rte_mbuf
will be filled with wrong values in rx_pkt_burst, which will eventually
lead to coredump.

This patch fixes mismatch between rx_pkt_burst and rx descriptor.

Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx.c | 20 ++--
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 345f6aeebc..69584264de 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2908,6 +2908,18 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
bool use_avx512 = false;
bool use_flex = false;
 
+   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   use_flex = true;
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (rxq->rxdid <= IAVF_RXDID_LEGACY_1 ||
+   !(vf->supported_rxdid & BIT(rxq->rxdid))) {
+   use_flex = false;
+   break;
+   }
+   }
+
check_ret = iavf_rx_vec_dev_check(dev);
if (check_ret >= 0 &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
@@ -2923,10 +2935,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
use_avx512 = true;
 #endif
 
-   if (vf->vf_res->vf_cap_flags &
-   VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
-   use_flex = true;
-
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)iavf_rxq_vec_setup(rxq);
@@ -3030,7 +3038,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
@@ -3041,7 +3049,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_pkts;
-- 
2.25.1



[PATCH v2] net/iavf: fix mismatch between rx_pkt_burst and RX descriptor

2022-05-07 Thread Yiding Zhou
Some kernel drivers return the capability VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC
when IAVF_RXDID_COMMS_OVS_1 is not supported. This causes PMD to use
rx_pkt_burst that handles the Flex Receive Descriptor format, but actually
configures the RXDID into IAVF_RXDID_LEGACY_1, then the fields of rte_mbuf
will be filled with wrong values in rx_pkt_burst, which will eventually
lead to coredump.

This patch fixes mismatch between rx_pkt_burst and rx descriptor.

Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx.c | 23 ++-
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 345f6aeebc..ed8d51dbb2 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2899,14 +2899,23 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+   int i;
+   struct iavf_rx_queue *rxq;
+   bool use_flex = true;
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (rxq->rxdid <= IAVF_RXDID_LEGACY_1 ||
+   !(vf->supported_rxdid & BIT(rxq->rxdid))) {
+   use_flex = false;
+   break;
+   }
+   }
 
 #ifdef RTE_ARCH_X86
-   struct iavf_rx_queue *rxq;
-   int i;
int check_ret;
bool use_avx2 = false;
bool use_avx512 = false;
-   bool use_flex = false;
 
check_ret = iavf_rx_vec_dev_check(dev);
if (check_ret >= 0 &&
@@ -2923,10 +2932,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
use_avx512 = true;
 #endif
 
-   if (vf->vf_res->vf_cap_flags &
-   VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
-   use_flex = true;
-
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)iavf_rxq_vec_setup(rxq);
@@ -3030,7 +3035,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
@@ -3041,7 +3046,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_pkts;
-- 
2.25.1



[PATCH v2] net/iavf: fix mismatch between rx_pkt_burst and RX descriptor

2022-05-07 Thread Yiding Zhou
Some kernel drivers return the capability VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC
when IAVF_RXDID_COMMS_OVS_1 is not supported. This causes PMD to use
rx_pkt_burst that handles the Flex Receive Descriptor format, but actually
configures the RXDID into IAVF_RXDID_LEGACY_1, then the fields of rte_mbuf
will be filled with wrong values in rx_pkt_burst, which will eventually
lead to coredump.

This patch fixes mismatch between rx_pkt_burst and rx descriptor.

Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx.c | 23 ++-
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 345f6aeebc..ed8d51dbb2 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2899,14 +2899,23 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+   int i;
+   struct iavf_rx_queue *rxq;
+   bool use_flex = true;
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (rxq->rxdid <= IAVF_RXDID_LEGACY_1 ||
+   !(vf->supported_rxdid & BIT(rxq->rxdid))) {
+   use_flex = false;
+   break;
+   }
+   }
 
 #ifdef RTE_ARCH_X86
-   struct iavf_rx_queue *rxq;
-   int i;
int check_ret;
bool use_avx2 = false;
bool use_avx512 = false;
-   bool use_flex = false;
 
check_ret = iavf_rx_vec_dev_check(dev);
if (check_ret >= 0 &&
@@ -2923,10 +2932,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
use_avx512 = true;
 #endif
 
-   if (vf->vf_res->vf_cap_flags &
-   VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
-   use_flex = true;
-
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)iavf_rxq_vec_setup(rxq);
@@ -3030,7 +3035,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
@@ -3041,7 +3046,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_pkts;
-- 
2.25.1



[PATCH v2] net/iavf: fix mismatch between rx_pkt_burst and RX descriptor

2022-05-07 Thread Yiding Zhou
Some kernel drivers return the capability VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC
when IAVF_RXDID_COMMS_OVS_1 is not supported. This causes PMD to use
rx_pkt_burst that handles the Flex Receive Descriptor format, but actually
configures the RXDID into IAVF_RXDID_LEGACY_1, then the fields of rte_mbuf
will be filled with wrong values in rx_pkt_burst, which will eventually
lead to coredump.

This patch fixes mismatch between rx_pkt_burst and rx descriptor.

Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx.c | 27 ++-
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 345f6aeebc..d3b1a58b27 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2899,14 +2899,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+   int i;
+   struct iavf_rx_queue *rxq;
+   bool use_flex = true;
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (rxq->rxdid <= IAVF_RXDID_LEGACY_1) {
+   PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is 
legacy, "
+   "set rx_pkt_burst as legacy for all queues", 
rxq->rxdid, i);
+   use_flex = false;
+   } else if (!(vf->supported_rxdid & BIT(rxq->rxdid))) {
+   PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d] is 
not supported, "
+   "set rx_pkt_burst as legacy for all queues", 
rxq->rxdid, i);
+   use_flex = false;
+   }
+   }
 
 #ifdef RTE_ARCH_X86
-   struct iavf_rx_queue *rxq;
-   int i;
int check_ret;
bool use_avx2 = false;
bool use_avx512 = false;
-   bool use_flex = false;
 
check_ret = iavf_rx_vec_dev_check(dev);
if (check_ret >= 0 &&
@@ -2923,10 +2936,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
use_avx512 = true;
 #endif
 
-   if (vf->vf_res->vf_cap_flags &
-   VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
-   use_flex = true;
-
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
(void)iavf_rxq_vec_setup(rxq);
@@ -3030,7 +3039,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
@@ -3041,7 +3050,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
-   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+   if (use_flex)
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
else
dev->rx_pkt_burst = iavf_recv_pkts;
-- 
2.25.1



[PATCH v3] net/iavf: fix segfaults when calling API after VF reset failed

2022-05-11 Thread Yiding Zhou
Some pointers will be set to NULL when iavf_dev_reset() failed,
for example vf->vf_res, vf->vsi_res vf->rss_key and etc.
APIs access these NULL pointers will trigger segfault.

This patch adds closed flag to indicate that the VF is closed,
and rejects API calls in this state to avoid coredump.

Fixes: e74e1bb6280d ("net/iavf: enable port reset")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf.h|  1 +
 drivers/net/iavf/iavf_ethdev.c | 57 +++---
 drivers/net/iavf/iavf_rxtx.c   | 10 ++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++
 4 files changed, 81 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index dd83567e59..819510649a 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -312,6 +312,7 @@ struct iavf_adapter {
bool tx_vec_allowed;
uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
bool stopped;
+   bool closed;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
uint64_t phc_time;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 82672841f4..198d8299af 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -234,9 +234,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
void *arg)
 {
+   struct iavf_adapter *adapter =
+   IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+   if (adapter->closed)
+   return -EIO;
+
if (!arg)
return -EINVAL;
 
@@ -347,6 +353,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
return -EINVAL;
}
 
+   if (adapter->closed)
+   return -EIO;
+
/* flush previous addresses */
err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
false);
@@ -618,6 +627,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
dev->data->nb_tx_queues);
int ret;
 
+   if (ad->closed)
+   return -EIO;
+
ad->rx_bulk_alloc_allowed = true;
/* Initialize to TRUE. If any of Rx queues doesn't meet the
 * vector Rx/Tx preconditions, it will be reset.
@@ -950,6 +962,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (adapter->closed)
+   return -1;
+
adapter->stopped = 0;
 
vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1046,6 +1061,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (adapter->closed)
+   return -1;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_disable(intr_handle);
@@ -1083,6 +1101,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = &adapter->vf;
 
+   if (adapter->closed)
+   return -EIO;
+
dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1326,6 +1347,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, 
uint16_t vlan_id, int on)
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
int err;
 
+   if (adapter->closed)
+   return -EIO;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
if (err)
@@ -1402,6 +1426,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int 
mask)
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
int err;
 
+   if (adapter->closed)
+   return -EIO;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1434,6 +1461,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t i, idx, shift;
int ret;
 
+   if (adapter->closed)
+   return -EIO;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
 
@@ -1479,6 +1509,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint16_t i, idx, shift;
 
+   if (adapter->closed)
+   return -EIO;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -E

[PATCH] net/iavf: fix port stats not cleared

2023-08-29 Thread Yiding Zhou
Call 'iavf_dev_stats_reset' during the initialization of the VF in order
to clear any statistics that may exist from the last use of the VF and to
avoid statistics errors.

Fixes: 22b123a36d07 ("net/avf: initialize PMD")
Cc: sta...@dpdk.org

Signed-off-by: Kuan Xu 
Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_ethdev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f2fc5a5621..24c6342dee 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2721,6 +2721,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
iavf_default_rss_disable(adapter);
 
+   iavf_dev_stats_reset(eth_dev);
 
/* Start device watchdog */
iavf_dev_watchdog_enable(adapter);
-- 
2.34.1



[PATCH] ice: fix build error on 32bit configure

2023-07-05 Thread Yiding Zhou
Replace 'rte_memcpy' with 'memcpy' like other PMD code to avoid errors when
compiling with GCC-12 in a 32-bit configuration.

Compiler reports the follow error:

error: array subscript 8 is outside array bounds of "struct rte_mbuf *[32]"
[-Werror=array-bounds]

Fixes: c68a52b8b38c ("net/ice: support vector SSE in Rx")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/ice/ice_rxtx_vec_common.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_common.h 
b/drivers/net/ice/ice_rxtx_vec_common.h
index eec6ea2134..55840cf170 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -72,7 +72,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct 
rte_mbuf **rx_bufs,
/* save the partial packet for next time */
rxq->pkt_first_seg = start;
rxq->pkt_last_seg = end;
-   rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+   memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
return pkt_idx;
 }
 
-- 
2.34.1



[PATCH v2] net/pcap: fix timeout of stopping device

2022-09-06 Thread Yiding Zhou
The pcap file will be synchronized to the disk when stopping the device.
If the file is large, synchronization takes a long time, which would cause
the 'detach sync request' to time out when the device is closed in a
multi-process scenario.

This commit fixes the issue by using alarm handler to release dumper.

Fixes: 0ecfb6c04d54 ("net/pcap: move handler to process private")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 

---
v2: use alarm handler to release dumper
---
 drivers/net/pcap/pcap_ethdev.c | 22 +-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..5c643a0277 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "pcap_osdep.h"
 
@@ -664,6 +665,25 @@ eth_dev_start(struct rte_eth_dev *dev)
return 0;
 }
 
+static void eth_pcap_dumper_release(void *arg)
+{
+   pcap_dump_close((pcap_dumper_t *)arg);
+}
+
+static void
+eth_pcap_dumper_close(pcap_dumper_t *dumper)
+{
+   if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+   /*
+* Delay 30 seconds before releasing dumper to wait for file 
sync
+* to complete to avoid blocking alarm thread in PRIMARY process
+*/
+   rte_eal_alarm_set(3000, eth_pcap_dumper_release, dumper);
+   } else {
+   rte_eal_alarm_set(1, eth_pcap_dumper_release, dumper);
+   }
+}
+
 /*
  * This function gets called when the current port gets stopped.
  * Is the only place for us to close all the tx streams dumpers.
@@ -689,7 +709,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (pp->tx_dumper[i] != NULL) {
-   pcap_dump_close(pp->tx_dumper[i]);
+   eth_pcap_dumper_close(pp->tx_dumper[i]);
pp->tx_dumper[i] = NULL;
}
 
-- 
2.34.1



[PATCH] net/iavf: fix error of virtchnl command

2022-09-18 Thread Yiding Zhou
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands cannot be received.

This commit starts a thread to handle all events to fix this issue.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 130 -
 3 files changed, 136 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..7c215e1797 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -417,6 +417,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..a584b8918d 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2629,6 +2629,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2783,6 +2786,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..5f3dd8779e 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -27,6 +29,132 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+struct iavf_arq_event_element {
+   TAILQ_ENTRY(iavf_arq_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+};
+
+struct iavf_event_handler {
+   rte_atomic32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+   while (true) {
+   char unused;
+   int nr = read(handler->fd[0], &unused, 1);
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_arq_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, 
pos->param);
+   rte_free(pos);
+   }
+   }
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   char notify_byte;
+   struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 
0);
+   if (!elem)
+   return;
+
+   elem->dev = dev;
+   elem->event = event;
+   elem->param = param;
+
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   write(handler->fd[1], ¬ify_byte, 1);
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+   struct iavf_event_handler *handler = &event_handler;
+
+   if (r

[PATCH v2] net/iavf: fix error of virtchnl command

2022-09-19 Thread Yiding Zhou
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands cannot be received.

This commit starts a thread to handle all events to fix this issue.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 131 -
 3 files changed, 137 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..7c215e1797 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -417,6 +417,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..a584b8918d 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2629,6 +2629,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2783,6 +2786,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..e357211c68 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -27,6 +29,133 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+struct iavf_arq_event_element {
+   TAILQ_ENTRY(iavf_arq_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+};
+
+struct iavf_event_handler {
+   rte_atomic32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+   while (true) {
+   char unused[8];
+   ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_arq_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, 
pos->param);
+   rte_free(pos);
+   }
+   }
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   char notify_byte;
+   struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 
0);
+   if (!elem)
+   return;
+
+   elem->dev = dev;
+   elem->event = event;
+   elem->param = param;
+
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   ssize_t nw = write(handler->fd[1], ¬ify_byte, 1);
+   RTE_SET_USED(nw);
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+   struct iavf_event

[PATCH v3] net/iavf: fix error of virtchnl command

2022-10-08 Thread Yiding Zhou
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands cannot be received.

This commit starts a thread to handle all events to fix this issue.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
v3: fix CI error
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 134 -
 3 files changed, 140 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0fa2617cd2..6284a5b125 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -11,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -27,6 +29,136 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+#define MAX_EVENT_PENDING 16
+
+struct iavf_arq_event_element {
+   TAILQ_ENTRY(iavf_arq_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+};
+
+struct iavf_event_handler {
+   rte_atomic32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+   while (true) {
+   char unused[MAX_EVENT_PENDING];
+   ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_arq_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, 
pos->param);
+   rte_free(pos);
+   }
+   }
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   char notify_byte;
+   struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 
0);
+   if (!elem)
+   return;
+
+   elem->dev = dev;
+   elem->event = event;
+   elem->param = param;
+
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   RTE_SET

[PATCH v3] net/iavf: fix error of virtchnl command

2022-10-08 Thread Yiding Zhou
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands cannot be received.

This commit starts a thread to handle all events to fix this issue.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
v3: fix CI errors
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 136 -
 3 files changed, 142 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0fa2617cd2..35ceccaedd 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -11,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -27,6 +29,138 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+#define MAX_EVENT_PENDING 16
+
+struct iavf_arq_event_element {
+   TAILQ_ENTRY(iavf_arq_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+};
+
+struct iavf_event_handler {
+   rte_atomic32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_lsit, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+   while (true) {
+   char unused[MAX_EVENT_PENDING];
+   ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_arq_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, 
pos->param);
+   rte_free(pos);
+   }
+   }
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   char notify_byte;
+   struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 
0);
+   if (!elem)
+   return;
+
+   elem->dev = dev;
+   elem->event = event;
+   elem->param = param;
+
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   ssiz

[PATCH v4] net/iavf: fix error of virtchnl command

2022-10-12 Thread Yiding Zhou
When the device is bonded, bond pmd will register callback for LSC event.
This callback will execute some virtchnl commands in eal-intr-thread to
reinitialize the device with interrupts disabled. In this case, responses
to all commands cannot be received.

This commit starts a thread to handle all events to fix this issue.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
v4: add 'reset' and 'ipsec' event handling
v3: fix CI errors
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 152 +++--
 3 files changed, 153 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 4327c5a786..43e18ca5f7 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -11,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -27,6 +29,145 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+#define MAX_EVENT_PENDING 16
+
+struct iavf_event_element {
+   TAILQ_ENTRY(iavf_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+   size_t param_alloc_size;
+   uint8_t param_alloc_data[0];
+};
+
+struct iavf_event_handler {
+   uint32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_lsit, iavf_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_event_element) pending;
+
+   while (true) {
+   char unused[MAX_EVENT_PENDING];
+   ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, 
pos->param);
+   rte_free(pos);
+   }
+   }
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param, size_t param_alloc_size)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   char notify_byte;
+   struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + 
param_alloc_size, 0);
+   if (!elem)
+   return;
+
+   elem->dev = dev;
+   elem->event = event;
+   elem->param = param;
+   elem->param_alloc_size

[PATCH] net/ice/base: fix duplicate flow rules

2022-10-12 Thread Yiding Zhou
When a vsi that already exists in the created vsi_list subscribes to the
same filter again, the return value ICE_SUCCESS causes duplicate flow
rules to be stored, which will cause 'flush' and 'destroy' errors.

Fixes: fed0c5ca5f19 ("net/ice/base: support programming a new switch recipe")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/ice/base/ice_switch.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ice/base/ice_switch.c 
b/drivers/net/ice/base/ice_switch.c
index 4b115ce660..a2581f404d 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -8786,7 +8786,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
 
/* A rule already exists with the new VSI being added */
if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
-   return ICE_SUCCESS;
+   return ICE_ERR_ALREADY_EXISTS;
 
/* Update the previously created VSI list set with
 * the new VSI ID passed in
-- 
2.34.1



[PATCH v3] net/iavf: revert fix VLAN insertion

2022-11-03 Thread Yiding Zhou
The vector Tx path does not support VLAN insertion via the L2TAG2 field,
but the scalar path supports. The previous commit was to force to select
scalar path as soon as kernel driver requests to use L2TAG2.

That logic is incorrect, because other cases like VLAN offloading not being
required but the scalar path being selected would cause a significant
performance drop. Therefore the following commit needs to be reverted.

commit 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

After this commit reverted, the user can select scalar path with the
parameter '--force-max-simd-bitwidth' if necessary.

Fixes: 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

Signed-off-by: Yiding Zhou 
---
v3: rebase and change commit log
---
 drivers/net/iavf/iavf_rxtx_vec_common.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 4ab22c6b2b..a59cb2ceee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,9 +253,6 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
-   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-   return -1;
-
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH v4] net/iavf: revert fix VLAN insertion

2022-11-11 Thread Yiding Zhou
The vector Tx path does not support VLAN insertion via the L2TAG2 field,
but the scalar path supports. The earlier commit was to force to select
scalar path as soon as kernel driver requests to use L2TAG2. That logic is
incorrect, because other cases like VLAN offloading not being required but
the scalar path being selected would cause a significant performance drop.

Therefore the following commit was reverted accordingly.

commit 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

After reverting this commit, the AVX512 Tx path would insert the VLAN tag
into the wrong location (inner of QinQ) when the kernel driver requested
L2TAG2. This is inconsistent with the behavior of the PF (outer of QinQ).

It is currently known that ice kernel drivers newer than 1.8.9 will request
the use of L2TAG2. User can set parameter '--force-max-simd-bitwidth' to
64/128/256 to avoid this issue.

Fixes: 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
v4: document this issue as a known issue and add some commit log
v3: rebase and change commit log
---
 doc/guides/rel_notes/known_issues.rst   | 23 +++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ---
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/known_issues.rst 
b/doc/guides/rel_notes/known_issues.rst
index 570550843a..1f73b17716 100644
--- a/doc/guides/rel_notes/known_issues.rst
+++ b/doc/guides/rel_notes/known_issues.rst
@@ -906,3 +906,26 @@ Vhost multi-queue reconnection failed with QEMU version 
4.2.0 to 5.1.0
 
 **Driver/Module**:
Virtual Device Poll Mode Driver (PMD).
+
+IAVF inserts VLAN tag incorrectly on AVX-512 Tx path
+--
+
+**Description**
+   When the kernel driver requests the VF to use the L2TAG2 field of the Tx 
context
+   descriptor to insert the hardware offload VLAN tag, AVX-512 Tx path cannot 
handle
+   this case correctly due to its lack of support for the Tx context 
descriptor.
+
+**Implication**
+   The VLAN tag will be inserted to the wrong location(inner of QinQ) on 
AVX-512 Tx
+   path. That is inconsistent with the behavior of PF(outer of QinQ). The ice 
kernel
+   driver's version newer than 1.8.9 requests to use L2TAG2 and has this issue.
+
+**Resolution/Workaround**:
+   Set the parameter `--force-max-simd-bitwidth` as 64/128/256 to avoid 
selecting AVX-512
+   Tx path
+
+**Affected Environment/Platform**:
+   ALL.
+
+**Driver/Module**:
+   Poll Mode Driver (PMD).
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 4ab22c6b2b..a59cb2ceee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,9 +253,6 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
-   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-   return -1;
-
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH v5] net/iavf: revert fix VLAN insertion

2022-11-13 Thread Yiding Zhou
The vector Tx path does not support VLAN insertion via the L2TAG2 field,
but the scalar path does. The earlier commit was to force to select
scalar path as soon as kernel driver requests to use L2TAG2. That logic is
incorrect, because other cases, such as when VLAN offloading is not required
but the scalar path is still selected, would suffer a significant performance drop.

Therefore the following commit was reverted accordingly.

commit 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")

After reverting this commit, the AVX512 Tx path would insert the VLAN tag
into the wrong location(inner of QinQ) when the kernel driver requested
L2TAG2. This is inconsistent with the behavior of PF(outer of QinQ).

It is currently known that ice kernel drivers newer than 1.8.9 will request
the use of L2TAG2. User can set parameter '--force-max-simd-bitwidth' to
64/128/256 to avoid this issue.

Fixes: 0d58caa7d6d1 ("net/iavf: fix VLAN insertion")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
v5: move the document
v4: document this issue as a known issue and add some commit log
v3: rebase and change commit log
---
 doc/guides/nics/intel_vf.rst| 13 +
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ---
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index d582f831da..edbda275c1 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -704,3 +704,16 @@ i40e: Vlan filtering of VF
 For i40e driver 2.17.15, configuring VLAN filters from the DPDK VF is 
unsupported.
 When applying VLAN filters on the VF it must first be configured from the
 corresponding PF.
+
+ice: VF inserts VLAN tag incorrectly on AVX-512 Tx path
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When the kernel driver requests the VF to use the L2TAG2 field of the Tx 
context
+descriptor to insert the hardware offload VLAN tag, AVX-512 Tx path cannot 
handle
+this case correctly due to its lack of support for the Tx context descriptor.
+
+The VLAN tag will be inserted to the wrong location(inner of QinQ) on AVX-512 
Tx
+path. That is inconsistent with the behavior of PF(outer of QinQ). The ice 
kernel
+driver's version newer than 1.8.9 requests to use L2TAG2 and has this issue.
+
+Set the parameter `--force-max-simd-bitwidth` as 64/128/256 to avoid selecting
+AVX-512 Tx path.
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 4ab22c6b2b..a59cb2ceee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,9 +253,6 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
-   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-   return -1;
-
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH] net/iavf: fix VLAN insertion

2022-08-11 Thread Yiding Zhou
When the driver tells the VF to insert VLAN tag using the L2TAG2 field,
vector Tx path does not use Tx context descriptor and would cause VLAN tag
inserted into the wrong location.

This commit is to fix the issue by using normal Tx path to handle L2TAG2 case.

Fixes: 3aa957338503 ("net/iavf: fix VLAN insert")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx_vec_common.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index a59cb2ceee..4ab22c6b2b 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,6 +253,9 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
+   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
+   return -1;
+
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH] net/iavf: fix VLAN insertion

2022-08-11 Thread Yiding Zhou
When the PF driver tells the VF to insert VLAN tag using the L2TAG2 field,
vector Tx path does not use Tx context descriptor and would cause VLAN tag
inserted into the wrong location.

This commit is to fix the issue by using normal Tx path to handle L2TAG2 case.

Fixes: 3aa957338503 ("net/iavf: fix VLAN insert")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf_rxtx_vec_common.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h 
b/drivers/net/iavf/iavf_rxtx_vec_common.h
index a59cb2ceee..4ab22c6b2b 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -253,6 +253,9 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
return -1;
 
+   if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
+   return -1;
+
if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
 
-- 
2.34.1



[PATCH] net/pcap: reduce time for stopping device

2022-08-24 Thread Yiding Zhou
The pcap file will be synchronized to the disk when stopping the device.
It takes a long time if the file is large that would cause the
'detach sync request' timeout when the device is closed under multi-process
scenario.

This commit fixes the issue by performing synchronization in Tx path

Fixes: 4c173302c307 ("pcap: add new driver")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/pcap/pcap_ethdev.c | 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..52eafa5674 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -3,7 +3,7 @@
  * Copyright(c) 2014 6WIND S.A.
  * All rights reserved.
  */
-
+#include 
 #include 
 
 #include 
@@ -38,6 +38,8 @@
 
 #define RTE_PMD_PCAP_MAX_QUEUES 16
 
+#define ETH_PCAP_SYNC_THRESHOLD 0x2000
+
 static char errbuf[PCAP_ERRBUF_SIZE];
 static struct timespec start_time;
 static uint64_t start_cycles;
@@ -47,6 +49,8 @@ static uint8_t iface_idx;
 static uint64_t timestamp_rx_dynflag;
 static int timestamp_dynfield_offset = -1;
 
+RTE_DEFINE_PER_LCORE(uint64_t, _pcap_cached_bytes);
+
 struct queue_stat {
volatile unsigned long pkts;
volatile unsigned long bytes;
@@ -144,6 +148,16 @@ static struct rte_eth_link pmd_link = {
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
 
+static inline void
+pcap_dumper_data_sync(pcap_dumper_t *dumper, uint32_t bytes)
+{
+   RTE_PER_LCORE(_pcap_cached_bytes) += bytes;
+   if (unlikely(RTE_PER_LCORE(_pcap_cached_bytes) > 
ETH_PCAP_SYNC_THRESHOLD)) {
+   if (!fdatasync(fileno(pcap_dump_file(dumper))))
+   RTE_PER_LCORE(_pcap_cached_bytes) = 0;
+   }
+}
+
 static struct queue_missed_stat*
 queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
 {
@@ -421,7 +435,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, 
uint16_t nb_pkts)
 * process stops and to make sure the pcap file is actually written,
 * we flush the pcap dumper within each burst.
 */
-   pcap_dump_flush(dumper);
+   pcap_dumper_data_sync(dumper, tx_bytes);
dumper_q->tx_stat.pkts += num_tx;
dumper_q->tx_stat.bytes += tx_bytes;
dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
-- 
2.34.1



[PATCH] net/pcap: reduce time for stopping device

2022-08-25 Thread Yiding Zhou
The pcap file will be synchronized to the disk when stopping the device.
It takes a long time if the file is large that would cause the
'detach sync request' timeout when the device is closed under multi-process
scenario.

This commit fixes the issue by performing synchronization in Tx path

Fixes: 4c173302c307 ("pcap: add new driver")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/pcap/pcap_ethdev.c | 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..52eafa5674 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -3,7 +3,7 @@
  * Copyright(c) 2014 6WIND S.A.
  * All rights reserved.
  */
-
+#include 
 #include 
 
 #include 
@@ -38,6 +38,8 @@
 
 #define RTE_PMD_PCAP_MAX_QUEUES 16
 
+#define ETH_PCAP_SYNC_THRESHOLD 0x2000
+
 static char errbuf[PCAP_ERRBUF_SIZE];
 static struct timespec start_time;
 static uint64_t start_cycles;
@@ -47,6 +49,8 @@ static uint8_t iface_idx;
 static uint64_t timestamp_rx_dynflag;
 static int timestamp_dynfield_offset = -1;
 
+RTE_DEFINE_PER_LCORE(uint64_t, _pcap_cached_bytes);
+
 struct queue_stat {
volatile unsigned long pkts;
volatile unsigned long bytes;
@@ -144,6 +148,16 @@ static struct rte_eth_link pmd_link = {
 
 RTE_LOG_REGISTER_DEFAULT(eth_pcap_logtype, NOTICE);
 
+static inline void
+pcap_dumper_data_sync(pcap_dumper_t *dumper, uint32_t bytes)
+{
+   RTE_PER_LCORE(_pcap_cached_bytes) += bytes;
+   if (unlikely(RTE_PER_LCORE(_pcap_cached_bytes) > 
ETH_PCAP_SYNC_THRESHOLD)) {
+   if (!fdatasync(fileno(pcap_dump_file(dumper))))
+   RTE_PER_LCORE(_pcap_cached_bytes) = 0;
+   }
+}
+
 static struct queue_missed_stat*
 queue_missed_stat_update(struct rte_eth_dev *dev, unsigned int qid)
 {
@@ -421,7 +435,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, 
uint16_t nb_pkts)
 * process stops and to make sure the pcap file is actually written,
 * we flush the pcap dumper within each burst.
 */
-   pcap_dump_flush(dumper);
+   pcap_dumper_data_sync(dumper, tx_bytes);
dumper_q->tx_stat.pkts += num_tx;
dumper_q->tx_stat.bytes += tx_bytes;
dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
-- 
2.34.1



[PATCH] net/iavf: fix segfaults when calling API after VF reset failed

2022-04-21 Thread Yiding Zhou
Some pointers will be set to NULL when iavf_dev_reset() failed,
for example vf->vf_res, vf->vsi_res, vf->rss_key, etc.
APIs access these NULL pointers will trigger segfault.

This patch adds closed flag to indicate that the VF is closed,
and rejects API calls in this state to avoid coredump.

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf.h|  1 +
 drivers/net/iavf/iavf_ethdev.c | 59 --
 drivers/net/iavf/iavf_rxtx.c   | 10 ++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++
 4 files changed, 85 insertions(+), 2 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..b3b582dd21 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -298,6 +298,7 @@ struct iavf_adapter {
bool tx_vec_allowed;
uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
bool stopped;
+   bool closed;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..a3454638be 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -229,9 +229,18 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
void *arg)
 {
+   struct iavf_adapter *adapter =
+   IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+   if (!dev)
+   return -EINVAL;
+
+   if (adapter->closed)
+   return -EIO;
+
if (!arg)
return -EINVAL;
 
@@ -342,6 +351,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
return -EINVAL;
}
 
+   if (adapter->closed)
+   return -EIO;
+
/* flush previous addresses */
err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
false);
@@ -613,6 +625,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
dev->data->nb_tx_queues);
int ret;
 
+   if (ad->closed)
+   return -EIO;
+
ad->rx_bulk_alloc_allowed = true;
/* Initialize to TRUE. If any of Rx queues doesn't meet the
 * vector Rx/Tx preconditions, it will be reset.
@@ -932,6 +947,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (adapter->closed)
+   return -1;
+
adapter->stopped = 0;
 
vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1009,6 +1027,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (adapter->closed)
+   return -1;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_disable(intr_handle);
@@ -1046,6 +1067,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = &adapter->vf;
 
+   if (adapter->closed)
+   return -EIO;
+
dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1286,6 +1310,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, 
uint16_t vlan_id, int on)
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
int err;
 
+   if (adapter->closed)
+   return -EIO;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
if (err)
@@ -1362,6 +1389,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int 
mask)
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
int err;
 
+   if (adapter->closed)
+   return -EIO;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1394,6 +1424,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t i, idx, shift;
int ret;
 
+   if (adapter->closed)
+   return -EIO;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
 
@@ -1439,6 +1472,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint16_t i, idx, shift;
 
+   if (adapter->closed)
+   return -EIO;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
 
@@ -1492,6 +1528,9 @@ iavf_dev_rss_hash_upd

[PATCH v2] net/iavf: fix segfaults when calling API after VF reset failed

2022-04-27 Thread Yiding Zhou
Some pointers will be set to NULL when iavf_dev_reset() failed,
for example vf->vf_res, vf->vsi_res, vf->rss_key, etc.
APIs access these NULL pointers will trigger segfault.

This patch adds closed flag to indicate that the VF is closed,
and rejects API calls in this state to avoid coredump.

Fixes: e74e1bb6280d ("net/iavf: enable port reset")
Cc: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf.h|  1 +
 drivers/net/iavf/iavf_ethdev.c | 57 +++---
 drivers/net/iavf/iavf_rxtx.c   | 10 ++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++
 4 files changed, 81 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..b3b582dd21 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -298,6 +298,7 @@ struct iavf_adapter {
bool tx_vec_allowed;
uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
bool stopped;
+   bool closed;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..91b6e64840 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -229,9 +229,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
void *arg)
 {
+   struct iavf_adapter *adapter =
+   IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+   if (adapter->closed)
+   return -EIO;
+
if (!arg)
return -EINVAL;
 
@@ -342,6 +348,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
return -EINVAL;
}
 
+   if (adapter->closed)
+   return -EIO;
+
/* flush previous addresses */
err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
false);
@@ -613,6 +622,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
dev->data->nb_tx_queues);
int ret;
 
+   if (ad->closed)
+   return -EIO;
+
ad->rx_bulk_alloc_allowed = true;
/* Initialize to TRUE. If any of Rx queues doesn't meet the
 * vector Rx/Tx preconditions, it will be reset.
@@ -932,6 +944,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (adapter->closed)
+   return -1;
+
adapter->stopped = 0;
 
vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1009,6 +1024,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (adapter->closed)
+   return -1;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_disable(intr_handle);
@@ -1046,6 +1064,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = &adapter->vf;
 
+   if (adapter->closed)
+   return -EIO;
+
dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1286,6 +1307,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, 
uint16_t vlan_id, int on)
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
int err;
 
+   if (adapter->closed)
+   return -EIO;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
if (err)
@@ -1362,6 +1386,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int 
mask)
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
int err;
 
+   if (adapter->closed)
+   return -EIO;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1394,6 +1421,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t i, idx, shift;
int ret;
 
+   if (adapter->closed)
+   return -EIO;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
 
@@ -1439,6 +1469,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
uint16_t i, idx, shift;
 
+   if (adapter->closed)
+   return -EIO;
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
return -ENOTSUP;
 
@@ -1492,6 +15