The control thread accesses the hardware resources after they have
been released, which results in a segmentation fault.

The "ice-reset" threads are detached, so their resources cannot be
reclaimed by `pthread_join` calls.

This commit tracks the number of in-flight "ice-reset" threads by
adding a counter ("vsi_update_thread_num") to "struct ice_dcf_hw" and
updating it with atomic operations. When releasing HW resources, we
wait until the counter drops to 0, so that no "ice-reset" thread can
still touch the freed resources.
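
For reference, the scheme boils down to the following minimal,
standalone sketch (illustrative only, not part of the patch; the
names "thread_num", "worker" and "wait_workers" are made up here):

    #include <pthread.h>
    #include <unistd.h>

    static int thread_num; /* plays the role of vsi_update_thread_num */

    static void *worker(void *arg)
    {
            __atomic_fetch_add(&thread_num, 1, __ATOMIC_RELAXED);
            pthread_detach(pthread_self());

            /* ... touch shared HW resources here ... */
            (void)arg;

            /* RELEASE: publish all prior writes to the waiter. */
            __atomic_fetch_sub(&thread_num, 1, __ATOMIC_RELEASE);
            return NULL;
    }

    static void wait_workers(void)
    {
            /*
             * ACQUIRE pairs with the RELEASE above: once the counter
             * reads 0, freeing the resources cannot race with a
             * worker. As in the patch, the counter is bumped by the
             * worker itself, so the waiter assumes all workers were
             * spawned (and scheduled) before it runs.
             */
            while (__atomic_load_n(&thread_num, __ATOMIC_ACQUIRE) != 0)
                    usleep(100 * 1000); /* 100 ms poll, like the patch */
    }

    int main(void)
    {
            pthread_t t;

            if (pthread_create(&t, NULL, worker, NULL) != 0)
                    return 1;
            wait_workers();
            return 0;
    }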

Fixes: c7e1a1a3bfeb ("net/ice: refactor DCF VLAN handling")
Fixes: 3b3757bda3c3 ("net/ice: get VF hardware index in DCF")
Fixes: 7564d5509611 ("net/ice: add DCF hardware initialization")
Fixes: 0b02c9519432 ("net/ice: handle PF initialization by DCF")
Cc: stable@dpdk.org

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: add pthread_exit() for Windows
---
v3: Optimization. Forcibly exiting a thread is unsafe, as it can
leave the spinlock unreleased
---
v4: Safely wait for all event threads to end
---
v5: Spinlock moved to struct ice_dcf_hw
---
v6: Spinlock changed to atomic
---
v7: Move __atomic_fetch_add() to the service handler thread
---
 drivers/net/ice/ice_dcf.c        | 9 +++++++++
 drivers/net/ice/ice_dcf.h        | 2 ++
 drivers/net/ice/ice_dcf_parent.c | 6 ++++++
 3 files changed, 17 insertions(+)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 1c3d22ae0f..adf2cf2cb6 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -32,6 +32,8 @@
 #define ICE_DCF_ARQ_MAX_RETRIES 200
 #define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */
 
+#define ICE_DCF_CHECK_INTERVAL  100   /* 100ms */
+
 #define ICE_DCF_VF_RES_BUF_SZ  \
        (sizeof(struct virtchnl_vf_resource) +  \
                IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
@@ -639,6 +641,8 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
        rte_spinlock_init(&hw->vc_cmd_queue_lock);
        TAILQ_INIT(&hw->vc_cmd_queue);
 
+       __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+
        hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
        if (hw->arq_buf == NULL) {
                PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
@@ -760,6 +764,11 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
        rte_intr_callback_unregister(intr_handle,
                                     ice_dcf_dev_interrupt_handler, hw);
 
+       /* Wait for all "ice-reset" threads to exit. */
+       while (__atomic_load_n(&hw->vsi_update_thread_num,
+               __ATOMIC_ACQUIRE) != 0)
+               rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
+
        ice_dcf_mode_disable(hw);
        iavf_shutdown_adminq(&hw->avf);
 
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 7f42ebabe9..7becf6d187 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,6 +105,8 @@ struct ice_dcf_hw {
        void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
                                uint8_t *msg, uint16_t msglen);
 
+       int vsi_update_thread_num;
+
        uint8_t *arq_buf;
 
        uint16_t num_vfs;
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 01e390ddda..0563edb0b2 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -124,6 +124,9 @@ ice_dcf_vsi_update_service_handler(void *param)
                container_of(hw, struct ice_dcf_adapter, real_hw);
        struct ice_adapter *parent_adapter = &adapter->parent;
 
+       __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
+               __ATOMIC_RELAXED);
+
        pthread_detach(pthread_self());
 
        rte_delay_us(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
@@ -154,6 +157,9 @@ ice_dcf_vsi_update_service_handler(void *param)
 
        free(param);
 
+       __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
+               __ATOMIC_RELEASE);
+
        return NULL;
 }
 
-- 
2.25.1
