From: Qi Zhang <qi.z.zh...@intel.com>

Add support to enable DCF to flush flow rules created by the
previous DCF when the multiple DCF feature is enabled.

Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
---
 drivers/common/iavf/virtchnl.h     |  4 +++-
 drivers/net/ice/ice_dcf.c          | 16 ++++++++++++++++
 drivers/net/ice/ice_dcf.h          |  1 +
 drivers/net/ice/ice_generic_flow.c |  7 +++++++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 79515ee8b..64f97503f 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -128,7 +128,8 @@ enum virtchnl_ops {
        VIRTCHNL_OP_DISABLE_CHANNELS = 31,
        VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
        VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-       /* opcodes 34, 35, 36, 37 and 38 are reserved */
+       /* opcodes 34, 35, 36 and 37 are reserved */
+       VIRTCHNL_OP_DCF_RULE_FLUSH = 38,
        VIRTCHNL_OP_DCF_CMD_DESC = 39,
        VIRTCHNL_OP_DCF_CMD_BUFF = 40,
        VIRTCHNL_OP_DCF_DISABLE = 41,
@@ -1273,6 +1274,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
                 * so the validation needs to be done in PF's context.
                 */
                return 0;
+       case VIRTCHNL_OP_DCF_RULE_FLUSH:
        case VIRTCHNL_OP_DCF_DISABLE:
        case VIRTCHNL_OP_DCF_GET_VSI_MAP:
                /* The two opcodes are required by DCF without message buffer,
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7fd70a394..6ec32d010 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -1066,3 +1066,19 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add)
        rte_free(list);
        return err;
 }
+
+int
+ice_dcf_flush_rules(struct ice_dcf_hw *hw)
+{
+       struct dcf_virtchnl_cmd args;
+       int err = 0;
+
+       memset(&args, 0, sizeof(args));
+       args.v_op = VIRTCHNL_OP_DCF_RULE_FLUSH;
+
+       err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+       if (err)
+               PMD_DRV_LOG(WARNING, "fail to execute command VIRTCHNL_OP_DCF_RULE_FLUSH, DCF role must be preempted.");
+
+       return 0;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index a44a01e90..7e4d48fc5 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -76,5 +76,6 @@ int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
                        struct virtchnl_eth_stats *pstats);
 int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add);
+int ice_dcf_flush_rules(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index ad103d0e8..761f5a528 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -17,6 +17,7 @@
 
 #include "ice_ethdev.h"
 #include "ice_generic_flow.h"
+#include "ice_dcf.h"
 
 /**
  * Non-pipeline mode, fdir and switch both used as distributor,
@@ -2006,6 +2007,9 @@ ice_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
 {
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct ice_dcf_hw *hw = ad->hw.aq_send_cmd_param;
        struct rte_flow *p_flow;
        void *temp;
        int ret = 0;
@@ -2018,6 +2022,9 @@ ice_flow_flush(struct rte_eth_dev *dev,
                }
        }
 
+       if (ad->hw.dcf_enabled && hw->multi_inst)
+               return ice_dcf_flush_rules(ad->hw.aq_send_cmd_param);
+
        return ret;
 }
 
-- 
2.17.1

Reply via email to