MLX5 PMD driver supports 2 types of indirect actions: legacy INDIRECT and INDIRECT_LIST. PMD has different handlers for each of these indirect action types. Therefore, PMD marks the async `job::indirect_type` with the relevant value.
PMD sets the type only during indirect action creation. A legacy INDIRECT query could get a job object used previously by an INDIRECT_LIST action. In that case, such a job object was handled as INDIRECT_LIST because the `job::indirect_type` was not re-assigned. The patch sets `job::indirect_type` during job initialization, according to the operation type. Fixes: 59155721936e ("net/mlx5: fix indirect flow completion processing") Cc: sta...@dpdk.org Signed-off-by: Gregory Etelson <getel...@nvidia.com> Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com> --- drivers/net/mlx5/mlx5_flow_hw.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c index 8f004b5435..b9ba05f695 100644 --- a/drivers/net/mlx5/mlx5_flow_hw.c +++ b/drivers/net/mlx5/mlx5_flow_hw.c @@ -188,6 +188,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, const struct rte_flow_action_handle *handle, void *user_data, void *query_data, enum mlx5_hw_job_type type, + enum mlx5_hw_indirect_type indirect_type, struct rte_flow_error *error); static int mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev, @@ -1692,7 +1693,8 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev, struct mlx5_aso_mtr *aso_mtr; struct mlx5_hw_q_job *job = flow_hw_action_job_init(priv, queue, NULL, NULL, NULL, - MLX5_HW_Q_JOB_TYPE_CREATE, NULL); + MLX5_HW_Q_JOB_TYPE_CREATE, + MLX5_HW_INDIRECT_TYPE_LEGACY, NULL); if (!job) return -1; @@ -10998,6 +11000,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, const struct rte_flow_action_handle *handle, void *user_data, void *query_data, enum mlx5_hw_job_type type, + enum mlx5_hw_indirect_type indirect_type, struct rte_flow_error *error) { struct mlx5_hw_q_job *job; @@ -11015,6 +11018,7 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, job->action = handle; job->user_data = user_data; job->query.user = query_data; + job->indirect_type = 
indirect_type; return job; } @@ -11026,7 +11030,7 @@ mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_error *error) { return flow_hw_action_job_init(priv, queue, handle, user_data, query_data, - type, error); + type, MLX5_HW_INDIRECT_TYPE_LEGACY, error); } static __rte_always_inline void @@ -11096,7 +11100,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, if (attr || force_job) { job = flow_hw_action_job_init(priv, queue, NULL, user_data, NULL, MLX5_HW_Q_JOB_TYPE_CREATE, - error); + MLX5_HW_INDIRECT_TYPE_LEGACY, error); if (!job) return NULL; } @@ -11165,7 +11169,6 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, } if (job && !force_job) { job->action = handle; - job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY; flow_hw_action_finalize(dev, queue, job, push, aso, handle != NULL); } @@ -11257,7 +11260,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, if (attr || force_job) { job = flow_hw_action_job_init(priv, queue, handle, user_data, NULL, MLX5_HW_Q_JOB_TYPE_UPDATE, - error); + MLX5_HW_INDIRECT_TYPE_LEGACY, error); if (!job) return -rte_errno; } @@ -11339,7 +11342,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, if (attr || force_job) { job = flow_hw_action_job_init(priv, queue, handle, user_data, NULL, MLX5_HW_Q_JOB_TYPE_DESTROY, - error); + MLX5_HW_INDIRECT_TYPE_LEGACY, error); if (!job) return -rte_errno; } @@ -11663,7 +11666,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue, if (attr) { job = flow_hw_action_job_init(priv, queue, handle, user_data, data, MLX5_HW_Q_JOB_TYPE_QUERY, - error); + MLX5_HW_INDIRECT_TYPE_LEGACY, error); if (!job) return -rte_errno; } @@ -11717,7 +11720,7 @@ flow_hw_async_action_handle_query_update job = flow_hw_action_job_init(priv, queue, handle, user_data, query, MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, - error); + MLX5_HW_INDIRECT_TYPE_LEGACY, error); if (!job) return -rte_errno; } @@ 
-12397,7 +12400,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, if (attr) { job = flow_hw_action_job_init(priv, queue, NULL, user_data, NULL, MLX5_HW_Q_JOB_TYPE_CREATE, - error); + MLX5_HW_INDIRECT_TYPE_LIST, error); if (!job) return NULL; } @@ -12417,7 +12420,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, } if (job) { job->action = handle; - job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST; flow_hw_action_finalize(dev, queue, job, push, false, handle != NULL); } @@ -12462,7 +12464,7 @@ flow_hw_async_action_list_handle_destroy if (attr) { job = flow_hw_action_job_init(priv, queue, NULL, user_data, NULL, MLX5_HW_Q_JOB_TYPE_DESTROY, - error); + MLX5_HW_INDIRECT_TYPE_LIST, error); if (!job) return rte_errno; } -- 2.39.2