The file rte_pmd_mlx5.h is a public API header,
so all the components it exposes must be prefixed with RTE_PMD_.
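
For example, renames applied in this patch include:

    MLX5_DOMAIN_BIT_NIC_RX        -> RTE_PMD_MLX5_DOMAIN_BIT_NIC_RX
    enum mlx5_flow_engine_mode    -> enum rte_pmd_mlx5_flow_engine_mode
    MLX5_EXTERNAL_RX_QUEUE_ID_MIN -> RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN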

Signed-off-by: Thomas Monjalon <tho...@monjalon.net>
---
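Note: a minimal sketch of how an application would call the renamed
live-migration API after this patch; handle_failure() is a hypothetical
placeholder, not part of DPDK:

    #include <rte_pmd_mlx5.h>

    /* Switch this process's flow engine to standby before a hot upgrade,
     * keeping ingress rules effective in both processes. */
    int ret = rte_pmd_mlx5_flow_engine_set_mode(
                    RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY,
                    RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS);
    if (ret < 0)
            handle_failure(ret); /* -rte_errno value on error */
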
 drivers/net/mlx5/mlx5.h         |  6 +++---
 drivers/net/mlx5/mlx5_defs.h    |  2 +-
 drivers/net/mlx5/mlx5_ethdev.c  |  4 ++--
 drivers/net/mlx5/mlx5_flow.c    | 28 ++++++++++++++--------------
 drivers/net/mlx5/mlx5_flow.h    |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c |  8 ++++----
 drivers/net/mlx5/mlx5_flow_hw.c |  4 ++--
 drivers/net/mlx5/mlx5_rx.c      |  2 +-
 drivers/net/mlx5/mlx5_rx.h      |  4 ++--
 drivers/net/mlx5/mlx5_rxq.c     | 10 +++++-----
 drivers/net/mlx5/mlx5_testpmd.c |  4 ++--
 drivers/net/mlx5/rte_pmd_mlx5.h | 30 +++++++++++++++---------------
 12 files changed, 52 insertions(+), 52 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..9966c2c082 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1684,8 +1684,8 @@ struct mlx5_dv_flow_info {
        struct rte_flow_attr attr;
 };
 
-struct mlx5_flow_engine_mode_info {
-       enum mlx5_flow_engine_mode mode;
+struct rte_pmd_mlx5_flow_engine_mode_info {
+       enum rte_pmd_mlx5_flow_engine_mode mode;
        uint32_t mode_flag;
        /* The list is maintained in insertion order. */
        LIST_HEAD(hot_up_info, mlx5_dv_flow_info) hot_upgrade;
@@ -1834,7 +1834,7 @@ struct mlx5_priv {
        uint32_t nb_queue; /* HW steering queue number. */
        struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
        uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
-       struct mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
+       struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
        struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        /* Item template list. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 2af8c731ef..dc5216cb24 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -181,7 +181,7 @@
 #define MLX5_MAX_INDIRECT_ACTIONS 3
 
 /* Maximum number of external Rx queues supported by rte_flow */
-#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
+#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
 
 /*
  * Linux definition of static_assert is found in /usr/include/assert.h.
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 4a85415ff3..3339da054e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -129,11 +129,11 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       if (priv->ext_rxqs && rxqs_n >= MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
+       if (priv->ext_rxqs && rxqs_n >= RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
                DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u), "
                        "the maximal number of internal Rx queues is %u",
                        dev->data->port_id, rxqs_n,
-                       MLX5_EXTERNAL_RX_QUEUE_ID_MIN - 1);
+                       RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN - 1);
                rte_errno = EINVAL;
                return -rte_errno;
        }
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..ca4702efd9 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -170,7 +170,7 @@ mlx5_need_cache_flow(const struct mlx5_priv *priv,
 {
        return priv->isolated && priv->sh->config.dv_flow_en == 1 &&
                (attr ? !attr->group : true) &&
-               priv->mode_info.mode == MLX5_FLOW_ENGINE_MODE_STANDBY &&
+               priv->mode_info.mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY &&
                (!priv->sh->config.dv_esw_en || !priv->sh->config.fdb_def_rule);
 }
 
@@ -7632,7 +7632,7 @@ mlx5_flow_cache_flow_info(struct rte_eth_dev *dev,
                          uint32_t flow_idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
+       struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
        struct mlx5_dv_flow_info *flow_info, *tmp_info;
        struct rte_flow_error error;
        int len, ret;
@@ -7706,7 +7706,7 @@ static int
 mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
+       struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
        struct mlx5_dv_flow_info *flow_info;
        struct rte_flow_attr attr;
        struct rte_flow_error error;
@@ -7769,7 +7769,7 @@ mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
  * Set the mode of the flow engine of a process to active or standby during live migration.
  *
  * @param[in] mode
- *   MLX5 flow engine mode, @see `enum mlx5_flow_engine_mode`.
+ *   MLX5 flow engine mode, @see `enum rte_pmd_mlx5_flow_engine_mode`.
  * @param[in] flags
  *   Flow engine mode specific flags.
  *
@@ -7777,20 +7777,20 @@ mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
  *   Negative value on error, positive on success.
  */
 int
-rte_pmd_mlx5_flow_engine_set_mode(enum mlx5_flow_engine_mode mode, uint32_t flags)
+rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags)
 {
        struct mlx5_priv *priv;
-       struct mlx5_flow_engine_mode_info *mode_info;
+       struct rte_pmd_mlx5_flow_engine_mode_info *mode_info;
        struct mlx5_dv_flow_info *flow_info, *tmp_info;
        uint16_t port, port_id;
        uint16_t toggle_num = 0;
        struct rte_eth_dev *dev;
-       enum mlx5_flow_engine_mode orig_mode;
+       enum rte_pmd_mlx5_flow_engine_mode orig_mode;
        uint32_t orig_flags;
        bool need_toggle = false;
 
        /* Check if flags combinations are supported. */
-       if (flags && flags != MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS) {
+       if (flags && flags != RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS) {
                DRV_LOG(ERR, "Doesn't support such flags %u", flags);
                return -1;
        }
@@ -7813,7 +7813,7 @@ rte_pmd_mlx5_flow_engine_set_mode(enum mlx5_flow_engine_mode mode, uint32_t flag
                        continue;
                }
                /* Active -> standby. */
-               if (mode == MLX5_FLOW_ENGINE_MODE_STANDBY) {
+               if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY) {
                        if (!LIST_EMPTY(&mode_info->hot_upgrade)) {
                                DRV_LOG(ERR, "Cached rule existed");
                                orig_mode = mode_info->mode;
@@ -7824,7 +7824,7 @@ rte_pmd_mlx5_flow_engine_set_mode(enum mlx5_flow_engine_mode mode, uint32_t flag
                        mode_info->mode = mode;
                        toggle_num++;
                /* Standby -> active. */
-               } else if (mode == MLX5_FLOW_ENGINE_MODE_ACTIVE) {
+               } else if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) {
                        if (LIST_EMPTY(&mode_info->hot_upgrade)) {
                                DRV_LOG(INFO, "No cached rule existed");
                        } else {
@@ -7838,7 +7838,7 @@ rte_pmd_mlx5_flow_engine_set_mode(enum mlx5_flow_engine_mode mode, uint32_t flag
                        toggle_num++;
                }
        }
-       if (mode == MLX5_FLOW_ENGINE_MODE_ACTIVE) {
+       if (mode == RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE) {
                /* Clear cache flow rules. */
                MLX5_ETH_FOREACH_DEV(port, NULL) {
                        priv = rte_eth_devices[port].data->dev_private;
@@ -7913,7 +7913,7 @@ mlx5_flow_create(struct rte_eth_dev *dev,
        if (unlikely(mlx5_need_cache_flow(priv, attr))) {
                if (attr->transfer ||
                    (attr->ingress &&
-                   !(priv->mode_info.mode_flag & MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS)))
+                   !(priv->mode_info.mode_flag & RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS)))
                        new_attr->priority += 1;
        }
        flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, attr, items, actions, true, error);
@@ -7982,7 +7982,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t num_flushed = 0, fidx = 1;
        struct rte_flow *flow;
-       struct mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
+       struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
        struct mlx5_dv_flow_info *flow_info;
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -8475,7 +8475,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow_error *error __rte_unused)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
+       struct rte_pmd_mlx5_flow_engine_mode_info *mode_info = &priv->mode_info;
        struct mlx5_dv_flow_info *flow_info;
 
        if (priv->sh->config.dv_flow_en == 2)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..e9fe294e79 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1656,7 +1656,7 @@ flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id)
        case RTE_FLOW_ITEM_TYPE_METER_COLOR:
                return mlx5_flow_hw_aso_tag;
        case RTE_FLOW_ITEM_TYPE_TAG:
-               if (id == MLX5_LINEAR_HASH_TAG_INDEX)
+               if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
                        return REG_C_3;
                MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
                return mlx5_flow_hw_avl_tags[id];
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 3dc2fe5c71..ddaf1a3508 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1916,7 +1916,7 @@ mlx5_flow_field_id_to_modify_info
                        uint8_t tag_index = flow_tag_index_get(data);
                        int reg;
 
-                       off_be = (tag_index == MLX5_LINEAR_HASH_TAG_INDEX) ?
+                       off_be = (tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ?
                                 16 - (data->offset + width) + 16 : data->offset;
                        if (priv->sh->config.dv_flow_en == 2)
                                reg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG,
@@ -19650,18 +19650,18 @@ flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;
 
-       if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
+       if ((domains & RTE_PMD_MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
                                                flags);
                if (ret != 0)
                        return ret;
        }
-       if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
+       if ((domains & RTE_PMD_MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
-       if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
+       if ((domains & RTE_PMD_MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..8fd76b541b 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1035,7 +1035,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
 
                        value = *(const unaligned_uint32_t *)item.spec;
                        if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
-                           tag_index == MLX5_LINEAR_HASH_TAG_INDEX)
+                           tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
                                value = rte_cpu_to_be_32(value << 16);
                        else
                                value = rte_cpu_to_be_32(value);
@@ -2154,7 +2154,7 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
 
                value_p = (unaligned_uint32_t *)values;
                if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
-                   tag_index == MLX5_LINEAR_HASH_TAG_INDEX)
+                   tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
                        *value_p = rte_cpu_to_be_32(*value_p << 16);
                else
                        *value_p = rte_cpu_to_be_32(*value_p);
diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 392784050f..5bf1a679b2 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -1560,7 +1560,7 @@ int rte_pmd_mlx5_host_shaper_config(int port_id, uint8_t rate,
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct mlx5_priv *priv = dev->data->dev_private;
        bool lwm_triggered =
-            !!(flags & RTE_BIT32(MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED));
+            !!(flags & RTE_BIT32(RTE_PMD_MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED));
 
        if (!lwm_triggered) {
                priv->sh->host_shaper_rate = rate;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index baeb4797aa..2fce908499 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -663,9 +663,9 @@ mlx5_is_external_rxq(struct rte_eth_dev *dev, uint16_t queue_idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_external_rxq *rxq;
 
-       if (!priv->ext_rxqs || queue_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
+       if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
                return false;
-       rxq = &priv->ext_rxqs[queue_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+       rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
        return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 8ef7860e16..88b2dc54b3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2176,7 +2176,7 @@ mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
 
        MLX5_ASSERT(mlx5_is_external_rxq(dev, idx));
-       return &priv->ext_rxqs[idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+       return &priv->ext_rxqs[idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
 }
 
 /**
@@ -2341,7 +2341,7 @@ mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
        if (priv->ext_rxqs == NULL)
                return 0;
 
-       for (i = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX ; ++i) {
+       for (i = RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX ; ++i) {
                rxq = mlx5_ext_rxq_get(dev, i);
                if (rxq->refcnt < 2)
                        continue;
@@ -3210,9 +3210,9 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
        struct rte_eth_dev *dev;
        struct mlx5_priv *priv;
 
-       if (dpdk_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
+       if (dpdk_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
                DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
-                       dpdk_idx, MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX);
+                       dpdk_idx, RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX);
                rte_errno = EINVAL;
                return NULL;
        }
@@ -3243,7 +3243,7 @@ mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
         * DevX, external RxQs array is allocated.
         */
        MLX5_ASSERT(priv->ext_rxqs != NULL);
-       return &priv->ext_rxqs[dpdk_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
+       return &priv->ext_rxqs[dpdk_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
 }
 
 int
diff --git a/drivers/net/mlx5/mlx5_testpmd.c b/drivers/net/mlx5/mlx5_testpmd.c
index 879ea2826e..b108f0e9dd 100644
--- a/drivers/net/mlx5/mlx5_testpmd.c
+++ b/drivers/net/mlx5/mlx5_testpmd.c
@@ -112,10 +112,10 @@ mlx5_test_set_port_host_shaper(uint16_t port_id, uint16_t avail_thresh_triggered
        host_shaper_avail_thresh_triggered[port_id] = avail_thresh_triggered ? 1 : 0;
        if (!avail_thresh_triggered) {
                ret = rte_pmd_mlx5_host_shaper_config(port_id, 0,
-               RTE_BIT32(MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED));
+               RTE_BIT32(RTE_PMD_MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED));
        } else {
                ret = rte_pmd_mlx5_host_shaper_config(port_id, 1,
-               RTE_BIT32(MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED));
+               RTE_BIT32(RTE_PMD_MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED));
        }
        if (ret)
                return ret;
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index b1a92b3dd0..654dd3cff3 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -38,9 +38,9 @@ extern "C" {
 __rte_experimental
 int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
 
-#define MLX5_DOMAIN_BIT_NIC_RX (1 << 0) /**< NIC RX domain bit mask. */
-#define MLX5_DOMAIN_BIT_NIC_TX (1 << 1) /**< NIC TX domain bit mask. */
-#define MLX5_DOMAIN_BIT_FDB    (1 << 2) /**< FDB (TX + RX) domain bit mask. */
+#define RTE_PMD_MLX5_DOMAIN_BIT_NIC_RX (1 << 0) /**< NIC RX domain bit mask. */
+#define RTE_PMD_MLX5_DOMAIN_BIT_NIC_TX (1 << 1) /**< NIC TX domain bit mask. */
+#define RTE_PMD_MLX5_DOMAIN_BIT_FDB    (1 << 2) /**< FDB (TX + RX) domain bit mask. */
 
 /**
  * Synchronize the flows to make them take effort on hardware.
@@ -52,7 +52,7 @@ int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
  * @param[in] domains
  *   Refer to "/usr/include/infiniband/mlx5dv.h".
  *   Bitmask of domains in which the synchronization will be done.
- *   MLX5_DOMAIN_BIT* macros are used to specify the domains.
+ *   RTE_PMD_MLX5_DOMAIN_BIT_* macros are used to specify the domains.
  *   An ADD or OR operation could be used to synchronize flows in more than
  *   one domain per call.
  *
@@ -66,12 +66,12 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
 /**
  * External Rx queue rte_flow index minimal value.
  */
-#define MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+#define RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
 
 /**
  * Tag level to set the linear hash index.
  */
-#define MLX5_LINEAR_HASH_TAG_INDEX 255
+#define RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX 255
 
 /**
  * Update mapping between rte_flow queue index (16 bits) and HW queue index (32
@@ -123,7 +123,7 @@ int rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id,
  * Unset this flag to update the rate of the host port shaper directly in
  * the API call; use rate 0 to disable the current shaper.
  */
-#define MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED 0
+#define RTE_PMD_MLX5_HOST_SHAPER_FLAG_AVAIL_THRESH_TRIGGERED 0
 
 /**
  * Configure a HW shaper to limit Tx rate for a host port.
@@ -135,7 +135,7 @@ int rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id,
  * @param[in] rate
  *   Unit is 100Mbps, setting the rate to 0 disables the shaper.
  * @param[in] flags
- *   Host shaper flags.
+ *   Host shaper flags (see RTE_PMD_MLX5_HOST_SHAPER_FLAG_*).
  * @return
  *   0 : operation success.
  *   Otherwise:
@@ -164,16 +164,16 @@ __rte_experimental
 int rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num);
 
 /* MLX5 flow engine mode definition for live migration. */
-enum mlx5_flow_engine_mode {
-       MLX5_FLOW_ENGINE_MODE_ACTIVE, /* active means high priority, effective in HW. */
-       MLX5_FLOW_ENGINE_MODE_STANDBY, /* standby mode with lower priority flow rules. */
+enum rte_pmd_mlx5_flow_engine_mode {
+       RTE_PMD_MLX5_FLOW_ENGINE_MODE_ACTIVE, /* active means high priority, effective in HW. */
+       RTE_PMD_MLX5_FLOW_ENGINE_MODE_STANDBY, /* standby mode with lower priority flow rules. */
 };
 
 /**
  * When set on the flow engine of a standby process, ingress flow rules will be effective
  * in active and standby processes, so the ingress traffic may be duplicated.
  */
-#define MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS      RTE_BIT32(0)
+#define RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS      RTE_BIT32(0)
 
 /**
  * @warning
@@ -217,9 +217,9 @@ enum mlx5_flow_engine_mode {
  *   old:    shutdown
  *
  * @param mode
- *   The desired mode `mlx5_flow_engine_mode`.
+ *   The desired mode (see rte_pmd_mlx5_flow_engine_mode).
  * @param flags
- *   Mode specific flags.
+ *   Mode specific flags (see RTE_PMD_MLX5_FLOW_ENGINE_FLAG_*).
  * @return
  *   Positive value on success, -rte_errno value on error:
  *   - (> 0) Number of switched devices.
@@ -227,7 +227,7 @@ enum mlx5_flow_engine_mode {
  *   - (-EPERM) if operation failed and can't recover.
  */
 __rte_experimental
-int rte_pmd_mlx5_flow_engine_set_mode(enum mlx5_flow_engine_mode mode, uint32_t flags);
+int rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags);
 
 #ifdef __cplusplus
 }
-- 
2.42.0
