Hi Koh,

Please see some small comments inline.
Thursday, May 3, 2018 2:11 AM, Yongseok Koh:
> Subject: [dpdk-dev] [PATCH v2] net/mlx5: change device reference for secondary process
>
> rte_eth_devices[] is not shared between primary and secondary process,
> but a static array to each process. The reverse pointer of device
> (priv->dev) is invalid. Instead, priv has the pointer to shared data of
> the device,
>   struct rte_eth_dev_data *dev_data;
>
> Two macros are added,
>   #define port_id(priv) ((priv)->dev_data->port_id)
>   #define eth_dev(priv) (&rte_eth_devices[(priv)->dev_data->port_id])
>
> Signed-off-by: Yongseok Koh <ys...@mellanox.com>
> ---
>  drivers/net/mlx5/mlx5.c      |  2 +-
>  drivers/net/mlx5/mlx5.h      |  5 ++++-
>  drivers/net/mlx5/mlx5_flow.c | 21 +++++++++------------
>  drivers/net/mlx5/mlx5_mr.c   | 19 +++++++++----------
>  drivers/net/mlx5/mlx5_rxq.c  | 30 +++++++++++++++---------------
>  drivers/net/mlx5/mlx5_txq.c  | 15 +++++++--------
>  6 files changed, 45 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index 8f983061a..6c4a571ab 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -946,7 +946,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> 			goto port_error;
> 		}
> 		eth_dev->data->dev_private = priv;
> -		priv->dev = eth_dev;
> +		priv->dev_data = eth_dev->data;
> 		eth_dev->data->mac_addrs = priv->mac;
> 		eth_dev->device = &pci_dev->device;
> 		rte_eth_copy_pci_info(eth_dev, pci_dev);
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index c4d1d456b..3ab16bfa2 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -121,7 +121,7 @@ struct mlx5_verbs_alloc_ctx {
>  };
>
>  struct priv {
> -	struct rte_eth_dev *dev; /* Ethernet device of master process. */
> +	struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
> 	struct ibv_context *ctx; /* Verbs context. */
> 	struct ibv_device_attr_ex device_attr; /* Device properties. */
> 	struct ibv_pd *pd; /* Protection Domain. */
> @@ -168,6 +168,9 @@ struct priv {
> 	uint32_t nl_sn; /* Netlink message sequence number. */
>  };
>
> +#define port_id(priv) ((priv)->dev_data->port_id)
> +#define eth_dev(priv) (&rte_eth_devices[(priv)->dev_data->port_id])

The macros should be in capital letters and prefixed with MLX5 to stay consistent with the rest of the file. I also suggest using the port_id macro inside the eth_dev one.
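For example, the definitions could look something like the following (the exact names here are only an illustration, not a requirement):

	/* Illustrative names only: */
	#define MLX5_PORT_ID(priv) ((priv)->dev_data->port_id)
	#define MLX5_ETH_DEV(priv) (&rte_eth_devices[MLX5_PORT_ID(priv)])
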
> +
>  /* mlx5.c */
>
>  int mlx5_getenv_int(const char *);
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 129311d50..2e4bfac58 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -2061,7 +2061,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
> 		parser->queue[HASH_RXQ_ETH].ibv_attr;
> 	if (parser->count)
> 		flow->cs = parser->cs;
> -	if (!priv->dev->data->dev_started)
> +	if (!dev->data->dev_started)
> 		return 0;
> 	parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
> 	flow->frxq[HASH_RXQ_ETH].ibv_flow =
> @@ -2113,7 +2113,6 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
> 				  struct rte_flow *flow,
> 				  struct rte_flow_error *error)
>  {
> -	struct priv *priv = dev->data->dev_private;
> 	unsigned int i;
>
> 	for (i = 0; i != hash_rxq_init_n; ++i) {
> @@ -2122,7 +2121,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
> 		flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
> 		parser->queue[i].ibv_attr = NULL;
> 		flow->frxq[i].hash_fields = parser->queue[i].hash_fields;
> -		if (!priv->dev->data->dev_started)
> +		if (!dev->data->dev_started)
> 			continue;
> 		flow->frxq[i].hrxq =
> 			mlx5_hrxq_get(dev,
> @@ -2268,7 +2267,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
> 			      struct rte_flow *flow,
> 			      struct rte_flow_error *error)
>  {
> -	struct priv *priv = dev->data->dev_private;
> +	struct priv *priv __rte_unused = dev->data->dev_private;

Why not remove it?

> 	int ret;
> 	unsigned int i;
> 	unsigned int flows_n = 0;
> @@ -2281,7 +2280,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
> 		goto error;
> 	if (parser->count)
> 		flow->cs = parser->cs;
> -	if (!priv->dev->data->dev_started)
> +	if (!dev->data->dev_started)
> 		return 0;
> 	for (i = 0; i != hash_rxq_init_n; ++i) {
> 		if (!flow->frxq[i].hrxq)
> @@ -3124,9 +3123,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
> 	}
> 	priv->isolated = !!enable;
> 	if (enable)
> -		priv->dev->dev_ops = &mlx5_dev_ops_isolate;
> +		dev->dev_ops = &mlx5_dev_ops_isolate;
> 	else
> -		priv->dev->dev_ops = &mlx5_dev_ops;
> +		dev->dev_ops = &mlx5_dev_ops;
> 	return 0;
>  }
>
> @@ -3514,11 +3513,10 @@ mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
>  static void
>  mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
>  {
> -	struct priv *priv = dev->data->dev_private;
> 	struct rte_eth_fdir_masks *mask =
> -		&priv->dev->data->dev_conf.fdir_conf.mask;
> +		&dev->data->dev_conf.fdir_conf.mask;
>
> -	fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
> +	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
> 	fdir_info->guarant_spc = 0;
> 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
> 	fdir_info->max_flexpayload = 0;
> @@ -3546,9 +3544,8 @@ static int
>  mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
> 		    void *arg)
>  {
> -	struct priv *priv = dev->data->dev_private;
> 	enum rte_fdir_mode fdir_mode =
> -		priv->dev->data->dev_conf.fdir_conf.mode;
> +		dev->data->dev_conf.fdir_conf.mode;
>
> 	if (filter_op == RTE_ETH_FILTER_NOP)
> 		return 0;
> diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
> index 1b4c7ec5d..7a337d0c3 100644
> --- a/drivers/net/mlx5/mlx5_mr.c
> +++ b/drivers/net/mlx5/mlx5_mr.c
> @@ -105,8 +105,8 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
> 	rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
> 	/* Add a new entry, register MR first. */
> 	DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
> -		txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
> -	dev = txq_ctrl->priv->dev;
> +		port_id(txq_ctrl->priv), mp->name, (void *)mp);
> +	dev = eth_dev(txq_ctrl->priv);
> 	mr = mlx5_mr_get(dev, mp);
> 	if (mr == NULL) {
> 		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> @@ -114,8 +114,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
> 				"port %u using unregistered mempool 0x%p(%s)"
> 				" in secondary process, please create mempool"
> 				" before rte_eth_dev_start()",
> -				txq_ctrl->priv->dev->data->port_id,
> -				(void *)mp, mp->name);
> +				port_id(txq_ctrl->priv), (void *)mp, mp->name);
> 			rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
> 			rte_errno = ENOTSUP;
> 			return NULL;
> @@ -126,7 +125,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
> 		DRV_LOG(DEBUG,
> 			"port %u unable to configure memory region,"
> 			" ibv_reg_mr() failed.",
> -			txq_ctrl->priv->dev->data->port_id);
> +			port_id(txq_ctrl->priv));
> 		rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
> 		return NULL;
> 	}
> @@ -135,7 +134,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
> 		DRV_LOG(DEBUG,
> 			"port %u memory region <-> memory pool table full, "
> 			" dropping oldest entry",
> -			txq_ctrl->priv->dev->data->port_id);
> +			port_id(txq_ctrl->priv));
> 		--idx;
> 		mlx5_mr_release(txq->mp2mr[0]);
> 		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
> @@ -146,7 +145,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
> 	DRV_LOG(DEBUG,
> 		"port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
> 		PRIu32,
> -		txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
> +		port_id(txq_ctrl->priv), mp->name, (void *)mp,
> 		txq_ctrl->txq.mp2mr[idx]->lkey);
> 	rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
> 	return mr;
> @@ -207,15 +206,15 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
> 	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
> 	    data.ret == -1)
> 		return;
> -	mr = mlx5_mr_get(priv->dev, mp);
> +	mr = mlx5_mr_get(eth_dev(priv), mp);
> 	if (mr) {
> 		mlx5_mr_release(mr);
> 		return;
> 	}
> -	mr = mlx5_mr_new(priv->dev, mp);
> +	mr = mlx5_mr_new(eth_dev(priv), mp);
> 	if (!mr)
> 		DRV_LOG(ERR, "port %u cannot create memory region: %s",
> -			priv->dev->data->port_id, strerror(rte_errno));
> +			port_id(priv), strerror(rte_errno));
>  }
>
>  /**
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 126412ddd..a85b628fe 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -78,7 +78,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
> 		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
> 		if (buf == NULL) {
> 			DRV_LOG(ERR, "port %u empty mbuf pool",
> -				rxq_ctrl->priv->dev->data->port_id);
> +				port_id(rxq_ctrl->priv));
> 			rte_errno = ENOMEM;
> 			goto error;
> 		}
> @@ -122,7 +122,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
> 	DRV_LOG(DEBUG,
> 		"port %u Rx queue %u allocated and configured %u segments"
> 		" (max %u packets)",
> -		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
> +		port_id(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
> 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
> 	return 0;
> error:
> @@ -134,7 +134,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
> 		(*rxq_ctrl->rxq.elts)[i] = NULL;
> 	}
> 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
> -		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
> +		port_id(rxq_ctrl->priv), rxq_ctrl->idx);
> 	rte_errno = err; /* Restore rte_errno. */
> 	return -rte_errno;
>  }
> @@ -155,7 +155,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
> 	uint16_t i;
>
> 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
> -		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
> +		port_id(rxq_ctrl->priv), rxq_ctrl->idx);
> 	if (rxq->elts == NULL)
> 		return;
> 	/**
> @@ -186,7 +186,7 @@ void
>  mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
>  {
> 	DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
> -		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
> +		port_id(rxq_ctrl->priv), rxq_ctrl->idx);
> 	if (rxq_ctrl->ibv)
> 		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
> 	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
> @@ -354,11 +354,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
> 		return;
> 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
> 	priv = rxq_ctrl->priv;
> -	if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
> +	if (!mlx5_rxq_releasable(eth_dev(priv), rxq_ctrl->rxq.stats.idx))
> 		rte_panic("port %u Rx queue %u is still used by a flow and"
> -			  " cannot be removed\n", priv->dev->data->port_id,
> -			  rxq_ctrl->idx);
> -	mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
> +			  " cannot be removed\n",
> +			  port_id(priv), rxq_ctrl->idx);
> +	mlx5_rxq_release(eth_dev(priv), rxq_ctrl->rxq.stats.idx);
>  }
>
>  /**
> @@ -378,9 +378,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
> 	unsigned int rxqs_n = priv->rxqs_n;
> 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
> 	unsigned int count = 0;
> -	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
> +	struct rte_intr_handle *intr_handle = dev->intr_handle;
>
> -	if (!priv->dev->data->dev_conf.intr_conf.rxq)
> +	if (!dev->data->dev_conf.intr_conf.rxq)
> 		return 0;
> 	mlx5_rx_intr_vec_disable(dev);
> 	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
> @@ -452,12 +452,12 @@ void
>  mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
>  {
> 	struct priv *priv = dev->data->dev_private;
> -	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
> +	struct rte_intr_handle *intr_handle = dev->intr_handle;
> 	unsigned int i;
> 	unsigned int rxqs_n = priv->rxqs_n;
> 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
>
> -	if (!priv->dev->data->dev_conf.intr_conf.rxq)
> +	if (!dev->data->dev_conf.intr_conf.rxq)
> 		return;
> 	if (!intr_handle->intr_vec)
> 		goto free;
> @@ -897,7 +897,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
> 	if (!ret)
> 		rxq_ibv->mr = NULL;
> 	DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
> -		rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
> +		port_id(rxq_ibv->rxq_ctrl->priv),
> 		rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
> 	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
> 		rxq_free_elts(rxq_ibv->rxq_ctrl);
> @@ -990,7 +990,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> 		return NULL;
> 	}
> 	tmpl->socket = socket;
> -	if (priv->dev->data->dev_conf.intr_conf.rxq)
> +	if (dev->data->dev_conf.intr_conf.rxq)
> 		tmpl->irq = 1;
> 	/* Enable scattered packets support for this queue if necessary. */
> 	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
> diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
> index 44358744d..29959b4c7 100644
> --- a/drivers/net/mlx5/mlx5_txq.c
> +++ b/drivers/net/mlx5/mlx5_txq.c
> @@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
> 	for (i = 0; (i != elts_n); ++i)
> 		(*txq_ctrl->txq.elts)[i] = NULL;
> 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
> -		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
> +		port_id(txq_ctrl->priv), txq_ctrl->idx, elts_n);
> 	txq_ctrl->txq.elts_head = 0;
> 	txq_ctrl->txq.elts_tail = 0;
> 	txq_ctrl->txq.elts_comp = 0;
> @@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
> 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
>
> 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
> -		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
> +		port_id(txq_ctrl->priv), txq_ctrl->idx);
> 	txq_ctrl->txq.elts_head = 0;
> 	txq_ctrl->txq.elts_tail = 0;
> 	txq_ctrl->txq.elts_comp = 0;
> @@ -255,9 +255,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
> 	priv = txq_ctrl->priv;
> 	for (i = 0; (i != priv->txqs_n); ++i)
> 		if ((*priv->txqs)[i] == txq) {
> -			mlx5_txq_release(priv->dev, i);
> +			mlx5_txq_release(eth_dev(priv), i);
> 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
> -				priv->dev->data->port_id, txq_ctrl->idx);
> +				port_id(priv), txq_ctrl->idx);
> 			break;
> 		}
>  }
> @@ -618,7 +618,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
>  {
> 	assert(txq_ibv);
> 	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
> -		txq_ibv->txq_ctrl->priv->dev->data->port_id,
> +		port_id(txq_ibv->txq_ctrl->priv),
> 		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
> 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
> 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
> @@ -685,7 +685,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
> 	unsigned int txqs_inline;
> 	unsigned int inline_max_packet_sz;
> 	eth_tx_burst_t tx_pkt_burst =
> -		mlx5_select_tx_function(txq_ctrl->priv->dev);
> +		mlx5_select_tx_function(eth_dev(priv));
> 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
> 	int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
> 					       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
> @@ -759,8 +759,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
> 			DRV_LOG(WARNING,
> 				"port %u txq inline is too large (%d) setting"
> 				" it to the maximum possible: %d\n",
> -				priv->dev->data->port_id, txq_inline,
> -				max_inline);
> +				port_id(priv), txq_inline, max_inline);
> 			txq_ctrl->txq.max_inline = max_inline /
> 						   RTE_CACHE_LINE_SIZE;
> 		}
> --
> 2.11.0