Replace direct accesses to the shared memory config with calls to the new API.
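
The conversion is mechanical: each explicit read lock/unlock of
mcfg->memory_hotplug_lock becomes a call to the matching rte_mcfg_mem_*()
wrapper, so drivers no longer reach into struct rte_mem_config at all. A
minimal sketch of the pattern, for illustration only and not part of this
patch (count_segs() and walk_segs_locked() are made-up names; the old-style
access shown in the comment only compiles while struct rte_mem_config is
still public):

#include <rte_common.h>         /* __rte_unused */
#include <rte_eal_memconfig.h>  /* rte_mcfg_mem_read_lock()/unlock() */
#include <rte_memory.h>         /* rte_memseg_walk_thread_unsafe() */

/* Hypothetical per-segment callback, standing in for real driver work
 * such as fslmc_dmamap_seg() below.
 */
static int
count_segs(const struct rte_memseg_list *msl __rte_unused,
	   const struct rte_memseg *ms __rte_unused, void *arg)
{
	(*(int *)arg)++;
	return 0;
}

static int
walk_segs_locked(void)
{
	int n = 0;

	/* Old pattern, removed by this patch (relies on struct
	 * rte_mem_config being public):
	 *
	 *   struct rte_mem_config *mcfg =
	 *           rte_eal_get_configuration()->mem_config;
	 *   rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	 *   ...
	 *   rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	 *
	 * New pattern: take the same hotplug read lock via the wrappers,
	 * then use the thread-unsafe walk variant under the held lock.
	 */
	rte_mcfg_mem_read_lock();
	rte_memseg_walk_thread_unsafe(count_segs, &n);
	rte_mcfg_mem_read_unlock();

	return n;
}

The wrappers take the same hotplug read lock as before, so locking semantics
are unchanged; only the struct access moves behind the EAL API.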
Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---
 drivers/bus/fslmc/fslmc_vfio.c                   |  8 +++-----
 drivers/net/mlx4/mlx4_mr.c                       | 11 +++++------
 drivers/net/mlx5/mlx5_mr.c                       | 11 +++++------
 drivers/net/virtio/virtio_user/virtio_user_dev.c |  7 +++----
 4 files changed, 16 insertions(+), 21 deletions(-)

diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 1aae56fa9..44e4fa6e2 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -347,14 +347,12 @@ fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
 int rte_fslmc_vfio_dmamap(void)
 {
 	int i = 0, ret;
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-	rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
 
 	/* Lock before parsing and registering callback to memory subsystem */
-	rte_rwlock_read_lock(mem_lock);
+	rte_mcfg_mem_read_lock();
 
 	if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
-		rte_rwlock_read_unlock(mem_lock);
+		rte_mcfg_mem_read_unlock();
 		return -1;
 	}
 
@@ -378,7 +376,7 @@ int rte_fslmc_vfio_dmamap(void)
 	/* Existing segments have been mapped and memory callback for hotplug
 	 * has been installed.
 	 */
-	rte_rwlock_read_unlock(mem_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return 0;
 }
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index 48d458ad4..80827ce75 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -593,7 +593,6 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 		       uintptr_t addr)
 {
 	struct mlx4_priv *priv = dev->data->dev_private;
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	const struct rte_memseg_list *msl;
 	const struct rte_memseg *ms;
 	struct mlx4_mr *mr = NULL;
@@ -696,7 +695,7 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 	 * just single page. If not, go on with the big chunk atomically from
 	 * here.
 	 */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	data_re = data;
 	if (len > msl->page_sz &&
 	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
@@ -714,7 +713,7 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 	 */
 	data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
 	data.end = data.start + msl->page_sz;
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	mr_free(mr);
 	goto alloc_resources;
 }
@@ -734,7 +733,7 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 		DEBUG("port %u found MR for %p on final lookup, abort",
 		      dev->data->port_id, (void *)addr);
 		rte_rwlock_write_unlock(&priv->mr.rwlock);
-		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+		rte_mcfg_mem_read_unlock();
 		/*
 		 * Must be unlocked before calling rte_free() because
 		 * mlx4_mr_mem_event_free_cb() can be called inside.
@@ -802,12 +801,12 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 	/* Lookup can't fail. */
 	assert(entry->lkey != UINT32_MAX);
 	rte_rwlock_write_unlock(&priv->mr.rwlock);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	return entry->lkey;
 err_mrlock:
 	rte_rwlock_write_unlock(&priv->mr.rwlock);
 err_memlock:
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 err_nolock:
 	/*
 	 * In case of error, as this can be called in a datapath, a warning
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 66e8e874e..872d0591e 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -580,7 +580,6 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_dev_config *config = &priv->config;
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	const struct rte_memseg_list *msl;
 	const struct rte_memseg *ms;
 	struct mlx5_mr *mr = NULL;
@@ -684,7 +683,7 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
 	 * just single page. If not, go on with the big chunk atomically from
 	 * here.
 	 */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	data_re = data;
 	if (len > msl->page_sz &&
 	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
@@ -702,7 +701,7 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
 	 */
 	data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
 	data.end = data.start + msl->page_sz;
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	mr_free(mr);
 	goto alloc_resources;
 }
@@ -722,7 +721,7 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
 		DEBUG("port %u found MR for %p on final lookup, abort",
 		      dev->data->port_id, (void *)addr);
 		rte_rwlock_write_unlock(&sh->mr.rwlock);
-		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+		rte_mcfg_mem_read_unlock();
 		/*
 		 * Must be unlocked before calling rte_free() because
 		 * mlx5_mr_mem_event_free_cb() can be called inside.
@@ -790,12 +789,12 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
 	/* Lookup can't fail. */
 	assert(entry->lkey != UINT32_MAX);
 	rte_rwlock_write_unlock(&sh->mr.rwlock);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	return entry->lkey;
 err_mrlock:
 	rte_rwlock_write_unlock(&sh->mr.rwlock);
 err_memlock:
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 err_nolock:
 	/*
 	 * In case of error, as this can be called in a datapath, a warning
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index e743695e4..c3ab9a21d 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -125,7 +125,6 @@ is_vhost_user_by_type(const char *path)
 int
 virtio_user_start_device(struct virtio_user_dev *dev)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	uint64_t features;
 	int ret;
 
@@ -142,7 +141,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 	 * replaced when we get proper supports from the
 	 * memory subsystem in the future.
 	 */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_lock();
 	pthread_mutex_lock(&dev->mutex);
 
 	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
@@ -180,12 +179,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 
 	dev->started = true;
 	pthread_mutex_unlock(&dev->mutex);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 
 	return 0;
 error:
 	pthread_mutex_unlock(&dev->mutex);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_mcfg_mem_read_unlock();
 	/* TODO: free resource here or caller to check */
 	return -1;
 }
-- 
2.17.1