Rename the memory translation APIs to _iova types.
The following APIs are renamed from:

rte_mempool_populate_phys()
rte_mempool_populate_phys_tab()
rte_eal_using_phys_addrs()
rte_mem_virt2phy()
rte_dump_physmem_layout()
rte_eal_get_physmem_layout()
rte_eal_get_physmem_size()
rte_malloc_virt2phy()
rte_mem_phy2mch()

to the following _iova type APIs:

rte_mempool_populate_iova()
rte_mempool_populate_iova_tab()
rte_eal_using_iova_addrs()
rte_mem_virt2iova()
rte_dump_iovamem_layout()
rte_eal_get_iovamem_layout()
rte_eal_get_iovamem_size()
rte_malloc_virt2iova()
rte_mem_phy2iova()
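
As a usage illustration (a minimal sketch; the buffer and variable names
below are only for illustration and are not part of this patch), a call
site that previously read:

    iova_addr_t pa = rte_mem_virt2phy(buf);

now becomes:

    iova_addr_t pa = rte_mem_virt2iova(buf);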

Signed-off-by: Santosh Shukla <santosh.shu...@caviumnetworks.com>
---
 app/proc_info/main.c                             |  2 +-
 app/test-crypto-perf/cperf_test_vector_parsing.c |  4 ++--
 app/test-crypto-perf/cperf_test_vectors.c        |  6 +++---
 app/test-pmd/cmdline.c                           |  2 +-
 doc/guides/rel_notes/release_17_11.rst           |  2 +-
 drivers/bus/dpaa/base/qbman/qman.c               |  2 +-
 drivers/bus/dpaa/base/qbman/qman.h               |  2 +-
 drivers/bus/dpaa/rte_dpaa_bus.h                  |  2 +-
 drivers/bus/fslmc/fslmc_vfio.c                   |  2 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h          |  4 ++--
 drivers/crypto/dpaa_sec/dpaa_sec.c               |  4 ++--
 drivers/crypto/qat/qat_qp.c                      |  2 +-
 drivers/mempool/octeontx/octeontx_fpavf.c        |  2 +-
 drivers/net/ark/ark_ethdev_rx.c                  |  4 ++--
 drivers/net/ark/ark_ethdev_tx.c                  |  4 ++--
 drivers/net/bnxt/bnxt_ethdev.c                   |  8 ++++----
 drivers/net/bnxt/bnxt_hwrm.c                     | 20 ++++++++++----------
 drivers/net/bnxt/bnxt_ring.c                     |  4 ++--
 drivers/net/bnxt/bnxt_vnic.c                     |  4 ++--
 drivers/net/liquidio/lio_rxtx.c                  |  2 +-
 drivers/net/mlx4/mlx4_mr.c                       |  2 +-
 drivers/net/mlx5/mlx5_mr.c                       |  2 +-
 drivers/net/octeontx/base/octeontx_pkovf.c       |  2 +-
 drivers/net/sfc/sfc_tso.c                        |  2 +-
 examples/l2fwd-crypto/main.c                     |  2 +-
 lib/librte_cryptodev/rte_cryptodev.c             |  2 +-
 lib/librte_eal/bsdapp/eal/eal.c                  |  2 +-
 lib/librte_eal/bsdapp/eal/eal_memory.c           |  2 +-
 lib/librte_eal/bsdapp/eal/rte_eal_version.map    | 12 ++++++------
 lib/librte_eal/common/eal_common_memory.c        |  6 +++---
 lib/librte_eal/common/eal_common_memzone.c       |  4 ++--
 lib/librte_eal/common/eal_private.h              |  2 +-
 lib/librte_eal/common/include/rte_malloc.h       |  2 +-
 lib/librte_eal/common/include/rte_memory.h       |  8 ++++----
 lib/librte_eal/common/rte_malloc.c               |  2 +-
 lib/librte_eal/linuxapp/eal/eal.c                |  2 +-
 lib/librte_eal/linuxapp/eal/eal_memory.c         |  8 ++++----
 lib/librte_eal/linuxapp/eal/eal_pci.c            |  4 ++--
 lib/librte_eal/linuxapp/eal/eal_vfio.c           |  4 ++--
 lib/librte_eal/linuxapp/eal/rte_eal_version.map  | 12 ++++++------
 lib/librte_mempool/rte_mempool.c                 | 20 ++++++++++----------
 lib/librte_mempool/rte_mempool.h                 |  6 +++---
 lib/librte_mempool/rte_mempool_version.map       |  4 ++--
 lib/librte_vhost/vhost_user.c                    |  4 ++--
 test/test/commands.c                             |  2 +-
 test/test/test_malloc.c                          |  4 ++--
 test/test/test_memory.c                          |  6 +++---
 test/test/test_mempool.c                         |  4 ++--
 test/test/test_memzone.c                         | 10 +++++-----
 49 files changed, 112 insertions(+), 112 deletions(-)

diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 64fbbd0f8..f30b1b29e 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -297,7 +297,7 @@ static void
 meminfo_display(void)
 {
        printf("----------- MEMORY_SEGMENTS -----------\n");
-       rte_dump_physmem_layout(stdout);
+       rte_dump_iovamem_layout(stdout);
        printf("--------- END_MEMORY_SEGMENTS ---------\n");
 
        printf("------------ MEMORY_ZONES -------------\n");
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index 3952632c1..d4736f9ef 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -445,7 +445,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
        } else if (strstr(key_token, "aad")) {
                rte_free(vector->aad.data);
                vector->aad.data = data;
-               vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+               vector->aad.phys_addr = rte_malloc_virt2iova(vector->aad.data);
                if (tc_found)
                        vector->aad.length = data_length;
                else {
@@ -460,7 +460,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
        } else if (strstr(key_token, "digest")) {
                rte_free(vector->digest.data);
                vector->digest.data = data;
-               vector->digest.phys_addr = rte_malloc_virt2phy(
+               vector->digest.phys_addr = rte_malloc_virt2iova(
                        vector->digest.data);
                if (tc_found)
                        vector->digest.length = data_length;
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index e51dcc3f1..fa911ff69 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -498,7 +498,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
                                return NULL;
                        }
                        t_vec->digest.phys_addr =
-                               rte_malloc_virt2phy(t_vec->digest.data);
+                               rte_malloc_virt2iova(t_vec->digest.data);
                        t_vec->digest.length = options->digest_sz;
                        memcpy(t_vec->digest.data, digest,
                                        options->digest_sz);
@@ -531,7 +531,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
                                return NULL;
                        }
                        memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
-                       t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+                       t_vec->aad.phys_addr = rte_malloc_virt2iova(t_vec->aad.data);
                        t_vec->aad.length = options->aead_aad_sz;
                } else {
                        t_vec->aad.data = NULL;
@@ -546,7 +546,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
                        return NULL;
                }
                t_vec->digest.phys_addr =
-                               rte_malloc_virt2phy(t_vec->digest.data);
+                               rte_malloc_virt2iova(t_vec->digest.data);
                t_vec->digest.length = options->digest_sz;
                memcpy(t_vec->digest.data, digest, options->digest_sz);
                t_vec->data.aead_offset = 0;
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index bb01e989a..8e5f90c73 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8315,7 +8315,7 @@ static void cmd_dump_parsed(void *parsed_result,
        struct cmd_dump_result *res = parsed_result;
 
        if (!strcmp(res->dump, "dump_physmem"))
-               rte_dump_physmem_layout(stdout);
+               rte_dump_iovamem_layout(stdout);
        else if (!strcmp(res->dump, "dump_memzone"))
                rte_memzone_dump(stdout);
        else if (!strcmp(res->dump, "dump_struct_sizes"))
diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index 8db35f5e4..6f3b92bc5 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -269,7 +269,7 @@ API Changes
 
 * Xen dom0 in EAL was removed, as well as xenvirt PMD and vhost_xen.
 
-* ``rte_mem_phy2mch`` was used in Xen dom0 to obtain the physical address;
+* ``rte_mem_phy2iova`` was used in Xen dom0 to obtain the physical address;
   remove this API as Xen dom0 support was removed.
 
 * **Add return value to stats_get dev op API**
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 8c8d270f8..87fec60d1 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -1351,7 +1351,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
                        memset(&mcc->initfq.fqd.context_a, 0,
                               sizeof(mcc->initfq.fqd.context_a));
                } else {
-                       phys_fq = rte_mem_virt2phy(fq);
+                       phys_fq = rte_mem_virt2iova(fq);
                        qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
                }
        }
diff --git a/drivers/bus/dpaa/base/qbman/qman.h b/drivers/bus/dpaa/base/qbman/qman.h
index 7c645f478..2c0f694cd 100644
--- a/drivers/bus/dpaa/base/qbman/qman.h
+++ b/drivers/bus/dpaa/base/qbman/qman.h
@@ -240,7 +240,7 @@ struct qm_portal {
 #define EQCR_CARRYCLEAR(p) \
        (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
 
-extern dma_addr_t rte_mem_virt2phy(const void *addr);
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
 
 /* Bit-wise logic to convert a ring pointer to a ring index */
 static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index dff13016d..1dd03847f 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -109,7 +109,7 @@ struct dpaa_portal {
 /* TODO - this is costly, need to write a fast coversion routine */
 static inline void *rte_dpaa_mem_ptov(iova_addr_t paddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+       const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
        int i;
 
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 7831201ad..4fbfd0f6b 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -234,7 +234,7 @@ int rte_fslmc_vfio_dmamap(void)
        if (is_dma_done)
                return 0;
 
-       memseg = rte_eal_get_physmem_layout();
+       memseg = rte_eal_get_iovamem_layout();
        if (memseg == NULL) {
                FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
                return -ENODEV;
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 9524eb624..b4fd6496a 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -276,7 +276,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr) __attribute__((unused));
 /* todo - this is costly, need to write a fast coversion routine */
 static void *dpaa2_mem_ptov(iova_addr_t paddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+       const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
        int i;
 
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
@@ -291,7 +291,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr)
 static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
 static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+       const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
        int i;
 
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 94b9f6700..fe3c46b10 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -113,7 +113,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
 static inline iova_addr_t
 dpaa_mem_vtop(void *vaddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+       const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
        uint64_t vaddr_64, paddr;
        int i;
 
@@ -133,7 +133,7 @@ dpaa_mem_vtop(void *vaddr)
 static inline void *
 dpaa_mem_ptov(iova_addr_t paddr)
 {
-       const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+       const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
        int i;
 
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 8bd60ffa7..d29ce44ff 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -106,7 +106,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
 
        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
                                        queue_name, queue_size, socket_id);
-       ms = rte_eal_get_physmem_layout();
+       ms = rte_eal_get_iovamem_layout();
        switch (ms[0].hugepage_sz) {
        case(RTE_PGSIZE_2M):
                memzone_flags = RTE_MEMZONE_2MB;
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 4947f2dd5..6bdfc0339 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -247,7 +247,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
 
        /* Configure stack */
        fpa->pool_stack_base = memptr;
-       phys_addr = rte_malloc_virt2phy(memptr);
+       phys_addr = rte_malloc_virt2iova(memptr);
 
        buf_size /= FPA_LN_SIZE;
 
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 23918f6d3..915bf1e0e 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -99,11 +99,11 @@ eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
        iova_addr_t phys_addr_q_base;
        iova_addr_t phys_addr_prod_index;
 
-       queue_base = rte_malloc_virt2phy(queue);
+       queue_base = rte_malloc_virt2iova(queue);
        phys_addr_prod_index = queue_base +
                offsetof(struct ark_rx_queue, prod_index);
 
-       phys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);
+       phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
 
        /* Verify HW */
        if (ark_mpu_verify(queue->mpu, sizeof(iova_addr_t))) {
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 8c4b50ea7..4df4e3e1e 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -317,8 +317,8 @@ eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
        if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
                return -1;
 
-       queue_base = rte_malloc_virt2phy(queue);
-       ring_base = rte_malloc_virt2phy(queue->meta_q);
+       queue_base = rte_malloc_virt2iova(queue);
+       ring_base = rte_malloc_virt2iova(queue->meta_q);
        cons_index_addr =
                queue_base + offsetof(struct ark_tx_queue, cons_index);
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 845bb03d9..bdf5bbabd 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -2829,8 +2829,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
                        RTE_LOG(WARNING, PMD,
                                "Memzone physical address same as virtual.\n");
                        RTE_LOG(WARNING, PMD,
-                               "Using rte_mem_virt2phy()\n");
-                       mz_phys_addr = rte_mem_virt2phy(mz->addr);
+                               "Using rte_mem_virt2iova()\n");
+                       mz_phys_addr = rte_mem_virt2iova(mz->addr);
                        if (mz_phys_addr == 0) {
                                RTE_LOG(ERR, PMD,
                                "unable to map address to physical memory\n");
@@ -2864,8 +2864,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
                        RTE_LOG(WARNING, PMD,
                                "Memzone physical address same as virtual.\n");
                        RTE_LOG(WARNING, PMD,
-                               "Using rte_mem_virt2phy()\n");
-                       mz_phys_addr = rte_mem_virt2phy(mz->addr);
+                               "Using rte_mem_virt2iova()\n");
+                       mz_phys_addr = rte_mem_virt2iova(mz->addr);
                        if (mz_phys_addr == 0) {
                                RTE_LOG(ERR, PMD,
                                "unable to map address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 10898e19a..103856146 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -277,7 +277,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-                        rte_mem_virt2phy(vlan_table));
+                        rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);
@@ -318,7 +318,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
        req.fid = rte_cpu_to_le_16(fid);
 
        req.vlan_tag_mask_tbl_addr =
-               rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+               rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -643,7 +643,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+                       rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
@@ -669,7 +669,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
-                       rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+                       rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
@@ -1752,7 +1752,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
-               rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+               rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -2621,7 +2621,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
-               rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+               rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map buffer address to physical memory\n");
@@ -3043,7 +3043,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
        rte_mem_lock_page(buf);
        if (buf == NULL)
                return -ENOMEM;
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3079,7 +3079,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3140,7 +3140,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
        if (!buf)
                return -ENOMEM;
 
-       dma_handle = rte_mem_virt2phy(buf);
+       dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
@@ -3195,7 +3195,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
        req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
        req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+       req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
        if (req.vnic_id_tbl_addr == 0) {
                HWRM_UNLOCK();
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 8e83e4704..1e6db4495 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -177,10 +177,10 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                RTE_LOG(WARNING, PMD,
                        "Memzone physical address same as virtual.\n");
                RTE_LOG(WARNING, PMD,
-                       "Using rte_mem_virt2phy()\n");
+                       "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
-               mz_phys_addr = rte_mem_virt2phy(mz->addr);
+               mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "unable to map ring address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 76c9a35d7..2e4f0e5e8 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -197,8 +197,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
                RTE_LOG(WARNING, PMD,
                        "Memzone physical address same as virtual.\n");
                RTE_LOG(WARNING, PMD,
-                       "Using rte_mem_virt2phy()\n");
-               mz_phys_addr = rte_mem_virt2phy(mz->addr);
+                       "Using rte_mem_virt2iova()\n");
+               mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "unable to map vnic address to physical memory\n");
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 5156ac08d..67179eaf5 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -1790,7 +1790,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
                                m = m->next;
                        }
 
-                       phyaddr = rte_mem_virt2phy(g->sg);
+                       phyaddr = rte_mem_virt2iova(g->sg);
                        if (phyaddr == RTE_BAD_PHYS_ADDR) {
                                PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
                                goto xmit_failed;
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index 9700884b4..120e1f352 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -144,7 +144,7 @@ mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end)
 struct ibv_mr *
 mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
 {
-       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
        uintptr_t start;
        uintptr_t end;
        unsigned int i;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 6b29eed55..d4e4af568 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -272,7 +272,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
 struct mlx5_mr*
 priv_mr_new(struct priv *priv, struct rte_mempool *mp)
 {
-       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
        uintptr_t start;
        uintptr_t end;
        unsigned int i;
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.c b/drivers/net/octeontx/base/octeontx_pkovf.c
index 5fefdffe3..06f9b5969 100644
--- a/drivers/net/octeontx/base/octeontx_pkovf.c
+++ b/drivers/net/octeontx/base/octeontx_pkovf.c
@@ -485,7 +485,7 @@ octeontx_pko_init_fc(const size_t pko_vf_count)
                return -ENOMEM;
        }
 
-       pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2phy((void *)
+       pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
                                                        pko_vf_ctl.fc_iomem.va);
        pko_vf_ctl.fc_iomem.size = fc_mem_size;
 
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index fb79d7491..ad100676e 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -155,7 +155,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
                                           header_len);
                tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
 
-               header_paddr = rte_malloc_virt2phy((void *)tsoh);
+               header_paddr = rte_malloc_virt2iova((void *)tsoh);
        } else {
                if (m->data_len == header_len) {
                        *in_off = 0;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 96e5ed77f..a51ba45e0 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -2484,7 +2484,7 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
        options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
        if (options->aad.data == NULL)
                rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
-       options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
+       options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
 }
 
 int
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index e48d562b4..a54a0b343 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1271,7 +1271,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,
 
        __rte_crypto_op_reset(op, type);
 
-       op->phys_addr = rte_mem_virt2phy(_op_data);
+       op->phys_addr = rte_mem_virt2iova(_op_data);
        op->mempool = mempool;
 }
 
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index e981721aa..eea7cfe62 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -457,7 +457,7 @@ eal_check_mem_on_local_socket(void)
 
        socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
 
-       ms = rte_eal_get_physmem_layout();
+       ms = rte_eal_get_iovamem_layout();
 
        for (i = 0; i < RTE_MAX_MEMSEG; i++)
                if (ms[i].socket_id == socket_id &&
diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c
index d8882dcef..839befe59 100644
--- a/lib/librte_eal/bsdapp/eal/eal_memory.c
+++ b/lib/librte_eal/bsdapp/eal/eal_memory.c
@@ -51,7 +51,7 @@
  * Get physical address of any mapped virtual address in the current process.
  */
 iova_addr_t
-rte_mem_virt2phy(const void *virtaddr)
+rte_mem_virt2iova(const void *virtaddr)
 {
        /* XXX not implemented. This function is only used by
         * rte_mempool_virt2phy() when hugepages are disabled. */
diff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
index 080896f73..b7f2d72b9 100644
--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
@@ -14,7 +14,7 @@ DPDK_2.0 {
        rte_cpu_get_flag_enabled;
        rte_cycles_vmware_tsc_map;
        rte_delay_us;
-       rte_dump_physmem_layout;
+       rte_dump_iovamem_layout;
        rte_dump_registers;
        rte_dump_stack;
        rte_dump_tailq;
@@ -25,8 +25,8 @@ DPDK_2.0 {
        rte_eal_devargs_type_count;
        rte_eal_get_configuration;
        rte_eal_get_lcore_state;
-       rte_eal_get_physmem_layout;
-       rte_eal_get_physmem_size;
+       rte_eal_get_iovamem_layout;
+       rte_eal_get_iovamem_size;
        rte_eal_has_hugepages;
        rte_eal_hpet_init;
        rte_eal_init;
@@ -62,10 +62,10 @@ DPDK_2.0 {
        rte_malloc_set_limit;
        rte_malloc_socket;
        rte_malloc_validate;
-       rte_malloc_virt2phy;
+       rte_malloc_virt2iova;
        rte_mem_lock_page;
-       rte_mem_phy2mch;
-       rte_mem_virt2phy;
+       rte_mem_phy2iova;
+       rte_mem_virt2iova;
        rte_memdump;
        rte_memory_get_nchannel;
        rte_memory_get_nrank;
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 5ed83d20a..44bc072bf 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -55,7 +55,7 @@
  * memory. The last element of the table contains a NULL address.
  */
 const struct rte_memseg *
-rte_eal_get_physmem_layout(void)
+rte_eal_get_iovamem_layout(void)
 {
        return rte_eal_get_configuration()->mem_config->memseg;
 }
@@ -63,7 +63,7 @@ rte_eal_get_physmem_layout(void)
 
 /* get the total size of memory */
 uint64_t
-rte_eal_get_physmem_size(void)
+rte_eal_get_iovamem_size(void)
 {
        const struct rte_mem_config *mcfg;
        unsigned i = 0;
@@ -84,7 +84,7 @@ rte_eal_get_physmem_size(void)
 
 /* Dump the physical memory layout on console */
 void
-rte_dump_physmem_layout(FILE *f)
+rte_dump_iovamem_layout(FILE *f)
 {
        const struct rte_mem_config *mcfg;
        unsigned i = 0;
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 3026e36b8..86457eaf0 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -251,7 +251,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
 
        mcfg->memzone_cnt++;
        snprintf(mz->name, sizeof(mz->name), "%s", name);
-       mz->phys_addr = rte_malloc_virt2phy(mz_addr);
+       mz->phys_addr = rte_malloc_virt2iova(mz_addr);
        mz->addr = mz_addr;
        mz->len = (requested_len == 0 ? elem->size : requested_len);
        mz->hugepage_sz = elem->ms->hugepage_sz;
@@ -419,7 +419,7 @@ rte_eal_memzone_init(void)
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return 0;
 
-       memseg = rte_eal_get_physmem_layout();
+       memseg = rte_eal_get_iovamem_layout();
        if (memseg == NULL) {
                RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
                return -1;
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 6e0f85def..a5a27563d 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -352,7 +352,7 @@ int rte_eal_hugepage_attach(void);
  * addresses are obtainable. It is only possible to get
  * physical addresses when running as a privileged user.
  */
-bool rte_eal_using_phys_addrs(void);
+bool rte_eal_using_iova_addrs(void);
 
 /**
  * Find a bus capable of identifying a device.
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 491b479b1..b1a214c9d 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -333,7 +333,7 @@ rte_malloc_set_limit(const char *type, size_t max);
  *   otherwise return physical address of the buffer
  */
 iova_addr_t
-rte_malloc_virt2phy(const void *addr);
+rte_malloc_virt2iova(const void *addr);
 
 #ifdef __cplusplus
 }
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 93e7a819f..173f141fc 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -134,7 +134,7 @@ int rte_mem_lock_page(const void *virt);
  * @return
  *   The physical address or RTE_BAD_PHYS_ADDR on error.
  */
-iova_addr_t rte_mem_virt2phy(const void *virt);
+iova_addr_t rte_mem_virt2iova(const void *virt);
 
 /**
  * Get the layout of the available physical memory.
@@ -151,7 +151,7 @@ iova_addr_t rte_mem_virt2phy(const void *virt);
  *  - On error, return NULL. This should not happen since it is a fatal
  *    error that will probably cause the entire system to panic.
  */
-const struct rte_memseg *rte_eal_get_physmem_layout(void);
+const struct rte_memseg *rte_eal_get_iovamem_layout(void);
 
 /**
  * Dump the physical memory layout to a file.
@@ -159,7 +159,7 @@ const struct rte_memseg *rte_eal_get_physmem_layout(void);
  * @param f
  *   A pointer to a file for output
  */
-void rte_dump_physmem_layout(FILE *f);
+void rte_dump_iovamem_layout(FILE *f);
 
 /**
  * Get the total amount of available physical memory.
@@ -167,7 +167,7 @@ void rte_dump_physmem_layout(FILE *f);
  * @return
  *    The total amount of available physical memory in bytes.
  */
-uint64_t rte_eal_get_physmem_size(void);
+uint64_t rte_eal_get_iovamem_size(void);
 
 /**
  * Get the number of memory channels.
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index 506d7a415..f8473832a 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -249,7 +249,7 @@ rte_malloc_set_limit(__rte_unused const char *type,
  * Return the physical address of a virtual address obtained through rte_malloc
  */
 iova_addr_t
-rte_malloc_virt2phy(const void *addr)
+rte_malloc_virt2iova(const void *addr)
 {
        iova_addr_t paddr;
        const struct malloc_elem *elem = malloc_elem_from_data(addr);
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index ad7278989..353e582d0 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -665,7 +665,7 @@ eal_check_mem_on_local_socket(void)
 
        socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
 
-       ms = rte_eal_get_physmem_layout();
+       ms = rte_eal_get_iovamem_layout();
 
        for (i = 0; i < RTE_MAX_MEMSEG; i++)
                if (ms[i].socket_id == socket_id &&
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 603164d77..016365958 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -106,7 +106,7 @@ test_phys_addrs_available(void)
                return;
        }
 
-       physaddr = rte_mem_virt2phy(&tmp);
+       physaddr = rte_mem_virt2iova(&tmp);
        if (physaddr == RTE_BAD_PHYS_ADDR) {
                RTE_LOG(ERR, EAL,
                        "Cannot obtain physical addresses: %s. "
@@ -120,7 +120,7 @@ test_phys_addrs_available(void)
  * Get physical address of any mapped virtual address in the current process.
  */
 iova_addr_t
-rte_mem_virt2phy(const void *virtaddr)
+rte_mem_virt2iova(const void *virtaddr)
 {
        int fd, retval;
        uint64_t page, physaddr;
@@ -191,7 +191,7 @@ find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
        iova_addr_t addr;
 
        for (i = 0; i < hpi->num_pages[0]; i++) {
-               addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+               addr = rte_mem_virt2iova(hugepg_tbl[i].orig_va);
                if (addr == RTE_BAD_PHYS_ADDR)
                        return -1;
                hugepg_tbl[i].physaddr = addr;
@@ -1495,7 +1495,7 @@ rte_eal_hugepage_attach(void)
 }
 
 bool
-rte_eal_using_phys_addrs(void)
+rte_eal_using_iova_addrs(void)
 {
        return phys_addrs_available;
 }
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
index b4dbf953a..8e8ed6d3b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -103,7 +103,7 @@ rte_pci_map_device(struct rte_pci_device *dev)
                break;
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
-               if (rte_eal_using_phys_addrs()) {
+               if (rte_eal_using_iova_addrs()) {
                        /* map resources for devices that use uio */
                        ret = pci_uio_map_resource(dev);
                }
@@ -145,7 +145,7 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
 void *
 pci_find_max_end_va(void)
 {
-       const struct rte_memseg *seg = rte_eal_get_physmem_layout();
+       const struct rte_memseg *seg = rte_eal_get_iovamem_layout();
        const struct rte_memseg *last = seg;
        unsigned i = 0;
 
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 32b090325..9f50b13f2 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -692,7 +692,7 @@ vfio_get_group_no(const char *sysfs_base,
 static int
 vfio_type1_dma_map(int vfio_container_fd)
 {
-       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
        int i, ret;
 
        /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
@@ -728,7 +728,7 @@ vfio_type1_dma_map(int vfio_container_fd)
 static int
 vfio_spapr_dma_map(int vfio_container_fd)
 {
-       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
        int i, ret;
 
        struct vfio_iommu_spapr_register_memory reg = {
diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
index c173ccfdb..93429d2ec 100644
--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
@@ -14,7 +14,7 @@ DPDK_2.0 {
        rte_cpu_get_flag_enabled;
        rte_cycles_vmware_tsc_map;
        rte_delay_us;
-       rte_dump_physmem_layout;
+       rte_dump_iovamem_layout;
        rte_dump_registers;
        rte_dump_stack;
        rte_dump_tailq;
@@ -25,8 +25,8 @@ DPDK_2.0 {
        rte_eal_devargs_type_count;
        rte_eal_get_configuration;
        rte_eal_get_lcore_state;
-       rte_eal_get_physmem_layout;
-       rte_eal_get_physmem_size;
+       rte_eal_get_iovamem_layout;
+       rte_eal_get_iovamem_size;
        rte_eal_has_hugepages;
        rte_eal_hpet_init;
        rte_eal_init;
@@ -62,10 +62,10 @@ DPDK_2.0 {
        rte_malloc_set_limit;
        rte_malloc_socket;
        rte_malloc_validate;
-       rte_malloc_virt2phy;
+       rte_malloc_virt2iova;
        rte_mem_lock_page;
-       rte_mem_phy2mch;
-       rte_mem_virt2phy;
+       rte_mem_phy2iova;
+       rte_mem_virt2iova;
        rte_memdump;
        rte_memory_get_nchannel;
        rte_memory_get_nrank;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index a9b64fffe..f45377f7e 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -357,7 +357,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
  * on error.
  */
 int
-rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        iova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque)
 {
@@ -439,7 +439,7 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  * number of objects added, or a negative value on error.
  */
 int
-rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
        const iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
        rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
 {
@@ -452,7 +452,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
                return -EEXIST;
 
        if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
-               return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+               return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_PHYS_ADDR,
                        pg_num * pg_sz, free_cb, opaque);
 
        for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
@@ -462,7 +462,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
                             paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
                        ;
 
-               ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
+               ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
                        paddr[i], n * pg_sz, free_cb, opaque);
                if (ret < 0) {
                        rte_mempool_free_memchunks(mp);
@@ -497,13 +497,13 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
                return -EINVAL;
 
        if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
-               return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+               return rte_mempool_populate_iova(mp, addr, RTE_BAD_PHYS_ADDR,
                        len, free_cb, opaque);
 
        for (off = 0; off + pg_sz <= len &&
                     mp->populated_size < mp->size; off += phys_len) {
 
-               paddr = rte_mem_virt2phy(addr + off);
+               paddr = rte_mem_virt2iova(addr + off);
 
                if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
                        ret = -EINVAL;
@@ -514,13 +514,13 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
                for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
                        iova_addr_t paddr_tmp;
 
-                       paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
+                       paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
 
                        if (paddr_tmp != paddr + phys_len)
                                break;
                }
 
-               ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+               ret = rte_mempool_populate_iova(mp, addr + off, paddr,
                        phys_len, free_cb, opaque);
                if (ret < 0)
                        goto fail;
@@ -604,7 +604,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                        paddr = mz->phys_addr;
 
                if (rte_eal_has_hugepages())
-                       ret = rte_mempool_populate_phys(mp, mz->addr,
+                       ret = rte_mempool_populate_iova(mp, mz->addr,
                                paddr, mz->len,
                                rte_mempool_memchunk_mz_free,
                                (void *)(uintptr_t)mz);
@@ -990,7 +990,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
        if (mp_init)
                mp_init(mp, mp_init_arg);
 
-       ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+       ret = rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
                NULL, NULL);
        if (ret < 0 || ret != (int)mp->size)
                goto fail;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 3139be4f5..de2be4af0 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -577,7 +577,7 @@ rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
  * @return
  *   - 0: Success;
  *   - -ENOTSUP - doesn't support register_memory_area ops (valid error case).
- *   - Otherwise, rte_mempool_populate_phys fails thus pool create fails.
+ *   - Otherwise, rte_mempool_populate_iova fails thus pool create fails.
  */
 int
 rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
@@ -894,7 +894,7 @@ rte_mempool_free(struct rte_mempool *mp);
  *   On error, the chunk is not added in the memory list of the
  *   mempool and a negative errno is returned.
  */
-int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
        iova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
        void *opaque);
 
@@ -925,7 +925,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  *   On error, the chunks are not added in the memory list of the
  *   mempool and a negative errno is returned.
  */
-int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
        const iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
        rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
 
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index ff86dc9a7..c0cfee71e 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -34,8 +34,8 @@ DPDK_16.07 {
        rte_mempool_ops_table;
        rte_mempool_populate_anon;
        rte_mempool_populate_default;
-       rte_mempool_populate_phys;
-       rte_mempool_populate_phys_tab;
+       rte_mempool_populate_iova;
+       rte_mempool_populate_iova_tab;
        rte_mempool_populate_virt;
        rte_mempool_register_ops;
        rte_mempool_set_ops_byname;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 76c4eeca5..7702622c2 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -516,7 +516,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
        uint64_t host_phys_addr;
        uint64_t size;
 
-       host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+       host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);
 
@@ -527,7 +527,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 
        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
-               host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+               host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
 
diff --git a/test/test/commands.c b/test/test/commands.c
index 4097a3310..9f5028d41 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -147,7 +147,7 @@ static void cmd_dump_parsed(void *parsed_result,
        struct cmd_dump_result *res = parsed_result;
 
        if (!strcmp(res->dump, "dump_physmem"))
-               rte_dump_physmem_layout(stdout);
+               rte_dump_iovamem_layout(stdout);
        else if (!strcmp(res->dump, "dump_memzone"))
                rte_memzone_dump(stdout);
        else if (!strcmp(res->dump, "dump_struct_sizes"))
diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c
index cee6469d8..7146e7a95 100644
--- a/test/test/test_malloc.c
+++ b/test/test/test_malloc.c
@@ -739,7 +739,7 @@ test_malloc_bad_params(void)
 static int
 is_mem_on_socket(int32_t socket)
 {
-       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
        unsigned i;
 
        for (i = 0; i < RTE_MAX_MEMSEG; i++) {
@@ -756,7 +756,7 @@ is_mem_on_socket(int32_t socket)
 static int32_t
 addr_to_socket(void * addr)
 {
-       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
        unsigned i;
 
        for (i = 0; i < RTE_MAX_MEMSEG; i++) {
diff --git a/test/test/test_memory.c b/test/test/test_memory.c
index 921bdc883..9ab0f52fd 100644
--- a/test/test/test_memory.c
+++ b/test/test/test_memory.c
@@ -64,17 +64,17 @@ test_memory(void)
         * that at least one line is dumped
         */
        printf("Dump memory layout\n");
-       rte_dump_physmem_layout(stdout);
+       rte_dump_iovamem_layout(stdout);
 
        /* check that memory size is != 0 */
-       s = rte_eal_get_physmem_size();
+       s = rte_eal_get_iovamem_size();
        if (s == 0) {
                printf("No memory detected\n");
                return -1;
        }
 
        /* try to read memory (should not segfault) */
-       mem = rte_eal_get_physmem_layout();
+       mem = rte_eal_get_iovamem_layout();
        for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
 
                /* check memory */
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index a225e1209..9b8d2697b 100644
--- a/test/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -145,9 +145,9 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
                        MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
                GOTO_ERR(ret, out);
 
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
        printf("get physical address of an object\n");
-       if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+       if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))
                GOTO_ERR(ret, out);
 #endif
 
diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c
index 0afb159e9..177bcb73e 100644
--- a/test/test/test_memzone.c
+++ b/test/test/test_memzone.c
@@ -139,7 +139,7 @@ test_memzone_reserve_flags(void)
        int hugepage_16GB_avail = 0;
        const size_t size = 100;
        int i = 0;
-       ms = rte_eal_get_physmem_layout();
+       ms = rte_eal_get_iovamem_layout();
        for (i = 0; i < RTE_MAX_MEMSEG; i++) {
                if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
                        hugepage_2MB_avail = 1;
@@ -422,7 +422,7 @@ test_memzone_reserve_max(void)
        if (mz == NULL){
                printf("Failed to reserve a big chunk of memory - %s\n",
                                rte_strerror(rte_errno));
-               rte_dump_physmem_layout(stdout);
+               rte_dump_iovamem_layout(stdout);
                rte_memzone_dump(stdout);
                return -1;
        }
@@ -430,7 +430,7 @@ test_memzone_reserve_max(void)
        if (mz->len != maxlen) {
                printf("Memzone reserve with 0 size did not return bigest block\n");
                printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
-               rte_dump_physmem_layout(stdout);
+               rte_dump_iovamem_layout(stdout);
                rte_memzone_dump(stdout);
                return -1;
        }
@@ -459,7 +459,7 @@ test_memzone_reserve_max_aligned(void)
        if (mz == NULL){
                printf("Failed to reserve a big chunk of memory - %s\n",
                                rte_strerror(rte_errno));
-               rte_dump_physmem_layout(stdout);
+               rte_dump_iovamem_layout(stdout);
                rte_memzone_dump(stdout);
                return -1;
        }
@@ -469,7 +469,7 @@ test_memzone_reserve_max_aligned(void)
                                " bigest block\n", align);
                printf("Expected size = %zu, actual size = %zu\n",
                                maxlen, mz->len);
-               rte_dump_physmem_layout(stdout);
+               rte_dump_iovamem_layout(stdout);
                rte_memzone_dump(stdout);
                return -1;
        }
-- 
2.14.1
