On 17/09/2020 11:41, Thomas Monjalon wrote:
> Remove the deprecated functions
>       - rte_mbuf_data_dma_addr
>       - rte_mbuf_data_dma_addr_default
> which aliased the more recent functions
>       - rte_mbuf_data_iova
>       - rte_mbuf_data_iova_default
> 
> Remove the deprecated macros
>       - rte_pktmbuf_mtophys
>       - rte_pktmbuf_mtophys_offset
> which aliased the more recent macros
>       - rte_pktmbuf_iova
>       - rte_pktmbuf_iova_offset
> 
> Signed-off-by: Thomas Monjalon <tho...@monjalon.net>
> Acked-by: Andrew Rybchenko <arybche...@solarflare.com>
> ---
>  doc/guides/rel_notes/release_20_11.rst        |  5 +++
>  .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c         | 16 +++----
>  drivers/baseband/fpga_lte_fec/fpga_lte_fec.c  | 16 +++----
>  drivers/common/cpt/cpt_ucode.h                | 12 +++---
>  drivers/compress/qat/qat_comp.c               |  4 +-
>  drivers/crypto/ccp/ccp_crypto.c               | 22 +++++-----
>  drivers/crypto/dpaa_sec/dpaa_sec.c            | 42 +++++++++----------
>  drivers/crypto/nitrox/nitrox_sym_reqmgr.c     |  6 +--
>  drivers/crypto/virtio/virtio_rxtx.c           |  6 +--
>  drivers/net/axgbe/axgbe_rxtx.c                |  4 +-
>  drivers/net/octeontx2/otx2_ethdev_sec_tx.h    |  2 +-
>  examples/fips_validation/main.c               |  4 +-
>  lib/librte_mbuf/rte_mbuf.h                    | 21 ----------
>  13 files changed, 72 insertions(+), 88 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
> index 71f970f1ac..a290b44ae5 100644
> --- a/doc/guides/rel_notes/release_20_11.rst
> +++ b/doc/guides/rel_notes/release_20_11.rst
> @@ -94,6 +94,11 @@ API Changes
>    The flag name ``MEMPOOL_F_NO_PHYS_CONTIG`` is removed,
>    while the aliased flag ``MEMPOOL_F_NO_IOVA_CONTIG`` is kept.
>  
> +* mbuf: Removed the functions ``rte_mbuf_data_dma_addr*``
> +  and the macros ``rte_pktmbuf_mtophys*``.
> +  The same functionality is still available with the functions and macros
> +  having ``iova`` in their names instead of ``dma_addr`` or ``mtophys``.
> +
>  * mbuf: Removed the unioned field ``refcnt_atomic`` from
>    the structures ``rte_mbuf`` and ``rte_mbuf_ext_shared_info``.
>    The field ``refcnt`` is remaining from the old unions.
> diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
> index 930718cd47..61f9c04ba2 100644
> --- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
> +++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
> @@ -950,14 +950,14 @@ fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
>       desc->num_null = op->ldpc_enc.n_filler;
>       /* Set inbound data buffer address */
>       desc->in_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(input, in_offset) >> 32);
>       desc->in_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset));
> +                     rte_pktmbuf_iova_offset(input, in_offset));
>  
>       desc->out_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(output, out_offset) >> 32);
>       desc->out_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset));
> +                     rte_pktmbuf_iova_offset(output, out_offset));
>       /* Save software context needed for dequeue */
>       desc->op_addr = op;
>       /* Set total number of CBs in an op */
> @@ -998,9 +998,9 @@ fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
>       desc->error = 0;
>       /* Set inbound data buffer address */
>       desc->in_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(input, in_offset) >> 32);
>       desc->in_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset));
> +                     rte_pktmbuf_iova_offset(input, in_offset));
>       desc->rm_e = op->ldpc_dec.cb_params.e;
>       desc->harq_input_length = harq_in_length;
>       desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
> @@ -1021,9 +1021,9 @@ fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
>       desc->max_iter = op->ldpc_dec.iter_max;
>       desc->qm_idx = op->ldpc_dec.q_m / 2;
>       desc->out_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(output, out_offset) >> 32);
>       desc->out_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset));
> +                     rte_pktmbuf_iova_offset(output, out_offset));
>       /* Save software context needed for dequeue */
>       desc->op_addr = op;
>       /* Set total number of CBs in an op */
> diff --git a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
> index 6be9131e72..37018b9c7f 100644
> --- a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
> +++ b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
> @@ -1251,14 +1251,14 @@ fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
>       desc->offset = desc_offset;
>       /* Set inbound data buffer address */
>       desc->in_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(input, in_offset) >> 32);
>       desc->in_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset));
> +                     rte_pktmbuf_iova_offset(input, in_offset));
>  
>       desc->out_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(output, out_offset) >> 32);
>       desc->out_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset));
> +                     rte_pktmbuf_iova_offset(output, out_offset));
>  
>       /* Save software context needed for dequeue */
>       desc->op_addr = op;
> @@ -1302,9 +1302,9 @@ fpga_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
>       desc->done = 0;
>       /* Set inbound data buffer address */
>       desc->in_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(input, in_offset) >> 32);
>       desc->in_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(input, in_offset));
> +                     rte_pktmbuf_iova_offset(input, in_offset));
>       desc->in_len = in_length;
>       desc->k = k;
>       desc->crc_type = !check_bit(op->turbo_dec.op_flags,
> @@ -1316,9 +1316,9 @@ fpga_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
>       desc->max_iter = op->turbo_dec.iter_max * 2;
>       desc->offset = desc_offset;
>       desc->out_addr_hi = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset) >> 32);
> +                     rte_pktmbuf_iova_offset(output, out_offset) >> 32);
>       desc->out_addr_lw = (uint32_t)(
> -                     rte_pktmbuf_mtophys_offset(output, out_offset));
> +                     rte_pktmbuf_iova_offset(output, out_offset));
>  
>       /* Save software context needed for dequeue */
>       desc->op_addr = op;
> diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
> index 44067cf1b1..4e79fbf7a5 100644
> --- a/drivers/common/cpt/cpt_ucode.h
> +++ b/drivers/common/cpt/cpt_ucode.h
> @@ -2918,7 +2918,7 @@ prepare_iov_from_pkt(struct rte_mbuf *pkt,
>  
>       if (!start_offset) {
>               seg_data = rte_pktmbuf_mtod(pkt, void *);
> -             seg_phys = rte_pktmbuf_mtophys(pkt);
> +             seg_phys = rte_pktmbuf_iova(pkt);
>               seg_size = pkt->data_len;
>       } else {
>               while (start_offset >= pkt->data_len) {
> @@ -2927,7 +2927,7 @@ prepare_iov_from_pkt(struct rte_mbuf *pkt,
>               }
>  
>               seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
> -             seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
> +             seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
>               seg_size = pkt->data_len - start_offset;
>               if (!seg_size)
>                       return 1;
> @@ -2942,7 +2942,7 @@ prepare_iov_from_pkt(struct rte_mbuf *pkt,
>  
>       while (unlikely(pkt != NULL)) {
>               seg_data = rte_pktmbuf_mtod(pkt, void *);
> -             seg_phys = rte_pktmbuf_mtophys(pkt);
> +             seg_phys = rte_pktmbuf_iova(pkt);
>               seg_size = pkt->data_len;
>               if (!seg_size)
>                       break;
> @@ -2972,7 +2972,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
>       iov_ptr_t *iovec;
>  
>       seg_data = rte_pktmbuf_mtod(pkt, void *);
> -     seg_phys = rte_pktmbuf_mtophys(pkt);
> +     seg_phys = rte_pktmbuf_iova(pkt);
>       seg_size = pkt->data_len;
>  
>       /* first seg */
> @@ -3001,7 +3001,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
>  
>       while (unlikely(pkt != NULL)) {
>               seg_data = rte_pktmbuf_mtod(pkt, void *);
> -             seg_phys = rte_pktmbuf_mtophys(pkt);
> +             seg_phys = rte_pktmbuf_iova(pkt);
>               seg_size = pkt->data_len;
>  
>               if (!seg_size)
> @@ -3463,7 +3463,7 @@ fill_digest_params(struct rte_crypto_op *cop,
>                       params.mac_buf.vaddr =
>                               rte_pktmbuf_mtod_offset(m_dst, void *, off);
>                       params.mac_buf.dma_addr =
> -                             rte_pktmbuf_mtophys_offset(m_dst, off);
> +                             rte_pktmbuf_iova_offset(m_dst, off);
>                       params.mac_buf.size = mac_len;
>               }
>       } else {
> diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
> index 335fd63b98..3a064ec3b2 100644
> --- a/drivers/compress/qat/qat_comp.c
> +++ b/drivers/compress/qat/qat_comp.c
> @@ -305,9 +305,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
>                               comp_req->comp_pars.out_buffer_sz;
>  
>               comp_req->comn_mid.src_data_addr =
> -                 rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
> +                 rte_pktmbuf_iova_offset(op->m_src, op->src.offset);
>               comp_req->comn_mid.dest_data_addr =
> -                 rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
> +                 rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset);
>       }
>  
>       if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
> diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
> index 4256734d16..db3fb6eff8 100644
> --- a/drivers/crypto/ccp/ccp_crypto.c
> +++ b/drivers/crypto/ccp/ccp_crypto.c
> @@ -1571,7 +1571,7 @@ ccp_perform_hmac(struct rte_crypto_op *op,
>                                        ccp_cryptodev_driver_id);
>       addr = session->auth.pre_compute;
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->auth.data.offset);
>       append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
>                                               session->auth.ctx_len);
> @@ -1743,7 +1743,7 @@ ccp_perform_sha(struct rte_crypto_op *op,
>                                        op->sym->session,
>                                       ccp_cryptodev_driver_id);
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->auth.data.offset);
>  
>       append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
> @@ -1832,7 +1832,7 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
>                                        op->sym->session,
>                                       ccp_cryptodev_driver_id);
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->auth.data.offset);
>       append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
>                                               session->auth.ctx_len);
> @@ -1972,7 +1972,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
>                                        op->sym->session,
>                                       ccp_cryptodev_driver_id);
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->auth.data.offset);
>       append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
>                                               session->auth.ctx_len);
> @@ -2041,7 +2041,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
>                                       ccp_cryptodev_driver_id);
>       key_addr = rte_mem_virt2phy(session->auth.key_ccp);
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->auth.data.offset);
>       append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
>                                               session->auth.ctx_len);
> @@ -2221,10 +2221,10 @@ ccp_perform_aes(struct rte_crypto_op *op,
>  
>       desc = &cmd_q->qbase_desc[cmd_q->qidx];
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->cipher.data.offset);
>       if (likely(op->sym->m_dst != NULL))
> -             dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
> +             dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
>                                               op->sym->cipher.data.offset);
>       else
>               dest_addr = src_addr;
> @@ -2303,11 +2303,11 @@ ccp_perform_3des(struct rte_crypto_op *op,
>               return -ENOTSUP;
>       }
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->cipher.data.offset);
>       if (unlikely(op->sym->m_dst != NULL))
>               dest_addr =
> -                     rte_pktmbuf_mtophys_offset(op->sym->m_dst,
> +                     rte_pktmbuf_iova_offset(op->sym->m_dst,
>                                                  op->sym->cipher.data.offset);
>       else
>               dest_addr = src_addr;
> @@ -2385,10 +2385,10 @@ ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
>       iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
>       key_addr = session->cipher.key_phys;
>  
> -     src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
> +     src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
>                                             op->sym->aead.data.offset);
>       if (unlikely(op->sym->m_dst != NULL))
> -             dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
> +             dest_addr = rte_pktmbuf_iova_offset(op->sym->m_dst,
>                                               op->sym->aead.data.offset);
>       else
>               dest_addr = src_addr;
> diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
> index c4339336de..97002170b0 100644
> --- a/drivers/crypto/dpaa_sec/dpaa_sec.c
> +++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
> @@ -748,7 +748,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>               sg++;
>       }
>  
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->offset = data_offset;
>  
>       if (data_len <= (mbuf->data_len - data_offset)) {
> @@ -761,7 +761,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>                      (mbuf = mbuf->next)) {
>                       cpu_to_hw_sg(sg);
>                       sg++;
> -                     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +                     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>                       if (data_len > mbuf->data_len)
>                               sg->length = mbuf->data_len;
>                       else
> @@ -866,7 +866,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
>               sg++;
>       }
>  
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->offset = data_offset;
>       sg->length = data_len;
>  
> @@ -946,7 +946,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>  
>       /* 1st seg */
>       sg = &cf->sg[2];
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len - data_offset;
>       sg->offset = data_offset;
>  
> @@ -955,7 +955,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               mbuf = mbuf->next;
>       }
> @@ -980,7 +980,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>  
>       /* 1st seg */
>       sg++;
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len - data_offset;
>       sg->offset = data_offset;
>  
> @@ -989,7 +989,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               mbuf = mbuf->next;
>       }
> @@ -1121,7 +1121,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       cpu_to_hw_sg(out_sg);
>  
>       /* 1st seg */
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len - sym->aead.data.offset;
>       sg->offset = sym->aead.data.offset;
>  
> @@ -1130,7 +1130,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               mbuf = mbuf->next;
>       }
> @@ -1178,7 +1178,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>  
>       /* 3rd seg */
>       sg++;
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len - sym->aead.data.offset;
>       sg->offset = sym->aead.data.offset;
>  
> @@ -1187,7 +1187,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               mbuf = mbuf->next;
>       }
> @@ -1367,7 +1367,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       cpu_to_hw_sg(out_sg);
>  
>       /* 1st seg */
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len - sym->auth.data.offset;
>       sg->offset = sym->auth.data.offset;
>  
> @@ -1376,7 +1376,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               mbuf = mbuf->next;
>       }
> @@ -1415,7 +1415,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>  
>       /* 2nd seg */
>       sg++;
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len - sym->auth.data.offset;
>       sg->offset = sym->auth.data.offset;
>  
> @@ -1424,7 +1424,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               mbuf = mbuf->next;
>       }
> @@ -1555,10 +1555,10 @@ build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       cf = &ctx->job;
>       ctx->op = op;
>  
> -     src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
> +     src_start_addr = rte_pktmbuf_iova(sym->m_src);
>  
>       if (sym->m_dst)
> -             dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
> +             dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
>       else
>               dst_start_addr = src_start_addr;
>  
> @@ -1614,7 +1614,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>  
>       /* 1st seg */
>       sg = &cf->sg[2];
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->offset = 0;
>  
>       /* Successive segs */
> @@ -1624,7 +1624,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>               mbuf = mbuf->next;
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->offset = 0;
>       }
>       sg->length = mbuf->buf_len - mbuf->data_off;
> @@ -1646,7 +1646,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
>  
>       /* 1st seg */
> -     qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +     qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>       sg->length = mbuf->data_len;
>       sg->offset = 0;
>  
> @@ -1655,7 +1655,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
>       while (mbuf) {
>               cpu_to_hw_sg(sg);
>               sg++;
> -             qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
> +             qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
>               sg->length = mbuf->data_len;
>               sg->offset = 0;
>               in_len += sg->length;
> diff --git a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
> index d9b4267764..4492247801 100644
> --- a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
> +++ b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
> @@ -269,7 +269,7 @@ extract_cipher_auth_digest(struct nitrox_softreq *sr,
>              op->sym->auth.data.length + digest->len))
>               return -EINVAL;
>  
> -     digest->iova = rte_pktmbuf_mtophys_offset(mdst,
> +     digest->iova = rte_pktmbuf_iova_offset(mdst,
>                                       op->sym->auth.data.offset +
>                                       op->sym->auth.data.length);
>       digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
> @@ -318,7 +318,7 @@ create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
>       if (datalen <= mlen)
>               mlen = datalen;
>       sglist[cnt].len = mlen;
> -     sglist[cnt].iova = rte_pktmbuf_mtophys_offset(m, off);
> +     sglist[cnt].iova = rte_pktmbuf_iova_offset(m, off);
>       sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
>       sgtbl->total_bytes += mlen;
>       cnt++;
> @@ -327,7 +327,7 @@ create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
>               mlen = rte_pktmbuf_data_len(m) < datalen ?
>                       rte_pktmbuf_data_len(m) : datalen;
>               sglist[cnt].len = mlen;
> -             sglist[cnt].iova = rte_pktmbuf_mtophys(m);
> +             sglist[cnt].iova = rte_pktmbuf_iova(m);
>               sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
>               sgtbl->total_bytes += mlen;
>               cnt++;
> diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
> index e9a63cb5a0..e1cb4ad104 100644
> --- a/drivers/crypto/virtio/virtio_rxtx.c
> +++ b/drivers/crypto/virtio/virtio_rxtx.c
> @@ -284,18 +284,18 @@ virtqueue_crypto_sym_enqueue_xmit(
>       }
>  
>       /* indirect vring: src data */
> -     desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
> +     desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
>       desc[idx].len = (sym_op->cipher.data.offset
>               + sym_op->cipher.data.length);
>       desc[idx++].flags = VRING_DESC_F_NEXT;
>  
>       /* indirect vring: dst data */
>       if (sym_op->m_dst) {
> -             desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
> +             desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
>               desc[idx].len = (sym_op->cipher.data.offset
>                       + sym_op->cipher.data.length);
>       } else {
> -             desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
> +             desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
>               desc[idx].len = (sym_op->cipher.data.offset
>                       + sym_op->cipher.data.length);
>       }
> diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
> index 30c467db71..da3a9822b8 100644
> --- a/drivers/net/axgbe/axgbe_rxtx.c
> +++ b/drivers/net/axgbe/axgbe_rxtx.c
> @@ -95,7 +95,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>               axgbe_rx_queue_release(rxq);
>               return -ENOMEM;
>       }
> -     rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
> +     rxq->ring_phys_addr = (uint64_t)dma->iova;
>       rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
>       memset((void *)rxq->desc, 0, size);
>       /* Allocate software ring */
> @@ -530,7 +530,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>               return -ENOMEM;
>       }
>       memset(tz->addr, 0, tsize);
> -     txq->ring_phys_addr = (uint64_t)tz->phys_addr;
> +     txq->ring_phys_addr = (uint64_t)tz->iova;
>       txq->desc = tz->addr;
>       txq->queue_id = queue_idx;
>       txq->port_id = dev->data->port_id;
> diff --git a/drivers/net/octeontx2/otx2_ethdev_sec_tx.h b/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
> index f8130ca624..15122b4154 100644
> --- a/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
> +++ b/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
> @@ -87,7 +87,7 @@ otx2_sec_event_tx(struct otx2_ssogws *ws, struct rte_event *ev,
>        */
>       rte_pktmbuf_append(m, extend_tail);
>       data = rte_pktmbuf_prepend(m, extend_head);
> -     data_addr = rte_pktmbuf_mtophys(m);
> +     data_addr = rte_pktmbuf_iova(m);
>  
>       /*
>        * Move the Ethernet header, to insert otx2_ipsec_fp_out_hdr prior
> diff --git a/examples/fips_validation/main.c b/examples/fips_validation/main.c
> index efd32a86a5..0a1c8b568c 100644
> --- a/examples/fips_validation/main.c
> +++ b/examples/fips_validation/main.c
> @@ -531,7 +531,7 @@ prepare_auth_op(void)
>  
>       sym->auth.data.length = vec.pt.len;
>       sym->auth.digest.data = pt + vec.pt.len;
> -     sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
> +     sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
>                       env.mbuf, vec.pt.len);
>  
>       memcpy(pt, vec.pt.val, vec.pt.len);
> @@ -584,7 +584,7 @@ prepare_aead_op(void)
>               memcpy(pt, vec.pt.val, vec.pt.len);
>               sym->aead.data.length = vec.pt.len;
>               sym->aead.digest.data = pt + vec.pt.len;
> -             sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
> +             sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
>                               env.mbuf, vec.pt.len);
>       } else {
>               uint8_t *ct;
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 7259575a77..a1414ed7cd 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -151,13 +151,6 @@ rte_mbuf_data_iova(const struct rte_mbuf *mb)
>       return mb->buf_iova + mb->data_off;
>  }
>  
> -__rte_deprecated
> -static inline phys_addr_t
> -rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
> -{
> -     return rte_mbuf_data_iova(mb);
> -}
> -
>  /**
>   * Return the default IO address of the beginning of the mbuf data
>   *
> @@ -176,13 +169,6 @@ rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
>       return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
>  }
>  
> -__rte_deprecated
> -static inline phys_addr_t
> -rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
> -{
> -     return rte_mbuf_data_iova_default(mb);
> -}
> -
>  /**
>   * Return the mbuf owning the data buffer address of an indirect mbuf.
>   *
> @@ -1537,13 +1523,6 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
>       return m;
>  }
>  
> -/* deprecated */
> -#define rte_pktmbuf_mtophys_offset(m, o) \
> -     rte_pktmbuf_iova_offset(m, o)
> -
> -/* deprecated */
> -#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
> -
>  /**
>   * A macro that returns the length of the packet.
>   *
> 
Acked-by: Ray Kinsella <m...@ashroe.eu>
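
For anyone carrying out-of-tree drivers, the conversion is mechanical: each
removed name has a one-for-one replacement with "iova" in it, exactly as the
diff above does driver by driver. A minimal sketch of the pattern (my_desc and
fill_desc() are made-up illustrations, not DPDK API; only the
rte_pktmbuf_iova_offset() call is the real replacement):

#include <rte_mbuf.h>

/* Hypothetical hardware descriptor holding a split 64-bit IOVA. */
struct my_desc {
	uint32_t in_addr_hi;
	uint32_t in_addr_lw;
};

static void
fill_desc(struct my_desc *desc, struct rte_mbuf *m, uint32_t off)
{
	/* before 20.11: rte_pktmbuf_mtophys_offset(m, off) */
	rte_iova_t addr = rte_pktmbuf_iova_offset(m, off);

	desc->in_addr_hi = (uint32_t)(addr >> 32);
	desc->in_addr_lw = (uint32_t)addr;
}

The same mapping applies to the removed functions: rte_mbuf_data_dma_addr()
becomes rte_mbuf_data_iova(), and rte_mbuf_data_dma_addr_default() becomes
rte_mbuf_data_iova_default().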
