27/04/2018 19:20, Shreyansh Jain: > --- a/drivers/bus/dpaa/rte_dpaa_bus.h > +++ b/drivers/bus/dpaa/rte_dpaa_bus.h > @@ -95,9 +95,34 @@ struct dpaa_portal { > uint64_t tid;/**< Parent Thread id for this portal */ > }; > > -/* TODO - this is costly, need to write a fast coversion routine */ > +/* Various structures representing contiguous memory maps */ > +struct dpaa_memseg { > + TAILQ_ENTRY(dpaa_memseg) next; > + char *vaddr; > + rte_iova_t iova; > + size_t len; > +}; > + > +TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg); > +extern struct dpaa_memseg_list dpaa_memsegs;
Same as for DPAA2, the following fixes are required (the exported symbol needs the rte_ prefix and must be added to the version map): --- a/drivers/bus/dpaa/rte_dpaa_bus.h +++ b/drivers/bus/dpaa/rte_dpaa_bus.h @@ -104,7 +104,7 @@ struct dpaa_memseg { }; TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg); -extern struct dpaa_memseg_list dpaa_memsegs; +extern struct dpaa_memseg_list rte_dpaa_memsegs; /* Either iterate over the list of internal memseg references or fallback to * EAL memseg based iova2virt. @@ -116,10 +116,10 @@ static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr) /* Check if the address is already part of the memseg list internally * maintained by the dpaa driver. */ - TAILQ_FOREACH(ms, &dpaa_memsegs, next) { + TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) { if (paddr >= ms->iova && paddr < ms->iova + ms->len) - return RTE_PTR_ADD(ms->vaddr, (paddr - ms->iova)); + return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova)); } /* If not, Fallback to full memseg list searching */ --- a/drivers/mempool/dpaa/dpaa_mempool.c +++ b/drivers/mempool/dpaa/dpaa_mempool.c @@ -31,8 +31,8 @@ * is to optimize the PA_to_VA searches until a better mechanism (algo) is * available. */ -struct dpaa_memseg_list dpaa_memsegs - = TAILQ_HEAD_INITIALIZER(dpaa_memsegs); +struct dpaa_memseg_list rte_dpaa_memsegs + = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs); struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS]; @@ -318,7 +318,7 @@ dpaa_populate(struct rte_mempool *mp, unsigned int max_objs, /* Head insertions are generally faster than tail insertions as the * buffers pinned are picked from rear end. */ - TAILQ_INSERT_HEAD(&dpaa_memsegs, ms, next); + TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next); return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len, obj_cb, obj_cb_arg); --- a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map +++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map @@ -2,6 +2,7 @@ DPDK_17.11 { global: rte_dpaa_bpid_info; + rte_dpaa_memsegs; local: *; };