Currently, the external memory API will silently succeed even if the IOVA addresses supplied by the user do not fit into the DMA mask. This can cause hard-to-debug issues, or lead to failed kernel VFIO DMA mappings being accepted.
Fix it so that, if IOVA addresses are provided, they are checked against the DMA mask, and the call fails with rte_errno set to EINVAL if they do not fit.

Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
---
 lib/librte_eal/common/rte_malloc.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index f1b73168bd..0d3a3ef93f 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -392,6 +392,29 @@ find_named_heap(const char *name)
 	return NULL;
 }
 
+static int
+check_iova_addrs_dma_mask(rte_iova_t iova_addrs[], unsigned int n_pages,
+		size_t page_sz)
+{
+	unsigned int i, bits;
+	rte_iova_t max = 0;
+
+	/* we only care about the biggest address we will get */
+	for (i = 0; i < n_pages; i++) {
+		rte_iova_t first = iova_addrs[i];
+		rte_iova_t last = first + page_sz - 1;
+		max = RTE_MAX(last, max);
+	}
+
+	bits = rte_fls_u64(max);
+	if (rte_mem_check_dma_mask(bits) != 0) {
+		RTE_LOG(ERR, EAL, "IOVA 0x%" PRIx64 " does not fit into the DMA mask\n",
+				max);
+		return -1;
+	}
+	return 0;
+}
+
 int
 rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
 		rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
@@ -412,6 +435,12 @@ rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
 		rte_errno = EINVAL;
 		return -1;
 	}
+	/* check if all IOVAs fit into the DMA mask */
+	if (iova_addrs != NULL && check_iova_addrs_dma_mask(iova_addrs,
+			n_pages, page_sz) != 0) {
+		rte_errno = EINVAL;
+		return -1;
+	}
 
 	rte_mcfg_mem_write_lock();
 
 	/* find our heap */
-- 
2.17.1
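
For illustration, below is a minimal sketch (not part of the patch) of how a caller would hit the new check. The heap name, page size, and the 39-bit DMA mask are assumptions made up for this example; rte_mem_set_dma_mask() is used only to force a mask, since rte_mem_check_dma_mask() passes trivially when no mask has been set (in practice a device driver normally sets it).

/* Hypothetical example, not part of the patch: exercising the new check.
 * Assumes EAL initializes successfully; heap name, page size, and the
 * 39-bit mask are made up for illustration.
 */
#include <stdio.h>
#include <sys/mman.h>

#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#define EXT_HEAP_NAME "example_heap" /* hypothetical name */
#define PAGE_SZ 4096UL
#define N_PAGES 4U

int
main(int argc, char **argv)
{
	rte_iova_t iovas[N_PAGES];
	unsigned int i;
	void *va;

	if (rte_eal_init(argc, argv) < 0)
		return 1;

	/* force a 39-bit DMA mask; normally a device driver would set this */
	rte_mem_set_dma_mask(39);

	/* anonymous mappings are page-aligned, satisfying the API's
	 * va_addr/page_sz alignment requirement for 4K pages */
	va = mmap(NULL, N_PAGES * PAGE_SZ, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (va == MAP_FAILED)
		return 1;

	/* deliberately construct IOVAs above the 39-bit mask (bit 40 set) */
	for (i = 0; i < N_PAGES; i++)
		iovas[i] = ((rte_iova_t)1 << 40) + i * PAGE_SZ;

	if (rte_malloc_heap_create(EXT_HEAP_NAME) != 0)
		return 1;

	/* with this patch applied, the call fails with rte_errno == EINVAL
	 * instead of silently succeeding */
	if (rte_malloc_heap_memory_add(EXT_HEAP_NAME, va, N_PAGES * PAGE_SZ,
			iovas, N_PAGES, PAGE_SZ) != 0)
		printf("memory add failed as expected: %s\n",
				rte_strerror(rte_errno));

	return 0;
}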