During memory initialization, calling rte_mem_check_dma_mask leads to
a deadlock: memory_hotplug_lock is already held for writing by the
initialization code itself, and rte_memseg_walk then tries to take it
as a reader.
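
The failing pattern, reduced to a minimal pthreads sketch (hypothetical
names standing in for memory_hotplug_lock, rte_memseg_walk and the init
path; the real code uses the EAL rwlock wrappers):

    #include <pthread.h>

    /* Stand-in for memory_hotplug_lock. */
    static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Stand-in for rte_memseg_walk: takes the lock as a reader. */
    static int memseg_walk(void)
    {
            /* The same thread already holds the write lock, so this
             * read-lock attempt blocks forever on a non-recursive
             * rwlock. */
            pthread_rwlock_rdlock(&hotplug_lock);
            /* ... visit each memseg list ... */
            pthread_rwlock_unlock(&hotplug_lock);
            return 0;
    }

    /* Stand-in for the memory init path: holds the write lock. */
    static void memory_init(void)
    {
            pthread_rwlock_wrlock(&hotplug_lock);
            memseg_walk();          /* deadlocks here */
            pthread_rwlock_unlock(&hotplug_lock);
    }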

This patch adds a thread_unsafe version, which calls the final
function specifying that memory_hotplug_lock does not need to be
acquired. The patch also turns rte_mem_check_dma_mask into an
intermediate step which calls the final function as before, implying
memory_hotplug_lock will be acquired.

PMDs should always use the version that acquires the lock; the
thread_unsafe one is intended only for internal EAL memory code.
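
For instance, a hypothetical driver probe-time check (pmd_dma_setup and
the 40-bit width are just illustrative) would use the locking variant,
since memory initialization has already completed by then:

    #include <rte_memory.h>

    static int pmd_dma_setup(void)
    {
            /* Safe here: no writer holds memory_hotplug_lock, so the
             * walk can take it as a reader. */
            if (rte_mem_check_dma_mask(40) != 0)
                    return -1;      /* some memseg IOVA exceeds 40 bits */
            return 0;
    }

EAL code running under the write lock calls
rte_mem_check_dma_mask_thread_unsafe instead, as the malloc_heap.c and
eal_memory.c hunks below do.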

Fixes: 223b7f1d5ef6 ("mem: add function for checking memseg IOVA")

Signed-off-by: Alejandro Lucero <alejandro.luc...@netronome.com>
---
 lib/librte_eal/common/eal_common_memory.c  | 24 +++++++++++++--
 lib/librte_eal/common/include/rte_memory.h | 35 +++++++++++++++++++---
 lib/librte_eal/common/malloc_heap.c        |  2 +-
 lib/librte_eal/linuxapp/eal/eal_memory.c   |  4 +--
 lib/librte_eal/rte_eal_version.map         |  1 +
 5 files changed, 56 insertions(+), 10 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index cc4f1d80f..87fd9921f 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -446,11 +446,12 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
 #endif
 
 /* check memseg iovas are within the required range based on dma mask */
-int __rte_experimental
-rte_mem_check_dma_mask(uint8_t maskbits)
+static int __rte_experimental
+check_dma_mask(uint8_t maskbits, bool thread_unsafe)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        uint64_t mask;
+       int ret;
 
        /* sanity check */
        if (maskbits > MAX_DMA_MASK_BITS) {
@@ -462,7 +463,12 @@ rte_mem_check_dma_mask(uint8_t maskbits)
        /* create dma mask */
        mask = ~((1ULL << maskbits) - 1);
 
-       if (rte_memseg_walk(check_iova, &mask))
+       if (thread_unsafe)
+               ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
+       else
+               ret = rte_memseg_walk(check_iova, &mask);
+
+       if (ret)
                /*
                 * Dma mask precludes hugepage usage.
                 * This device can not be used and we do not need to keep
@@ -480,6 +486,18 @@ rte_mem_check_dma_mask(uint8_t maskbits)
        return 0;
 }
 
+int __rte_experimental
+rte_mem_check_dma_mask(uint8_t maskbits)
+{
+       return check_dma_mask(maskbits, false);
+}
+
+int __rte_experimental
+rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
+{
+       return check_dma_mask(maskbits, true);
+}
+
 /*
  * Set dma mask to use when memory initialization is done.
  *
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index abbfe2364..d970825df 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -463,16 +463,43 @@ unsigned rte_memory_get_nchannel(void);
  */
 unsigned rte_memory_get_nrank(void);
 
-/* check memsegs iovas are within a range based on dma mask */
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if all currently allocated memory segments are compliant with
+ * supplied DMA address width.
+ *
+ *  @param maskbits
+ *    Address width to check against.
+ */
 int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);
 
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice
  *
- *  Set dma mask to use once memory initialization is done.
- *  Previous function rte_mem_check_dma_mask can not be used
- *  safely until memory has been initialized.
+ * Check if all currently allocated memory segments are compliant with
+ * supplied DMA address width. This function uses
+ * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk, so
+ * memory_hotplug_lock will not be acquired, avoiding deadlock during
+ * memory initialization.
+ *
+ * This function is for EAL core memory internal use only. Drivers should
+ * use rte_mem_check_dma_mask instead.
+ *
+ *  @param maskbits
+ *    Address width to check against.
+ */
+int __rte_experimental rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ *  Set dma mask to use once memory initialization is done. Previous functions
+ *  rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe cannot be
+ *  used safely until memory has been initialized.
  */
 void __rte_experimental rte_mem_set_dma_mask(uint8_t maskbits);
 
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index c2c112aa6..c6a6d4f6b 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -334,7 +334,7 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
         * executed. For 2) implies the new memory can not be added.
         */
        if (mcfg->dma_maskbits &&
-           rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+           rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
                /*
                 * Currently this can only happen if IOMMU is enabled
                 * and the address width supported by the IOMMU hw is
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index c7935879a..c1b5e0791 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1394,7 +1394,7 @@ eal_legacy_hugepage_init(void)
                        addr = RTE_PTR_ADD(addr, (size_t)page_sz);
                }
                if (mcfg->dma_maskbits &&
-                   rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+                   rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
                        RTE_LOG(ERR, EAL,
                                "%s(): couldnt allocate memory due to IOVA 
exceeding limits of current DMA mask.\n",
                                __func__);
@@ -1641,7 +1641,7 @@ eal_legacy_hugepage_init(void)
        }
 
        if (mcfg->dma_maskbits &&
-           rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+           rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
                RTE_LOG(ERR, EAL,
                        "%s(): couldn't allocate memory due to IOVA exceeding 
limits of current DMA mask.\n",
                        __func__);
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index 51ee948ba..61bc5ca05 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -331,6 +331,7 @@ EXPERIMENTAL {
        rte_mem_alloc_validator_register;
        rte_mem_alloc_validator_unregister;
        rte_mem_check_dma_mask;
+       rte_mem_check_dma_mask_thread_unsafe;
        rte_mem_event_callback_register;
        rte_mem_event_callback_unregister;
        rte_mem_iova2virt;
-- 
2.17.1
