[PATCH] mem: allow using ASan in multi-process mode

2023-10-04 Thread Artur Paszkiewicz
Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz 
---
 lib/eal/common/eal_common_memory.c | 10 
 lib/eal/common/eal_private.h   | 22 
 lib/eal/linux/eal_memalloc.c   |  9 +++-
 lib/eal/linux/eal_memory.c | 87 ++
 lib/eal/linux/meson.build  |  4 ++
 5 files changed, 131 insertions(+), 1 deletion(-)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..2c15d5fc90 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,12 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
addr, mem_sz);
 
+#ifdef RTE_MALLOC_ASAN
+   if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+   RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list");
+   return -1;
+   }
+#endif
return 0;
 }
 
@@ -1050,6 +1056,10 @@ rte_eal_memory_detach(void)
RTE_LOG(ERR, EAL, "Could not unmap memory: 
%s\n",
rte_strerror(rte_errno));
 
+#ifdef RTE_MALLOC_ASAN
+   eal_memseg_list_unmap_asan_shadow(msl);
+#endif
+
/*
 * we are detaching the fbarray rather than destroying because
 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..48df338cf9 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,28 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+#ifdef RTE_MALLOC_ASAN
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..5212ae6b56 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,13 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+   *(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
struct hugepage_info *hi, unsigned int list_idx,
@@ -641,7 +648,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 * that is already there, so read the old value, and write itback.
 * kernel populates the page with zeroes initially.
 */
-   *(volatile int *)addr = *(volatile int *)addr;
+   page_fault(addr);
 
iova = rte_mem_virt2iova(addr);
if (iova == RTE_BAD_PHYS_ADDR) {
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08fba8..aabc5a68b3 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE  8
 
@@ -1956,3 +1957,89 @@ rte_eal_memseg_init(void)
 #endif
memseg_secondary_init();
 }
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl)
+{
+   const struct internal_config *internal_conf =
+   eal_get_internal_configuration();
+   void *addr = msl->base_va;
+   void *shadow_addr = ASAN_MEM_TO_SHADOW(addr);
+   size_t shadow_sz = msl->len >> ASAN_SHADOW_SCALE;
+   int shm_oflag = O_RDWR;
+   char shm_path[PATH_MAX];
+   int shm_fd;
+   int ret = 0;
+
+   if (!msl->heap)

[PATCH v2] mem: allow using ASan in multi-process mode

2023-10-09 Thread Artur Paszkiewicz
Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz 
---
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

 lib/eal/common/eal_common_memory.c |  9 +++
 lib/eal/common/eal_private.h   | 22 +++
 lib/eal/linux/eal_memalloc.c   |  9 ++-
 lib/eal/linux/eal_memory.c | 97 ++
 lib/eal/linux/meson.build  |  4 ++
 5 files changed, 140 insertions(+), 1 deletion(-)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..15f950810b 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,12 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
addr, mem_sz);
 
+#ifdef RTE_MALLOC_ASAN
+   if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+   RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list");
+   return -1;
+   }
+#endif
return 0;
 }
 
@@ -1050,6 +1056,9 @@ rte_eal_memory_detach(void)
RTE_LOG(ERR, EAL, "Could not unmap memory: 
%s\n",
rte_strerror(rte_errno));
 
+#ifdef RTE_MALLOC_ASAN
+   eal_memseg_list_unmap_asan_shadow(msl);
+#endif
/*
 * we are detaching the fbarray rather than destroying because
 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..48df338cf9 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,28 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+#ifdef RTE_MALLOC_ASAN
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..5212ae6b56 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,13 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+   *(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
struct hugepage_info *hi, unsigned int list_idx,
@@ -641,7 +648,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 * that is already there, so read the old value, and write itback.
 * kernel populates the page with zeroes initially.
 */
-   *(volatile int *)addr = *(volatile int *)addr;
+   page_fault(addr);
 
iova = rte_mem_virt2iova(addr);
if (iova == RTE_BAD_PHYS_ADDR) {
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08fba8..3dca532874 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE  8
 
@@ -1469,6 +1470,9 @@ eal_legacy_hugepage_init(void)
if (msl->memseg_arr.count > 0)
continue;
/* this is an unused list, deallocate it */
+#ifdef RTE_MALLOC_ASAN
+   eal_memseg_list_unmap_asan_shadow(msl);
+#endif
mem_sz = msl->len;
munmap(msl->base_va, mem_sz);
msl->base_va = NULL;
@@ -1956,3 +1960,96 @@ rte_eal_memseg_init(void)
 #endif
memseg_secondary_init();
 }
+
+#if

Re: [PATCH] mem: allow using ASan in multi-process mode

2023-10-09 Thread Artur Paszkiewicz

On 10/4/23 16:51, David Marchand wrote:

- did you test with --in-memory mode? with --no-huge?


Please see v2 of the patch. I added checks for these options. They imply
no multi-process support so mapping is skipped for those cases.


- I did not look at the patch, but I wonder if there is a risk some
"local" ASan region (for the process heap, for example) can overlap
with some "shared" ASan region (for shared DPDK hugepages).


I don't think it's possible unless the actual memory regions overlap.
The ASan shadow region is always at a fixed offset from the memory it
shadows. Also, this patch only makes the shadow regions shared; ASan
instrumentation already uses these regions.
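
For reference, on 64-bit platforms ASan computes shadow addresses as

    Shadow = (Addr >> 3) + Offset

with a platform-specific constant Offset, so two shadow ranges can only
overlap if the corresponding application address ranges overlap.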


- with this work, would unit tests (that were marked failing with
ASan) be ok now? See REGISTER_FAST_TEST macro in app/test.


I tried enabling these tests and some of them started passing with this
patch, namely:
- multiprocess_autotest
- eal_flags_c_opt_autotest
- eal_flags_main_opt_autotest
- eal_flags_a_opt_autotest

eal_flags_file_prefix_autotest still fails. The rest seem to be passing
even without the patch.

Regards,
Artur


[PATCH v3] mem: allow using ASan in multi-process mode

2023-10-25 Thread Artur Paszkiewicz
Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz 
---
v3:
- Removed conditional compilation from eal_common_memory.c.
- Improved comments.
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

 lib/eal/common/eal_common_memory.c |   7 ++
 lib/eal/common/eal_private.h   |  35 ++
 lib/eal/linux/eal_memalloc.c   |  23 +--
 lib/eal/linux/eal_memory.c | 101 +
 lib/eal/linux/meson.build  |   4 ++
 5 files changed, 164 insertions(+), 6 deletions(-)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..5daf53d4d2 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,11 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
addr, mem_sz);
 
+   if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+   RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list");
+   return -1;
+   }
+
return 0;
 }
 
@@ -1050,6 +1055,8 @@ rte_eal_memory_detach(void)
RTE_LOG(ERR, EAL, "Could not unmap memory: 
%s\n",
rte_strerror(rte_errno));
 
+   eal_memseg_list_unmap_asan_shadow(msl);
+
/*
 * we are detaching the fbarray rather than destroying because
 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..6535b38637 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,41 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline int
+eal_memseg_list_map_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+   return 0;
+}
+#endif
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+#ifdef RTE_MALLOC_ASAN
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline void
+eal_memseg_list_unmap_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+}
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..a4151534a8 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,21 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+   /* We need to trigger a write to the page to enforce page fault but we
+* can't overwrite value that is already there, so read the old value
+* and write it back. Kernel populates the page with zeroes initially.
+*
+* Disable ASan instrumentation here because if the segment is already
+* allocated by another process and is marked as free in the shadow,
+* accessing this address will cause an ASan error.
+*/
+   *(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
struct hugepage_info *hi, unsigned int list_idx,
@@ -636,12 +651,8 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
goto mapped;
}
 
-   /* we need to trigger a write to the page to enforce page fault and
-* ensure that page is accessible to us, but we can't overwrite value
-* that is already there, so read the old value, and write itback.
-* kernel populates the page with zeroes initially.
-*/
-   *(volatile int *)addr = *(volatile int *)addr;
+   /* enforce page fau

[PATCH] malloc: fix allocation for a specific case with ASAN

2023-09-04 Thread Artur Paszkiewicz
Allocation would fail with ASAN enabled if the size and alignment was
equal to half of the page size, e.g.:

size_t pg_sz = 2 * (1 << 20);
rte_malloc(NULL, pg_sz / 2, pg_sz / 2);

In such case, try_expand_heap_primary() only allocated one page but it
is not enough to fit this allocation with such alignment and
MALLOC_ELEM_TRAILER_LEN > 0, as correctly checked by
malloc_elem_can_hold().
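
Worked through on the example above, writing H for MALLOC_ELEM_HEADER_LEN
and T for MALLOC_ELEM_TRAILER_LEN (so MALLOC_ELEM_OVERHEAD = H + T, and
T > 0 with ASan), assuming H + T < 1M:

    pg_sz = 2M, elt_size = align = 1M

    old: alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(1M, 1M) + H + T, 2M)
                  = RTE_ALIGN_CEIL(1M + H + T, 2M) = 2M   /* 1 page  */

    new: alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(H, 1M) + 1M + T, 2M)
                  = RTE_ALIGN_CEIL(2M + T, 2M)     = 4M   /* 2 pages */

Within a single 2M page the 1M-aligned data can only start 1M into the
page, leaving no room for the T-byte trailer after the 1M of data, which
is exactly the case malloc_elem_can_hold() rejects.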

Signed-off-by: Artur Paszkiewicz 
---
 lib/eal/common/malloc_heap.c | 4 ++--
 lib/eal/common/malloc_mp.c   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index 6b6cf9174c..bb7da0d2ef 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -402,8 +402,8 @@ try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
int n_segs;
bool callback_triggered = false;
 
-   alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(elt_size, align) +
-   MALLOC_ELEM_OVERHEAD, pg_sz);
+   alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(MALLOC_ELEM_HEADER_LEN, align) +
+   elt_size + MALLOC_ELEM_TRAILER_LEN, pg_sz);
n_segs = alloc_sz / pg_sz;
 
/* we can't know in advance how many pages we'll need, so we malloc */
diff --git a/lib/eal/common/malloc_mp.c b/lib/eal/common/malloc_mp.c
index 7270c2ec90..62deaca9eb 100644
--- a/lib/eal/common/malloc_mp.c
+++ b/lib/eal/common/malloc_mp.c
@@ -250,8 +250,8 @@ handle_alloc_request(const struct malloc_mp_req *m,
return -1;
}
 
-   alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(ar->elt_size, ar->align) +
-   MALLOC_ELEM_OVERHEAD, ar->page_sz);
+   alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(MALLOC_ELEM_HEADER_LEN, ar->align) +
+   ar->elt_size + MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
n_segs = alloc_sz / ar->page_sz;
 
/* we can't know in advance how many pages we'll need, so we malloc */
-- 
2.35.3



[PATCH v2] malloc: fix allocation for a specific case with ASAN

2023-09-11 Thread Artur Paszkiewicz
Allocation would fail with ASAN enabled if the size and alignment was
equal to half of the page size, e.g.:

size_t pg_sz = 2 * (1 << 20);
rte_malloc(NULL, pg_sz / 2, pg_sz / 2);

In such case, try_expand_heap_primary() only allocated one page but it
is not enough to fit this allocation with such alignment and
MALLOC_ELEM_TRAILER_LEN > 0, as correctly checked by
malloc_elem_can_hold().

Signed-off-by: Artur Paszkiewicz 
---
v2: 
- fix commit message typo

 lib/eal/common/malloc_heap.c | 4 ++--
 lib/eal/common/malloc_mp.c   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index 6b6cf9174c..bb7da0d2ef 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -402,8 +402,8 @@ try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
int n_segs;
bool callback_triggered = false;
 
-   alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(elt_size, align) +
-   MALLOC_ELEM_OVERHEAD, pg_sz);
+   alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(MALLOC_ELEM_HEADER_LEN, align) +
+   elt_size + MALLOC_ELEM_TRAILER_LEN, pg_sz);
n_segs = alloc_sz / pg_sz;
 
/* we can't know in advance how many pages we'll need, so we malloc */
diff --git a/lib/eal/common/malloc_mp.c b/lib/eal/common/malloc_mp.c
index 7270c2ec90..62deaca9eb 100644
--- a/lib/eal/common/malloc_mp.c
+++ b/lib/eal/common/malloc_mp.c
@@ -250,8 +250,8 @@ handle_alloc_request(const struct malloc_mp_req *m,
return -1;
}
 
-   alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(ar->elt_size, ar->align) +
-   MALLOC_ELEM_OVERHEAD, ar->page_sz);
+   alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(MALLOC_ELEM_HEADER_LEN, ar->align) +
+   ar->elt_size + MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
n_segs = alloc_sz / ar->page_sz;
 
/* we can't know in advance how many pages we'll need, so we malloc */
-- 
2.35.3



Re: [PATCH v3] mem: allow using ASan in multi-process mode

2024-10-17 Thread Artur Paszkiewicz

On 10/3/24 23:18, Stephen Hemminger wrote:

Makes sense, but patch has some fuzz against current main branch.
There is also another patch that addresses the ASAN touch issue.

https://patchwork.dpdk.org/project/dpdk/patch/20240723083419.12435-1-amic...@kalrayinc.com/


I just sent a new version of the patch, it no longer needs the change
that was related to that linked patch.

Thanks,
Artur


[PATCH RESEND] malloc: fix allocation for a specific case with ASan

2024-10-17 Thread Artur Paszkiewicz
Allocation would fail with ASan enabled if the size and alignment was
equal to half of the page size, e.g.:

size_t pg_sz = 2 * (1 << 20);
rte_malloc(NULL, pg_sz / 2, pg_sz / 2);

In such case, try_expand_heap_primary() only allocated one page but it
is not enough to fit this allocation with such alignment and
MALLOC_ELEM_TRAILER_LEN > 0, as correctly checked by
malloc_elem_can_hold().

Signed-off-by: Artur Paszkiewicz 
---
 lib/eal/common/malloc_heap.c | 4 ++--
 lib/eal/common/malloc_mp.c   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index 058aaf4209..5b93e7fcb8 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -401,8 +401,8 @@ try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
int n_segs;
bool callback_triggered = false;
 
-   alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(elt_size, align) +
-   MALLOC_ELEM_OVERHEAD, pg_sz);
+   alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(MALLOC_ELEM_HEADER_LEN, align) +
+   elt_size + MALLOC_ELEM_TRAILER_LEN, pg_sz);
n_segs = alloc_sz / pg_sz;
 
/* we can't know in advance how many pages we'll need, so we malloc */
diff --git a/lib/eal/common/malloc_mp.c b/lib/eal/common/malloc_mp.c
index 9765277f5d..1373da44c9 100644
--- a/lib/eal/common/malloc_mp.c
+++ b/lib/eal/common/malloc_mp.c
@@ -251,8 +251,8 @@ handle_alloc_request(const struct malloc_mp_req *m,
return -1;
}
 
-   alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(ar->elt_size, ar->align) +
-   MALLOC_ELEM_OVERHEAD, ar->page_sz);
+   alloc_sz = RTE_ALIGN_CEIL(RTE_MAX(MALLOC_ELEM_HEADER_LEN, ar->align) +
+   ar->elt_size + MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
n_segs = alloc_sz / ar->page_sz;
 
/* we can't know in advance how many pages we'll need, so we malloc */
-- 
2.43.0



[PATCH v4] mem: allow using ASan in multi-process mode

2024-10-17 Thread Artur Paszkiewicz
Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for allocated segments as shared
memory. The primary process is responsible for creating and removing the
shared memory objects.

Signed-off-by: Artur Paszkiewicz 
---
v4:
- Map ASan shadow shm after mapping the segment.
  Due to a change in ASan behavior[1] the mapped shadow shared memory
  regions are remapped later, when segments are mapped. So instead of
  mapping the whole shadow region when reserving the memseg list memory,
  map only the fragments corresponding to the segments after they are
  mapped (see the sketch after this changelog). Because of this it is
  also no longer necessary to disable ASan instrumentation for
  triggering the page fault in alloc_seg().
- Adjusted function naming.
- Enabled unit tests.
v3:
- Removed conditional compilation from eal_common_memory.c.
- Improved comments.
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

[1] https://github.com/llvm/llvm-project/commit/a34e702aa16fde4cc76e9360d985a64e008e0b23
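
A rough sketch of this per-segment mapping (hypothetical helper; assumes
the shm object backing the MSL shadow already exists and its descriptor
is available, e.g. via eal_memseg_list_get_asan_shadow_fd() declared in
the diff below):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    /* After a hugepage segment is mmap()ed at seg_addr, overlay just the
     * matching fragment of the shared shadow over the private one. The
     * fragment's offset inside the shm object is the segment's offset
     * from the MSL base, scaled down by ASAN_SHADOW_SCALE; for hugepage
     * segments this stays page-aligned, as mmap() requires.
     */
    static int map_seg_shadow(void *seg_addr, size_t seg_len,
            void *msl_base, int shadow_fd)
    {
        off_t off = ((uintptr_t)seg_addr - (uintptr_t)msl_base)
                >> ASAN_SHADOW_SCALE;

        if (mmap(ASAN_MEM_TO_SHADOW(seg_addr), seg_len >> ASAN_SHADOW_SCALE,
                PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
                shadow_fd, off) == MAP_FAILED)
            return -1;
        return 0;
    }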

 app/test/test_mp_secondary.c   |  2 +-
 app/test/test_pdump.c  |  2 +-
 lib/eal/common/eal_common_memory.c |  7 +++
 lib/eal/common/eal_private.h   | 54 
 lib/eal/linux/eal_memalloc.c   | 30 +
 lib/eal/linux/eal_memory.c | 98 ++
 lib/eal/linux/meson.build  |  4 ++
 7 files changed, 195 insertions(+), 2 deletions(-)

diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c
index f3694530a8..7da2878f64 100644
--- a/app/test/test_mp_secondary.c
+++ b/app/test/test_mp_secondary.c
@@ -223,4 +223,4 @@ test_mp_secondary(void)
 
 #endif /* !RTE_EXEC_ENV_WINDOWS */
 
-REGISTER_FAST_TEST(multiprocess_autotest, false, false, test_mp_secondary);
+REGISTER_FAST_TEST(multiprocess_autotest, false, true, test_mp_secondary);
diff --git a/app/test/test_pdump.c b/app/test/test_pdump.c
index 9f7769707e..a0919e89ba 100644
--- a/app/test/test_pdump.c
+++ b/app/test/test_pdump.c
@@ -219,4 +219,4 @@ test_pdump(void)
return TEST_SUCCESS;
 }
 
-REGISTER_FAST_TEST(pdump_autotest, true, false, test_pdump);
+REGISTER_FAST_TEST(pdump_autotest, true, true, test_pdump);
diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index a185e0b580..8fbd0c5af9 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,11 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
EAL_LOG(DEBUG, "VA reserved for memseg list at %p, size %zx",
addr, mem_sz);
 
+   if (eal_memseg_list_init_asan_shadow(msl) != 0) {
+   EAL_LOG(ERR, "Failed to init ASan shadow region for memseg list");
+   return -1;
+   }
+
return 0;
 }
 
@@ -1052,6 +1057,8 @@ rte_eal_memory_detach(void)
EAL_LOG(ERR, "Could not unmap memory: %s",
rte_strerror(rte_errno));
 
+   eal_memseg_list_cleanup_asan_shadow(msl);
+
/*
 * we are detaching the fbarray rather than destroying because
 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index bb315dab04..96e05647ff 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -309,6 +309,60 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+/**
+ * Initialize the MSL ASan shadow region shared memory.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_init_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline int
+eal_memseg_list_init_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+   return 0;
+}
+#endif
+
+/**
+ * Cleanup the MSL ASan shadow region shared memory.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+#ifdef RTE_MALLOC_ASAN
+void
+eal_memseg_list_cleanup_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline void
+eal_memseg_list_cleanup_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+}
+#endif
+
+/**
+ * Get the MSL ASan shadow shared memory object file descriptor.
+ *
+ * @param msl
+ *  Index of the MSL.
+ * @return
+ *  A file descriptor.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_get_asan_shadow_fd(int msl_idx);
+#else
+static inline int
+eal_memseg_list_get_asan_shadow_fd(__rte_unused int msl_idx)
+{
+   return -1;
+}
+#endif
+

Re: [PATCH RESEND] malloc: fix allocation for a specific case with ASan

2024-10-18 Thread Artur Paszkiewicz

Recheck-request: iol-unit-amd64-testing


Re: [PATCH v4] mem: allow using ASan in multi-process mode

2024-10-18 Thread Artur Paszkiewicz

Recheck-request: iol-unit-amd64-testing


Re: [PATCH RESEND] malloc: fix allocation for a specific case with ASan

2024-10-23 Thread Artur Paszkiewicz

Recheck-request: rebase=main,iol-unit-amd64-testing,iol-unit-arm64-testing


Re: [PATCH RESEND] malloc: fix allocation for a specific case with ASan

2024-10-24 Thread Artur Paszkiewicz
Recheck-request: iol-compile-amd64-testing,iol-compile-arm64-testing,iol-unit-amd64-testing,iol-unit-arm64-testing


Re: [PATCH v4] mem: allow using ASan in multi-process mode

2024-10-24 Thread Artur Paszkiewicz
Recheck-request: rebase=main,iol-compile-amd64-testing,iol-compile-arm64-testing,iol-unit-amd64-testing,iol-unit-arm64-testing