This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit bc9d6549e9fd18b4ac43eb077cfdc3ae6b7ad1bb
Author: xuxingliang <xuxingli...@xiaomi.com>
AuthorDate: Wed Sep 4 10:38:51 2024 +0800

    mm: call sched_note within mm lock
    
    Signed-off-by: xuxingliang <xuxingli...@xiaomi.com>
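    
    Why the move matters: sched_note_heap() records heap->mm_curused, so the
    note must be emitted while the heap lock is still held. If it were emitted
    after mm_unlock(), another thread could allocate or free in between, and
    the recorded counter would no longer match the event being traced. Below
    is a minimal standalone sketch of the pattern (plain C, not NuttX code;
    my_heap_s and my_note_alloc are hypothetical stand-ins for the real heap
    structure and sched_note_heap()):
    
    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    
    struct my_heap_s
    {
      pthread_mutex_t lock;
      size_t curused;              /* bytes currently allocated */
    };
    
    /* Hypothetical stand-in for sched_note_heap(NOTE_HEAP_ALLOC, ...) */
    
    static void my_note_alloc(void *mem, size_t size, size_t curused)
    {
      printf("alloc %p size=%zu curused=%zu\n", mem, size, curused);
    }
    
    static void *my_malloc(struct my_heap_s *heap, size_t size)
    {
      void *ret;
    
      pthread_mutex_lock(&heap->lock);
    
      ret = malloc(size);          /* stands in for carving a heap chunk */
      if (ret != NULL)
        {
          heap->curused += size;
    
          /* Emit the note BEFORE unlocking: curused still equals the
           * value produced by this allocation, so the trace is coherent.
           * Emitting after unlock would let another thread change
           * curused first.
           */
    
          my_note_alloc(ret, size, heap->curused);
        }
    
      pthread_mutex_unlock(&heap->lock);
      return ret;
    }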
---
 mm/mm_heap/mm_initialize.c |  3 +--
 mm/mm_heap/mm_malloc.c     |  9 +++++++--
 mm/mm_heap/mm_memalign.c   |  6 +++---
 mm/tlsf/mm_tlsf.c          | 32 +++++++++++++++++++++-----------
 4 files changed, 32 insertions(+), 18 deletions(-)

diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index 4b884c4a05..61b1b91a6c 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -206,10 +206,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
 
   mm_addfreechunk(heap, node);
   heap->mm_curused += 2 * MM_SIZEOF_ALLOCNODE;
-  mm_unlock(heap);
-
   sched_note_heap(NOTE_HEAP_ADD, heap, heapstart, heapsize,
                   heap->mm_curused);
+  mm_unlock(heap);
 }
 
 /****************************************************************************
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index c8cd761e2b..18fae3511b 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -321,14 +321,19 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
     }
 
   DEBUGASSERT(ret == NULL || mm_heapmember(heap, ret));
+
+  if (ret)
+    {
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (ret)
     {
       MM_ADD_BACKTRACE(heap, node);
       ret = kasan_unpoison(ret, nodesize - MM_ALLOCNODE_OVERHEAD);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
-                      heap->mm_curused);
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
       memset(ret, MM_ALLOC_MAGIC, alignsize - MM_ALLOCNODE_OVERHEAD);
 #endif
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index cdf6338b1e..060b1f6f17 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -277,15 +277,15 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       heap->mm_maxused = heap->mm_curused;
     }
 
+  sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
+                  heap->mm_curused);
+
   mm_unlock(heap);
 
   MM_ADD_BACKTRACE(heap, node);
 
   alignedchunk = (uintptr_t)kasan_unpoison((FAR const void *)alignedchunk,
                                            size - MM_ALLOCNODE_OVERHEAD);
-  sched_note_heap(NOTE_HEAP_ALLOC, heap, (FAR void *)alignedchunk, size,
-                  heap->mm_curused);
-
   DEBUGASSERT(alignedchunk % alignment == 0);
   return (FAR void *)alignedchunk;
 }
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index 816d5e0973..f97022d4a3 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -699,10 +699,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
   /* Add memory to the tlsf pool */
 
   tlsf_add_pool(heap->mm_tlsf, heapstart, heapsize);
-  mm_unlock(heap);
-
   sched_note_heap(NOTE_HEAP_ADD, heap, heapstart, heapsize,
                   heap->mm_curused);
+  mm_unlock(heap);
 }
 
 /****************************************************************************
@@ -1333,6 +1332,12 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       heap->mm_maxused = heap->mm_curused;
     }
 
+  if (ret)
+    {
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (ret)
@@ -1344,8 +1349,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 #endif
 
       ret = kasan_unpoison(ret, nodesize);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
-                      heap->mm_curused);
 
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
       memset(ret, 0xaa, nodesize);
@@ -1415,6 +1418,12 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       heap->mm_maxused = heap->mm_curused;
     }
 
+  if (ret)
+    {
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (ret)
@@ -1425,8 +1434,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       memdump_backtrace(heap, buf);
 #endif
       ret = kasan_unpoison(ret, nodesize);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, ret, nodesize,
-                      heap->mm_curused);
     }
 
 #if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
@@ -1545,6 +1552,14 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       heap->mm_maxused = heap->mm_curused;
     }
 
+  if (newmem)
+    {
+      sched_note_heap(NOTE_HEAP_FREE, heap, oldmem, oldsize,
+                      heap->mm_curused - newsize);
+      sched_note_heap(NOTE_HEAP_ALLOC, heap, newmem, newsize,
+                      heap->mm_curused);
+    }
+
   mm_unlock(heap);
 
   if (newmem)
@@ -1553,11 +1568,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       FAR struct memdump_backtrace_s *buf = newmem + newsize;
       memdump_backtrace(heap, buf);
 #endif
-
-      sched_note_heap(NOTE_HEAP_FREE, heap, oldmem, oldsize,
-                      heap->mm_curused - newsize);
-      sched_note_heap(NOTE_HEAP_ALLOC, heap, newmem, newsize,
-                      heap->mm_curused);
     }
 
 #if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0