This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new 70083168f1 mm: support customizing the mm alignment and default it to 8
70083168f1 is described below

commit 70083168f100e8635cb1e5f3a5fcdd0362131994
Author: wangbowen6 <wangbow...@xiaomi.com>
AuthorDate: Fri Dec 30 16:30:55 2022 +0800

    mm: support customizing the mm alignment and default it to 8
    
    Signed-off-by: wangbowen6 <wangbow...@xiaomi.com>
---
 mm/Kconfig                      |  9 +++++++++
 mm/mm_heap/mm.h                 | 16 +++++++++-------
 mm/mm_heap/mm_addfreechunk.c    |  2 +-
 mm/mm_heap/mm_checkcorruption.c |  2 +-
 mm/mm_heap/mm_initialize.c      |  3 +--
 mm/mm_heap/mm_mallinfo.c        |  2 +-
 mm/mm_heap/mm_malloc.c          | 15 ++++++++++-----
 mm/mm_heap/mm_memalign.c        | 30 +++++++++++++++++-------------
 mm/mm_heap/mm_memdump.c         |  2 +-
 mm/mm_heap/mm_realloc.c         | 24 +++++++++++++++++++++---
 mm/mm_heap/mm_shrinkchunk.c     |  2 +-
 mm/mm_heap/mm_size2ndx.c        |  1 +
 12 files changed, 73 insertions(+), 35 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index 5dc5c9f85d..71a95c80fa 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -61,6 +61,15 @@ config MM_KERNEL_HEAPSIZE
                user-mode heap.  This value may need to be aligned to units of the
                size of the smallest memory protection region.
 
+config MM_DFAULT_ALIGNMENT
+       int "Memory default alignment in bytes"
+       default 8
+       range 0 64
+       ---help---
+               The default memory alignment in bytes.  If this value is 0, the
+               real default alignment is sizeof(uintptr_t); if it is not 0, it
+               must be a power of two and at least sizeof(uintptr_t).
+
 config MM_SMALL
        bool "Small memory model"
        default n
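
For illustration, a board that wants a larger default alignment could set the
new option in its configuration like this (hypothetical .config fragment, not
part of this commit):

    # Hypothetical .config fragment: raise the default heap alignment to 16.
    # The value must be a power of two and at least sizeof(uintptr_t);
    # 0 falls back to sizeof(uintptr_t).
    CONFIG_MM_DFAULT_ALIGNMENT=16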
diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 05c3db4bf8..adc6f24d28 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -110,7 +110,12 @@
 #define MM_MAX_CHUNK     (1 << MM_MAX_SHIFT)
 #define MM_NNODES        (MM_MAX_SHIFT - MM_MIN_SHIFT + 1)
 
-#define MM_GRAN_MASK     (MM_MIN_CHUNK - 1)
+#if CONFIG_MM_DFAULT_ALIGNMENT == 0
+#  define MM_ALIGN       sizeof(uintptr_t)
+#else
+#  define MM_ALIGN       CONFIG_MM_DFAULT_ALIGNMENT
+#endif
+#define MM_GRAN_MASK     (MM_ALIGN - 1)
 #define MM_ALIGN_UP(a)   (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
 #define MM_ALIGN_DOWN(a) ((a) & ~MM_GRAN_MASK)
 
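The new rounding helpers can be exercised standalone; a minimal sketch,
assuming MM_ALIGN is 8 (the new default, not a fixed property of every
configuration):

    /* Minimal sketch mirroring the macros above, with MM_ALIGN assumed
     * to be 8 for illustration.
     */

    #include <assert.h>

    #define MM_ALIGN         8
    #define MM_GRAN_MASK     (MM_ALIGN - 1)
    #define MM_ALIGN_UP(a)   (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
    #define MM_ALIGN_DOWN(a) ((a) & ~MM_GRAN_MASK)

    int main(void)
    {
      assert(MM_ALIGN_UP(13) == 16);          /* Round up to 8-byte boundary */
      assert(MM_ALIGN_DOWN(13) == 8);         /* Round down */
      assert((MM_ALIGN & MM_GRAN_MASK) == 0); /* Power-of-two test, as
                                               * asserted below */
      return 0;
    }
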
@@ -138,10 +143,6 @@
 
 #define OVERHEAD_MM_ALLOCNODE (SIZEOF_MM_ALLOCNODE - sizeof(mmsize_t))
 
-/* What is the size of the freenode? */
-
-#define SIZEOF_MM_FREENODE sizeof(struct mm_freenode_s)
-
 /* Get the node size */
 
 #define SIZEOF_MM_NODE(node) ((node)->size & (~MM_MASK_BIT))
@@ -194,8 +195,9 @@ struct mm_freenode_s
 static_assert(SIZEOF_MM_ALLOCNODE <= MM_MIN_CHUNK,
               "Error size for struct mm_allocnode_s\n");
 
-static_assert(SIZEOF_MM_FREENODE <= MM_MIN_CHUNK,
-              "Error size for struct mm_freenode_s\n");
+static_assert(MM_ALIGN >= sizeof(uintptr_t) &&
+              (MM_ALIGN & MM_GRAN_MASK) == 0,
+              "Error memory aligment\n");
 
 struct mm_delaynode_s
 {
diff --git a/mm/mm_heap/mm_addfreechunk.c b/mm/mm_heap/mm_addfreechunk.c
index 335451fff7..3c7f975608 100644
--- a/mm/mm_heap/mm_addfreechunk.c
+++ b/mm/mm_heap/mm_addfreechunk.c
@@ -51,7 +51,7 @@ void mm_addfreechunk(FAR struct mm_heap_s *heap,
   size_t nodesize = SIZEOF_MM_NODE(node);
   int ndx;
 
-  DEBUGASSERT(nodesize >= SIZEOF_MM_FREENODE);
+  DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
   DEBUGASSERT((node->size & MM_ALLOC_BIT) == 0);
 
   /* Convert the size to a nodelist index */
diff --git a/mm/mm_heap/mm_checkcorruption.c b/mm/mm_heap/mm_checkcorruption.c
index 9e82833e53..30df3a1e14 100644
--- a/mm/mm_heap/mm_checkcorruption.c
+++ b/mm/mm_heap/mm_checkcorruption.c
@@ -50,7 +50,7 @@ static void checkcorruption_handler(FAR struct mm_allocnode_s *node,
     {
       FAR struct mm_freenode_s *fnode = (FAR void *)node;
 
-      assert(nodesize >= SIZEOF_MM_FREENODE);
+      assert(nodesize >= MM_MIN_CHUNK);
       assert(fnode->blink->flink == fnode);
       assert(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
       assert(fnode->flink == NULL ||
diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index c82f905634..f9ee40c156 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -139,7 +139,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
   heap->mm_heapstart[IDX]->size    = SIZEOF_MM_ALLOCNODE | MM_ALLOC_BIT;
   node                             = (FAR struct mm_freenode_s *)
                                      (heapbase + SIZEOF_MM_ALLOCNODE);
-  DEBUGASSERT((((uintptr_t)node + SIZEOF_MM_ALLOCNODE) % MM_MIN_CHUNK) == 0);
+  DEBUGASSERT((((uintptr_t)node + SIZEOF_MM_ALLOCNODE) % MM_ALIGN) == 0);
   node->size                       = heapsize - 2 * SIZEOF_MM_ALLOCNODE;
   heap->mm_heapend[IDX]            = (FAR struct mm_allocnode_s *)
                                      (heapend - SIZEOF_MM_ALLOCNODE);
@@ -204,7 +204,6 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
   heapsize -= sizeof(struct mm_heap_s);
   heapstart = (FAR char *)heap_adj + sizeof(struct mm_heap_s);
 
-  DEBUGASSERT(MM_MIN_CHUNK >= SIZEOF_MM_FREENODE);
   DEBUGASSERT(MM_MIN_CHUNK >= SIZEOF_MM_ALLOCNODE);
 
   /* Set up global variables */
diff --git a/mm/mm_heap/mm_mallinfo.c b/mm/mm_heap/mm_mallinfo.c
index 34e90994bf..85dd2e3b74 100644
--- a/mm/mm_heap/mm_mallinfo.c
+++ b/mm/mm_heap/mm_mallinfo.c
@@ -57,7 +57,7 @@ static void mallinfo_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
     {
       FAR struct mm_freenode_s *fnode = (FAR void *)node;
 
-      DEBUGASSERT(nodesize >= SIZEOF_MM_FREENODE);
+      DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
       DEBUGASSERT(fnode->blink->flink == fnode);
       DEBUGASSERT(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
       DEBUGASSERT(fnode->flink == NULL ||
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index d03abec747..69ceb3e8d4 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -130,9 +130,15 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 #endif
 
   /* Adjust the size to account for (1) the size of the allocated node and
-   * (2) to make sure that it is an even multiple of our granule size.
+   * (2) to make sure that it is aligned with MM_ALIGN and its size is at
+   * least MM_MIN_CHUNK.
    */
 
+  if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
+    {
+      size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
+    }
+
   alignsize = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
   if (alignsize < size)
     {
@@ -141,8 +147,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       return NULL;
     }
 
-  DEBUGASSERT(alignsize >= MM_MIN_CHUNK);
-  DEBUGASSERT(alignsize >= SIZEOF_MM_FREENODE);
+  DEBUGASSERT(alignsize >= MM_ALIGN);
 
   /* We need to hold the MM mutex while we muck with the nodelist. */
 
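A standalone sketch of the size adjustment above, with example constants (a
real build derives all of them from the configuration):

    #include <assert.h>
    #include <stddef.h>

    #define MM_ALIGN              8   /* Example values only */
    #define MM_MIN_CHUNK          16
    #define OVERHEAD_MM_ALLOCNODE 4
    #define MM_ALIGN_UP(a) (((a) + MM_ALIGN - 1) & ~(size_t)(MM_ALIGN - 1))

    static size_t adjust_size(size_t size)
    {
      if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
        {
          size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
        }

      return MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
    }

    int main(void)
    {
      assert(adjust_size(1)  == 16); /* Clamped so the chunk can later
                                      * hold a free node */
      assert(adjust_size(13) == 24); /* 13 + 4 = 17, rounded up to 24 */
      return 0;
    }
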
@@ -204,7 +209,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
        */
 
       remaining = nodesize - alignsize;
-      if (remaining >= SIZEOF_MM_FREENODE)
+      if (remaining >= MM_MIN_CHUNK)
         {
           /* Create the remainder node */
 
@@ -277,6 +282,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
     }
 #endif
 
-  DEBUGASSERT(ret == NULL || ((uintptr_t)ret) % MM_MIN_CHUNK == 0);
+  DEBUGASSERT(ret == NULL || ((uintptr_t)ret) % MM_ALIGN == 0);
   return ret;
 }
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index ee98a79c67..5833c8dcc0 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -54,7 +54,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
   FAR struct mm_allocnode_s *node;
   uintptr_t rawchunk;
   uintptr_t alignedchunk;
-  size_t mask = alignment - 1;
+  size_t mask;
   size_t allocsize;
   size_t newsize;
 
@@ -84,16 +84,22 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
    * alignment of malloc, then just let malloc do the work.
    */
 
-  if (alignment <= MM_MIN_CHUNK)
+  if (alignment <= MM_ALIGN)
     {
       FAR void *ptr = mm_malloc(heap, size);
       DEBUGASSERT(ptr == NULL || ((uintptr_t)ptr) % alignment == 0);
       return ptr;
     }
+  else if (alignment < MM_MIN_CHUNK)
+    {
+      alignment = MM_MIN_CHUNK;
+    }
+
+  mask = alignment - 1;
 
-  /* Adjust the size to account for (1) the size of the allocated node, (2)
-   * to make sure that it is an even multiple of our granule size, and to
-   * include the alignment amount.
+  /* Adjust the size to account for (1) the size of the allocated node and
+   * (2) to make sure that it is aligned with MM_ALIGN and its size is at
+   * least MM_MIN_CHUNK.
    *
    * Notice that we increase the allocation size by twice the requested
    * alignment.  We do this so that there will be at least two valid
@@ -103,6 +109,11 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
    * not include SIZEOF_MM_ALLOCNODE.
    */
 
+  if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
+    {
+      size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
+    }
+
   newsize = MM_ALIGN_UP(size);         /* Make multiples of our granule size */
   allocsize = newsize + 2 * alignment; /* Add double full alignment size */
 
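The doubled alignment padding can be illustrated outside the heap; a sketch
using plain malloc() with example sizes, not the allocator's own node
bookkeeping:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
      size_t alignment = 64;   /* Example request */
      size_t size      = 100;
      size_t mask      = alignment - 1;

      /* Over-allocate by 2 * alignment: at least two aligned positions
       * then exist inside the raw chunk, so one of them always leaves
       * enough room in front for the node header.
       */

      uintptr_t raw = (uintptr_t)malloc(size + 2 * alignment);
      assert(raw != 0);

      uintptr_t aligned = (raw + mask) & ~(uintptr_t)mask;
      assert(aligned % alignment == 0);
      assert(aligned + size <= raw + size + 2 * alignment);

      free((void *)raw);       /* The real code frees via the heap node */
      return 0;
    }
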
@@ -154,13 +165,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       next = (FAR struct mm_allocnode_s *)
         ((FAR char *)node + SIZEOF_MM_NODE(node));
 
-      /* Make sure that there is space to convert the preceding
-       * mm_allocnode_s into an mm_freenode_s.  I think that this should
-       * always be true
-       */
-
-      DEBUGASSERT(alignedchunk >= rawchunk + 8);
-
       newnode = (FAR struct mm_allocnode_s *)
         (alignedchunk - SIZEOF_MM_ALLOCNODE);
 
@@ -178,7 +182,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
        * alignment point.
        */
 
-      if (precedingsize < SIZEOF_MM_FREENODE)
+      if (precedingsize < MM_MIN_CHUNK)
         {
           alignedchunk += alignment;
           newnode       = (FAR struct mm_allocnode_s *)
diff --git a/mm/mm_heap/mm_memdump.c b/mm/mm_heap/mm_memdump.c
index a0ad5c5c7f..55c8722abd 100644
--- a/mm/mm_heap/mm_memdump.c
+++ b/mm/mm_heap/mm_memdump.c
@@ -95,7 +95,7 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
     {
       FAR struct mm_freenode_s *fnode = (FAR void *)node;
 
-      DEBUGASSERT(nodesize >= SIZEOF_MM_FREENODE);
+      DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
       DEBUGASSERT(fnode->blink->flink == fnode);
       DEBUGASSERT(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
       DEBUGASSERT(fnode->flink == NULL ||
diff --git a/mm/mm_heap/mm_realloc.c b/mm/mm_heap/mm_realloc.c
index 788fca4f48..f7dcd4f41f 100644
--- a/mm/mm_heap/mm_realloc.c
+++ b/mm/mm_heap/mm_realloc.c
@@ -110,9 +110,15 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
 #endif
 
   /* Adjust the size to account for (1) the size of the allocated node and
-   * (2) to make sure that it is an even multiple of our granule size.
+   * (2) to make sure that it is aligned with MM_ALIGN and its size is at
+   * least MM_MIN_CHUNK.
    */
 
+  if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
+    {
+      size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
+    }
+
   newsize = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
   if (newsize < size)
     {
@@ -256,6 +262,13 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
               prev->flink->blink = prev->blink;
             }
 
+          /* Make sure the new previous node has enough space */
+
+          if (prevsize < takeprev + MM_MIN_CHUNK)
+            {
+              takeprev = prevsize;
+            }
+
           /* Extend the node into the previous free chunk */
 
           newnode = (FAR struct mm_allocnode_s *)
@@ -270,7 +283,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                */
 
               prevsize          -= takeprev;
-              DEBUGASSERT(prevsize >= SIZEOF_MM_FREENODE);
               prev->size         = prevsize | (prev->size & MM_MASK_BIT);
               nodesize          += takeprev;
               newnode->size      = nodesize | MM_ALLOC_BIT | MM_PREVFREE_BIT;
@@ -323,6 +335,13 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
               next->flink->blink = next->blink;
             }
 
+          /* Make sure the new next node has enough space */
+
+          if (nextsize < takenext + MM_MIN_CHUNK)
+            {
+              takenext = nextsize;
+            }
+
           /* Extend the node into the next chunk */
 
           nodesize += takenext;
@@ -339,7 +358,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
               newnode              = (FAR struct mm_freenode_s *)
                                      ((FAR char *)oldnode + nodesize);
               newnode->size        = nextsize - takenext;
-              DEBUGASSERT(newnode->size >= SIZEOF_MM_FREENODE);
               andbeyond->preceding = newnode->size;
 
               /* Add the new free node to the nodelist (with the new size) */
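Both realloc clamps follow the same rule: never leave a remainder smaller than
MM_MIN_CHUNK behind. A sketch with example numbers (MM_MIN_CHUNK assumed to be
16):

    #include <assert.h>
    #include <stddef.h>

    #define MM_MIN_CHUNK 16   /* Assumed example value */

    int main(void)
    {
      size_t nextsize = 20;   /* Size of the adjacent free chunk */
      size_t takenext = 8;    /* Bytes realloc actually needs from it */

      if (nextsize < takenext + MM_MIN_CHUNK)
        {
          takenext = nextsize; /* Remainder would be an unusable sliver */
        }

      assert(takenext == 20);  /* The whole chunk is absorbed instead */
      return 0;
    }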
diff --git a/mm/mm_heap/mm_shrinkchunk.c b/mm/mm_heap/mm_shrinkchunk.c
index 9360f9ab61..ce2cf16913 100644
--- a/mm/mm_heap/mm_shrinkchunk.c
+++ b/mm/mm_heap/mm_shrinkchunk.c
@@ -106,7 +106,7 @@ void mm_shrinkchunk(FAR struct mm_heap_s *heap,
    * chunk to be shrunk.
    */
 
-  else if (nodesize >= size + SIZEOF_MM_FREENODE)
+  else if (nodesize >= size + MM_MIN_CHUNK)
     {
       FAR struct mm_freenode_s *newnode;
 
diff --git a/mm/mm_heap/mm_size2ndx.c b/mm/mm_heap/mm_size2ndx.c
index 73dad34cc5..7e022f80ec 100644
--- a/mm/mm_heap/mm_size2ndx.c
+++ b/mm/mm_heap/mm_size2ndx.c
@@ -44,6 +44,7 @@
 
 int mm_size2ndx(size_t size)
 {
+  DEBUGASSERT(size >= MM_MIN_CHUNK);
   if (size >= MM_MAX_CHUNK)
     {
       return MM_NNODES - 1;

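The new assertion fits how sizes map to nodelist indexes; a rough standalone
sketch of that mapping (shift values are examples, and this is not the
committed implementation):

    #include <assert.h>
    #include <stddef.h>

    #define MM_MIN_SHIFT 4    /* Example values only */
    #define MM_MAX_SHIFT 22
    #define MM_MIN_CHUNK (1 << MM_MIN_SHIFT)
    #define MM_MAX_CHUNK (1 << MM_MAX_SHIFT)
    #define MM_NNODES    (MM_MAX_SHIFT - MM_MIN_SHIFT + 1)

    static int size2ndx(size_t size)
    {
      int ndx = 0;

      assert(size >= MM_MIN_CHUNK); /* Guaranteed by the new DEBUGASSERT */
      if (size >= MM_MAX_CHUNK)
        {
          return MM_NNODES - 1;
        }

      while (size >= (size_t)(MM_MIN_CHUNK << (ndx + 1)))
        {
          ndx++;
        }

      return ndx;
    }

    int main(void)
    {
      assert(size2ndx(16) == 0);  /* Smallest chunk -> first list */
      assert(size2ndx(32) == 1);
      return 0;
    }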