On 2022-10-05 06:09, datshan wrote:
From: Chengwen Feng <fengcheng...@huawei.com>

This patch supports backup memory mechanism, the memarea could use
another memarea as a backup.

Maybe it's worth mentioning already here what "backup" means.

"This patch adds a memarea backup mechanism, where an allocation request which cannot be met by a certain memarea is deferred to its backup memarea."

I assume they can be nested indefinitely?
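
For what it's worth, below is how I picture the API being used; just a sketch. rte_memarea_create(), rte_memarea_alloc(), and the name/total_sz/bak_memarea fields are taken from this patch; the zero-initialization and the snprintf() into init.name are my assumptions, and any other mandatory fields (memory source etc.) are left out:

#include <stdio.h>
#include <string.h>

#include <rte_memarea.h>

static struct rte_memarea *
create_with_backup(void)
{
        struct rte_memarea_param init;
        struct rte_memarea *backup, *primary;

        /* Large backup memarea, only hit when the primary is exhausted. */
        memset(&init, 0, sizeof(init));
        snprintf(init.name, sizeof(init.name), "ma-backup");
        init.total_sz = 1u << 20;
        backup = rte_memarea_create(&init);
        if (backup == NULL)
                return NULL;

        /* Small primary memarea that defers to 'backup' on failure. */
        snprintf(init.name, sizeof(init.name), "ma-primary");
        init.total_sz = 64u << 10;
        init.bak_memarea = backup;
        primary = rte_memarea_create(&init);
        return primary;
}

A request the primary cannot satisfy, e.g.

        void *obj = rte_memarea_alloc(primary, 256u << 10, 0);

would then be served from 'backup'. And since 'backup' could itself have bak_memarea set, nesting would follow naturally, which is what prompted the question above.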


Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
---
  doc/guides/prog_guide/memarea_lib.rst |  3 +++
  lib/memarea/memarea_private.h         |  2 ++
  lib/memarea/rte_memarea.c             | 22 ++++++++++++++++++++++
  lib/memarea/rte_memarea.h             |  7 +++++++
  4 files changed, 34 insertions(+)

diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
index c77012fe44..842d35f77a 100644
--- a/doc/guides/prog_guide/memarea_lib.rst
+++ b/doc/guides/prog_guide/memarea_lib.rst
@@ -25,6 +25,9 @@ The main features are as follows:
 
 * It supports MT-safe as long as it's specified at creation time.
 
+* It provides a backup memory mechanism: the memarea can use another memarea
+  as a backup.
+
 Library API Overview
 --------------------
diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
index 98406879b9..08735ca81f 100644
--- a/lib/memarea/memarea_private.h
+++ b/lib/memarea/memarea_private.h
@@ -23,11 +23,13 @@ struct rte_memarea {
        struct rte_memarea_param init;
        rte_spinlock_t           lock;
        void                    *area_addr;
+       void                    *top_addr;
        struct memarea_elem_list elem_list;
        struct memarea_elem_list free_list;

        uint64_t alloc_fails;
        uint64_t refcnt_check_fails;
+       uint64_t bak_alloc_fails;
 } __rte_cache_aligned;

 #endif /* MEMAREA_PRIVATE_H */
diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
index b70830d0bb..f45191aa7f 100644
--- a/lib/memarea/rte_memarea.c
+++ b/lib/memarea/rte_memarea.c
@@ -132,6 +132,7 @@ rte_memarea_create(const struct rte_memarea_param *init)
        TAILQ_INIT(&ma->elem_list);
        TAILQ_INIT(&ma->free_list);
        ma->area_addr = addr;
+       ma->top_addr = (void *)((uintptr_t)addr + init->total_sz - 1);

RTE_PTR_ADD() could be used here instead of the open-coded cast.
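E.g. (untested, but RTE_PTR_ADD() from rte_common.h wraps this same uintptr_t arithmetic):

        ma->top_addr = RTE_PTR_ADD(addr, init->total_sz - 1);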

        elem = addr;
        elem->size = init->total_sz - sizeof(struct memarea_elem);
        elem->cookie = MEMAREA_FREE_ELEM_COOKIE;
@@ -200,6 +201,15 @@ memarea_add_node(struct rte_memarea *ma, struct memarea_elem *elem, size_t need_
        elem->size = align_size;
  }
+static inline void *
+memarea_alloc_backup(struct rte_memarea *ma, size_t size, uint32_t cookie)
+{
+       void *ptr = rte_memarea_alloc(ma->init.bak_memarea, size, cookie);
+       if (unlikely(ptr == NULL))
+               ma->bak_alloc_fails++;
+       return ptr;
+}
+
  void *
  rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
  {
@@ -221,6 +231,8 @@ rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
                ptr = (void *)((uintptr_t)elem + sizeof(struct memarea_elem));
                break;
        }
+       if (ptr == NULL && ma->init.bak_memarea != NULL)

Maybe you want an unlikely() around the above, too. I assume using the backup area is an exceptional case.
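I.e., something like:

        if (unlikely(ptr == NULL && ma->init.bak_memarea != NULL))
                ptr = memarea_alloc_backup(ma, size, cookie);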

+               ptr = memarea_alloc_backup(ma, size, cookie);
        if (unlikely(ptr == NULL))
                ma->alloc_fails++;
        memarea_unlock(ma);
@@ -283,6 +295,12 @@ rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
                return;

        memarea_lock(ma);
+       if (ptr < ma->area_addr || ptr > ma->top_addr) {
+               rte_memarea_update_refcnt(ma->init.bak_memarea, ptr, value);
+               memarea_unlock(ma);
+               return;
+       }
+
        if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
                RTE_LOG(ERR, MEMAREA,
                        "memarea: %s cookie: 0x%x curr refcnt: %d update refcnt: %d check fail!\n",
@@ -373,10 +391,14 @@ rte_memarea_dump(struct rte_memarea *ma, FILE *f, bool dump_all)
        fprintf(f, "  algorithm: %s\n", memarea_alg_name(ma->init.alg));
        fprintf(f, "  total-size: 0x%zx\n", ma->init.total_sz);
        fprintf(f, "  mt-safe: %s\n", ma->init.mt_safe ? "yes" : "no");
+       if (ma->init.bak_memarea)
+               fprintf(f, "  backup-memarea-name: %s\n", ma->init.bak_memarea->init.name);
        fprintf(f, "  total-regions: %u\n", memarea_elem_list_num(ma));
        fprintf(f, "  total-free-regions: %u\n", memarea_free_list_num(ma));
        fprintf(f, "  alloc_fails: %" PRIu64 "\n", ma->alloc_fails);
        fprintf(f, "  refcnt_check_fails: %" PRIu64 "\n", ma->refcnt_check_fails);
+       if (ma->init.bak_memarea)
+               fprintf(f, "  backup_alloc_fails: %" PRIu64 "\n", ma->bak_alloc_fails);
        if (dump_all)
                memarea_dump_all(ma, f);
        memarea_unlock(ma);
diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
index 10b8229c64..348febab7f 100644
--- a/lib/memarea/rte_memarea.h
+++ b/lib/memarea/rte_memarea.h
@@ -39,6 +39,9 @@
   *   specified, all the functions of the memarea API are lock-free, and assume
   *   to not be invoked in parallel on different logical cores to work on the
   *   same memarea.
+ * - It provides a backup memory mechanism: the memarea can use another memarea
+ *   as a backup. It will attempt to allocate an object from the backup memarea
+ *   when allocation from the current memarea fails.
   */
#include <stdbool.h>
@@ -105,6 +108,10 @@ struct rte_memarea_param {
                 */
                struct rte_memarea *user_memarea;
        };
+       /** Backup memarea, which is used when allocation from the current
+        * memarea fails.
+        */
+       struct rte_memarea *bak_memarea;
  };
/**
