This patch adds a memarea backup mechanism, where an allocation request
which cannot be met by the current memarea is deferred to its backup
memarea.

Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
---
 doc/guides/prog_guide/memarea_lib.rst |  4 ++++
 lib/memarea/memarea_private.h         |  2 ++
 lib/memarea/rte_memarea.c             | 22 ++++++++++++++++++++++
 lib/memarea/rte_memarea.h             | 11 +++++++++++
 4 files changed, 39 insertions(+)
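
A minimal usage sketch of the backup mechanism (reviewer note, not part of
the patch): it chains two memareas through the new bak_memarea field and
allocates through the primary one; error checks are omitted for brevity.
Identifiers not visible in this diff (RTE_MEMAREA_SOURCE_LIBC,
RTE_MEMAREA_ALG_NEXTFIT, rte_memarea_destroy, the layout of the name field)
are assumptions based on the existing memarea API and may need adjusting.

#include <string.h>
#include <rte_memarea.h>

static void
backup_example(void)
{
	struct rte_memarea_param init;
	struct rte_memarea *bak, *ma;
	void *obj;

	/* Create the backup memarea first. */
	memset(&init, 0, sizeof(init));
	strcpy(init.name, "bak");
	init.source = RTE_MEMAREA_SOURCE_LIBC;	/* assumed enum value */
	init.alg = RTE_MEMAREA_ALG_NEXTFIT;	/* assumed enum value */
	init.total_sz = 1 << 20;
	bak = rte_memarea_create(&init);

	/* Create the primary memarea and attach the backup to it. */
	strcpy(init.name, "primary");
	init.total_sz = 64 << 10;
	init.bak_memarea = bak;			/* field added by this patch */
	ma = rte_memarea_create(&init);

	/*
	 * The request is larger than "primary" can hold, so
	 * rte_memarea_alloc() retries from "bak" via memarea_alloc_backup().
	 */
	obj = rte_memarea_alloc(ma, 128 << 10, 0);
	if (obj != NULL)
		rte_memarea_update_refcnt(ma, obj, -1);	/* release the object */

	rte_memarea_destroy(ma);	/* assumed destroy API */
	rte_memarea_destroy(bak);
}

Note that rte_memarea_update_refcnt() forwards pointers outside
[area_addr, top_addr] to the backup memarea (see the hunk in rte_memarea.c),
so an object served from the backup can still be released through the
primary handle.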

diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
index 720d8099e2..feebafabcc 100644
--- a/doc/guides/prog_guide/memarea_lib.rst
+++ b/doc/guides/prog_guide/memarea_lib.rst
@@ -25,6 +25,10 @@ The main features are as follows:
 
 * It supports MT-safe as long as it's specified at creation time.
 
+* It provides a backup memory mechanism: a memarea can use another memarea
+  as a backup. It will attempt to allocate an object from the backup memarea
+  when allocation from the current memarea fails.
+
 Library API Overview
 --------------------
 
diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
index 98406879b9..08735ca81f 100644
--- a/lib/memarea/memarea_private.h
+++ b/lib/memarea/memarea_private.h
@@ -23,11 +23,13 @@ struct rte_memarea {
        struct rte_memarea_param init;
        rte_spinlock_t           lock;
        void                    *area_addr;
+       void                    *top_addr;
        struct memarea_elem_list elem_list;
        struct memarea_elem_list free_list;
 
        uint64_t alloc_fails;
        uint64_t refcnt_check_fails;
+       uint64_t bak_alloc_fails;
 } __rte_cache_aligned;
 
 #endif /* MEMAREA_PRIVATE_H */
diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
index bc51d2a5ff..f95f89b6ec 100644
--- a/lib/memarea/rte_memarea.c
+++ b/lib/memarea/rte_memarea.c
@@ -132,6 +132,7 @@ rte_memarea_create(const struct rte_memarea_param *init)
        TAILQ_INIT(&ma->elem_list);
        TAILQ_INIT(&ma->free_list);
        ma->area_addr = addr;
+       ma->top_addr = RTE_PTR_ADD(addr, init->total_sz - 1);
        elem = addr;
        elem->size = init->total_sz - sizeof(struct memarea_elem);
        elem->cookie = MEMAREA_FREE_ELEM_COOKIE;
@@ -198,6 +199,15 @@ memarea_add_node(struct rte_memarea *ma, struct memarea_elem *elem, size_t need_
        elem->size = align_size;
 }
 
+static inline void *
+memarea_alloc_backup(struct rte_memarea *ma, size_t size, uint32_t cookie)
+{
+       void *ptr = rte_memarea_alloc(ma->init.bak_memarea, size, cookie);
+       if (unlikely(ptr == NULL))
+               ma->bak_alloc_fails++;
+       return ptr;
+}
+
 void *
 rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
 {
@@ -219,6 +229,8 @@ rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
                ptr = RTE_PTR_ADD(elem, sizeof(struct memarea_elem));
                break;
        }
+       if (unlikely(ptr == NULL && ma->init.bak_memarea != NULL))
+               ptr = memarea_alloc_backup(ma, size, cookie);
        if (unlikely(ptr == NULL))
                ma->alloc_fails++;
        memarea_unlock(ma);
@@ -281,6 +293,12 @@ rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
                return;
 
        memarea_lock(ma);
+       if (unlikely(ptr < ma->area_addr || ptr > ma->top_addr)) {
+               rte_memarea_update_refcnt(ma->init.bak_memarea, ptr, value);
+               memarea_unlock(ma);
+               return;
+       }
+
        if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
                RTE_LOG(ERR, MEMAREA,
                        "memarea: %s cookie: 0x%x curr refcnt: %d update 
refcnt: %d check fail!\n",
@@ -371,10 +389,14 @@ rte_memarea_dump(struct rte_memarea *ma, FILE *f, bool dump_all)
        fprintf(f, "  algorithm: %s\n", memarea_alg_name(ma->init.alg));
        fprintf(f, "  total-size: 0x%zx\n", ma->init.total_sz);
        fprintf(f, "  mt-safe: %s\n", ma->init.mt_safe ? "yes" : "no");
+       if (ma->init.bak_memarea)
+               fprintf(f, "  backup-memarea-name: %s\n", 
ma->init.bak_memarea->init.name);
        fprintf(f, "  total-regions: %u\n", memarea_elem_list_num(ma));
        fprintf(f, "  total-free-regions: %u\n", memarea_free_list_num(ma));
        fprintf(f, "  alloc_fails: %" PRIu64 "\n", ma->alloc_fails);
        fprintf(f, "  refcnt_check_fails: %" PRIu64 "\n", 
ma->refcnt_check_fails);
+       if (ma->init.bak_memarea)
+               fprintf(f, "  backup_alloc_fails: %" PRIu64 "\n", 
ma->bak_alloc_fails);
        if (dump_all)
                memarea_dump_all(ma, f);
        memarea_unlock(ma);
diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
index 4bf2f36c7c..815d0e3d75 100644
--- a/lib/memarea/rte_memarea.h
+++ b/lib/memarea/rte_memarea.h
@@ -40,6 +40,13 @@
  *   specified, all the functions of the memarea API are lock-free, and assume
  *   to not be invoked in parallel on different logical cores to work on the
  *   same memarea.
+ * - It provides a backup memory mechanism: a memarea can use another memarea
+ *   as a backup. It will attempt to allocate an object from the backup memarea
+ *   when allocation from the current memarea fails.
+ *   @note If the backup memarea is set improperly, a loop may form (e.g.
+ *   memarea-1's backup is memarea-2, and memarea-2's backup is memarea-1) and
+ *   the program will hang. It is the responsibility of the application to
+ *   ensure that such loops do not form.
  */
 
 #include <stdbool.h>
@@ -106,6 +113,10 @@ struct rte_memarea_param {
                 */
                struct rte_memarea *user_memarea;
        };
+       /** Backup memarea, which is used when allocation from the current
+        * memarea fails.
+        */
+       struct rte_memarea *bak_memarea;
 };
 
 /**
-- 
2.17.1
