Until now, the objects stored in a mempool were always held in a ring
internal to the mempool. This patch introduces the possibility to register
external handlers (ops) that replace that ring.
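
As an illustration only (not part of this patch), a handler is registered by
filling in a struct rte_mempool_ops and passing it to MEMPOOL_REGISTER_OPS().
The sketch below wraps an rte_ring, essentially mirroring the default
behaviour; the "my_ring" name and the my_ring_* functions are hypothetical,
only the callback prototypes and the registration macro come from this patch:

  #include <stdio.h>
  #include <rte_common.h>
  #include <rte_ring.h>
  #include <rte_mempool.h>

  /* create the ring that will back the mempool (stored in pool_data) */
  static void *
  my_ring_alloc(const struct rte_mempool *mp)
  {
          char name[RTE_RING_NAMESIZE];
          struct rte_ring *r;

          snprintf(name, sizeof(name), "myr_%s", mp->name);
          r = rte_ring_create(name, rte_align32pow2(mp->size + 1),
                  mp->socket_id, 0);
          return r;       /* on failure rte_ring_create() sets rte_errno */
  }

  static void
  my_ring_free(void *p)
  {
          rte_ring_free(p);
  }

  static int
  my_ring_put(void *p, void * const *obj_table, unsigned int n)
  {
          return rte_ring_mp_enqueue_bulk(p, obj_table, n);
  }

  static int
  my_ring_get(void *p, void **obj_table, unsigned int n)
  {
          return rte_ring_mc_dequeue_bulk(p, obj_table, n);
  }

  static unsigned
  my_ring_get_count(void *p)
  {
          return rte_ring_count(p);
  }

  static const struct rte_mempool_ops ops_my_ring = {
          .name = "my_ring",
          .alloc = my_ring_alloc,
          .free = my_ring_free,
          .put = my_ring_put,
          .get = my_ring_get,
          .get_count = my_ring_get_count,
  };

  MEMPOOL_REGISTER_OPS(ops_my_ring);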

The default behavior remains unchanged, but calling the new function
rte_mempool_set_ops_byname() right after rte_mempool_create_empty() allows
the user to change the handler that will be used when populating
the mempool.
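
For example, a minimal usage sketch (the pool name, sizes and error handling
are placeholders, "my_ring" is the hypothetical handler sketched above, and
rte_mempool_populate_default() comes from the earlier mempool rework):

  #include <rte_memory.h>
  #include <rte_mempool.h>

  static struct rte_mempool *
  create_pool_with_handler(void)
  {
          struct rte_mempool *mp;

          /* create an empty pool: no handler data is allocated yet */
          mp = rte_mempool_create_empty("test_pool", 4096, 2048,
                  0, 0, SOCKET_ID_ANY, 0);
          if (mp == NULL)
                  return NULL;

          /* must be called before the pool is populated */
          if (rte_mempool_set_ops_byname(mp, "my_ring") != 0) {
                  rte_mempool_free(mp);
                  return NULL;
          }

          /* populating the pool invokes the handler's alloc callback */
          if (rte_mempool_populate_default(mp) < 0) {
                  rte_mempool_free(mp);
                  return NULL;
          }
          return mp;
  }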

Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
Signed-off-by: David Hunt <david.hunt at intel.com>
---
 lib/librte_mempool/Makefile          |   1 +
 lib/librte_mempool/rte_mempool.c     |  71 ++++-------
 lib/librte_mempool/rte_mempool.h     | 240 ++++++++++++++++++++++++++++++++---
 lib/librte_mempool/rte_mempool_ops.c | 141 ++++++++++++++++++++
 4 files changed, 389 insertions(+), 64 deletions(-)
 create mode 100644 lib/librte_mempool/rte_mempool_ops.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 43423e0..4cbf772 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -42,6 +42,7 @@ LIBABIVER := 2

 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index b54de43..1c61c57 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -148,7 +148,7 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)
 #endif

        /* enqueue in ring */
-       rte_ring_sp_enqueue(mp->ring, obj);
+       rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
 }

 /* call obj_cb() for each mempool element */
@@ -303,40 +303,6 @@ rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
        return (size_t)paddr_idx << pg_shift;
 }

-/* create the internal ring */
-static int
-rte_mempool_ring_create(struct rte_mempool *mp)
-{
-       int rg_flags = 0, ret;
-       char rg_name[RTE_RING_NAMESIZE];
-       struct rte_ring *r;
-
-       ret = snprintf(rg_name, sizeof(rg_name),
-               RTE_MEMPOOL_MZ_FORMAT, mp->name);
-       if (ret < 0 || ret >= (int)sizeof(rg_name))
-               return -ENAMETOOLONG;
-
-       /* ring flags */
-       if (mp->flags & MEMPOOL_F_SP_PUT)
-               rg_flags |= RING_F_SP_ENQ;
-       if (mp->flags & MEMPOOL_F_SC_GET)
-               rg_flags |= RING_F_SC_DEQ;
-
-       /* Allocate the ring that will be used to store objects.
-        * Ring functions will return appropriate errors if we are
-        * running as a secondary process etc., so no checks made
-        * in this function for that condition.
-        */
-       r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
-               mp->socket_id, rg_flags);
-       if (r == NULL)
-               return -rte_errno;
-
-       mp->ring = r;
-       mp->flags |= MEMPOOL_F_RING_CREATED;
-       return 0;
-}
-
 /* free a memchunk allocated with rte_memzone_reserve() */
 static void
 rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
@@ -354,7 +320,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
        void *elt;

        while (!STAILQ_EMPTY(&mp->elt_list)) {
-               rte_ring_sc_dequeue(mp->ring, &elt);
+               rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
                (void)elt;
                STAILQ_REMOVE_HEAD(&mp->elt_list, next);
                mp->populated_size--;
@@ -383,13 +349,16 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
        unsigned i = 0;
        size_t off;
        struct rte_mempool_memhdr *memhdr;
-       int ret;

        /* create the internal ring if not already done */
        if ((mp->flags & MEMPOOL_F_RING_CREATED) == 0) {
-               ret = rte_mempool_ring_create(mp);
-               if (ret < 0)
-                       return ret;
+               rte_errno = 0;
+               mp->pool_data = rte_mempool_ops_alloc(mp);
+               if (mp->pool_data == NULL) {
+                       if (rte_errno == 0)
+                               return -EINVAL;
+                       return -rte_errno;
+               }
+               mp->flags |= MEMPOOL_F_RING_CREATED;
        }

        /* mempool is already populated */
@@ -703,7 +672,7 @@ rte_mempool_free(struct rte_mempool *mp)
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        rte_mempool_free_memchunks(mp);
-       rte_ring_free(mp->ring);
+       rte_mempool_ops_free(mp);
        rte_memzone_free(mp->mz);
 }

@@ -815,6 +784,20 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
                RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));

        te->data = mp;
+
+       /*
+        * Since we have 4 combinations of the SP/SC/MP/MC, examine the flags
+        * to set the correct index into the table of ops structs.
+        */
+       if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+               rte_mempool_set_ops_byname(mp, "ring_sp_sc");
+       else if (flags & MEMPOOL_F_SP_PUT)
+               rte_mempool_set_ops_byname(mp, "ring_sp_mc");
+       else if (flags & MEMPOOL_F_SC_GET)
+               rte_mempool_set_ops_byname(mp, "ring_mp_sc");
+       else
+               rte_mempool_set_ops_byname(mp, "ring_mp_mc");
+
        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_INSERT_TAIL(mempool_list, te, next);
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
@@ -930,7 +913,7 @@ rte_mempool_count(const struct rte_mempool *mp)
        unsigned count;
        unsigned lcore_id;

-       count = rte_ring_count(mp->ring);
+       count = rte_mempool_ops_get_count(mp);

        if (mp->cache_size == 0)
                return count;
@@ -1123,7 +1106,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)

        fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
        fprintf(f, "  flags=%x\n", mp->flags);
-       fprintf(f, "  ring=<%s>@%p\n", mp->ring->name, mp->ring);
+       fprintf(f, "  pool=%p\n", mp->pool_data);
        fprintf(f, "  phys_addr=0x%" PRIx64 "\n", mp->mz->phys_addr);
        fprintf(f, "  nb_mem_chunks=%u\n", mp->nb_mem_chunks);
        fprintf(f, "  size=%"PRIu32"\n", mp->size);
@@ -1144,7 +1127,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
        }

        cache_count = rte_mempool_dump_cache(f, mp);
-       common_count = rte_ring_count(mp->ring);
+       common_count = rte_mempool_ops_get_count(mp);
        if ((cache_count + common_count) > mp->size)
                common_count = mp->size - cache_count;
        fprintf(f, "  common_pool_count=%u\n", common_count);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 60339bd..a6b28b0 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -67,6 +67,7 @@
 #include <inttypes.h>
 #include <sys/queue.h>

+#include <rte_spinlock.h>
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_lcore.h>
@@ -204,9 +205,13 @@ struct rte_mempool_memhdr {
 struct rte_mempool {
        char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
        struct rte_ring *ring;           /**< Ring to store objects. */
+       union {
+               void *pool_data;         /**< Ring or pool to store objects */
+               uint64_t pool_id;        /**< External mempool identifier */
+       };
        const struct rte_memzone *mz;    /**< Memzone where pool is allocated */
        int flags;                       /**< Flags of the mempool. */
-       int socket_id;                   /**< Socket id passed at mempool creation. */
+       int socket_id;                   /**< Socket id passed at create */
        uint32_t size;                   /**< Max size of the mempool. */
        uint32_t cache_size;             /**< Size of per-lcore local cache. */
        uint32_t cache_flushthresh;
@@ -217,6 +222,14 @@ struct rte_mempool {
        uint32_t trailer_size;           /**< Size of trailer (after elt). */

        unsigned private_data_size;      /**< Size of private data. */
+       /**
+        * Index into rte_mempool_ops_table array of mempool ops
+        * structs, which contain callback function pointers.
+        * We're using an index here rather than pointers to the callbacks
+        * to facilitate any secondary processes that may want to use
+        * this mempool.
+        */
+       int32_t ops_index;

        struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

@@ -325,6 +338,204 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

+#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
+
+/**
+ * Prototype for an implementation-specific data provisioning function.
+ * The function should provide the implementation-specific memory for
+ * use by the other mempool ops functions in a given mempool ops struct.
+ * E.g. the default ops provides an instance of rte_ring for this purpose.
+ * Other implementations will most likely point to a different type of
+ * data structure, which is transparent to the application programmer.
+ * The function should also not touch the given *mp instance.
+ */
+typedef void *(*rte_mempool_alloc_t)(const struct rte_mempool *mp);
+
+/** Free the opaque private data pointed to by mp->pool_data. */
+typedef void (*rte_mempool_free_t)(void *p);
+
+/**
+ * Put (enqueue) objects in the external pool.
+ * The *p pointer is the opaque data for a given mempool manager (ring,
+ * array, linked list, etc.).
+ */
+typedef int (*rte_mempool_put_t)(void *p,
+               void * const *obj_table, unsigned int n);
+
+/** Get (dequeue) objects from the external pool. */
+typedef int (*rte_mempool_get_t)(void *p,
+               void **obj_table, unsigned int n);
+
+/** Return the number of available objects in the external pool. */
+typedef unsigned (*rte_mempool_get_count)(void *p);
+
+/** Structure defining mempool operations. */
+struct rte_mempool_ops {
+       char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct */
+       rte_mempool_alloc_t alloc;       /**< Allocate private data */
+       rte_mempool_free_t free;         /**< Free the external pool. */
+       rte_mempool_put_t put;           /**< Put an object. */
+       rte_mempool_get_t get;           /**< Get an object. */
+       rte_mempool_get_count get_count; /**< Get the number of available objs. */
+} __rte_cache_aligned;
+
+#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
+
+/**
+ * Structure storing the table of registered ops structs, each of which contains
+ * the function pointers for the mempool ops functions.
+ * Each process has its own storage for this ops struct array so that
+ * the mempools can be shared across primary and secondary processes.
+ * The indices used to access the array are valid across processes, whereas
+ * any function pointers stored directly in the mempool struct would not be.
+ * This is why the mempool struct simply stores the "ops_index" instead.
+ */
+struct rte_mempool_ops_table {
+       rte_spinlock_t sl;     /**< Spinlock for add/delete. */
+       uint32_t num_ops;      /**< Number of used ops structs in the table. */
+       /**
+        * Storage for all possible ops structs.
+        */
+       struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
+} __rte_cache_aligned;
+
+/** Array of registered ops structs */
+extern struct rte_mempool_ops_table rte_mempool_ops_table;
+
+/**
+ * @internal Get the mempool ops struct from its index.
+ *
+ * @param ops_index
+ *   The index of the ops struct in the ops struct table. It must be a valid
+ *   index: (0 <= idx < num_ops).
+ * @return
+ *   The pointer to the ops struct in the table.
+ */
+static inline struct rte_mempool_ops *
+rte_mempool_ops_get(int ops_index)
+{
+       return &rte_mempool_ops_table.ops[ops_index];
+}
+
+/**
+ * @internal wrapper for external mempool manager alloc callback.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @return
+ *   The opaque pointer to the external pool.
+ */
+void *
+rte_mempool_ops_alloc(const struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for external mempool manager get callback.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_table
+ *   Pointer to a table of void * pointers (objects).
+ * @param n
+ *   Number of objects to get.
+ * @return
+ *   - 0: Success; got n objects.
+ *   - <0: Error; code of get function.
+ */
+static inline int
+rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
+               void **obj_table, unsigned n)
+{
+       struct rte_mempool_ops *ops;
+
+       ops = rte_mempool_ops_get(mp->ops_index);
+       return ops->get(mp->pool_data, obj_table, n);
+}
+
+/**
+ * @internal wrapper for external mempool manager put callback.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_table
+ *   Pointer to a table of void * pointers (objects).
+ * @param n
+ *   Number of objects to put.
+ * @return
+ *   - 0: Success; n objects supplied.
+ *   - <0: Error; code of put function.
+ */
+static inline int
+rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
+               unsigned n)
+{
+       struct rte_mempool_ops *ops;
+
+       ops = rte_mempool_ops_get(mp->ops_index);
+       return ops->put(mp->pool_data, obj_table, n);
+}
+
+/**
+ * @internal wrapper for external mempool manager get_count callback.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @return
+ *   The number of available objects in the external pool.
+ */
+unsigned
+rte_mempool_ops_get_count(const struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for external mempool manager free callback.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ */
+void
+rte_mempool_ops_free(struct rte_mempool *mp);
+
+/**
+ * Set the ops of a mempool
+ *
+ * This can only be done on a mempool that is not populated, i.e. just after
+ * a call to rte_mempool_create_empty().
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param name
+ *   Name of the ops structure to use for this mempool.
+ * @return
+ *   - 0: Success; the mempool is now using the requested ops functions.
+ *   - -EINVAL - Invalid ops struct name provided.
+ *   - -EEXIST - mempool already has an ops struct assigned.
+ */
+int
+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name);
+
+/**
+ * Register mempool operations
+ *
+ * @param h
+ *   Pointer to an ops structure to register.
+ * @return
+ *   - >=0: Success; return the index of the ops struct in the table.
+ *   - -EINVAL - some missing callbacks while registering ops struct
+ *   - -ENOSPC - the maximum number of ops structs has been reached
+ */
+int rte_mempool_ops_register(const struct rte_mempool_ops *h);
+
+/**
+ * Macro to statically register the ops of an external mempool manager.
+ * Note that rte_mempool_ops_register() fails silently here when more
+ * than RTE_MEMPOOL_MAX_OPS_IDX ops structs have been registered.
+ */
+#define MEMPOOL_REGISTER_OPS(h)                                        \
+       void mp_hdlr_init_##h(void);                                    \
+       void __attribute__((constructor, used)) mp_hdlr_init_##h(void)  \
+       {                                                               \
+               rte_mempool_ops_register(&h);                   \
+       }
+
 /**
  * An object callback function for mempool.
  *
@@ -774,7 +985,7 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
        cache->len += n;

        if (cache->len >= flushthresh) {
-               rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+               rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache_size],
                                cache->len - cache_size);
                cache->len = cache_size;
        }
@@ -785,19 +996,10 @@ ring_enqueue:

        /* push remaining objects in ring */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-       if (is_mp) {
-               if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
-                       rte_panic("cannot put objects in mempool\n");
-       }
-       else {
-               if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
-                       rte_panic("cannot put objects in mempool\n");
-       }
+       if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
+               rte_panic("cannot put objects in mempool\n");
 #else
-       if (is_mp)
-               rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
-       else
-               rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
+       rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
 #endif
 }

@@ -922,7 +1124,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  */
 static inline int __attribute__((always_inline))
 __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
-                  unsigned n, int is_mc)
+                  unsigned int n, int is_mc)
 {
        int ret;
        struct rte_mempool_cache *cache;
@@ -945,7 +1147,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
                uint32_t req = n + (cache_size - cache->len);

                /* How many do we require i.e. number to fill the cache + the request */
-       ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+               ret = rte_mempool_ops_dequeue_bulk(mp,
+                       &cache->objs[cache->len], req);
                if (unlikely(ret < 0)) {
                        /*
                         * In the offchance that we are buffer constrained,
@@ -972,10 +1175,7 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 ring_dequeue:

        /* get remaining objects from ring */
-       if (is_mc)
-               ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
-       else
-               ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+       ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

        if (ret < 0)
                __MEMPOOL_STAT_ADD(mp, get_fail, n);
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
new file mode 100644
index 0000000..ec92a58
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -0,0 +1,141 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2016 6WIND S.A.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_mempool.h>
+
+/* indirect jump table to support external memory pools */
+struct rte_mempool_ops_table rte_mempool_ops_table = {
+       .sl = RTE_SPINLOCK_INITIALIZER,
+       .num_ops = 0
+};
+
+/* add a new ops struct in rte_mempool_ops_table, return its index */
+int
+rte_mempool_ops_register(const struct rte_mempool_ops *h)
+{
+       struct rte_mempool_ops *ops;
+       int16_t ops_index;
+
+       rte_spinlock_lock(&rte_mempool_ops_table.sl);
+
+       if (rte_mempool_ops_table.num_ops >=
+                       RTE_MEMPOOL_MAX_OPS_IDX) {
+               rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+               RTE_LOG(ERR, MEMPOOL,
+                       "Maximum number of mempool ops structs exceeded\n");
+               return -ENOSPC;
+       }
+
+       if (h->put == NULL || h->get == NULL || h->get_count == NULL) {
+               rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+               RTE_LOG(ERR, MEMPOOL,
+                       "Missing callback while registering mempool ops\n");
+               return -EINVAL;
+       }
+
+       ops_index = rte_mempool_ops_table.num_ops++;
+       ops = &rte_mempool_ops_table.ops[ops_index];
+       snprintf(ops->name, sizeof(ops->name), "%s", h->name);
+       ops->alloc = h->alloc;
+       ops->free = h->free;
+       ops->put = h->put;
+       ops->get = h->get;
+       ops->get_count = h->get_count;
+
+       rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+
+       return ops_index;
+}
+
+/* wrapper to allocate an external mempool's private (pool) data */
+void *
+rte_mempool_ops_alloc(const struct rte_mempool *mp)
+{
+       struct rte_mempool_ops *ops;
+
+       ops = rte_mempool_ops_get(mp->ops_index);
+       if (ops->alloc == NULL)
+               return NULL;
+       return ops->alloc(mp);
+}
+
+/* wrapper to free the private (pool) data of an external mempool */
+void
+rte_mempool_ops_free(struct rte_mempool *mp)
+{
+       struct rte_mempool_ops *ops;
+
+       ops = rte_mempool_ops_get(mp->ops_index);
+       if (ops->free == NULL)
+               return;
+       ops->free(mp->pool_data);
+}
+
+/* wrapper to get the number of available objects in an external mempool */
+unsigned int
+rte_mempool_ops_get_count(const struct rte_mempool *mp)
+{
+       struct rte_mempool_ops *ops;
+
+       ops = rte_mempool_ops_get(mp->ops_index);
+       return ops->get_count(mp->pool_data);
+}
+
+/* sets mempool ops previously registered by rte_mempool_ops_register */
+int
+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name)
+{
+       struct rte_mempool_ops *ops = NULL;
+       unsigned i;
+
+       /* too late, the mempool is already populated */
+       if (mp->flags & MEMPOOL_F_RING_CREATED)
+               return -EEXIST;
+
+       for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
+               if (!strcmp(name,
+                               rte_mempool_ops_table.ops[i].name)) {
+                       ops = &rte_mempool_ops_table.ops[i];
+                       break;
+               }
+       }
+
+       if (ops == NULL)
+               return -EINVAL;
+
+       mp->ops_index = i;
+       return 0;
+}
-- 
2.5.5
