Add a flag to enable lock-free reader-writer concurrency at
run time. When this flag is enabled, the rte_hash_del_xxx APIs
do not free the key-store element. Hence, a new API,
rte_hash_free_key_with_position, is added to free the key-store
element.

Signed-off-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Reviewed-by: Gavin Hu <gavin...@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljed...@arm.com>
Reviewed-by: Steve Capper <steve.cap...@arm.com>
---
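Illustrative usage sketch for the new API (not part of the patch):
wait_for_readers_to_quiesce() stands in for an application-specific
synchronization step (e.g. an RCU-like scheme), and the table
parameters are arbitrary.

#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_lcore.h>

/* Hypothetical, application-provided: returns once no reader can still
 * hold a reference to the deleted entry.
 */
extern void wait_for_readers_to_quiesce(void);

static struct rte_hash *
create_lf_table(void)
{
        struct rte_hash_parameters params = {
                .name = "lf_hash",              /* illustrative */
                .entries = 1024,
                .key_len = sizeof(uint32_t),
                .hash_func = rte_jhash,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
                .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
        };

        return rte_hash_create(&params);
}

static void
delete_and_reclaim(struct rte_hash *h, const uint32_t *key)
{
        /* With the LF flag set, delete only unlinks the entry; the
         * key-store element may still be referenced by in-flight readers.
         */
        int32_t pos = rte_hash_del_key(h, key);
        if (pos < 0)
                return;         /* key not found or invalid parameters */

        /* Application-specific: wait until all readers have quiesced,
         * then return the key-store slot to the free list.
         */
        wait_for_readers_to_quiesce();
        rte_hash_free_key_with_position(h, pos);
}
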
 lib/librte_hash/rte_cuckoo_hash.c    | 105 ++++++++++++++++++++++++++---------
 lib/librte_hash/rte_cuckoo_hash.h    |   2 +
 lib/librte_hash/rte_hash.h           |  55 ++++++++++++++++++
 lib/librte_hash/rte_hash_version.map |   7 +++
 4 files changed, 142 insertions(+), 27 deletions(-)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 1e4a8d4..bf51a73 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -93,6 +93,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
        unsigned i;
        unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
        unsigned int readwrite_concur_support = 0;
+       unsigned int readwrite_concur_lf_support = 0;
 
        rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
 
@@ -124,6 +125,9 @@ rte_hash_create(const struct rte_hash_parameters *params)
                multi_writer_support = 1;
        }
 
+       if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)
+               readwrite_concur_lf_support = 1;
+
        /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
        if (multi_writer_support)
                /*
@@ -272,6 +276,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
        h->hw_trans_mem_support = hw_trans_mem_support;
        h->multi_writer_support = multi_writer_support;
        h->readwrite_concur_support = readwrite_concur_support;
+       h->readwrite_concur_lf_support = readwrite_concur_lf_support;
 
 #if defined(RTE_ARCH_X86)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
@@ -647,19 +652,21 @@ rte_hash_cuckoo_move_insert_mw(struct rte_hash *h,
                        return -1;
                }
 
-               /* Inform the previous move. The current move need
-                * not be informed now as the current bucket entry
-                * is present in both primary and secondary.
-                * Since there is one writer, load acquires on
-                * tbl_chng_cnt are not required.
-                */
-               __atomic_store_n(&h->tbl_chng_cnt,
-                                h->tbl_chng_cnt + 1,
-                                __ATOMIC_RELEASE);
-               /* The stores to sig_alt and sig_current should not
-                * move above the store to tbl_chng_cnt.
-                */
-               __atomic_thread_fence(__ATOMIC_RELEASE);
+               if (h->readwrite_concur_lf_support) {
+                       /* Inform the previous move. The current move need
+                        * not be informed now as the current bucket entry
+                        * is present in both primary and secondary.
+                        * Since there is one writer, load acquires on
+                        * tbl_chng_cnt are not required.
+                        */
+                       __atomic_store_n(&h->tbl_chng_cnt,
+                                        h->tbl_chng_cnt + 1,
+                                        __ATOMIC_RELEASE);
+                       /* The stores to sig_alt and sig_current should not
+                        * move above the store to tbl_chng_cnt.
+                        */
+                       __atomic_thread_fence(__ATOMIC_RELEASE);
+               }
 
                /* Need to swap current/alt sig to allow later
                 * Cuckoo insert to move elements back to its
@@ -679,19 +686,21 @@ rte_hash_cuckoo_move_insert_mw(struct rte_hash *h,
                curr_bkt = curr_node->bkt;
        }
 
-       /* Inform the previous move. The current move need
-        * not be informed now as the current bucket entry
-        * is present in both primary and secondary.
-        * Since there is one writer, load acquires on
-        * tbl_chng_cnt are not required.
-        */
-       __atomic_store_n(&h->tbl_chng_cnt,
-                        h->tbl_chng_cnt + 1,
-                        __ATOMIC_RELEASE);
-       /* The stores to sig_alt and sig_current should not
-        * move above the store to tbl_chng_cnt.
-        */
-       __atomic_thread_fence(__ATOMIC_RELEASE);
+       if (h->readwrite_concur_lf_support) {
+               /* Inform the previous move. The current move need
+                * not be informed now as the current bucket entry
+                * is present in both primary and secondary.
+                * Since there is one writer, load acquires on
+                * tbl_chng_cnt are not required.
+                */
+               __atomic_store_n(&h->tbl_chng_cnt,
+                                h->tbl_chng_cnt + 1,
+                                __ATOMIC_RELEASE);
+               /* The stores to sig_alt and sig_current should not
+                * move above the store to tbl_chng_cnt.
+                */
+               __atomic_thread_fence(__ATOMIC_RELEASE);
+       }
 
        curr_bkt->sig_current[curr_slot] = sig;
        curr_bkt->sig_alt[curr_slot] = alt_hash;
@@ -1090,7 +1099,12 @@ search_and_remove(const struct rte_hash *h, const void *key,
                        k = (struct rte_hash_key *) ((char *)keys +
                                        key_idx * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
-                               remove_entry(h, bkt, i);
+                               /* Do not free the key store element if
+                                * lock-free concurrency is enabled as
+                                * readers might still be using it.
+                                */
+                               if (!h->readwrite_concur_lf_support)
+                                       remove_entry(h, bkt, i);
 
                                __atomic_store_n(&bkt->key_idx[i],
                                                 EMPTY_SLOT,
@@ -1177,6 +1191,43 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
        return 0;
 }
 
+int __rte_experimental
+rte_hash_free_key_with_position(const struct rte_hash *h,
+                               const int32_t position)
+{
+       RETURN_IF_TRUE(((h == NULL) || (position == EMPTY_SLOT)), -EINVAL);
+
+       unsigned int lcore_id, n_slots;
+       struct lcore_cache *cached_free_slots;
+       const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+
+       /* Out of bounds */
+       if (position >= total_entries)
+               return -EINVAL;
+
+       if (h->multi_writer_support) {
+               lcore_id = rte_lcore_id();
+               cached_free_slots = &h->local_free_slots[lcore_id];
+               /* Cache full, need to free it. */
+               if (cached_free_slots->len == LCORE_CACHE_SIZE) {
+                       /* Need to enqueue the free slots in global ring. */
+                       n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
+                                               cached_free_slots->objs,
+                                               LCORE_CACHE_SIZE, NULL);
+                       cached_free_slots->len -= n_slots;
+               }
+               /* Put index of new free slot in cache. */
+               cached_free_slots->objs[cached_free_slots->len] =
+                               (void *)((uintptr_t)position);
+               cached_free_slots->len++;
+       } else {
+               rte_ring_sp_enqueue(h->free_slots,
+                               (void *)((uintptr_t)position));
+       }
+
+       return 0;
+}
+
 static inline void
 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
                        const struct rte_hash_bucket *prim_bkt,
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
index 5ce864c..08d67b8 100644
--- a/lib/librte_hash/rte_cuckoo_hash.h
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -168,6 +168,8 @@ struct rte_hash {
        /**< If multi-writer support is enabled. */
        uint8_t readwrite_concur_support;
        /**< If read-write concurrency support is enabled */
+       uint8_t readwrite_concur_lf_support;
+       /**< If read-write concurrency lock free support is enabled */
        rte_hash_function hash_func;    /**< Function used to calculate hash. */
        uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
        rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index 05e024b..7d7372e 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -14,6 +14,8 @@
 #include <stdint.h>
 #include <stddef.h>
 
+#include <rte_compat.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -37,6 +39,9 @@ extern "C" {
 /** Flag to support reader writer concurrency */
 #define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04
 
+/** Flag to support lock free reader writer concurrency */
+#define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF 0x08
+
 /** Signature of key that is stored internally. */
 typedef uint32_t hash_sig_t;
 
@@ -143,6 +148,11 @@ rte_hash_count(const struct rte_hash *h);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
+ * When lock free reader writer concurrency is enabled,
+ * if this API is called to update an existing entry,
+ * the application should free any memory allocated for
+ * previous 'data' only after all the readers have stopped
+ * using previous 'data'.
  *
  * @param h
  *   Hash table to add the key to.
@@ -165,6 +175,11 @@ rte_hash_add_key_data(struct rte_hash *h, const void *key, void *data);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
+ * When lock free reader writer concurrency is enabled,
+ * if this API is called to update an existing entry,
+ * the application should free any memory allocated for
+ * previous 'data' only after all the readers have stopped
+ * using previous 'data'.
  *
  * @param h
  *   Hash table to add the key to.
@@ -230,6 +245,12 @@ rte_hash_add_key_with_hash(struct rte_hash *h, const void *key, hash_sig_t sig);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
+ * If lock free reader writer concurrency is enabled,
+ * the hash library's internal memory for the deleted
+ * key is not freed. It should be freed by calling
+ * rte_hash_free_key_with_position API after all
+ * the readers have stopped using the hash entry
+ * corresponding to this key.
  *
  * @param h
  *   Hash table to remove the key from.
@@ -241,6 +262,8 @@ rte_hash_add_key_with_hash(struct rte_hash *h, const void *key, hash_sig_t sig);
  *   - A positive value that can be used by the caller as an offset into an
  *     array of user data. This value is unique for this key, and is the same
  *     value that was returned when the key was added.
+ *     When lock free concurrency is enabled, this value should be used
+ *     while calling the rte_hash_free_key_with_position API.
  */
 int32_t
 rte_hash_del_key(const struct rte_hash *h, const void *key);
@@ -251,6 +274,12 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
+ * If lock free reader writer concurrency is enabled,
+ * the hash library's internal memory for the deleted
+ * key is not freed. It should be freed by calling
+ * rte_hash_free_key_with_position API after all
+ * the readers have stopped using the hash entry
+ * corresponding to this key.
  *
  * @param h
  *   Hash table to remove the key from.
@@ -264,6 +293,8 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
  *   - A positive value that can be used by the caller as an offset into an
  *     array of user data. This value is unique for this key, and is the same
  *     value that was returned when the key was added.
+ *     When lock free concurrency is enabled, this value should be used
+ *     while calling the rte_hash_free_key_with_position API.
  */
 int32_t
 rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t sig);
@@ -290,6 +321,30 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
                               void **key);
 
 /**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free hash library's internal memory given the position
+ * of the key. This operation is not multi-thread safe and should
+ * only be called from one thread by default. Thread safety
+ * can be enabled by setting flag during table creation.
+ * If lock free reader writer concurrency is enabled,
+ * the hash library's internal memory must be freed using this API
+ * after the key is deleted using rte_hash_del_key_xxx API.
+ *
+ * @param h
+ *   Hash table to get the key from.
+ * @param position
+ *   Position returned when the key was deleted.
+ * @return
+ *   - 0 if freed successfully
+ *   - -EINVAL if the parameters are invalid.
+ */
+int __rte_experimental
+rte_hash_free_key_with_position(const struct rte_hash *h,
+                               const int32_t position);
+
+/**
  * Find a key-value pair in the hash table.
  * This operation is multi-thread safe with regarding to other lookup threads.
  * Read-write concurrency can be enabled by setting flag during
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index e216ac8..734ae28 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -53,3 +53,10 @@ DPDK_18.08 {
        rte_hash_count;
 
 } DPDK_16.07;
+
+EXPERIMENTAL {
+       global:
+
+       rte_hash_free_key_with_position;
+
+};
-- 
2.7.4
