Hi Mattias,

-----Original Message-----
From: Mattias Rönnblom <mattias.ronnb...@ericsson.com>
Sent: Wednesday, April 15, 2020 8:00 PM
To: Medvedkin, Vladimir <vladimir.medved...@intel.com>; dev@dpdk.org
Cc: Ananyev, Konstantin <konstantin.anan...@intel.com>; Wang, Yipeng1 <yipeng1.w...@intel.com>; Gobriel, Sameh <sameh.gobr...@intel.com>; Richardson, Bruce <bruce.richard...@intel.com>
Subject: Re: [dpdk-dev] [PATCH v3 1/4] hash: add k32v64 hash library
On 2020-04-15 20:17, Vladimir Medvedkin wrote:
> K32V64 hash is a hash table that supports 32 bit keys and 64 bit values.
> This table is hash function agnostic so user must provide
> precalculated hash signature for add/delete/lookup operations.
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medved...@intel.com>
> ---
>  lib/Makefile                           |   2 +-
>  lib/librte_hash/Makefile               |  13 +-
>  lib/librte_hash/k32v64_hash_avx512vl.c |  56 ++++++
>  lib/librte_hash/meson.build            |  17 +-
>  lib/librte_hash/rte_hash_version.map   |   6 +-
>  lib/librte_hash/rte_k32v64_hash.c      | 315 +++++++++++++++++++++++++++++++++
>  lib/librte_hash/rte_k32v64_hash.h      | 211 ++++++++++++++++++++++
>  7 files changed, 615 insertions(+), 5 deletions(-)
>  create mode 100644 lib/librte_hash/k32v64_hash_avx512vl.c
>  create mode 100644 lib/librte_hash/rte_k32v64_hash.c
>  create mode 100644 lib/librte_hash/rte_k32v64_hash.h
>
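Since the table is hash function agnostic, the caller computes the 32-bit signature itself and must pass the same signature to add, lookup and delete for a given key. A minimal sketch of the intended flow (rte_hash_crc_4byte() is just one possible hash, and the helper name is illustrative):

    #include <rte_hash_crc.h>
    #include <rte_k32v64_hash.h>

    static int
    k32v64_roundtrip(struct rte_k32v64_hash_table *tbl, uint32_t key,
        uint64_t value)
    {
        uint32_t sig = rte_hash_crc_4byte(key, 0); /* caller-chosen hash */
        uint64_t out;
        int ret;

        ret = rte_k32v64_hash_add(tbl, key, sig, value);
        if (ret < 0)
            return ret;
        /* lookup returns 0 on hit and fills 'out' */
        ret = rte_k32v64_hash_lookup(tbl, key, sig, &out);
        if (ret == 0)
            ret = rte_k32v64_hash_delete(tbl, key, sig);
        return ret;
    }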
> diff --git a/lib/Makefile b/lib/Makefile
> index 46b91ae..a8c02e4 100644
> --- a/lib/Makefile
> +++ b/lib/Makefile
> @@ -48,7 +48,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
>  DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ethdev \
>                          librte_net librte_hash librte_cryptodev
>  DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
> -DEPDIRS-librte_hash := librte_eal librte_ring
> +DEPDIRS-librte_hash := librte_eal librte_ring librte_mempool
>  DIRS-$(CONFIG_RTE_LIBRTE_EFD) += librte_efd
>  DEPDIRS-librte_efd := librte_eal librte_ring librte_hash
>  DIRS-$(CONFIG_RTE_LIBRTE_RIB) += librte_rib
> diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
> index ec9f864..023689d 100644
> --- a/lib/librte_hash/Makefile
> +++ b/lib/librte_hash/Makefile
> @@ -8,13 +8,14 @@ LIB = librte_hash.a
>
>  CFLAGS += -O3
>  CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
> -LDLIBS += -lrte_eal -lrte_ring
> +LDLIBS += -lrte_eal -lrte_ring -lrte_mempool
>
>  EXPORT_MAP := rte_hash_version.map
>
>  # all source are stored in SRCS-y
>  SRCS-$(CONFIG_RTE_LIBRTE_HASH) := rte_cuckoo_hash.c
>  SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_fbk_hash.c
> +SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_k32v64_hash.c
>
>  # install this header file
>  SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include := rte_hash.h
> @@ -27,5 +28,15 @@ endif
>  SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h
>  SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_thash.h
>  SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h
> +SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_k32v64_hash.h
> +
> +CC_AVX512VL_SUPPORT=$(shell $(CC) -mavx512vl -dM -E - </dev/null 2>&1 | \
> +grep -q __AVX512VL__ && echo 1)
> +
> +ifeq ($(CC_AVX512VL_SUPPORT), 1)
> +    SRCS-$(CONFIG_RTE_LIBRTE_HASH) += k32v64_hash_avx512vl.c
> +    CFLAGS_k32v64_hash_avx512vl.o += -mavx512vl
> +    CFLAGS_rte_k32v64_hash.o += -DCC_AVX512VL_SUPPORT
> +endif
>
>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/lib/librte_hash/k32v64_hash_avx512vl.c b/lib/librte_hash/k32v64_hash_avx512vl.c
> new file mode 100644
> index 0000000..7c70dd2
> --- /dev/null
> +++ b/lib/librte_hash/k32v64_hash_avx512vl.c
> @@ -0,0 +1,56 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#include <rte_k32v64_hash.h>
> +
> +int
> +k32v64_hash_bulk_lookup_avx512vl(struct rte_k32v64_hash_table *table,
> +    uint32_t *keys, uint32_t *hashes, uint64_t *values, unsigned int n);
> +
> +static inline int
> +k32v64_cmp_keys_avx512vl(struct rte_k32v64_hash_bucket *bucket, uint32_t key,
> +    uint64_t *val)
> +{
> +    __m128i keys, srch_key;
> +    __mmask8 msk;
> +
> +    keys = _mm_load_si128((void *)bucket);
> +    srch_key = _mm_set1_epi32(key);
> +
> +    msk = _mm_mask_cmpeq_epi32_mask(bucket->key_mask, keys, srch_key);
> +    if (msk) {
> +        *val = bucket->val[__builtin_ctz(msk)];
> +        return 1;
> +    }
> +
> +    return 0;
> +}
> +
> +static inline int
> +k32v64_hash_lookup_avx512vl(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash, uint64_t *value)
> +{
> +    return __k32v64_hash_lookup(table, key, hash, value,
> +        k32v64_cmp_keys_avx512vl);
> +}
> +
> +int
> +k32v64_hash_bulk_lookup_avx512vl(struct rte_k32v64_hash_table *table,
> +    uint32_t *keys, uint32_t *hashes, uint64_t *values, unsigned int n)
> +{
> +    int ret, cnt = 0;
> +    unsigned int i;
> +
> +    if (unlikely((table == NULL) || (keys == NULL) || (hashes == NULL) ||
> +            (values == NULL)))
> +        return -EINVAL;
> +
> +    for (i = 0; i < n; i++) {
> +        ret = k32v64_hash_lookup_avx512vl(table, keys[i], hashes[i],
> +            &values[i]);
> +        if (ret == 0)
> +            cnt++;
> +    }
> +    return cnt;
> +}
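One note on the vector probe: _mm_load_si128() fetches all four 32-bit keys of a bucket in a single 16-byte load, and bucket->key_mask restricts the compare to occupied slots, so an empty slot can never produce a false match. The load relies on the keys being the first 16 bytes of the cache-aligned bucket; if that invariant is worth guarding at build time, something like this sketch would do (purely illustrative, not part of the patch):

    #include <stddef.h>
    #include <rte_common.h>
    #include <rte_k32v64_hash.h>

    static inline void
    k32v64_layout_check(void) /* hypothetical helper */
    {
        /* the 128-bit load assumes the keys start the bucket ... */
        RTE_BUILD_BUG_ON(offsetof(struct rte_k32v64_hash_bucket, key) != 0);
        /* ... and fill exactly one __m128i (4 x uint32_t = 16 bytes) */
        RTE_BUILD_BUG_ON(sizeof(((struct rte_k32v64_hash_bucket *)0)->key) != 16);
    }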
> diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build
> index 6ab46ae..8a37cc4 100644
> --- a/lib/librte_hash/meson.build
> +++ b/lib/librte_hash/meson.build
> @@ -3,10 +3,23 @@
>
>  headers = files('rte_crc_arm64.h',
>      'rte_fbk_hash.h',
> +    'rte_k32v64_hash.h',
>      'rte_hash_crc.h',
>      'rte_hash.h',
>      'rte_jhash.h',
>      'rte_thash.h')
>
> -sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c')
> -deps += ['ring']
> +sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c',
> +    'rte_k32v64_hash.c')
> +deps += ['ring', 'mempool']
> +
> +if dpdk_conf.has('RTE_ARCH_X86')
> +    if cc.has_argument('-mavx512vl')
> +        avx512_tmplib = static_library('avx512_tmp',
> +                'k32v64_hash_avx512vl.c',
> +                dependencies: static_rte_mempool,
> +                c_args: cflags + ['-mavx512vl'])
> +        objs += avx512_tmplib.extract_objects('k32v64_hash_avx512vl.c')
> +        cflags += '-DCC_AVX512VL_SUPPORT'
> +
> +    endif
> +endif
> diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
> index a8fbbc3..9a4f2f6 100644
> --- a/lib/librte_hash/rte_hash_version.map
> +++ b/lib/librte_hash/rte_hash_version.map
> @@ -34,5 +34,9 @@ EXPERIMENTAL {
>
>      rte_hash_free_key_with_position;
>      rte_hash_max_key_id;
> -
> +    rte_k32v64_hash_create;
> +    rte_k32v64_hash_find_existing;
> +    rte_k32v64_hash_free;
> +    rte_k32v64_hash_add;
> +    rte_k32v64_hash_delete;
>  };
> diff --git a/lib/librte_hash/rte_k32v64_hash.c b/lib/librte_hash/rte_k32v64_hash.c
> new file mode 100644
> index 0000000..7ed94b4
> --- /dev/null
> +++ b/lib/librte_hash/rte_k32v64_hash.c
> @@ -0,0 +1,315 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#include <string.h>
> +
> +#include <rte_eal_memconfig.h>
> +#include <rte_errno.h>
> +#include <rte_malloc.h>
> +#include <rte_memory.h>
> +#include <rte_tailq.h>
> +
> +#include <rte_k32v64_hash.h>
> +
> +TAILQ_HEAD(rte_k32v64_hash_list, rte_tailq_entry);
> +
> +static struct rte_tailq_elem rte_k32v64_hash_tailq = {
> +    .name = "RTE_K32V64_HASH",
> +};
> +
> +EAL_REGISTER_TAILQ(rte_k32v64_hash_tailq);
> +
> +#define VALID_KEY_MSK ((1 << RTE_K32V64_KEYS_PER_BUCKET) - 1)
> +
> +#ifdef CC_AVX512VL_SUPPORT
> +int
> +k32v64_hash_bulk_lookup_avx512vl(struct rte_k32v64_hash_table *table,
> +    uint32_t *keys, uint32_t *hashes, uint64_t *values, unsigned int n);
> +#endif
> +
> +static int
> +k32v64_hash_bulk_lookup(struct rte_k32v64_hash_table *table, uint32_t *keys,
> +    uint32_t *hashes, uint64_t *values, unsigned int n)
> +{
> +    int ret, cnt = 0;
> +    unsigned int i;
> +
> +    if (unlikely((table == NULL) || (keys == NULL) || (hashes == NULL) ||
> +            (values == NULL)))
> +        return -EINVAL;
> +
> +    for (i = 0; i < n; i++) {
> +        ret = rte_k32v64_hash_lookup(table, keys[i], hashes[i],
> +            &values[i]);
> +        if (ret == 0)
> +            cnt++;
> +    }
> +    return cnt;
> +}
> +
> +static rte_k32v64_hash_bulk_lookup_t
> +get_lookup_bulk_fn(void)
> +{
> +#ifdef CC_AVX512VL_SUPPORT
> +    if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
> +        return k32v64_hash_bulk_lookup_avx512vl;
> +#endif
> +    return k32v64_hash_bulk_lookup;
> +}
> +
> +int
> +rte_k32v64_hash_add(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash, uint64_t value)
> +{
> +    uint32_t bucket;
> +    int i, idx, ret;
> +    uint8_t msk;
> +    struct rte_k32v64_ext_ent *tmp, *ent, *prev = NULL;
> +
> +    if (table == NULL)
> +        return -EINVAL;
> +
> +    bucket = hash & table->bucket_msk;
> +    /* Search key in table. Update value if exists */
> +    for (i = 0; i < RTE_K32V64_KEYS_PER_BUCKET; i++) {
> +        if ((key == table->t[bucket].key[i]) &&
> +                (table->t[bucket].key_mask & (1 << i))) {
> +            table->t[bucket].val[i] = value;
> +            return 0;
> +        }
> +    }
> +
> +    if (!SLIST_EMPTY(&table->t[bucket].head)) {
> +        SLIST_FOREACH(ent, &table->t[bucket].head, next) {
> +            if (ent->key == key) {
> +                ent->val = value;
> +                return 0;
> +            }
> +        }
> +    }
> +
> +    msk = ~table->t[bucket].key_mask & VALID_KEY_MSK;
> +    if (msk) {
> +        idx = __builtin_ctz(msk);
> +        table->t[bucket].key[idx] = key;
> +        table->t[bucket].val[idx] = value;
> +        rte_smp_wmb();
> +        table->t[bucket].key_mask |= 1 << idx;
> +        table->nb_ent++;
> +        return 0;
> +    }
> +
> +    ret = rte_mempool_get(table->ext_ent_pool, (void **)&ent);
> +    if (ret < 0)
> +        return ret;
> +
> +    SLIST_NEXT(ent, next) = NULL;
> +    ent->key = key;
> +    ent->val = value;
> +    rte_smp_wmb();
> +    SLIST_FOREACH(tmp, &table->t[bucket].head, next)
> +        prev = tmp;
> +
> +    if (prev == NULL)
> +        SLIST_INSERT_HEAD(&table->t[bucket].head, ent, next);
> +    else
> +        SLIST_INSERT_AFTER(prev, ent, next);
> +
> +    table->nb_ent++;
> +    table->nb_ext_ent++;
> +    return 0;
> +}
> +
> +int
> +rte_k32v64_hash_delete(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash)
> +{
> +    uint32_t bucket;
> +    int i;
> +    struct rte_k32v64_ext_ent *ent;
> +
> +    if (table == NULL)
> +        return -EINVAL;
> +
> +    bucket = hash & table->bucket_msk;
> +
> +    for (i = 0; i < RTE_K32V64_KEYS_PER_BUCKET; i++) {
> +        if ((key == table->t[bucket].key[i]) &&
> +                (table->t[bucket].key_mask & (1 << i))) {
> +            ent = SLIST_FIRST(&table->t[bucket].head);
> +            if (ent) {
> +                rte_atomic32_inc(&table->t[bucket].cnt);
> +                table->t[bucket].key[i] = ent->key;
> +                table->t[bucket].val[i] = ent->val;
> +                SLIST_REMOVE_HEAD(&table->t[bucket].head, next);
> +                rte_atomic32_inc(&table->t[bucket].cnt);
> +                table->nb_ext_ent--;
> +            } else
> +                table->t[bucket].key_mask &= ~(1 << i);
> +            if (ent)
> +                rte_mempool_put(table->ext_ent_pool, ent);
> +            table->nb_ent--;
> +            return 0;
> +        }
> +    }
> +
> +    SLIST_FOREACH(ent, &table->t[bucket].head, next)
> +        if (ent->key == key)
> +            break;
> +
> +    if (ent == NULL)
> +        return -ENOENT;
> +
> +    rte_atomic32_inc(&table->t[bucket].cnt);
> +    SLIST_REMOVE(&table->t[bucket].head, ent, rte_k32v64_ext_ent, next);
> +    rte_atomic32_inc(&table->t[bucket].cnt);
> +    rte_mempool_put(table->ext_ent_pool, ent);
> +
> +    table->nb_ext_ent--;
> +    table->nb_ent--;
> +
> +    return 0;
> +}
> +
> +struct rte_k32v64_hash_table *
> +rte_k32v64_hash_find_existing(const char *name)
> +{
> +    struct rte_k32v64_hash_table *h = NULL;
> +    struct rte_tailq_entry *te;
> +    struct rte_k32v64_hash_list *k32v64_hash_list;
> +
> +    k32v64_hash_list = RTE_TAILQ_CAST(rte_k32v64_hash_tailq.head,
> +        rte_k32v64_hash_list);
> +
> +    rte_mcfg_tailq_read_lock();
> +    TAILQ_FOREACH(te, k32v64_hash_list, next) {
> +        h = (struct rte_k32v64_hash_table *) te->data;
> +        if (strncmp(name, h->name, RTE_K32V64_HASH_NAMESIZE) == 0)
> +            break;
> +    }
> +    rte_mcfg_tailq_read_unlock();
> +    if (te == NULL) {
> +        rte_errno = ENOENT;
> +        return NULL;
> +    }
> +    return h;
> +}
> +
> +struct rte_k32v64_hash_table *
> +rte_k32v64_hash_create(const struct rte_k32v64_hash_params *params)
> +{
> +    char hash_name[RTE_K32V64_HASH_NAMESIZE];
> +    struct rte_k32v64_hash_table *ht = NULL;
> +    struct rte_tailq_entry *te;
> +    struct rte_k32v64_hash_list *k32v64_hash_list;
> +    uint32_t mem_size, nb_buckets, max_ent;
> +    int ret;
> +    struct rte_mempool *mp;
> +
> +    if ((params == NULL) || (params->name == NULL) ||
> +            (params->entries == 0)) {
> +        rte_errno = EINVAL;
> +        return NULL;
> +    }
> +
> +    k32v64_hash_list = RTE_TAILQ_CAST(rte_k32v64_hash_tailq.head,
> +        rte_k32v64_hash_list);
> +
> +    ret = snprintf(hash_name, sizeof(hash_name), "K32V64_%s", params->name);
> +    if (ret < 0 || ret >= RTE_K32V64_HASH_NAMESIZE) {
> +        rte_errno = ENAMETOOLONG;
> +        return NULL;
> +    }
> +
> +    max_ent = rte_align32pow2(params->entries);
> +    nb_buckets = max_ent / RTE_K32V64_KEYS_PER_BUCKET;
> +    mem_size = sizeof(struct rte_k32v64_hash_table) +
> +        sizeof(struct rte_k32v64_hash_bucket) * nb_buckets;
> +
> +    mp = rte_mempool_create(hash_name, max_ent,
> +        sizeof(struct rte_k32v64_ext_ent), 0, 0, NULL, NULL, NULL, NULL,
> +        params->socket_id, 0);
> +
> +    if (mp == NULL)
> +        return NULL;
> +
> +    rte_mcfg_tailq_write_lock();
> +    TAILQ_FOREACH(te, k32v64_hash_list, next) {
> +        ht = (struct rte_k32v64_hash_table *) te->data;
> +        if (strncmp(params->name, ht->name,
> +                RTE_K32V64_HASH_NAMESIZE) == 0)
> +            break;
> +    }
> +    ht = NULL;
> +    if (te != NULL) {
> +        rte_errno = EEXIST;
> +        rte_mempool_free(mp);
> +        goto exit;
> +    }
> +
> +    te = rte_zmalloc("K32V64_HASH_TAILQ_ENTRY", sizeof(*te), 0);
> +    if (te == NULL) {
> +        RTE_LOG(ERR, HASH, "Failed to allocate tailq entry\n");
> +        rte_mempool_free(mp);
> +        goto exit;
> +    }
> +
> +    ht = rte_zmalloc_socket(hash_name, mem_size,
> +        RTE_CACHE_LINE_SIZE, params->socket_id);
> +    if (ht == NULL) {
> +        RTE_LOG(ERR, HASH, "Failed to allocate fbk hash table\n");
> +        rte_free(te);
> +        rte_mempool_free(mp);
> +        goto exit;
> +    }
> +
> +    memcpy(ht->name, hash_name, sizeof(ht->name));
> +    ht->max_ent = max_ent;
> +    ht->bucket_msk = nb_buckets - 1;
> +    ht->ext_ent_pool = mp;
> +    ht->lookup = get_lookup_bulk_fn();
> +
> +    te->data = (void *)ht;
> +    TAILQ_INSERT_TAIL(k32v64_hash_list, te, next);
> +
> +exit:
> +    rte_mcfg_tailq_write_unlock();
> +
> +    return ht;
> +}
> +
> +void
> +rte_k32v64_hash_free(struct rte_k32v64_hash_table *ht)
> +{
> +    struct rte_tailq_entry *te;
> +    struct rte_k32v64_hash_list *k32v64_hash_list;
> +
> +    if (ht == NULL)
> +        return;
> +
> +    k32v64_hash_list = RTE_TAILQ_CAST(rte_k32v64_hash_tailq.head,
> +        rte_k32v64_hash_list);
> +
> +    rte_mcfg_tailq_write_lock();
> +
> +    /* find out tailq entry */
> +    TAILQ_FOREACH(te, k32v64_hash_list, next) {
> +        if (te->data == (void *) ht)
> +            break;
> +    }
> +
> +    if (te == NULL) {
> +        rte_mcfg_tailq_write_unlock();
> +        return;
> +    }
> +
> +    TAILQ_REMOVE(k32v64_hash_list, te, next);
> +
> +    rte_mcfg_tailq_write_unlock();
> +
> +    rte_mempool_free(ht->ext_ent_pool);
> +    rte_free(ht);
> +    rte_free(te);
> +}
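For completeness, creating and releasing a table would look roughly like this (sketch; the name and sizing below are illustrative):

    #include <rte_lcore.h>
    #include <rte_k32v64_hash.h>

    static struct rte_k32v64_hash_table *
    example_create(void)
    {
        struct rte_k32v64_hash_params params = {
            .name = "example",      /* internally prefixed with "K32V64_" */
            .entries = 1 << 16,     /* rounded up to a power of two anyway */
            .socket_id = rte_socket_id(),
        };

        /* on failure NULL is returned and rte_errno is set
         * (EINVAL, ENAMETOOLONG, EEXIST, ...) */
        return rte_k32v64_hash_create(&params);
    }

rte_k32v64_hash_free() then releases the table, its tailq entry and the extension-entry mempool.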
> diff --git a/lib/librte_hash/rte_k32v64_hash.h b/lib/librte_hash/rte_k32v64_hash.h
> new file mode 100644
> index 0000000..b2c52e9
> --- /dev/null
> +++ b/lib/librte_hash/rte_k32v64_hash.h
> @@ -0,0 +1,211 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#ifndef _RTE_K32V64_HASH_H_
> +#define _RTE_K32V64_HASH_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_compat.h>
> +#include <rte_atomic.h>
> +#include <rte_mempool.h>
> +
> +#define RTE_K32V64_HASH_NAMESIZE	32
> +#define RTE_K32V64_KEYS_PER_BUCKET	4
> +#define RTE_K32V64_WRITE_IN_PROGRESS	1
> +
> +struct rte_k32v64_hash_params {
> +    const char *name;
> +    uint32_t entries;
> +    int socket_id;
> +};
> +
> +struct rte_k32v64_ext_ent {
> +    SLIST_ENTRY(rte_k32v64_ext_ent) next;
> +    uint32_t key;
> +    uint64_t val;
> +};
> +
> +struct rte_k32v64_hash_bucket {
> +    uint32_t key[RTE_K32V64_KEYS_PER_BUCKET];
> +    uint64_t val[RTE_K32V64_KEYS_PER_BUCKET];
> +    uint8_t key_mask;
> +    rte_atomic32_t cnt;
> +    SLIST_HEAD(rte_k32v64_list_head, rte_k32v64_ext_ent) head;
> +} __rte_cache_aligned;
> +
> +struct rte_k32v64_hash_table;
> +
> +typedef int (*rte_k32v64_hash_bulk_lookup_t)
> +(struct rte_k32v64_hash_table *table, uint32_t *keys, uint32_t *hashes,
> +    uint64_t *values, unsigned int n);
> +
> +struct rte_k32v64_hash_table {
> +    char name[RTE_K32V64_HASH_NAMESIZE];	/**< Name of the hash. */
> +    uint32_t nb_ent;		/**< Number of entities in the table*/
> +    uint32_t nb_ext_ent;	/**< Number of extended entities */
> +    uint32_t max_ent;		/**< Maximum number of entities */
> +    uint32_t bucket_msk;
> +    struct rte_mempool *ext_ent_pool;
> +    rte_k32v64_hash_bulk_lookup_t lookup;
> +    __extension__ struct rte_k32v64_hash_bucket t[0];
> +};
> +
> +typedef int (*rte_k32v64_cmp_fn_t)
> +(struct rte_k32v64_hash_bucket *bucket, uint32_t key, uint64_t *val);
> +
> +static inline int
> +__k32v64_cmp_keys(struct rte_k32v64_hash_bucket *bucket, uint32_t key,
> +    uint64_t *val)
> +{
> +    int i;
> +
> +    for (i = 0; i < RTE_K32V64_KEYS_PER_BUCKET; i++) {
> +        if ((key == bucket->key[i]) &&
> +                (bucket->key_mask & (1 << i))) {
> +            *val = bucket->val[i];
> +            return 1;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static inline int
> +__k32v64_hash_lookup(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash, uint64_t *value, rte_k32v64_cmp_fn_t cmp_f)
> +{
> +    uint64_t val = 0;
> +    struct rte_k32v64_ext_ent *ent;
> +    int32_t cnt;
> +    int found = 0;
> +    uint32_t bucket = hash & table->bucket_msk;
> +
> +    do {
> +        do
> +            cnt = rte_atomic32_read(&table->t[bucket].cnt);
> +        while (unlikely(cnt & RTE_K32V64_WRITE_IN_PROGRESS));
> +
> +        found = cmp_f(&table->t[bucket], key, &val);
> +        if (unlikely((found == 0) &&
> +                (!SLIST_EMPTY(&table->t[bucket].head)))) {
> +            SLIST_FOREACH(ent, &table->t[bucket].head, next) {
> +                if (ent->key == key) {
> +                    val = ent->val;
> +                    found = 1;
> +                    break;
> +                }
> +            }
> +        }
> +
> +    } while (unlikely(cnt != rte_atomic32_read(&table->t[bucket].cnt)));
> +
> +    if (found == 1) {
> +        *value = val;
> +        return 0;
> +    } else
> +        return -ENOENT;
> +}
> +
> +static inline int
> +rte_k32v64_hash_lookup(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash, uint64_t *value)
> +{
> +    return __k32v64_hash_lookup(table, key, hash, value, __k32v64_cmp_keys);
> +}
> +
> +static inline int
> +rte_k32v64_hash_bulk_lookup(struct rte_k32v64_hash_table *table,
> +    uint32_t *keys, uint32_t *hashes, uint64_t *values, unsigned int n)
> +{
> +    return table->lookup(table, keys, hashes, values, n);
> +}
> +
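A bulk-lookup usage sketch to go with the single-key one above (the helper and burst size are illustrative; hashes are always precomputed by the caller):

    #include <rte_hash_crc.h>
    #include <rte_k32v64_hash.h>

    #define BURST 32

    static int
    lookup_burst(struct rte_k32v64_hash_table *tbl, uint32_t keys[BURST],
        uint64_t values[BURST])
    {
        uint32_t hashes[BURST];
        unsigned int i;

        for (i = 0; i < BURST; i++)
            hashes[i] = rte_hash_crc_4byte(keys[i], 0);

        /* returns the number of keys found; values[i] is written
         * only for keys that hit */
        return rte_k32v64_hash_bulk_lookup(tbl, keys, hashes, values, BURST);
    }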
> +/**
> + * Add a key to an existing hash table with hash value.
> + * This operation is not multi-thread safe
> + * and should only be called from one thread.
> + *

Does this hash allow multiple readers, and at most one writer, for a particular instance? If that's the case, it should probably be mentioned somewhere. Just specifying that a particular function is not MT safe doesn't say anything about whether it's safe to call it in parallel with other non-MT-safe functions. I assume you can't add and delete in parallel?

Yes, this hash allows multiple readers and one writer. Writers must be serialized. I will add this information. Thanks!

> + * @param ht
> + *   Hash table to add the key to.
> + * @param key
> + *   Key to add to the hash table.
> + * @param value
> + *   Value to associate with key.
> + * @param hash
> + *   Hash value associated with key.
> + * @return
> + *   0 if ok, or negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_k32v64_hash_add(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash, uint64_t value);
> +
> +/**
> + * Remove a key with a given hash value from an existing hash table.
> + * This operation is not multi-thread
> + * safe and should only be called from one thread.
> + *
> + * @param ht
> + *   Hash table to remove the key from.
> + * @param key
> + *   Key to remove from the hash table.
> + * @param hash
> + *   hash value associated with key.
> + * @return
> + *   0 if ok, or negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_k32v64_hash_delete(struct rte_k32v64_hash_table *table, uint32_t key,
> +    uint32_t hash);
> +
> +
> +/**
> + * Performs a lookup for an existing hash table, and returns a pointer to
> + * the table if found.
> + *
> + * @param name
> + *   Name of the hash table to find
> + *
> + * @return
> + *   pointer to hash table structure or NULL on error with rte_errno
> + *   set appropriately.
> + */
> +__rte_experimental
> +struct rte_k32v64_hash_table *
> +rte_k32v64_hash_find_existing(const char *name);
> +
> +/**
> + * Create a new hash table for use with four byte keys.
> + *
> + * @param params
> + *   Parameters used in creation of hash table.
> + *
> + * @return
> + *   Pointer to hash table structure that is used in future hash table
> + *   operations, or NULL on error with rte_errno set appropriately.
> + */
> +__rte_experimental
> +struct rte_k32v64_hash_table *
> +rte_k32v64_hash_create(const struct rte_k32v64_hash_params *params);
> +
> +/**
> + * Free all memory used by a hash table.
> + *
> + * @param table
> + *   Hash table to deallocate.
> + */
> +__rte_experimental
> +void
> +rte_k32v64_hash_free(struct rte_k32v64_hash_table *table);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_K32V64_HASH_H_ */
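To make that contract concrete: lookups need no locking, while all add/delete calls on a given table must be serialized by the application if more than one thread writes. A sketch (the spinlock and helper names are the application's, not part of this API):

    #include <rte_spinlock.h>
    #include <rte_hash_crc.h>
    #include <rte_k32v64_hash.h>

    static rte_spinlock_t k32v64_wr_lock = RTE_SPINLOCK_INITIALIZER;

    /* readers: any number of threads, no lock taken */
    static inline int
    flow_lookup(struct rte_k32v64_hash_table *tbl, uint32_t key, uint64_t *val)
    {
        return rte_k32v64_hash_lookup(tbl, key, rte_hash_crc_4byte(key, 0), val);
    }

    /* writers: serialized, so at most one add/delete runs at a time */
    static inline int
    flow_add(struct rte_k32v64_hash_table *tbl, uint32_t key, uint64_t val)
    {
        int ret;

        rte_spinlock_lock(&k32v64_wr_lock);
        ret = rte_k32v64_hash_add(tbl, key, rte_hash_crc_4byte(key, 0), val);
        rte_spinlock_unlock(&k32v64_wr_lock);
        return ret;
    }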