[dpdk-dev] [PATCH v4 1/2] hash table: fix a bug in rte_hash_iterate()

2018-10-09 Thread Qiaobin Fu
In the current implementation of rte_hash_iterate(), the
reader lock is only acquired after the while loop that
skips empty slots. This opens the following race condition:

1. The while loop finishes because it finds a non-empty
   slot. But it does so without holding the lock.
2. Then we acquire the lock.
3. The slot that was non-empty is now empty.
   BUG: next_key now refers to an invalid entry.

This patch fixes the race by acquiring the reader lock
before the loop.
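
For reference, this is the kind of caller loop that exercises the
iterator (a minimal sketch; the table handle h and its contents are
assumed to exist, and this code is not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <rte_hash.h>

static void
dump_table(const struct rte_hash *h)
{
        const void *key;
        void *data;
        uint32_t next = 0;      /* iterator cursor, must start at 0 */
        int32_t pos;

        while ((pos = rte_hash_iterate(h, &key, &data, &next)) >= 0) {
                /* With this fix, key and data were read under the
                 * reader lock, so they belong to a slot that was
                 * valid when it was examined. */
                printf("position %d: key=%p data=%p\n", (int)pos, key, data);
        }
        /* pos == -ENOENT: end of table; pos == -EINVAL: bad arguments. */
}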

Signed-off-by: Qiaobin Fu 
Reviewed-by: Michel Machado 
---
 lib/librte_hash/rte_cuckoo_hash.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index f7b86c8c9..a3e76684d 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1317,16 +1317,18 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
idx = *next % RTE_HASH_BUCKET_ENTRIES;
 
+   __hash_rw_reader_lock(h);
/* If current position is empty, go to the next one */
while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
(*next)++;
/* End of table */
-   if (*next == total_entries)
+   if (*next == total_entries) {
+   __hash_rw_reader_unlock(h);
return -ENOENT;
+   }
bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
idx = *next % RTE_HASH_BUCKET_ENTRIES;
}
-   __hash_rw_reader_lock(h);
/* Get position of entry in key table */
position = h->buckets[bucket_idx].key_idx[idx];
next_key = (struct rte_hash_key *) ((char *)h->key_store +
-- 
2.17.1



[dpdk-dev] [PATCH v4 2/2] hash table: add an iterator over conflicting entries

2018-10-09 Thread Qiaobin Fu
Function rte_hash_iterate_conflict_entries_with_hash() iterates
over the entries that conflict with an incoming entry.

Iterating over conflicting entries enables one to decide
if the incoming entry is more valuable than the entries already
in the hash table. This is particularly useful after
an insertion failure.
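
A minimal sketch of the intended use after a failed insert follows.
The zero-initialized iterator state before the first call, the return
convention matching rte_hash_iterate(), and the entry_value() helper
are assumptions of this sketch, not part of the patch:

#include <errno.h>
#include <string.h>
#include <rte_hash.h>

/* Hypothetical valuation of an entry's data. */
static int entry_value(void *data) { return *(int *)data; }

static int
insert_or_displace(struct rte_hash *h, const void *new_key, void *new_data)
{
        hash_sig_t sig = rte_hash_hash(h, new_key);
        struct rte_hash_iterator_state state;
        const void *old_key;
        void *old_data;

        if (rte_hash_add_key_with_hash_data(h, new_key, sig, new_data) == 0)
                return 0;               /* inserted, no conflict */

        memset(&state, 0, sizeof(state));       /* assumed initialization */
        while (rte_hash_iterate_conflict_entries_with_hash(h, &old_key,
                        &old_data, sig, &state) >= 0) {
                if (entry_value(old_data) < entry_value(new_data)) {
                        /* Example policy: evict a less valuable entry. */
                        rte_hash_del_key_with_hash(h, old_key, sig);
                        return rte_hash_add_key_with_hash_data(h, new_key,
                                        sig, new_data);
                }
        }
        return -ENOSPC;         /* nothing worth displacing */
}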

v4:
* Fix the style issue

* Follow the ABI updates

v3:
* Make the rte_hash_iterate() API similar to
  rte_hash_iterate_conflict_entries()

v2:
* Fix the style issue

* Make the API more universal

Signed-off-by: Qiaobin Fu 
Reviewed-by: Cody Doucette 
Reviewed-by: Michel Machado 
Reviewed-by: Keith Wiles 
Reviewed-by: Yipeng Wang 
Reviewed-by: Honnappa Nagarahalli 
Reviewed-by: Gaƫtan Rivet 
---
 MAINTAINERS  |   2 +-
 lib/librte_hash/rte_cuckoo_hash.c| 134 ++-
 lib/librte_hash/rte_cuckoo_hash.h|  11 +++
 lib/librte_hash/rte_hash.h   |  71 +-
 lib/librte_hash/rte_hash_version.map |  14 +++
 test/test/test_hash.c|   6 +-
 test/test/test_hash_multiwriter.c|   8 +-
 test/test/test_hash_readwrite.c  |  14 ++-
 8 files changed, 246 insertions(+), 14 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 9fd258fad..e8c81656f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1055,7 +1055,7 @@ F: test/test/test_efd*
 F: examples/server_node_efd/
 F: doc/guides/sample_app_ug/server_node_efd.rst
 
-Hashes
+Hashes - EXPERIMENTAL
 M: Bruce Richardson 
 M: Pablo de Lara 
 F: lib/librte_hash/
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index a3e76684d..439251a7f 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1301,7 +1301,10 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
 }
 
 int32_t
-rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
+rte_hash_iterate_v1808(const struct rte_hash *h,
+   const void **key,
+   void **data,
+   uint32_t *next)
 {
uint32_t bucket_idx, idx, position;
struct rte_hash_key *next_key;
@@ -1344,3 +1347,132 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
 
return position - 1;
 }
+VERSION_SYMBOL(rte_hash_iterate, _v1808, 18.08);
+
+int32_t
+rte_hash_iterate_v1811(const struct rte_hash *h,
+   const void **key,
+   void **data,
+   struct rte_hash_iterator_state *state)
+{
+   struct rte_hash_iterator_priv *it;
+   uint32_t bucket_idx, idx, position;
+   struct rte_hash_key *next_key;
+
+   RETURN_IF_TRUE(((h == NULL) || (key == NULL) ||
+   (data == NULL) || (state == NULL)), -EINVAL);
+
+   RTE_BUILD_BUG_ON(sizeof(struct rte_hash_iterator_priv) >
+   sizeof(struct rte_hash_iterator_state));
+
+   it = (struct rte_hash_iterator_priv *)state;
+   if (it->next == 0)
+   it->total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+
+   /* Out of bounds */
+   if (it->next >= it->total_entries)
+   return -ENOENT;
+
+   /* Calculate bucket and index of current iterator */
+   bucket_idx = it->next / RTE_HASH_BUCKET_ENTRIES;
+   idx = it->next % RTE_HASH_BUCKET_ENTRIES;
+
+   __hash_rw_reader_lock(h);
+   /* If current position is empty, go to the next one */
+   while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
+   it->next++;
+   /* End of table */
+   if (it->next == it->total_entries) {
+   __hash_rw_reader_unlock(h);
+   return -ENOENT;
+   }
+   bucket_idx = it->next / RTE_HASH_BUCKET_ENTRIES;
+   idx = it->next % RTE_HASH_BUCKET_ENTRIES;
+   }
+   /* Get position of entry in key table */
+   position = h->buckets[bucket_idx].key_idx[idx];
+   next_key = (struct rte_hash_key *) ((char *)h->key_store +
+   position * h->key_entry_size);
+   /* Return key and data */
+   *key = next_key->key;
+   *data = next_key->pdata;
+
+   __hash_rw_reader_unlock(h);
+
+   /* Increment iterator */
+   it->next++;
+
+   return position - 1;
+}
+MAP_STATIC_SYMBOL(int32_t rte_hash_iterate(const struct rte_hash *h,
+   const void **key, void **data, struct rte_hash_iterator_state *state),
+   rte_hash_iterate_v1811);
+
+int32_t __rte_experimental
+rte_hash_iterate_conflict_entries_with_hash(struct rte_hash *h,
+   const void **key,
+   void **data,
+   hash_sig_t sig,
+   struct rte_hash_iterator_state *state)
+{
+   struct rte_hash_iterator_conflict_entries_priv *it;
+
+   RETURN_IF_TRUE(((h == NULL) || (key == NULL) ||
+   (data == NULL) || (state == NULL)), -EINVAL);
+
+   RTE_BUILD_BUG_ON(sizeof(
+   struct rte_hash_iterator_conflict_entrie

[dpdk-dev] [PATCH] LPM: add iterator over LPM rules

2018-07-08 Thread Qiaobin Fu
Add an iterator over the LPM rules.
The iterator takes a prefix as a parameter and lists all
rules that fall within that prefix, i.e. rules whose depth
is equal to or greater than the given depth.
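
A minimal usage sketch (the lpm handle is assumed to be an existing
table built with rte_lpm_create()/rte_lpm_add(); the 10.0.0.0/8 prefix
is only an example, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <rte_lpm.h>

static void
dump_rules_under_prefix(const struct rte_lpm *lpm)
{
        struct rte_lpm_iterator_state state;
        const struct rte_lpm_rule *rule;
        uint32_t prefix = (uint32_t)10 << 24;   /* 10.0.0.0 */

        if (rte_lpm_iterator_state_init(lpm, prefix, 8, &state) != 0)
                return;

        /* Each call returns the next rule covered by 10.0.0.0/8. */
        while (rte_lpm_rule_iterate(&state, &rule) == 0)
                printf("rule: ip=0x%08x next_hop=%u\n", rule->ip,
                        rule->next_hop);
}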

Signed-off-by: Qiaobin Fu 
Reviewed-by: Cody Doucette 
Reviewed-by: Michel Machado 
---
 lib/librte_lpm/rte_lpm.c  | 99 +++
 lib/librte_lpm/rte_lpm.h  | 55 ++
 lib/librte_lpm/rte_lpm6.c | 82 +---
 lib/librte_lpm/rte_lpm6.h | 56 ++
 4 files changed, 285 insertions(+), 7 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index d00b13d93..17cc49a86 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -85,6 +85,105 @@ depth_to_range(uint8_t depth)
return 1 << (RTE_LPM_MAX_DEPTH - depth);
 }
 
+int
+rte_lpm_iterator_state_init(const struct rte_lpm *lpm, uint32_t ip,
+   uint8_t depth, struct rte_lpm_iterator_state *state)
+{
+   if (lpm == NULL || depth > RTE_LPM_MAX_DEPTH || state == NULL)
+   return -EINVAL;
+
+   state->dmask = depth_to_mask(depth);
+   state->ip_masked = ip & state->dmask;
+   state->depth = depth == 0 ? 1 : depth;
+   state->next = 0;
+   state->lpm = lpm;
+
+   return 0;
+}
+
+/*
+ * An iterator over its rule entries.
+ */
+int
+rte_lpm_rule_iterate_v20(struct rte_lpm_iterator_state *state,
+   const struct rte_lpm_rule **rule)
+{
+   if (state == NULL || rule == NULL) {
+   if (rule != NULL)
+   *rule = NULL;
+
+   return -EINVAL;
+   }
+
+   while (state->depth <= RTE_LPM_MAX_DEPTH) {
+   uint32_t rule_gindex =
+   state->lpm->rule_info[state->depth - 1].first_rule;
+   uint32_t last_rule = rule_gindex +
+   state->lpm->rule_info[state->depth - 1].used_rules;
+   uint32_t rule_index = rule_gindex + state->next;
+
+   while (rule_index < last_rule) {
+   if ((state->lpm->rules_tbl[rule_index].ip &
+   state->dmask) == state->ip_masked) {
+   state->next = rule_index - rule_gindex + 1;
+   *rule = (const struct rte_lpm_rule *)
+   &state->lpm->rules_tbl[rule_index];
+   return 0;
+   }
+
+   rule_index++;
+   }
+
+   state->depth++;
+   state->next = 0;
+   }
+
+   *rule = NULL;
+   return -ENOENT;
+}
+VERSION_SYMBOL(rte_lpm_rule_iterate, _v20, 2.0);
+
+int
+rte_lpm_rule_iterate_v1604(struct rte_lpm_iterator_state *state,
+   const struct rte_lpm_rule **rule)
+{
+   if (state == NULL || rule == NULL) {
+   if (rule != NULL)
+   *rule = NULL;
+
+   return -EINVAL;
+   }
+
+   while (state->depth <= RTE_LPM_MAX_DEPTH) {
+   uint32_t rule_gindex =
+   state->lpm->rule_info[state->depth - 1].first_rule;
+   uint32_t last_rule = rule_gindex +
+   state->lpm->rule_info[state->depth - 1].used_rules;
+   uint32_t rule_index = rule_gindex + state->next;
+
+   while (rule_index < last_rule) {
+   if ((state->lpm->rules_tbl[rule_index].ip &
+   state->dmask) == state->ip_masked) {
+   state->next = rule_index - rule_gindex + 1;
+   *rule = (const struct rte_lpm_rule *)
+   &state->lpm->rules_tbl[rule_index];
+   return 0;
+   }
+
+   rule_index++;
+   }
+
+   state->depth++;
+   state->next = 0;
+   }
+
+   *rule = NULL;
+   return -ENOENT;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_rule_iterate, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_rule_iterate(struct rte_lpm_iterator_state *state,
+   const struct rte_lpm_rule **rule), rte_lpm_rule_iterate_v1604);
+
 /*
  * Find an existing lpm table and return a pointer to it.
  */
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 21550444d..c1311c67a 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -188,6 +188,61 @@ struct rte_lpm {
struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
 };
 
+/** LPM iterator state structure. */
+struct rte_lpm_iterator_state {
+   uint32_t dmask;
+   uint32_t ip_masked;
+   uint8_t  depth;
+   uint32_t next;
+   const struct rte_lpm *lpm;
+};
+
+/**
+ * Initialize the lpm iterator state.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param 

[dpdk-dev] [PATCH] hash table: add a bucket iterator function

2018-07-15 Thread Qiaobin Fu
Function rte_hash_bucket_iterate() enables callers to
incrementally iterate over the hash table, one bucket at
a time, so that callers can avoid processing hiccups and
thrashing the processor cache.

This patch mainly deals with cases in which
the hash table is full and one needs to decide if the incoming
entry is more valuable than the entries already in the hash table.
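
A sketch of the intended incremental use, scanning exactly one bucket
per call so that a full sweep can be spread over many invocations (h
and the visit() callback are assumed; this is not part of the patch):

#include <stdint.h>
#include <rte_hash.h>

/* Walks the bucket that *bidx points at; rte_hash_bucket_iterate()
 * advances *bidx to the next bucket (wrapping to 0) and resets *next
 * when it returns -ENOENT. */
static void
scan_one_bucket(const struct rte_hash *h, uint32_t *bidx, uint32_t *next,
                void (*visit)(const void *key, void *data))
{
        const void *key;
        void *data;

        while (rte_hash_bucket_iterate(h, &key, &data, bidx, next) >= 0)
                visit(key, data);
        /* -ENOENT here means this bucket is done; call again later to
         * scan the next one. */
}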

Signed-off-by: Qiaobin Fu 
Reviewed-by: Cody Doucette 
Reviewed-by: Michel Machado 
---
 lib/librte_hash/rte_cuckoo_hash.c | 60 
 lib/librte_hash/rte_hash.h| 66 +++
 2 files changed, 126 insertions(+)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index a07543a29..2b90f3593 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1160,3 +1160,63 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
 
return position - 1;
 }
+
+int
+rte_hash_get_num_buckets(struct rte_hash *h)
+{
+   RETURN_IF_TRUE((h == NULL), -EINVAL);
+   return h->num_buckets;
+}
+
+uint32_t
+rte_hash_get_primary_bucket(const struct rte_hash *h, hash_sig_t sig)
+{
+   return sig & h->bucket_bitmask;
+}
+
+uint32_t
+rte_hash_get_secondary_bucket(const struct rte_hash *h, hash_sig_t sig)
+{
+   return rte_hash_secondary_hash(sig) & h->bucket_bitmask;
+}
+
+int32_t
+rte_hash_bucket_iterate(const struct rte_hash *h,
+   const void **key, void **data, uint32_t *bidx, uint32_t *next)
+{
+   uint32_t position;
+   struct rte_hash_key *next_key;
+
+   RETURN_IF_TRUE(((h == NULL) || (key == NULL) || (data == NULL) ||
+   (bidx == NULL) || (next == NULL)), -EINVAL);
+
+   /* Out of bounds. */
+   if (*bidx >= h->num_buckets || *next >= RTE_HASH_BUCKET_ENTRIES)
+   goto next_bucket;
+
+   /* If current position is empty, go to the next one. */
+   while (h->buckets[*bidx].key_idx[*next] == EMPTY_SLOT) {
+   (*next)++;
+   /* End of bucket. */
+   if (*next >= RTE_HASH_BUCKET_ENTRIES)
+   goto next_bucket;
+   }
+
+   /* Get position of entry in key table. */
+   position = h->buckets[*bidx].key_idx[*next];
+   next_key = (struct rte_hash_key *) ((char *)h->key_store +
+   position * h->key_entry_size);
+   /* Return key and data. */
+   *key = next_key->key;
+   *data = next_key->pdata;
+
+   /* Increment iterator. */
+   (*next)++;
+
+   return position - 1;
+
+next_bucket:
+   *bidx = (*bidx + 1) >= h->num_buckets ? 0 : *bidx + 1;
+   *next = 0;
+   return -ENOENT;
+}
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index f71ca9fbf..329bf42d6 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -94,6 +94,17 @@ rte_hash_create(const struct rte_hash_parameters *params);
  */
 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func);
 
+/**
+ * Get the number of buckets in the hash table.
+ * @param h
+ *   Hash table for which the function queries.
+ * @return
+ *   The number of buckets in the hash table h.
+ *- EINVAL - invalid parameter passed to function
+ */
+int
+rte_hash_get_num_buckets(struct rte_hash *h);
+
 /**
  * Find an existing hash table object and return a pointer to it.
  *
@@ -261,6 +272,34 @@ int
 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
   void **key);
 
+/**
+ * Get the primary bucket index given the precomputed hash value.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ *   Hash table to get the key from.
+ * @param sig
+ *   Precomputed hash value.
+ * @return
+ *   The primary bucket index.
+ */
+uint32_t
+rte_hash_get_primary_bucket(const struct rte_hash *h, hash_sig_t sig);
+
+/**
+ * Get the secondary bucket index given the precomputed hash value.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ *   Hash table to get the key from.
+ * @param sig
+ *   Precomputed hash value.
+ * @return
+ *   The secondary bucket index.
+ */
+uint32_t
+rte_hash_get_secondary_bucket(const struct rte_hash *h, hash_sig_t sig);
+
 /**
  * Find a key-value pair in the hash table.
  * This operation is multi-thread safe.
@@ -419,6 +458,33 @@ rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
  */
 int32_t
 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next);
+
+/**
+ * Iterate through the buckets in hash table, returning key-value pairs.
+ *
+ * @param h
+ *   Hash table to iterate
+ * @param key
+ *   Output containing the key where current iterator
+ *   was pointing at
+ * @param data
+ *   Output containing the data associated with key.
+ *   Returns NULL if data was not stored.
+ * @param bidx
+ *   Pointer to the current bucket index. Should b

[dpdk-dev] [PATCH] hash table: add a bucket iterator function

2018-07-28 Thread Qiaobin Fu
Function rte_hash_bucket_iterate() enables callers to
incrementally iterate over the hash table, one bucket at
a time, so that callers can avoid processing hiccups and
thrashing the processor cache.

This patch mainly deals with cases in which
the hash table is full and one needs to decide if the incoming
entry is more valuable than the entries already in the hash table.
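
Since an incoming key can only live in its primary or secondary
bucket, a caller can use the new helpers to examine only the two
buckets that conflict with it. A sketch (h, candidate_key and the
consider() callback are assumptions of this sketch, not part of the
patch):

#include <stdint.h>
#include <rte_hash.h>

static void
examine_conflicting_entries(const struct rte_hash *h,
                const void *candidate_key,
                void (*consider)(const void *key, void *data))
{
        hash_sig_t sig = rte_hash_hash(h, candidate_key);
        uint32_t buckets[2] = {
                rte_hash_get_primary_bucket(h, sig),
                rte_hash_get_secondary_bucket(h, sig),
        };
        const void *key;
        void *data;
        int i;

        for (i = 0; i < 2; i++) {
                uint32_t bidx = buckets[i];
                uint32_t next = 0;

                /* Iterate the entries of one bucket; -ENOENT marks
                 * its end. */
                while (rte_hash_bucket_iterate(h, &key, &data,
                                &bidx, &next) >= 0)
                        consider(key, data);
        }
}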

Signed-off-by: Qiaobin Fu 
Reviewed-by: Cody Doucette 
Reviewed-by: Michel Machado 
---
 lib/librte_hash/rte_cuckoo_hash.c| 60 +
 lib/librte_hash/rte_hash.h   | 66 
 lib/librte_hash/rte_hash_version.map |  4 ++
 3 files changed, 130 insertions(+)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index a07543a29..2b90f3593 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1160,3 +1160,63 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
 
return position - 1;
 }
+
+int
+rte_hash_get_num_buckets(struct rte_hash *h)
+{
+   RETURN_IF_TRUE((h == NULL), -EINVAL);
+   return h->num_buckets;
+}
+
+uint32_t
+rte_hash_get_primary_bucket(const struct rte_hash *h, hash_sig_t sig)
+{
+   return sig & h->bucket_bitmask;
+}
+
+uint32_t
+rte_hash_get_secondary_bucket(const struct rte_hash *h, hash_sig_t sig)
+{
+   return rte_hash_secondary_hash(sig) & h->bucket_bitmask;
+}
+
+int32_t
+rte_hash_bucket_iterate(const struct rte_hash *h,
+   const void **key, void **data, uint32_t *bidx, uint32_t *next)
+{
+   uint32_t position;
+   struct rte_hash_key *next_key;
+
+   RETURN_IF_TRUE(((h == NULL) || (key == NULL) || (data == NULL) ||
+   (bidx == NULL) || (next == NULL)), -EINVAL);
+
+   /* Out of bounds. */
+   if (*bidx >= h->num_buckets || *next >= RTE_HASH_BUCKET_ENTRIES)
+   goto next_bucket;
+
+   /* If current position is empty, go to the next one. */
+   while (h->buckets[*bidx].key_idx[*next] == EMPTY_SLOT) {
+   (*next)++;
+   /* End of bucket. */
+   if (*next >= RTE_HASH_BUCKET_ENTRIES)
+   goto next_bucket;
+   }
+
+   /* Get position of entry in key table. */
+   position = h->buckets[*bidx].key_idx[*next];
+   next_key = (struct rte_hash_key *) ((char *)h->key_store +
+   position * h->key_entry_size);
+   /* Return key and data. */
+   *key = next_key->key;
+   *data = next_key->pdata;
+
+   /* Increment iterator. */
+   (*next)++;
+
+   return position - 1;
+
+next_bucket:
+   *bidx = (*bidx + 1) >= h->num_buckets ? 0 : *bidx + 1;
+   *next = 0;
+   return -ENOENT;
+}
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index f71ca9fbf..329bf42d6 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -94,6 +94,17 @@ rte_hash_create(const struct rte_hash_parameters *params);
  */
 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func);
 
+/**
+ * Get the number of buckets in the hash table.
+ * @param h
+ *   Hash table for which the function queries.
+ * @return
+ *   The number of buckets in the hash table h.
+ *- EINVAL - invalid parameter passed to function
+ */
+int
+rte_hash_get_num_buckets(struct rte_hash *h);
+
 /**
  * Find an existing hash table object and return a pointer to it.
  *
@@ -261,6 +272,34 @@ int
 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
   void **key);
 
+/**
+ * Get the primary bucket index given the precomputed hash value.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ *   Hash table to get the key from.
+ * @param sig
+ *   Precomputed hash value.
+ * @return
+ *   The primary bucket index.
+ */
+uint32_t
+rte_hash_get_primary_bucket(const struct rte_hash *h, hash_sig_t sig);
+
+/**
+ * Get the secondary bucket index given the precomputed hash value.
+ * This operation is multi-thread safe.
+ *
+ * @param h
+ *   Hash table to get the key from.
+ * @param sig
+ *   Precomputed hash value.
+ * @return
+ *   The secondary bucket index.
+ */
+uint32_t
+rte_hash_get_secondary_bucket(const struct rte_hash *h, hash_sig_t sig);
+
 /**
  * Find a key-value pair in the hash table.
  * This operation is multi-thread safe.
@@ -419,6 +458,33 @@ rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
  */
 int32_t
 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next);
+
+/**
+ * Iterate through the buckets in hash table, returning key-value pairs.
+ *
+ * @param h
+ *   Hash table to iterate
+ * @param key
+ *   Output containing the key where current iterator
+ *   was pointing at
+ * @param data
+ *   Output containing the data associated with key.
+ *   Returns NULL if data was not stored.
+ * @param bidx
+ * 

[dpdk-dev] [PATCH v3] hash table: add an iterator over conflicting entries

2018-08-30 Thread Qiaobin Fu
Function rte_hash_iterate_conflict_entries() iterates over
the entries that conflict with an incoming entry.

Iterating over conflicting entries enables one to decide
if the incoming entry is more valuable than the entries already
in the hash table. This is particularly useful after
an insertion failure.
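
With the state-based API, a full-table walk now looks like the sketch
below (h and the visit() callback are assumed; this code is not part
of the patch):

#include <rte_hash.h>

static void
walk_table(const struct rte_hash *h,
                void (*visit)(const void *key, void *data))
{
        struct rte_hash_iterator_state state;
        const void *key;
        void *data;

        if (rte_hash_iterator_init(h, &state) != 0)
                return;

        /* Returns the entry position (>= 0) until -ENOENT at the end
         * of the table. */
        while (rte_hash_iterate(&state, &key, &data) >= 0)
                visit(key, data);
}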

v3:
* Make the rte_hash_iterate() API similar to
  rte_hash_iterate_conflict_entries()

v2:
* Fix the style issue

* Make the API more universal

Signed-off-by: Qiaobin Fu 
Reviewed-by: Cody Doucette 
Reviewed-by: Michel Machado 
Reviewed-by: Keith Wiles 
Reviewed-by: Yipeng Wang 
Reviewed-by: Honnappa Nagarahalli 
---
 lib/librte_hash/rte_cuckoo_hash.c| 127 +++
 lib/librte_hash/rte_hash.h   |  69 +--
 lib/librte_hash/rte_hash_version.map |   8 ++
 test/test/test_hash.c|   7 +-
 test/test/test_hash_multiwriter.c|   8 +-
 5 files changed, 194 insertions(+), 25 deletions(-)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index a07543a29..8a2e76ff1 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1120,43 +1120,140 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
 }
 
+/* istate stands for internal state. */
+struct rte_hash_iterator_istate {
+   const struct rte_hash *h;
+   uint32_t  next;
+   uint32_t  total_entries;
+};
+
+int32_t
+rte_hash_iterator_init(const struct rte_hash *h,
+   struct rte_hash_iterator_state *state)
+{
+   struct rte_hash_iterator_istate *__state;
+
+   RETURN_IF_TRUE(((h == NULL) || (state == NULL)), -EINVAL);
+
+   __state = (struct rte_hash_iterator_istate *)state;
+   __state->h = h;
+   __state->next = 0;
+   __state->total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+
+   return 0;
+}
+
 int32_t
-rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
+rte_hash_iterate(
+   struct rte_hash_iterator_state *state, const void **key, void **data)
 {
+   struct rte_hash_iterator_istate *__state;
uint32_t bucket_idx, idx, position;
struct rte_hash_key *next_key;
 
-   RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
+   RETURN_IF_TRUE(((state == NULL) || (key == NULL) ||
+   (data == NULL)), -EINVAL);
+
+   __state = (struct rte_hash_iterator_istate *)state;
 
-   const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
/* Out of bounds */
-   if (*next >= total_entries)
+   if (__state->next >= __state->total_entries)
return -ENOENT;
 
/* Calculate bucket and index of current iterator */
-   bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
-   idx = *next % RTE_HASH_BUCKET_ENTRIES;
+   bucket_idx = __state->next / RTE_HASH_BUCKET_ENTRIES;
+   idx = __state->next % RTE_HASH_BUCKET_ENTRIES;
 
/* If current position is empty, go to the next one */
-   while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
-   (*next)++;
+   while (__state->h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
+   __state->next++;
/* End of table */
-   if (*next == total_entries)
+   if (__state->next == __state->total_entries)
return -ENOENT;
-   bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
-   idx = *next % RTE_HASH_BUCKET_ENTRIES;
+   bucket_idx = __state->next / RTE_HASH_BUCKET_ENTRIES;
+   idx = __state->next % RTE_HASH_BUCKET_ENTRIES;
}
 
/* Get position of entry in key table */
-   position = h->buckets[bucket_idx].key_idx[idx];
-   next_key = (struct rte_hash_key *) ((char *)h->key_store +
-   position * h->key_entry_size);
+   position = __state->h->buckets[bucket_idx].key_idx[idx];
+   next_key = (struct rte_hash_key *) ((char *)__state->h->key_store +
+   position * __state->h->key_entry_size);
/* Return key and data */
*key = next_key->key;
*data = next_key->pdata;
 
/* Increment iterator */
-   (*next)++;
+   __state->next++;
 
return position - 1;
 }
+
+struct rte_hash_iterator_conflict_entries_istate {
+   const struct rte_hash *h;
+   uint32_t  vnext;
+   uint32_t  primary_bidx;
+   uint32_t  secondary_bidx;
+};
+
+int32_t __rte_experimental
+rte_hash_iterator_conflict_entries_init_with_hash(const struct rte_hash *h,
+   hash_sig_t sig, struct rte_hash_iterator_state *state)
+{
+   struct rte_hash_iterator_conflict_entries_istate *__state;
+
+   RETURN_IF_TRUE(((h == NULL) || (state == NU

[dpdk-dev] [PATCH v3] hash table: add an iterator over conflicting entries

2018-08-31 Thread Qiaobin Fu
Function rte_hash_iterate_conflict_entries() iterates over
the entries that conflict with an incoming entry.

Iterating over conflicting entries enables one to decide
if the incoming entry is more valuable than the entries already
in the hash table. This is particularly useful after
an insertion failure.
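
For illustration, a sketch of one possible eviction policy built on
this iterator. The exact signature of rte_hash_iterate_conflict_entries()
is assumed from the description above, and entry_value() is a
hypothetical valuation helper supplied by the caller:

#include <rte_hash.h>

static int32_t
evict_least_valuable(const struct rte_hash *h, hash_sig_t sig,
                void *new_data, int (*entry_value)(void *data))
{
        struct rte_hash_iterator_state state;
        const void *key, *victim_key = NULL;
        void *data;
        int victim_value = entry_value(new_data);

        if (rte_hash_iterator_conflict_entries_init_with_hash(h, sig,
                        &state) != 0)
                return -1;

        /* Remember the least valuable conflicting entry that is still
         * less valuable than the incoming one. */
        while (rte_hash_iterate_conflict_entries(&state, &key, &data) >= 0) {
                if (entry_value(data) < victim_value) {
                        victim_value = entry_value(data);
                        victim_key = key;
                }
        }
        if (victim_key == NULL)
                return -1;      /* nothing worth displacing */
        return rte_hash_del_key_with_hash(h, victim_key, sig);
}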

v3:
* Make the rte_hash_iterate() API similar to
  rte_hash_iterate_conflict_entries()

v2:
* Fix the style issue

* Make the API more universal

Signed-off-by: Qiaobin Fu 
Reviewed-by: Cody Doucette 
Reviewed-by: Michel Machado 
Reviewed-by: Keith Wiles 
Reviewed-by: Yipeng Wang 
Reviewed-by: Honnappa Nagarahalli 
---
 lib/librte_hash/Makefile |   1 +
 lib/librte_hash/rte_cuckoo_hash.c| 132 +++
 lib/librte_hash/rte_hash.h   |  80 ++--
 lib/librte_hash/rte_hash_version.map |   8 ++
 test/test/test_hash.c|   7 +-
 test/test/test_hash_multiwriter.c|   8 +-
 test/test/test_hash_readwrite.c  |  16 ++--
 7 files changed, 219 insertions(+), 33 deletions(-)

diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
index c8c435dfd..9be58a205 100644
--- a/lib/librte_hash/Makefile
+++ b/lib/librte_hash/Makefile
@@ -6,6 +6,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 # library name
 LIB = librte_hash.a
 
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
 LDLIBS += -lrte_eal -lrte_ring
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index f7b86c8c9..cf5b28196 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1300,45 +1300,143 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
 }
 
+/* istate stands for internal state. */
+struct rte_hash_iterator_istate {
+   const struct rte_hash *h;
+   uint32_t  next;
+   uint32_t  total_entries;
+};
+
+int32_t
+rte_hash_iterator_init(const struct rte_hash *h,
+   struct rte_hash_iterator_state *state)
+{
+   struct rte_hash_iterator_istate *__state;
+
+   RETURN_IF_TRUE(((h == NULL) || (state == NULL)), -EINVAL);
+
+   __state = (struct rte_hash_iterator_istate *)state;
+   __state->h = h;
+   __state->next = 0;
+   __state->total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+
+   return 0;
+}
+
 int32_t
-rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
+rte_hash_iterate(
+   struct rte_hash_iterator_state *state, const void **key, void **data)
 {
+   struct rte_hash_iterator_istate *__state;
uint32_t bucket_idx, idx, position;
struct rte_hash_key *next_key;
 
-   RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
+   RETURN_IF_TRUE(((state == NULL) || (key == NULL) ||
+   (data == NULL)), -EINVAL);
+
+   __state = (struct rte_hash_iterator_istate *)state;
 
-   const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
/* Out of bounds */
-   if (*next >= total_entries)
+   if (__state->next >= __state->total_entries)
return -ENOENT;
 
/* Calculate bucket and index of current iterator */
-   bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
-   idx = *next % RTE_HASH_BUCKET_ENTRIES;
+   bucket_idx = __state->next / RTE_HASH_BUCKET_ENTRIES;
+   idx = __state->next % RTE_HASH_BUCKET_ENTRIES;
 
/* If current position is empty, go to the next one */
-   while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
-   (*next)++;
+   while (__state->h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
+   __state->next++;
/* End of table */
-   if (*next == total_entries)
+   if (__state->next == __state->total_entries)
return -ENOENT;
-   bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
-   idx = *next % RTE_HASH_BUCKET_ENTRIES;
+   bucket_idx = __state->next / RTE_HASH_BUCKET_ENTRIES;
+   idx = __state->next % RTE_HASH_BUCKET_ENTRIES;
}
-   __hash_rw_reader_lock(h);
+   __hash_rw_reader_lock(__state->h);
/* Get position of entry in key table */
-   position = h->buckets[bucket_idx].key_idx[idx];
-   next_key = (struct rte_hash_key *) ((char *)h->key_store +
-   position * h->key_entry_size);
+   position = __state->h->buckets[bucket_idx].key_idx[idx];
+   next_key = (struct rte_hash_key *) ((char *)__state->h->key_store +
+   position * __state->h->key_entry_size);
/* Return key and data */
*key = next_key->key;
*data = next_key->pdata;
 
-   __hash_rw_reader_unlock(h);
+   __hash_rw_reader_unlock(__state->h);