In order to optimize lookup performance, the hash structure is reordered so that all fields used during lookup reside in the first cache line.
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch at intel.com> --- lib/librte_hash/rte_cuckoo_hash.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h index e290dab..701531a 100644 --- a/lib/librte_hash/rte_cuckoo_hash.h +++ b/lib/librte_hash/rte_cuckoo_hash.h @@ -182,9 +182,7 @@ struct rte_hash_bucket { /** A hash table structure. */ struct rte_hash { - char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */ - uint32_t entries; /**< Total table entries. */ - uint32_t num_buckets; /**< Number of buckets in table. */ + /* first cache line - fields used in lookup */ uint32_t key_len; /**< Length of hash key. */ rte_hash_function hash_func; /**< Function used to calculate hash. */ uint32_t hash_func_init_val; /**< Init value used by hash_func. */ @@ -196,12 +194,13 @@ struct rte_hash { from hash signature. */ uint32_t key_entry_size; /**< Size of each key entry. */ - struct rte_ring *free_slots; /**< Ring that stores all indexes - of the free slots in the key table */ void *key_store; /**< Table storing all keys and data */ struct rte_hash_bucket *buckets; /**< Table with buckets storing all the hash values and key indexes to the key table*/ + + struct rte_ring *free_slots; /**< Ring that stores all indexes + of the free slots in the key table */ uint8_t hw_trans_mem_support; /**< Hardware transactional memory support */ struct lcore_cache *local_free_slots; @@ -209,6 +208,9 @@ struct rte_hash { enum add_key_case add_key; /**< Multi-writer hash add behavior */ rte_spinlock_t *multiwriter_lock; /**< Multi-writer spinlock for w/o TM */ + char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */ + uint32_t entries; /**< Total table entries. */ + uint32_t num_buckets; /**< Number of buckets in table. */ } __rte_cache_aligned; struct queue_node { -- 2.7.4