Since sig_tbl_bucket_size and key_tbl_key_size are explicitly aligned at initialization, offset dereferences in the hash table code cannot possibly be unaligned. However, the compiler is unaware of this fact and complains on -Wcast-align. This patch modifies the code to use RTE_PTR_ADD(), thereby silencing the compiler by casting through (void *).
Change-Id: Ia7102cf3f870752743cfe9f4443a3e53cd99bac1 Signed-off-by: Cyril Chemparathy <cchemparathy@ezchip.com> --- lib/librte_hash/rte_hash.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/librte_hash/rte_hash.c b/lib/librte_hash/rte_hash.c index 9245716..67dff5b 100644 --- a/lib/librte_hash/rte_hash.c +++ b/lib/librte_hash/rte_hash.c @@ -96,23 +96,23 @@ EAL_REGISTER_TAILQ(rte_hash_tailq) static inline hash_sig_t * get_sig_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index) { - return (hash_sig_t *) - &(h->sig_tbl[bucket_index * h->sig_tbl_bucket_size]); + return RTE_PTR_ADD(h->sig_tbl, (bucket_index * + h->sig_tbl_bucket_size)); } /* Returns a pointer to the first key in specified bucket. */ static inline uint8_t * get_key_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index) { - return (uint8_t *) &(h->key_tbl[bucket_index * h->bucket_entries * - h->key_tbl_key_size]); + return RTE_PTR_ADD(h->key_tbl, (bucket_index * h->bucket_entries * + h->key_tbl_key_size)); } /* Returns a pointer to a key at a specific position in a specified bucket. */ static inline void * get_key_from_bucket(const struct rte_hash *h, uint8_t *bkt, uint32_t pos) { - return (void *) &bkt[pos * h->key_tbl_key_size]; + return RTE_PTR_ADD(bkt, pos * h->key_tbl_key_size); } /* Does integer division with rounding-up of result. */ -- 2.1.2