Make hash_64() and hash_ptr() return u32.  That's all that's ever asked
for, and it makes the return type of hash_long() consistent.
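
For reference, hash_long() just dispatches to the word-sized variant
(a sketch of the existing selection logic, not changed by this patch);
with both arms returning u32, its return type no longer depends on the
word size:

    /* hash_long() picks the word-sized hash; after this patch both
     * hash_32() and hash_64() return u32, so hash_long() does too. */
    #if BITS_PER_LONG == 32
    #define hash_long(val, bits) hash_32(val, bits)
    #elif BITS_PER_LONG == 64
    #define hash_long(val, bits) hash_64(val, bits)
    #else
    #error Wordsize not 32 or 64
    #endif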

It also allows an optimized implementation of hash_64() on 32-bit
machines (in an upcoming patch).

There's a WARN_ON in there in case I missed anything.  Callers that pass
a compile-time constant bits get a BUILD_BUG_ON instead, so most callers
have no run-time overhead at all.
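
For example, a caller like this (hypothetical, not part of this patch)
has the check folded away entirely because bits is a constant:

    #include <linux/list.h>
    #include <linux/hash.h>

    #define MY_TABLE_BITS 8	/* 256-bucket table; name is illustrative */

    static struct hlist_head my_table[1 << MY_TABLE_BITS];

    static struct hlist_head *my_bucket(const void *key)
    {
    	/* bits == 8 is a compile-time constant, so the BUILD_BUG_ON()
    	 * branch is chosen at compile time and no WARN_ON() code is
    	 * emitted in the object file. */
    	return &my_table[hash_ptr(key, MY_TABLE_BITS)];
    }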

Signed-off-by: George Spelvin <li...@sciencehorizons.net>
---
 include/linux/hash.h | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/include/linux/hash.h b/include/linux/hash.h
index 79c52fa8..b9201c33 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -48,7 +48,7 @@
 #define GOLDEN_RATIO_32 0x61C88647
 #define GOLDEN_RATIO_64 0x61C8864680B583EBull
 
-static __always_inline u64 hash_64(u64 val, unsigned int bits)
+static __always_inline u32 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
@@ -71,8 +71,14 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits)
        hash += n;
 #endif
 
+       if (__builtin_constant_p(bits > 32 || bits == 0)) {
+               BUILD_BUG_ON(bits > 32 || bits == 0);
+       } else {
+               WARN_ON(bits > 32 || bits == 0);
+       }
+
        /* High bits are more random, so use them. */
-       return hash >> (64 - bits);
+       return (u32)(hash >> (64 - bits));
 }
 
 static inline u32 hash_32(u32 val, unsigned int bits)
@@ -84,7 +90,7 @@ static inline u32 hash_32(u32 val, unsigned int bits)
        return hash >> (32 - bits);
 }
 
-static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+static inline u32 hash_ptr(const void *ptr, unsigned int bits)
 {
        return hash_long((unsigned long)ptr, bits);
 }
-- 
2.8.1
