We can use ilog2() in fs/namespace.c to compute hash_bits and hash_mask at
compile time rather than at runtime.
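
ilog2() is written so that a constant argument yields a constant result, so
the two macros collapse to plain constants the compiler can fold into hash().
As a rough illustration only (assuming a 4096-byte PAGE_SIZE and a 16-byte
struct list_head, i.e. two pointers on a 64-bit box; the real values depend
on the architecture):

/* illustrative expansion, not the literal patch text */
#define hash_bits ilog2(4096 / 16)		/* ilog2(256) == 8 */
#define hash_mask ((1UL << hash_bits) - 1)	/* == 0xff */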

Signed-off-by: Eric Dumazet <[EMAIL PROTECTED]>

diff --git a/fs/namespace.c b/fs/namespace.c
index 0608388..835f14a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -25,6 +25,7 @@
 #include <linux/security.h>
 #include <linux/mount.h>
 #include <linux/ramfs.h>
+#include <linux/log2.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include "pnode.h"
@@ -36,7 +37,8 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 static int event;
 
 static struct list_head *mount_hashtable __read_mostly;
-static int hash_mask __read_mostly, hash_bits __read_mostly;
+#define hash_bits ilog2(PAGE_SIZE / sizeof(struct list_head))
+#define hash_mask ((1UL << hash_bits) - 1)
 static struct kmem_cache *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
@@ -1828,24 +1830,7 @@ void __init mnt_init(void)
        if (!mount_hashtable)
                panic("Failed to allocate mount hash table\n");
 
-       /*
-        * Find the power-of-two list-heads that can fit into the allocation..
-        * We don't guarantee that "sizeof(struct list_head)" is necessarily
-        * a power-of-two.
-        */
-       nr_hash = PAGE_SIZE / sizeof(struct list_head);
-       hash_bits = 0;
-       do {
-               hash_bits++;
-       } while ((nr_hash >> hash_bits) != 0);
-       hash_bits--;
-
-       /*
-        * Re-calculate the actual number of entries and the mask
-        * from the number of bits we can fit.
-        */
        nr_hash = 1UL << hash_bits;
-       hash_mask = nr_hash - 1;
 
        printk("Mount-cache hash table entries: %d\n", nr_hash);
 

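For completeness, a minimal userspace sketch (not kernel code; PAGE_SIZE and
struct list_head below are stand-in definitions assuming a 4K page and 64-bit
pointers) checking that the loop removed from mnt_init() and a floor(log2(n))
helper mimicking ilog2() agree on hash_bits:

#include <assert.h>
#include <stdio.h>

/* stand-ins for the kernel definitions; values are assumptions */
#define PAGE_SIZE 4096UL
struct list_head { struct list_head *next, *prev; };

/* the loop this patch removes from mnt_init() */
static unsigned int loop_log2(unsigned long nr_hash)
{
	unsigned int bits = 0;

	do {
		bits++;
	} while ((nr_hash >> bits) != 0);
	return bits - 1;
}

/* mimic of ilog2(): floor(log2(n)) for n > 0 */
static unsigned int ilog2_sketch(unsigned long n)
{
	unsigned int bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}

int main(void)
{
	unsigned long nr_hash = PAGE_SIZE / sizeof(struct list_head);
	unsigned int hash_bits = ilog2_sketch(nr_hash);

	assert(loop_log2(nr_hash) == hash_bits);
	/* with the assumptions above: nr_hash = 256, hash_bits = 8, mask = 0xff */
	printf("nr_hash=%lu hash_bits=%u hash_mask=%#lx\n",
	       1UL << hash_bits, hash_bits, (1UL << hash_bits) - 1);
	return 0;
}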