The patch introduces a special value SHRINKER_REGISTERING to use instead
of list_empty() to detect a semi-registered shrinker.

This should be clearer for a reader, since "list is empty" is not
an intuitive state of a shrinker, and this gives better assembler
code:

Before:
callq  <idr_find>
mov    %rax,%r15
test   %rax,%rax
je     <shrink_slab_memcg+0x1d5>
mov    0x20(%rax),%rax
lea    0x20(%r15),%rdx
cmp    %rax,%rdx
je     <shrink_slab_memcg+0xbd>
mov    0x8(%rsp),%edx
mov    %r15,%rsi
lea    0x10(%rsp),%rdi
callq  <do_shrink_slab>

After:
callq  <idr_find>
mov    %rax,%r15
lea    -0x1(%rax),%rax
cmp    $0xfffffffffffffffd,%rax
ja     <shrink_slab_memcg+0x1cd>
mov    0x8(%rsp),%edx
mov    %r15,%rsi
lea    0x10(%rsp),%rdi
callq  ffffffff810cefd0 <do_shrink_slab>

Also, improve the comment.

Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
---
 mm/vmscan.c |   42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0d980e801b8a..c18c4acf9599 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -170,6 +170,21 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_MEMCG_KMEM
+
+/*
+ * There is a window between prealloc_shrinker()
+ * and register_shrinker_prepared(). We don't want
+ * to clear bit of a shrinker in such the state
+ * in shrink_slab_memcg(), since this will impose
+ * restrictions on a code registering a shrinker
+ * (they would have to guarantee, their LRU lists
+ * are empty till shrinker is completely registered).
+ * So, we use this value to detect the situation,
+ * when id is assigned, but shrinker is not completely
+ * registered yet.
+ */
+#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
+
 static DEFINE_IDR(shrinker_idr);
 static int shrinker_nr_max;
 
@@ -179,7 +194,7 @@ static int prealloc_memcg_shrinker(struct shrinker 
*shrinker)
 
        down_write(&shrinker_rwsem);
        /* This may call shrinker, so it must use down_read_trylock() */
-       id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
+       id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
        if (id < 0)
                goto unlock;
 
@@ -364,21 +379,6 @@ int prealloc_shrinker(struct shrinker *shrinker)
        if (!shrinker->nr_deferred)
                return -ENOMEM;
 
-       /*
-        * There is a window between prealloc_shrinker()
-        * and register_shrinker_prepared(). We don't want
-        * to clear bit of a shrinker in such the state
-        * in shrink_slab_memcg(), since this will impose
-        * restrictions on a code registering a shrinker
-        * (they would have to guarantee, their LRU lists
-        * are empty till shrinker is completely registered).
-        * So, we differ the situation, when 1)a shrinker
-        * is semi-registered (id is assigned, but it has
-        * not yet linked to shrinker_list) and 2)shrinker
-        * is not registered (id is not assigned).
-        */
-       INIT_LIST_HEAD(&shrinker->list);
-
        if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
                if (prealloc_memcg_shrinker(shrinker))
                        goto free_deferred;
@@ -408,6 +408,7 @@ void register_shrinker_prepared(struct shrinker *shrinker)
 {
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
+       idr_replace(&shrinker_idr, shrinker, shrinker->id);
        up_write(&shrinker_rwsem);
 }
 
@@ -589,15 +590,12 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, 
int nid,
                struct shrinker *shrinker;
 
                shrinker = idr_find(&shrinker_idr, i);
-               if (unlikely(!shrinker)) {
-                       clear_bit(i, map->map);
+               if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
+                       if (!shrinker)
+                               clear_bit(i, map->map);
                        continue;
                }
 
-               /* See comment in prealloc_shrinker() */
-               if (unlikely(list_empty(&shrinker->list)))
-                       continue;
-
                ret = do_shrink_slab(&sc, shrinker, priority);
                if (ret == SHRINK_EMPTY) {
                        clear_bit(i, map->map);

Reply via email to