Like the global and memcg slab shrink paths, make the count and scan
operations in the shrinker debugfs interface lockless as well.

debugfs_remove_recursive() waits for debugfs_file_put() to return, so
there is no need to call rcu_read_lock() before calling
shrinker_try_get().

Signed-off-by: Qi Zheng <zhengqi.a...@bytedance.com>
---
 mm/shrinker_debug.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 3ab53fad8876..c18fa9b6b7f0 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -55,8 +55,8 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
        if (!count_per_node)
                return -ENOMEM;
 
-       ret = down_read_killable(&shrinker_rwsem);
-       if (ret) {
+       ret = shrinker_try_get(shrinker) ? 0 : -ENOENT;
+       if (ret) {
                kfree(count_per_node);
                return ret;
        }
@@ -92,7 +92,7 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
        } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 
        rcu_read_unlock();
-       up_read(&shrinker_rwsem);
+       shrinker_put(shrinker);
 
        kfree(count_per_node);
        return ret;
@@ -146,8 +146,8 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
                return -EINVAL;
        }
 
-       ret = down_read_killable(&shrinker_rwsem);
-       if (ret) {
+       ret = shrinker_try_get(shrinker) ? 0 : -ENOENT;
+       if (ret) {
                mem_cgroup_put(memcg);
                return ret;
        }
@@ -159,7 +159,7 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
 
        shrinker->scan_objects(shrinker, &sc);
 
-       up_read(&shrinker_rwsem);
+       shrinker_put(shrinker);
        mem_cgroup_put(memcg);
 
        return size;
-- 
2.30.2

Reply via email to