Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
nr_deferred will still be used in the following cases:
    1. Non-memcg-aware shrinkers
    2. !CONFIG_MEMCG
    3. memcg is disabled by boot parameter
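
In short, the deferred count is picked roughly like this (a condensed,
simplified restatement of the count_nr_deferred()/set_nr_deferred() helpers
added below, not the exact code):

        if (is_deferred_memcg_aware(shrinker) && sc->memcg) {
                /* per-memcg, per-node counter, indexed by shrinker id */
                deferred = rcu_dereference_protected(
                                memcg->nodeinfo[nid]->shrinker_deferred, true);
                nr = atomic_long_xchg(&deferred->nr_deferred[shrinker->id], 0);
        } else {
                /* the shrinker's own per-node counter */
                nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
        }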

Signed-off-by: Yang Shi <shy828...@gmail.com>
---
 mm/vmscan.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 83 insertions(+), 11 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bf34167dd67e..bce8cf44eca2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -203,6 +203,12 @@ DECLARE_RWSEM(shrinker_rwsem);
 static DEFINE_IDR(shrinker_idr);
 static int shrinker_nr_max;
 
+static inline bool is_deferred_memcg_aware(struct shrinker *shrinker)
+{
+       return (shrinker->flags & SHRINKER_MEMCG_AWARE) &&
+               !mem_cgroup_disabled();
+}
+
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
        int id, ret = -ENOMEM;
@@ -271,7 +277,58 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 #endif
        return false;
 }
+
+static inline long count_nr_deferred(struct shrinker *shrinker,
+                                    struct shrink_control *sc)
+{
+       bool per_memcg_deferred = is_deferred_memcg_aware(shrinker) && sc->memcg;
+       struct memcg_shrinker_deferred *deferred;
+       struct mem_cgroup *memcg = sc->memcg;
+       int nid = sc->nid;
+       int id = shrinker->id;
+       long nr;
+
+       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+               nid = 0;
+
+       if (per_memcg_deferred) {
+               deferred = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_deferred,
+                                                    true);
+               nr = atomic_long_xchg(&deferred->nr_deferred[id], 0);
+       } else
+               nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+
+       return nr;
+}
+
+static inline long set_nr_deferred(long nr, struct shrinker *shrinker,
+                                  struct shrink_control *sc)
+{
+       bool per_memcg_deferred = is_deferred_memcg_aware(shrinker) && sc->memcg;
+       struct memcg_shrinker_deferred *deferred;
+       struct mem_cgroup *memcg = sc->memcg;
+       int nid = sc->nid;
+       int id = shrinker->id;
+       long new_nr;
+
+       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+               nid = 0;
+
+       if (per_memcg_deferred) {
+               deferred = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_deferred,
+                                                    true);
+               new_nr = atomic_long_add_return(nr, &deferred->nr_deferred[id]);
+       } else
+               new_nr = atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+
+       return new_nr;
+}
 #else
+static inline bool is_deferred_memcg_aware(struct shrinker *shrinker)
+{
+       return false;
+}
+
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
        return 0;
@@ -290,6 +347,29 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 {
        return true;
 }
+
+static inline long count_nr_deferred(struct shrinker *shrinker,
+                                    struct shrink_control *sc)
+{
+       int nid = sc->nid;
+
+       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+               nid = 0;
+
+       return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+static inline long set_nr_deferred(long nr, struct shrinker *shrinker,
+                                  struct shrink_control *sc)
+{
+       int nid = sc->nid;
+
+       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+               nid = 0;
+
+       return atomic_long_add_return(nr,
+                                     &shrinker->nr_deferred[nid]);
+}
 #endif
 
 /*
@@ -429,13 +509,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        long freeable;
        long nr;
        long new_nr;
-       int nid = shrinkctl->nid;
        long batch_size = shrinker->batch ? shrinker->batch
                                          : SHRINK_BATCH;
        long scanned = 0, next_deferred;
 
-       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-               nid = 0;
 
        freeable = shrinker->count_objects(shrinker, shrinkctl);
        if (freeable == 0 || freeable == SHRINK_EMPTY)
@@ -446,7 +523,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
         * and zero it so that other concurrent shrinker invocations
         * don't also do this scanning work.
         */
-       nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+       nr = count_nr_deferred(shrinker, shrinkctl);
 
        total_scan = nr;
        if (shrinker->seeks) {
@@ -537,14 +614,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
                next_deferred = 0;
        /*
         * move the unused scan count back into the shrinker in a
-        * manner that handles concurrent updates. If we exhausted the
-        * scan, there is no need to do an update.
+        * manner that handles concurrent updates.
         */
-       if (next_deferred > 0)
-               new_nr = atomic_long_add_return(next_deferred,
-                                               &shrinker->nr_deferred[nid]);
-       else
-               new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+       new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
 
        trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
        return freed;
-- 
2.26.2
