The memcg hotunplug callback erroneously flushes counts on the local
CPU, not the counts of the CPU going away; those counts will be lost.

Flush the CPU that is actually going away.
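
To make the distinction concrete, here is a minimal userspace sketch
(toy arrays and helpers only, not the actual kernel code or APIs): the
per-CPU counts live in one slot per CPU, and flushing the local slot
instead of the dead CPU's slot strands whatever that CPU had pending.

#include <stdio.h>

#define NR_CPUS 4

/* stand-in for the per-CPU counters (memcg->vmstats_percpu etc.) */
static long percpu_stat[NR_CPUS];
/* stand-in for the shared counter the per-CPU slots are flushed into */
static long global_stat;

/* buggy: flushes whichever CPU the callback happens to run on */
static void flush_local(int local_cpu)
{
        global_stat += percpu_stat[local_cpu];
        percpu_stat[local_cpu] = 0;
}

/* fixed: flushes the CPU that is actually going away */
static void flush_dead_cpu(int cpu)
{
        global_stat += percpu_stat[cpu];
        percpu_stat[cpu] = 0;
}

int main(void)
{
        percpu_stat[2] = 100;   /* pending counts on the CPU being unplugged */

        flush_local(0);         /* callback runs on CPU 0: the 100 is stranded */
        printf("after local flush: %ld\n", global_stat);       /* 0 */

        flush_dead_cpu(2);      /* flush the dying CPU's slot instead */
        printf("after fixed flush: %ld\n", global_stat);       /* 100 */
        return 0;
}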

Also simplify the code a bit by using mod_memcg_state() and
count_memcg_events() instead of open-coding the upward flush - this is
comparable to how vmstat.c handles hotunplug flushing.
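
As a rough userspace model of that upward flush (toy types only; the
real mod_memcg_state() also does per-CPU batching, which is omitted
here), the helper propagates the delta to the memcg and all of its
ancestors, which is what the removed open-coded loop did by hand:

#include <stdio.h>

/* toy cgroup: a parent pointer and a single counter */
struct memcg {
        struct memcg *parent;
        long stat;
};

/* what the hierarchical update amounts to on this path */
static void mod_stat(struct memcg *memcg, long delta)
{
        struct memcg *mi;

        for (mi = memcg; mi; mi = mi->parent)
                mi->stat += delta;
}

int main(void)
{
        struct memcg root = { .parent = NULL };
        struct memcg child = { .parent = &root };

        mod_stat(&child, 42);   /* the delta shows up in child and root */
        printf("child=%ld root=%ld\n", child.stat, root.stat);
        return 0;
}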

Fixes: a983b5ebee572 ("mm: memcontrol: fix excessive complexity in memory.stat reporting")
Signed-off-by: Johannes Weiner <han...@cmpxchg.org>
Reviewed-by: Shakeel Butt <shake...@google.com>
Reviewed-by: Roman Gushchin <g...@fb.com>
Acked-by: Michal Hocko <mho...@suse.com>
---
 mm/memcontrol.c | 35 +++++++++++++++++++++--------------
 1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ed5cc78a8dbf..8120d565dd79 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2411,45 +2411,52 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
 static int memcg_hotplug_cpu_dead(unsigned int cpu)
 {
        struct memcg_stock_pcp *stock;
-       struct mem_cgroup *memcg, *mi;
+       struct mem_cgroup *memcg;
 
        stock = &per_cpu(memcg_stock, cpu);
        drain_stock(stock);
 
        for_each_mem_cgroup(memcg) {
+               struct memcg_vmstats_percpu *statc;
                int i;
 
+               statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
+
                for (i = 0; i < MEMCG_NR_STAT; i++) {
                        int nid;
-                       long x;
 
-                       x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
-                       if (x)
-                               for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-                                       atomic_long_add(x, &memcg->vmstats[i]);
+                       if (statc->stat[i]) {
+                               mod_memcg_state(memcg, i, statc->stat[i]);
+                               statc->stat[i] = 0;
+                       }
 
                        if (i >= NR_VM_NODE_STAT_ITEMS)
                                continue;
 
                        for_each_node(nid) {
+                               struct batched_lruvec_stat *lstatc;
                                struct mem_cgroup_per_node *pn;
+                               long x;
 
                                pn = mem_cgroup_nodeinfo(memcg, nid);
-                       x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
-                               if (x)
+                               lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
+
+                               x = lstatc->count[i];
+                               lstatc->count[i] = 0;
+
+                               if (x) {
                                        do {
                                                atomic_long_add(x, &pn->lruvec_stat[i]);
                                        } while ((pn = parent_nodeinfo(pn, nid)));
+                               }
                        }
                }
 
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
-                       long x;
-
-                       x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
-                       if (x)
-                               for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-                                       atomic_long_add(x, &memcg->vmevents[i]);
+                       if (statc->events[i]) {
+                               count_memcg_events(memcg, i, statc->events[i]);
+                               statc->events[i] = 0;
+                       }
                }
        }
 
-- 
2.30.0
