From: Muchun Song <[email protected]>

[ Upstream commit 96403bfe50c344b587ea53894954a9d152af1c9d ]

SLUB currently accounts kmalloc() and kmalloc_node() allocations larger
than an order-1 page on a per-node basis, but it forgets to update the
per-memcg vmstats.  This can lead to inaccurate "slab_unreclaimable"
statistics in memory.stat.  Fix it by using mod_lruvec_page_state()
instead of mod_node_page_state().
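
The difference matters because mod_lruvec_page_state() updates both the
per-node and the per-memcg counters, while mod_node_page_state() only
touches the per-node one.  A minimal userspace sketch of the idea (an
illustration only, not kernel code; "node_stat" and "memcg_stat" are
hypothetical stand-ins for the real vmstat machinery):

#include <stdio.h>

static long node_stat;   /* models the per-node NR_SLAB_UNRECLAIMABLE_B */
static long memcg_stat;  /* models the per-memcg counter in memory.stat */

/* models mod_node_page_state(): updates only the node counter */
static void mod_node(long delta)
{
	node_stat += delta;
}

/* models mod_lruvec_page_state(): updates node and memcg counters */
static void mod_lruvec(long delta)
{
	node_stat += delta;
	memcg_stat += delta;
}

int main(void)
{
	long bytes = 4096 << 2;		/* PAGE_SIZE << order */

	mod_node(bytes);		/* old behaviour: memcg_stat stays 0 */
	printf("node=%ld memcg=%ld (memcg undercounts)\n",
	       node_stat, memcg_stat);

	mod_node(-bytes);
	mod_lruvec(bytes);		/* fixed behaviour: both move together */
	printf("node=%ld memcg=%ld (consistent)\n",
	       node_stat, memcg_stat);
	return 0;
}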

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 6a486c0ad4dc ("mm, sl[ou]b: improve memory accounting")
Signed-off-by: Muchun Song <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Reviewed-by: Michal Koutný <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
---
 mm/slab_common.c | 4 ++--
 mm/slub.c        | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index e981c80d216c2..0b775cb5c1089 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -837,8 +837,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        page = alloc_pages(flags, order);
        if (likely(page)) {
                ret = page_address(page);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   PAGE_SIZE << order);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     PAGE_SIZE << order);
        }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
diff --git a/mm/slub.c b/mm/slub.c
index b22a4b101c846..69dacc61b8435 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3999,8 +3999,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        page = alloc_pages_node(node, flags, order);
        if (page) {
                ptr = page_address(page);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   PAGE_SIZE << order);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     PAGE_SIZE << order);
        }
 
        return kmalloc_large_node_hook(ptr, size, flags);
@@ -4131,8 +4131,8 @@ void kfree(const void *x)
 
                BUG_ON(!PageCompound(page));
                kfree_hook(object);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   -(PAGE_SIZE << order));
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     -(PAGE_SIZE << order));
                __free_pages(page, order);
                return;
        }
-- 
2.27.0