The vmstat threshold is 32 (MEMCG_CHARGE_BATCH), so the per-CPU deltas
accumulated in lruvec_stat_cpu stay small and an s32 counter is wide
enough to hold them. Introduce struct per_cpu_lruvec_stat, which uses
s32 instead of long for its count[] array, to reduce the per-CPU memory
usage (see the sketch of the batching pattern below).
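
For context, here is a minimal sketch of the batching pattern, paraphrased
from __mod_memcg_lruvec_state() in mm/memcontrol.c (simplified: the real
code also flushes the delta up the memcg hierarchy; this snippet is not
part of the diff below):

	/*
	 * Each CPU accumulates a small signed delta and flushes it to
	 * the shared atomic counter once it exceeds the batch size, so
	 * the value left behind in the per-CPU slot is always bounded
	 * by MEMCG_CHARGE_BATCH (32).
	 */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);

Because the residual never exceeds the batch size, an s32 is ample, and
on 64-bit kernels it halves the per-CPU counter array from 8 to 4 bytes
per stat item, per CPU, per node, per memcg.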

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 include/linux/memcontrol.h | 6 +++++-
 mm/memcontrol.c            | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f9a496c4eac7..34cf119976b1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -92,6 +92,10 @@ struct lruvec_stat {
        long count[NR_VM_NODE_STAT_ITEMS];
 };
 
+struct per_cpu_lruvec_stat {
+       s32 count[NR_VM_NODE_STAT_ITEMS];
+};
+
 /*
  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
  * which have elements charged to this memcg.
@@ -111,7 +115,7 @@ struct mem_cgroup_per_node {
        struct lruvec_stat __percpu *lruvec_stat_local;
 
        /* Subtree VM stats (batched updates) */
-       struct lruvec_stat __percpu *lruvec_stat_cpu;
+       struct per_cpu_lruvec_stat __percpu *lruvec_stat_cpu;
        atomic_long_t           lruvec_stat[NR_VM_NODE_STAT_ITEMS];
 
        unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 49fbcf003bf5..c874ea37b05d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5184,7 +5184,7 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
                return 1;
        }
 
-       pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
+       pn->lruvec_stat_cpu = alloc_percpu_gfp(struct per_cpu_lruvec_stat,
                                               GFP_KERNEL_ACCOUNT);
        if (!pn->lruvec_stat_cpu) {
                free_percpu(pn->lruvec_stat_local);
-- 
2.11.0
