In general it's not known in advance whether a slab page will contain
accounted objects or not. To avoid wasting memory, the obj_cgroup
vector is allocated dynamically when the need to account a new object
arises. This approach is memory-efficient, but it requires an expensive
cmpxchg() to set the memcg/objcgs pointer, because the allocation can
race with a concurrent allocation on another CPU.

But in some common cases it is known for sure that a slab page will
contain accounted objects: when the page belongs to a slab cache created
with the SLAB_ACCOUNT flag. This covers such popular objects as
vm_area_struct, anon_vma, task_struct, etc.
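
For example, such a cache is created with the SLAB_ACCOUNT flag at
kmem_cache_create() time (a minimal illustration; "foo" and struct foo
are made-up placeholders):

	struct kmem_cache *foo_cachep;

	/* every object from this cache is charged to the allocating memcg */
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_ACCOUNT | SLAB_HWCACHE_ALIGN,
				       NULL);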

In such cases we can pre-allocate the objcgs vector and simply assign
it to the page without any atomic operations, because at this early
stage the page is not yet visible to anyone else.
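
To illustrate the idea (a simplified sketch of what the patch below
does, not a new API): the atomic path keeps the cmpxchg(), while the
pre-allocation path can use a plain store, because a freshly allocated
slab page hasn't been published yet:

	unsigned long memcg_data = (unsigned long)objcgs | MEMCG_DATA_OBJCGS;

	if (atomic)
		/* the page may already be visible to other CPUs */
		return !cmpxchg(&page->memcg_data, 0, memcg_data);

	/* the page is still private to the allocating CPU */
	page->memcg_data = memcg_data;
	return true;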

Signed-off-by: Roman Gushchin <g...@fb.com>
---
 include/linux/memcontrol.h | 14 ++++++++++----
 mm/memcontrol.c            |  4 ++--
 mm/slab.c                  |  2 +-
 mm/slab.h                  | 14 ++++++++++----
 4 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 20108e426f84..8271f11152e6 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -485,14 +485,20 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
  * set_page_objcgs - associate a page with a object cgroups vector
  * @page: a pointer to the page struct
  * @objcgs: a pointer to the object cgroups vector
+ * @atomic: save the value atomically
  *
  * Atomically associates a page with a vector of object cgroups.
  */
 static inline bool set_page_objcgs(struct page *page,
-                                       struct obj_cgroup **objcgs)
+                                  struct obj_cgroup **objcgs, bool atomic)
 {
-       return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs |
-                       MEMCG_DATA_OBJCGS);
+       unsigned long memcg_data = (unsigned long) objcgs | MEMCG_DATA_OBJCGS;
+
+       if (atomic)
+               return !cmpxchg(&page->memcg_data, 0, memcg_data);
+
+       page->memcg_data = memcg_data;
+       return true;
 }
 #else
 static inline struct obj_cgroup **page_objcgs(struct page *page)
@@ -506,7 +512,7 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
 }
 
 static inline bool set_page_objcgs(struct page *page,
-                                       struct obj_cgroup **objcgs)
+                                  struct obj_cgroup **objcgs, bool atomic)
 {
        return true;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 69a2893a6455..37bffd336235 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2874,7 +2874,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
 
 #ifdef CONFIG_MEMCG_KMEM
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
-                                gfp_t gfp)
+                                gfp_t gfp, bool atomic)
 {
        unsigned int objects = objs_per_slab_page(s, page);
        void *vec;
@@ -2884,7 +2884,7 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
        if (!vec)
                return -ENOMEM;
 
-       if (!set_page_objcgs(page, vec))
+       if (!set_page_objcgs(page, vec, atomic))
                kfree(vec);
        else
                kmemleak_not_leak(vec);
diff --git a/mm/slab.c b/mm/slab.c
index c0ea4b1c7088..df0299e1d0b9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1380,7 +1380,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                return NULL;
        }
 
-       account_slab_page(page, cachep->gfporder, cachep);
+       account_slab_page(page, cachep->gfporder, cachep, flags);
        __SetPageSlab(page);
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
        if (sk_memalloc_socks() && page_is_pfmemalloc(page))
diff --git a/mm/slab.h b/mm/slab.h
index c73050654b8a..f1d6ba09b630 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -240,7 +240,7 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 
 #ifdef CONFIG_MEMCG_KMEM
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
-                                gfp_t gfp);
+                                gfp_t gfp, bool atomic);
 
 static inline void memcg_free_page_obj_cgroups(struct page *page)
 {
@@ -307,7 +307,8 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                        page = virt_to_head_page(p[i]);
 
                        if (!page_objcgs(page) &&
-                           memcg_alloc_page_obj_cgroups(page, s, flags)) {
+                           memcg_alloc_page_obj_cgroups(page, s, flags,
+                                                        true)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }
@@ -371,7 +372,8 @@ static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
 }
 
 static inline int memcg_alloc_page_obj_cgroups(struct page *page,
-                                              struct kmem_cache *s, gfp_t gfp)
+                                              struct kmem_cache *s, gfp_t gfp,
+                                              bool atomic)
 {
        return 0;
 }
@@ -412,8 +414,12 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 }
 
 static __always_inline void account_slab_page(struct page *page, int order,
-                                             struct kmem_cache *s)
+                                             struct kmem_cache *s,
+                                             gfp_t gfp)
 {
+       if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
+               memcg_alloc_page_obj_cgroups(page, s, gfp, false);
+
        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
 }
-- 
2.26.2
