On Fri, May 08, 2020 at 09:35:54PM +0000, Christoph Lameter wrote:
> On Mon, 4 May 2020, Roman Gushchin wrote:
> 
> > On Sat, May 02, 2020 at 11:54:09PM +0000, Christoph Lameter wrote:
> > > On Thu, 30 Apr 2020, Roman Gushchin wrote:
> > >
> > > > Sorry, but what exactly do you mean?
> > >
> > > I think the right approach is to add a pointer to each slab object for
> > > memcg support.
> > >
> >
> > As I understand it, embedding the memcg pointer will hopefully make allocations
> > cheaper in terms of CPU, but will require more memory. And you think that
> > it's worth the trade-off. Is that a correct understanding?
> 
> It definitely makes the code less complex. The additional memory is
> minimal: in many cases there is already some space wasted at the end of
> the object that could be used for the pointer.
> 
> > Can you please describe in a bit more detail how it should be done,
> > from your point of view?
> 
> Add it to the metadata at the end of the object, like the debugging
> information or the pointer for RCU freeing.
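
If I understand the suggestion correctly, the per-object layout (SLUB
case) would then become roughly the following, where the objcg slot is
the only new part and everything before it depends on the cache's
debug/RCU setup:

	object payload
	free pointer            (if stored outside the object)
	debug / RCU metadata    (if enabled)
	struct obj_cgroup *     (new, pointer-sized slot)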

I've tried to make a prototype, but realized that I don't know how to do
it the right way with SLUB (without disabling cache merging, etc.)
and ended up debugging various memory corruptions.

The memcg/kmem changes required to switch between different ways of storing
the memcg pointer are pretty minimal (diff below).

There are two functions that SLAB/SLUB should provide:

void set_obj_cgroup(struct kmem_cache *s, void *ptr, struct obj_cgroup *objcg);
struct obj_cgroup *obtain_obj_cgroup(struct kmem_cache *s, void *ptr);

Ideally, obtain_obj_cgroup() should work with an arbitrary kernel pointer,
e.g. a pointer to some field inside a structure allocated with
kmem_cache_alloc().
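
To make the idea more concrete, here is a rough, untested sketch of what
I imagine on the SLUB side. The objcg_offset field and the
obj_cgroup_slot() helper are invented for illustration only; computing
that offset for all cache layouts (debugging metadata, RCU, cache
merging) is exactly the part I'm asking for help with:

static inline struct obj_cgroup **obj_cgroup_slot(struct kmem_cache *s,
						  void *obj)
{
	/* hypothetical: pointer-sized slot appended to the object */
	return (struct obj_cgroup **)(obj + s->objcg_offset);
}

void set_obj_cgroup(struct kmem_cache *s, void *ptr, struct obj_cgroup *objcg)
{
	*obj_cgroup_slot(s, ptr) = objcg;
}

struct obj_cgroup *obtain_obj_cgroup(struct kmem_cache *s, void *ptr)
{
	struct page *page = virt_to_head_page(ptr);

	/*
	 * Round an arbitrary inner pointer down to the start of its
	 * object, the same way KASAN does via nearest_obj().
	 */
	ptr = nearest_obj(s, page, ptr);
	return *obj_cgroup_slot(s, ptr);
}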

Christoph, will you be able to help with the SLUB implementation?
It would be highly appreciated.

Thanks!

--

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4af95739ccb6..398a714874d8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2815,15 +2815,11 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
 
        /*
         * Slab objects are accounted individually, not per-page.
-        * Memcg membership data for each individual object is saved in
-        * the page->obj_cgroups.
         */
-       if (page_has_obj_cgroups(page)) {
+       if (PageSlab(page)) {
                struct obj_cgroup *objcg;
-               unsigned int off;
 
-               off = obj_to_index(page->slab_cache, page, p);
-               objcg = page_obj_cgroups(page)[off];
+               objcg = obtain_obj_cgroup(page->slab_cache, p);
                if (objcg)
                        return obj_cgroup_memcg(objcg);
 
diff --git a/mm/slab.h b/mm/slab.h
index 13fadf33be5c..617ce017bc68 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -210,40 +210,15 @@ static inline int cache_vmstat_idx(struct kmem_cache *s)
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
+static inline void set_obj_cgroup(struct kmem_cache *s, void *ptr,
+                                 struct obj_cgroup *objcg)
 {
-       /*
-        * page->mem_cgroup and page->obj_cgroups are sharing the same
-        * space. To distinguish between them in case we don't know for sure
-        * that the page is a slab page (e.g. page_cgroup_ino()), let's
-        * always set the lowest bit of obj_cgroups.
-        */
-       return (struct obj_cgroup **)
-               ((unsigned long)page->obj_cgroups & ~0x1UL);
-}
-
-static inline bool page_has_obj_cgroups(struct page *page)
-{
-       return ((unsigned long)page->obj_cgroups & 0x1UL);
-}
-
-static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
-                                              unsigned int objects)
-{
-       void *vec;
-
-       vec = kcalloc(objects, sizeof(struct obj_cgroup *), gfp);
-       if (!vec)
-               return -ENOMEM;
-
-       page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
-       return 0;
+       /* TODO: store the objcg pointer in the object's metadata */
 }
-
-static inline void memcg_free_page_obj_cgroups(struct page *page)
+static inline struct obj_cgroup *obtain_obj_cgroup(struct kmem_cache *s, void *ptr)
 {
-       kfree(page_obj_cgroups(page));
-       page->obj_cgroups = NULL;
+       /* TODO: look up the objcg pointer stored in the object's metadata */
+       return NULL;
 }
 
 static inline size_t obj_full_size(struct kmem_cache *s)
@@ -296,7 +271,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              void **p)
 {
        struct page *page;
-       unsigned long off;
        size_t i;
 
        if (!objcg)
@@ -306,17 +280,8 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        page = virt_to_head_page(p[i]);
-
-                       if (!page_has_obj_cgroups(page) &&
-                           memcg_alloc_page_obj_cgroups(page, flags,
-                                                        objs_per_slab(s))) {
-                               obj_cgroup_uncharge(objcg, obj_full_size(s));
-                               continue;
-                       }
-
-                       off = obj_to_index(s, page, p[i]);
                        obj_cgroup_get(objcg);
-                       page_obj_cgroups(page)[off] = objcg;
+                       set_obj_cgroup(s, p[i], objcg);
                        mod_objcg_state(objcg, page_pgdat(page),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
@@ -330,21 +295,17 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
                                        void *p)
 {
        struct obj_cgroup *objcg;
-       unsigned int off;
 
        if (!memcg_kmem_enabled())
                return;
 
-       if (!page_has_obj_cgroups(page))
-               return;
-
-       off = obj_to_index(s, page, p);
-       objcg = page_obj_cgroups(page)[off];
-       page_obj_cgroups(page)[off] = NULL;
+       objcg = obtain_obj_cgroup(s, p);
 
        if (!objcg)
                return;
 
+       set_obj_cgroup(s, p, NULL);
+
        obj_cgroup_uncharge(objcg, obj_full_size(s));
        mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
                        -obj_full_size(s));
@@ -363,16 +324,6 @@ static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
        return NULL;
 }
 
-static inline int memcg_alloc_page_obj_cgroups(struct page *page, gfp_t gfp,
-                                              unsigned int objects)
-{
-       return 0;
-}
-
-static inline void memcg_free_page_obj_cgroups(struct page *page)
-{
-}
-
 static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                                           size_t objects,
                                                           gfp_t flags)
@@ -415,8 +366,6 @@ static __always_inline void charge_slab_page(struct page *page,
 static __always_inline void uncharge_slab_page(struct page *page, int order,
                                               struct kmem_cache *s)
 {
-       memcg_free_page_obj_cgroups(page);
-
        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
 }
