On 12.02.2014 [16:16:11 -0600], Christoph Lameter wrote:
> Here is another patch with some fixes. The additional logic is only
> compiled in if CONFIG_HAVE_MEMORYLESS_NODES is set.
> 
> Subject: slub: Memoryless node support
> 
> Support memoryless nodes by tracking which allocations are failing.
> Allocations targeted to nodes without memory fall back to the
> currently available per-cpu objects and, if those are not available,
> will create a new slab using the page allocator to fall back from the
> memoryless node to some other node.

I'll try to retest this once the LPAR in question becomes free. Hopefully
in the next day or two.

Thanks,
Nish

> Signed-off-by: Christoph Lameter <c...@linux.com>
> 
> Index: linux/mm/slub.c
> ===================================================================
> --- linux.orig/mm/slub.c      2014-02-12 16:07:48.957869570 -0600
> +++ linux/mm/slub.c   2014-02-12 16:09:22.198928260 -0600
> @@ -134,6 +134,10 @@ static inline bool kmem_cache_has_cpu_pa
>  #endif
>  }
> 
> +#ifdef CONFIG_HAVE_MEMORYLESS_NODES
> +static nodemask_t empty_nodes;
> +#endif
> +
>  /*
>   * Issues still to be resolved:
>   *
> @@ -1405,16 +1409,28 @@ static struct page *new_slab(struct kmem
>       void *last;
>       void *p;
>       int order;
> +     int alloc_node;
> 
>       BUG_ON(flags & GFP_SLAB_BUG_MASK);
> 
>       page = allocate_slab(s,
>               flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
> -     if (!page)
> +     if (!page) {
> +#ifdef CONFIG_HAVE_MEMORYLESS_NODES
> +             if (node != NUMA_NO_NODE)
> +                     node_set(node, empty_nodes);
> +#endif
>               goto out;
> +     }
> 
>       order = compound_order(page);
> -     inc_slabs_node(s, page_to_nid(page), page->objects);
> +     alloc_node = page_to_nid(page);
> +#ifdef CONFIG_HAVE_MEMORYLESS_NODES
> +     node_clear(alloc_node, empty_nodes);
> +     if (node != NUMA_NO_NODE && alloc_node != node)
> +             node_set(node, empty_nodes);
> +#endif
> +     inc_slabs_node(s, alloc_node, page->objects);
>       memcg_bind_pages(s, order);
>       page->slab_cache = s;
>       __SetPageSlab(page);
> @@ -1722,7 +1738,7 @@ static void *get_partial(struct kmem_cac
>               struct kmem_cache_cpu *c)
>  {
>       void *object;
> -     int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
> +     int searchnode = (node == NUMA_NO_NODE) ? numa_mem_id() : node;
> 
>       object = get_partial_node(s, get_node(s, searchnode), c, flags);
>       if (object || node != NUMA_NO_NODE)
> @@ -2117,8 +2133,19 @@ static void flush_all(struct kmem_cache
>  static inline int node_match(struct page *page, int node)
>  {
>  #ifdef CONFIG_NUMA
> -     if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
> +     int page_node = page_to_nid(page);
> +
> +     if (!page)
>               return 0;
> +
> +     if (node != NUMA_NO_NODE) {
> +#ifdef CONFIG_HAVE_MEMORYLESS_NODES
> +             if (node_isset(node, empty_nodes))
> +                     return 1;
> +#endif
> +             if (page_node != node)
> +                     return 0;
> +     }
>  #endif
>       return 1;
>  }
> 

_______________________________________________
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Reply via email to