On Thu, 17 Jan 2008, Pekka Enberg wrote:

> Looks similar to the one discussed on linux-mm ("[BUG] at
> mm/slab.c:3320" thread). Christoph?

Right. Try the latest version of the patch to fix it. On a memoryless node
cachep->nodelists[node] is NULL, so the allocator's refill path hits the
BUG_ON(); the patch checks for the missing per-node list and falls back to
a node that actually has memory:

Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c    2008-01-03 12:26:42.000000000 -0800
+++ linux-2.6/mm/slab.c 2008-01-09 15:59:49.000000000 -0800
@@ -2977,7 +2977,10 @@ retry:
        }
        l3 = cachep->nodelists[node];
 
-       BUG_ON(ac->avail > 0 || !l3);
+       if (!l3)
+               return NULL;
+
+       BUG_ON(ac->avail > 0);
        spin_lock(&l3->list_lock);
 
        /* See if we can refill from the shared array */
@@ -3224,7 +3227,7 @@ static void *alternate_node_alloc(struct
                nid_alloc = cpuset_mem_spread_node();
        else if (current->mempolicy)
                nid_alloc = slab_node(current->mempolicy);
-       if (nid_alloc != nid_here)
+       if (nid_alloc != nid_here && node_state(nid_alloc, N_NORMAL_MEMORY))
                return ____cache_alloc_node(cachep, flags, nid_alloc);
        return NULL;
 }
@@ -3439,8 +3442,14 @@ __do_cache_alloc(struct kmem_cache *cach
         * We may just have run out of memory on the local node.
         * ____cache_alloc_node() knows how to locate memory on other nodes
         */
-       if (!objp)
-               objp = ____cache_alloc_node(cache, flags, numa_node_id());
+       if (!objp) {
+               int node_id = numa_node_id();
+               if (likely(cache->nodelists[node_id])) /* fast path */
+                       objp = ____cache_alloc_node(cache, flags, node_id);
+               else /* __cache_alloc_node() can fall back to other nodes */
+                       objp = __cache_alloc_node(cache, flags, node_id,
+                                       __builtin_return_address(0));
+       }
 
   out:
        return objp;
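
To illustrate the shape of the fix outside the kernel, here is a minimal
userspace C sketch (all names are made up for illustration; this is not
the slab code) of the pattern: check the per-node structure before
dereferencing it, and fall back to other nodes instead of BUG()ing:

#include <stddef.h>
#include <stdio.h>

#define MAX_NUMNODES 4

struct node_list { int free_objects; };

/* a NULL entry stands for a memoryless node (like cachep->nodelists[]) */
static struct node_list *nodelists[MAX_NUMNODES];

/* strict per-node allocation: fails if the node has no memory */
static void *alloc_on_node(int node)
{
	if (!nodelists[node] || !nodelists[node]->free_objects)
		return NULL;
	nodelists[node]->free_objects--;
	return nodelists[node];		/* stand-in for a real object */
}

/* fallback path: try the requested node first, then everyone else */
static void *alloc_with_fallback(int node)
{
	void *obj = alloc_on_node(node);
	int n;

	if (obj)
		return obj;
	for (n = 0; n < MAX_NUMNODES; n++)
		if ((obj = alloc_on_node(n)))
			return obj;
	return NULL;
}

int main(void)
{
	struct node_list node1 = { .free_objects = 8 };

	nodelists[1] = &node1;	/* node 0 stays memoryless */

	/* the old code would have hit BUG_ON(!nodelists[0]) here */
	printf("allocated: %p\n", alloc_with_fallback(0));
	return 0;
}

In the patch, ____cache_alloc_node() is the strict per-node path, while
__cache_alloc_node() is the variant that knows how to fall back, which is
why the slow path calls it when nodelists[node_id] is NULL.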
