commit ce4039e002eab66502cd7d2cbcc6fcbdcbf828ee
Author: Daniel Yeisley <[EMAIL PROTECTED]>
Date:   Wed Mar 26 23:37:41 2008 +0200

    slab: fix cache_cache bootstrap in kmem_cache_init()
    
    upstream commit: ec1f5eeeb5a79a0d48036de649a3498da42db565
    
    Commit 556a169dab38b5100df6f4a45b655dddd3db94c1 ("slab: fix bootstrap on
    memoryless node") introduced bootstrap-time cache_cache list3s for all nodes
    but forgot that initkmem_list3 needs to be accessed by [somevalue + node].
    This patch fixes list_add() corruption in mm/slab.c seen on the ES7000.
    
    Cc: Mel Gorman <[EMAIL PROTECTED]>
    Cc: Olaf Hering <[EMAIL PROTECTED]>
    Signed-off-by: Dan Yeisley <[EMAIL PROTECTED]>
    Signed-off-by: Pekka Enberg <[EMAIL PROTECTED]>
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
    Signed-off-by: Chris Wright <[EMAIL PROTECTED]>

diff --git a/mm/slab.c b/mm/slab.c
index 79c3be0..8323e7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1484,7 +1484,7 @@ void __init kmem_cache_init(void)
        list_add(&cache_cache.next, &cache_chain);
        cache_cache.colour_off = cache_line_size();
        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-       cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+       cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
        /*
         * struct kmem_cache size depends on nr_node_ids, which
@@ -1605,7 +1605,7 @@ void __init kmem_cache_init(void)
                int nid;
 
                for_each_online_node(nid) {
-                       init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+                       init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
                        init_list(malloc_sizes[INDEX_AC].cs_cachep,
                                  &initkmem_list3[SIZE_AC + nid], nid);