The constructor for buffer_head slabs was recently removed. We need
the constructor in order to ensure that slab objects always have a definite
state, even before they have been allocated.
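
As background (not part of the patch itself): a slab constructor runs once
when a slab page is populated with objects, not on every allocation, and
objects must be returned to their constructed state before they are freed.
The user-space sketch below is a rough analogy of that contract; every name
in it is invented for illustration and none of it is kernel code:

#include <stdio.h>
#include <string.h>

/* Illustrative object, standing in for struct buffer_head. */
struct obj {
	char payload[64];
};

#define POOL_SIZE 16

static struct obj pool[POOL_SIZE];
/*
 * Freelist kept outside the objects so their constructed state
 * survives free/alloc cycles, as the slab allocator does for
 * caches that have constructors.
 */
static struct obj *freestack[POOL_SIZE];
static int nfree;

/* Analogy of init_buffer_head(): establishes the definite state. */
static void ctor(struct obj *o)
{
	memset(o, 0, sizeof(*o));
}

/*
 * Analogy of populating a slab page: the constructor runs once
 * per object, up front.
 */
static void pool_init(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		ctor(&pool[i]);
		freestack[nfree++] = &pool[i];
	}
}

/*
 * Analogy of kmem_cache_alloc(): no zeroing needed here, the
 * object is already in its constructed state.
 */
static struct obj *obj_alloc(void)
{
	return nfree ? freestack[--nfree] : NULL;
}

/*
 * The caller's side of the contract: restore the constructed
 * state before handing the object back.
 */
static void obj_free(struct obj *o)
{
	memset(o, 0, sizeof(*o));
	freestack[nfree++] = o;
}

int main(void)
{
	struct obj *o;

	pool_init();
	o = obj_alloc();
	printf("payload[0] = %d (definite state)\n", o->payload[0]);
	obj_free(o);
	return 0;
}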

Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>

---
 fs/buffer.c |   19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 0e5ec37..f4824d1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2960,9 +2960,8 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
-               INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
                recalc_bh_state();
                put_cpu_var(bh_accounting);
@@ -3003,12 +3002,24 @@ static int buffer_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
+static void
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
+{
+       struct buffer_head *bh = data;
+
+       memset(bh, 0, sizeof(*bh));
+       INIT_LIST_HEAD(&bh->b_assoc_buffers);
+}
+
 void __init buffer_init(void)
 {
        int nrpages;
 
-       bh_cachep = KMEM_CACHE(buffer_head,
-                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
+       bh_cachep = kmem_cache_create("buffer_head",
+                       sizeof(struct buffer_head), 0,
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                               SLAB_MEM_SPREAD),
+                               init_buffer_head);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
-- 
1.5.2.4
