Re: [patch 01/10] SLUB: add support for kmem_cache_ops

2007-05-20 Thread Pekka Enberg

Christoph Lameter wrote:
> Yeah, earlier versions did this, but then I would have to do a patch that
> changes all destructors and all kmem_cache_create calls in the kernel.

Yes, please ;-)




Re: [patch 01/10] SLUB: add support for kmem_cache_ops

2007-05-19 Thread Christoph Lameter
On Sat, 19 May 2007, Pekka Enberg wrote:

> On 5/18/07, [EMAIL PROTECTED] <[EMAIL PROTECTED]> wrote:
> > kmem_cache_ops is created as empty. Later patches populate kmem_cache_ops.
> 
> Hmm, it would make more sense to me to move "ctor" into kmem_cache_ops in
> this patch and not make kmem_cache_create() take both as parameters...

Yeah, earlier versions did this, but then I would have to do a patch that
changes all destructors and all kmem_cache_create calls in the kernel.
 


Re: [patch 01/10] SLUB: add support for kmem_cache_ops

2007-05-19 Thread Pekka Enberg

On 5/18/07, [EMAIL PROTECTED] <[EMAIL PROTECTED]> wrote:

> kmem_cache_ops is created as empty. Later patches populate kmem_cache_ops.

Hmm, it would make more sense to me to move "ctor" into kmem_cache_ops in
this patch and not make kmem_cache_create() take both as parameters...
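
Something like the following rough sketch of the idea; none of this is in the
patch, and the "foo" cache, its constructor, and the reduced
kmem_cache_create() prototype are only illustrations:

/* hypothetical: constructor carried inside kmem_cache_ops */
struct kmem_cache_ops {
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

/* hypothetical: kmem_cache_create() loses its separate ctor/dtor arguments */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags,
		const struct kmem_cache_ops *ops);

/* a made-up caller wrapping its constructor in an ops structure */
struct foo {
	spinlock_t lock;
	int refcount;
};

static void foo_ctor(void *object, struct kmem_cache *s, unsigned long flags)
{
	struct foo *f = object;

	spin_lock_init(&f->lock);
	f->refcount = 0;
}

static const struct kmem_cache_ops foo_ops = {
	.ctor	= foo_ctor,
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
					SLAB_HWCACHE_ALIGN, &foo_ops);
	return foo_cache ? 0 : -ENOMEM;
}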


[patch 01/10] SLUB: add support for kmem_cache_ops

2007-05-18 Thread clameter
We use the parameter formerly used for the destructor to pass an optional
pointer to a kmem_cache_ops structure to kmem_cache_create().

kmem_cache_ops is created as empty. Later patches populate kmem_cache_ops.

Create a KMEM_CACHE_OPS macro that allows the specification of the
kmem_cache_ops.

Code to handle kmem_cache_ops is added to SLUB. SLAB and SLOB are updated
to accept a kmem_cache_ops structure but ignore it.
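
For reference, a caller of the new interface introduced by this patch would
look roughly like the sketch below; the "foo" cache is a made-up example, and
kmem_cache_ops is still empty at this point in the series:

#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int bar;
};

/* empty for now; later patches in the series add operations */
static const struct kmem_cache_ops foo_ops = {
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/*
	 * Expands to kmem_cache_create("foo", sizeof(struct foo),
	 * __alignof__(struct foo), SLAB_HWCACHE_ALIGN, NULL, &foo_ops).
	 */
	foo_cache = KMEM_CACHE_OPS(foo, SLAB_HWCACHE_ALIGN, &foo_ops);
	return foo_cache ? 0 : -ENOMEM;
}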

Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>

---
 include/linux/slab.h     |   13 +
 include/linux/slub_def.h |    1 +
 mm/slab.c                |    6 +++---
 mm/slob.c                |    2 +-
 mm/slub.c                |   44 ++--
 5 files changed, 44 insertions(+), 22 deletions(-)

Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h  2007-05-15 21:19:51.0 -0700
+++ slub/include/linux/slab.h   2007-05-15 21:27:07.0 -0700
@@ -38,10 +38,13 @@ typedef struct kmem_cache kmem_cache_t _
 void __init kmem_cache_init(void);
 int slab_is_available(void);
 
+struct kmem_cache_ops {
+};
+
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *, struct kmem_cache *, unsigned long),
-   void (*)(void *, struct kmem_cache *, unsigned long));
+   const struct kmem_cache_ops *s);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -59,9 +62,11 @@ int kmem_ptr_validate(struct kmem_cache 
  * f.e. add cacheline_aligned_in_smp to the struct declaration
  * then the objects will be properly aligned in SMP configurations.
  */
-#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
-   sizeof(struct __struct), __alignof__(struct __struct),\
-   (__flags), NULL, NULL)
+#define KMEM_CACHE_OPS(__struct, __flags, __ops) \
+   kmem_cache_create(#__struct, sizeof(struct __struct), \
+   __alignof__(struct __struct), (__flags), NULL, (__ops))
+
+#define KMEM_CACHE(__struct, __flags) KMEM_CACHE_OPS(__struct, __flags, NULL)
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c 2007-05-15 21:25:46.0 -0700
+++ slub/mm/slub.c  2007-05-15 21:29:36.0 -0700
@@ -294,6 +294,9 @@ static inline int check_valid_pointer(st
return 1;
 }
 
+struct kmem_cache_ops slub_default_ops = {
+};
+
 /*
  * Slow version of get and set free pointer.
  *
@@ -2003,11 +2006,13 @@ static int calculate_sizes(struct kmem_c
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
const char *name, size_t size,
size_t align, unsigned long flags,
-   void (*ctor)(void *, struct kmem_cache *, unsigned long))
+   void (*ctor)(void *, struct kmem_cache *, unsigned long),
+   const struct kmem_cache_ops *ops)
 {
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
+   s->ops = ops;
s->objsize = size;
s->flags = flags;
s->align = align;
@@ -2191,7 +2196,7 @@ static struct kmem_cache *create_kmalloc
 
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-   flags, NULL))
+   flags, NULL, &slub_default_ops))
goto panic;
 
list_add(&s->list, &slab_caches);
@@ -2505,12 +2510,16 @@ static int slab_unmergeable(struct kmem_
if (s->ctor)
return 1;
 
+   if (s->ops != &slub_default_ops)
+   return 1;
+
return 0;
 }
 
 static struct kmem_cache *find_mergeable(size_t size,
size_t align, unsigned long flags,
-   void (*ctor)(void *, struct kmem_cache *, unsigned long))
+   void (*ctor)(void *, struct kmem_cache *, unsigned long),
+   const struct kmem_cache_ops *ops)
 {
struct list_head *h;
 
@@ -2520,6 +2529,9 @@ static struct kmem_cache *find_mergeable
if (ctor)
return NULL;
 
+   if (ops != &slub_default_ops)
+   return NULL;
+
size = ALIGN(size, sizeof(void *));
align = calculate_alignment(flags, align, size);
size = ALIGN(size, align);
@@ -2555,13 +2567,15 @@ static struct kmem_cache *find_mergeable
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(void *, struct kmem_cache *, unsigned long),
-   void (*dtor)(void *, struct kmem_cache *, unsigned long))
+   const struct kmem_cache_ops *ops)
 {