Re: [PATCH 1/2] slab, slub, slob: add slab_flags_t

2017-10-21 Thread Pekka Enberg



On 21/10/2017 13.02, Alexey Dobriyan wrote:

Add sparse-checked slab_flags_t for struct kmem_cache::flags
(SLAB_POISON, etc).

SLAB is bloated temporarily by switching to "unsigned long",
but only temporarily.

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>


Acked-by: Pekka Enberg <penberg@kernel.org>


[PATCH 1/2] slab, slub, slob: add slab_flags_t

2017-10-21 Thread Alexey Dobriyan
Add sparse-checked slab_flags_t for struct kmem_cache::flags
(SLAB_POISON, etc).

SLAB is bloated temporarily by switching to "unsigned long",
but only temporarily.

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
---

 fs/ecryptfs/main.c   |2 -
 fs/xfs/kmem.h|2 -
 include/linux/kasan.h|4 +--
 include/linux/kmemleak.h |8 +++---
 include/linux/slab.h |   60 ---
 include/linux/slab_def.h |2 -
 include/linux/slub_def.h |2 -
 include/linux/types.h|1 +
 include/net/sock.h   |2 -
 mm/kasan/kasan.c |2 -
 mm/slab.c|   23 --
 mm/slab.h|   26 ++--
 mm/slab_common.c |   16 ++--
 mm/slob.c|2 -
 mm/slub.c|   26 ++--
 15 files changed, 97 insertions(+), 81 deletions(-)

--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -660,7 +660,7 @@ static struct ecryptfs_cache_info {
struct kmem_cache **cache;
const char *name;
size_t size;
-   unsigned long flags;
+   slab_flags_t flags;
void (*ctor)(void *obj);
 } ecryptfs_cache_infos[] = {
{
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -104,7 +104,7 @@ kmem_zone_init(int size, char *zone_name)
 }
 
 static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
 void (*construct)(void *))
 {
return kmem_cache_create(zone_name, size, 0, flags, construct);
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -45,7 +45,7 @@ void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
 void kasan_cache_create(struct kmem_cache *cache, size_t *size,
-   unsigned long *flags);
+   slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
 
@@ -94,7 +94,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
  size_t *size,
- unsigned long *flags) {}
+ slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -48,14 +48,14 @@ extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
 extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
-   int min_count, unsigned long flags,
+   int min_count, slab_flags_t flags,
gfp_t gfp)
 {
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_alloc(ptr, size, min_count, gfp);
 }
 
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
 {
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_free(ptr);
@@ -76,7 +76,7 @@ static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
 {
 }
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
-   int min_count, unsigned long flags,
+   int min_count, slab_flags_t flags,
gfp_t gfp)
 {
 }
@@ -94,7 +94,7 @@ static inline void kmemleak_free(const void *ptr)
 static inline void kmemleak_free_part(const void *ptr, size_t size)
 {
 }
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
 {
 }
 static inline void kmemleak_free_percpu(const void __percpu *ptr)
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,13 +20,20 @@
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
-#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC 0x0004UL/* Panic if kmem_cache_create() 
fails