[PATCH v1 1/2] mm: Reorganize SLAB freelist randomization

2016-05-26 Thread Thomas Garnier
This commit reorganizes the previous SLAB freelist randomization to
prepare for the SLUB implementation. It moves functions that will be
shared to slab_common.

The entropy functions are changed to align with the SLUB implementation,
now using get_random_(int|long) functions. These functions were chosen
because they provide a bit more entropy early on boot and better
performance when specific arch instructions are not available.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
---
Based on next-20160526
---
 include/linux/slab_def.h |  2 +-
 mm/slab.c| 80 
 mm/slab.h| 14 +
 mm/slab_common.c | 47 
 4 files changed, 82 insertions(+), 61 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8694f7a..339ba02 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -81,7 +81,7 @@ struct kmem_cache {
 #endif
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
-   void *random_seq;
+   unsigned int *random_seq;
 #endif
 
struct kmem_cache_node *node[MAX_NUMNODES];
diff --git a/mm/slab.c b/mm/slab.c
index cc8bbc1..763096a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1236,61 +1236,6 @@ static void __init set_up_node(struct kmem_cache 
*cachep, int index)
}
 }
 
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
-static void freelist_randomize(struct rnd_state *state, freelist_idx_t *list,
-   size_t count)
-{
-   size_t i;
-   unsigned int rand;
-
-   for (i = 0; i < count; i++)
-   list[i] = i;
-
-   /* Fisher-Yates shuffle */
-   for (i = count - 1; i > 0; i--) {
-   rand = prandom_u32_state(state);
-   rand %= (i + 1);
-   swap(list[i], list[rand]);
-   }
-}
-
-/* Create a random sequence per cache */
-static int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp)
-{
-   unsigned int seed, count = cachep->num;
-   struct rnd_state state;
-
-   if (count < 2)
-   return 0;
-
-   /* If it fails, we will just use the global lists */
-   cachep->random_seq = kcalloc(count, sizeof(freelist_idx_t), gfp);
-   if (!cachep->random_seq)
-   return -ENOMEM;
-
-   /* Get best entropy at this stage */
-   get_random_bytes_arch(&seed, sizeof(seed));
-   prandom_seed_state(&state, seed);
-
-   freelist_randomize(&state, cachep->random_seq, count);
-   return 0;
-}
-
-/* Destroy the per-cache random freelist sequence */
-static void cache_random_seq_destroy(struct kmem_cache *cachep)
-{
-   kfree(cachep->random_seq);
-   cachep->random_seq = NULL;
-}
-#else
-static inline int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp)
-{
-   return 0;
-}
-static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
-#endif /* CONFIG_SLAB_FREELIST_RANDOM */
-
-
 /*
  * Initialisation.  Called after the page allocator have been initialised and
  * before smp_init().
@@ -2535,7 +2480,7 @@ static void cache_init_objs_debug(struct kmem_cache 
*cachep, struct page *page)
 union freelist_init_state {
struct {
unsigned int pos;
-   freelist_idx_t *list;
+   unsigned int *list;
unsigned int count;
unsigned int rand;
};
@@ -2554,7 +2499,7 @@ static bool freelist_state_initialize(union 
freelist_init_state *state,
unsigned int rand;
 
/* Use best entropy available to define a random shift */
-   get_random_bytes_arch(&rand, sizeof(rand));
+   rand = get_random_int();
 
/* Use a random state if the pre-computed list is not available */
if (!cachep->random_seq) {
@@ -2576,13 +2521,20 @@ static freelist_idx_t next_random_slot(union 
freelist_init_state *state)
return (state->list[state->pos++] + state->rand) % state->count;
 }
 
+/* Swap two freelist entries */
+static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
+{
+   swap(((freelist_idx_t *)page->freelist)[a],
+   ((freelist_idx_t *)page->freelist)[b]);
+}
+
 /*
  * Shuffle the freelist initialization state based on pre-computed lists.
  * return true if the list was successfully shuffled, false otherwise.
  */
 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
 {
-   unsigned int objfreelist = 0, i, count = cachep->num;
+   unsigned int objfreelist = 0, i, rand, count = cachep->num;
union freelist_init_state state;
bool precomputed;
 
@@ -2607,7 +2559,15 @@ static bool shuffle_freelist(struct kmem_cache *cachep, 
struct page *page)
 * Later use a pre-computed list for speed.
 */
if (!precomputed) {
-   freelist_randomize(&state.rnd_state, page->freelist, count);
+   for (i = 0; i < count; i++)
+   set_free_obj(page, i, i);
+

[PATCH v1 1/2] mm: Reorganize SLAB freelist randomization

2016-05-26 Thread Thomas Garnier
This commit reorganizes the previous SLAB freelist randomization to
prepare for the SLUB implementation. It moves functions that will be
shared to slab_common.

The entropy functions are changed to align with the SLUB implementation,
now using get_random_(int|long) functions. These functions were chosen
because they provide a bit more entropy early on boot and better
performance when specific arch instructions are not available.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
---
Based on next-20160526
---
 include/linux/slab_def.h |  2 +-
 mm/slab.c| 80 
 mm/slab.h| 14 +
 mm/slab_common.c | 47 
 4 files changed, 82 insertions(+), 61 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8694f7a..339ba02 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -81,7 +81,7 @@ struct kmem_cache {
 #endif
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
-   void *random_seq;
+   unsigned int *random_seq;
 #endif
 
struct kmem_cache_node *node[MAX_NUMNODES];
diff --git a/mm/slab.c b/mm/slab.c
index cc8bbc1..763096a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1236,61 +1236,6 @@ static void __init set_up_node(struct kmem_cache 
*cachep, int index)
}
 }
 
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
-static void freelist_randomize(struct rnd_state *state, freelist_idx_t *list,
-   size_t count)
-{
-   size_t i;
-   unsigned int rand;
-
-   for (i = 0; i < count; i++)
-   list[i] = i;
-
-   /* Fisher-Yates shuffle */
-   for (i = count - 1; i > 0; i--) {
-   rand = prandom_u32_state(state);
-   rand %= (i + 1);
-   swap(list[i], list[rand]);
-   }
-}
-
-/* Create a random sequence per cache */
-static int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp)
-{
-   unsigned int seed, count = cachep->num;
-   struct rnd_state state;
-
-   if (count < 2)
-   return 0;
-
-   /* If it fails, we will just use the global lists */
-   cachep->random_seq = kcalloc(count, sizeof(freelist_idx_t), gfp);
-   if (!cachep->random_seq)
-   return -ENOMEM;
-
-   /* Get best entropy at this stage */
-   get_random_bytes_arch(&seed, sizeof(seed));
-   prandom_seed_state(&state, seed);
-
-   freelist_randomize(&state, cachep->random_seq, count);
-   return 0;
-}
-
-/* Destroy the per-cache random freelist sequence */
-static void cache_random_seq_destroy(struct kmem_cache *cachep)
-{
-   kfree(cachep->random_seq);
-   cachep->random_seq = NULL;
-}
-#else
-static inline int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp)
-{
-   return 0;
-}
-static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
-#endif /* CONFIG_SLAB_FREELIST_RANDOM */
-
-
 /*
  * Initialisation.  Called after the page allocator have been initialised and
  * before smp_init().
@@ -2535,7 +2480,7 @@ static void cache_init_objs_debug(struct kmem_cache 
*cachep, struct page *page)
 union freelist_init_state {
struct {
unsigned int pos;
-   freelist_idx_t *list;
+   unsigned int *list;
unsigned int count;
unsigned int rand;
};
@@ -2554,7 +2499,7 @@ static bool freelist_state_initialize(union 
freelist_init_state *state,
unsigned int rand;
 
/* Use best entropy available to define a random shift */
-   get_random_bytes_arch(&rand, sizeof(rand));
+   rand = get_random_int();
 
/* Use a random state if the pre-computed list is not available */
if (!cachep->random_seq) {
@@ -2576,13 +2521,20 @@ static freelist_idx_t next_random_slot(union 
freelist_init_state *state)
return (state->list[state->pos++] + state->rand) % state->count;
 }
 
+/* Swap two freelist entries */
+static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
+{
+   swap(((freelist_idx_t *)page->freelist)[a],
+   ((freelist_idx_t *)page->freelist)[b]);
+}
+
 /*
  * Shuffle the freelist initialization state based on pre-computed lists.
  * return true if the list was successfully shuffled, false otherwise.
  */
 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
 {
-   unsigned int objfreelist = 0, i, count = cachep->num;
+   unsigned int objfreelist = 0, i, rand, count = cachep->num;
union freelist_init_state state;
bool precomputed;
 
@@ -2607,7 +2559,15 @@ static bool shuffle_freelist(struct kmem_cache *cachep, 
struct page *page)
 * Later use a pre-computed list for speed.
 */
if (!precomputed) {
-   freelist_randomize(&state.rnd_state, page->freelist, count);
+   for (i = 0; i < count; i++)
+   set_free_obj(page, i, i);
+
+   /* Fisher-Yates shuffle */
+