Rather than incurring a division or requesting too many random bytes for
the given range, use the prandom_u32_max() function, which only takes
the minimum required bytes from the RNG and avoids divisions. This was
done by hand, covering conversions that Coccinelle could not make on
its own.
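
For reference, prandom_u32_max() is the multiply-and-shift helper from
include/linux/prandom.h, which scales a full 32-bit random value down
to [0, ep_ro) without a modulo (a sketch, for illustration):

	static inline u32 prandom_u32_max(u32 ep_ro)
	{
		/*
		 * Multiply the 32-bit random value by the exclusive
		 * upper bound and keep the high 32 bits: a
		 * division-free way of producing a value in [0, ep_ro).
		 */
		return (u32)(((u64)prandom_u32() * ep_ro) >> 32);
	}

Like the modulo it replaces, this is very slightly biased for bounds
that do not evenly divide 2^32, which is fine for these
non-cryptographic callers.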

Reviewed-by: Kees Cook <keesc...@chromium.org>
Reviewed-by: Jan Kara <j...@suse.cz> # for ext2, ext4, and sbitmap
Signed-off-by: Jason A. Donenfeld <ja...@zx2c4.com>
---
 fs/ext2/ialloc.c   |  3 +--
 fs/ext4/ialloc.c   |  5 ++---
 lib/sbitmap.c      |  2 +-
 lib/test_vmalloc.c | 17 ++++-------------
 4 files changed, 8 insertions(+), 19 deletions(-)

diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 998dd2ac8008..f4944c4dee60 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -277,8 +277,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
                int best_ndir = inodes_per_group;
                int best_group = -1;
 
-               group = prandom_u32();
-               parent_group = (unsigned)group % ngroups;
+               parent_group = prandom_u32_max(ngroups);
                for (i = 0; i < ngroups; i++) {
                        group = (parent_group + i) % ngroups;
                        desc = ext2_get_group_desc (sb, group, NULL);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f73e5eb43eae..36d5bc595cc2 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -463,10 +463,9 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
                        hinfo.hash_version = DX_HASH_HALF_MD4;
                        hinfo.seed = sbi->s_hash_seed;
                        ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
-                       grp = hinfo.hash;
+                       parent_group = hinfo.hash % ngroups;
                } else
-                       grp = prandom_u32();
-               parent_group = (unsigned)grp % ngroups;
+                       parent_group = prandom_u32_max(ngroups);
                for (i = 0; i < ngroups; i++) {
                        g = (parent_group + i) % ngroups;
                        get_orlov_stats(sb, g, flex_size, &stats);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index c4f04edf3ee9..ef0661504561 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -21,7 +21,7 @@ static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
                int i;
 
                for_each_possible_cpu(i)
-                       *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
+                       *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
        }
        return 0;
 }
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4f2f2d1bac56..a26bbbf20e62 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -151,9 +151,7 @@ static int random_size_alloc_test(void)
        int i;
 
        for (i = 0; i < test_loop_count; i++) {
-               n = prandom_u32();
-               n = (n % 100) + 1;
-
+               n = prandom_u32_max(100) + 1;
                p = vmalloc(n * PAGE_SIZE);
 
                if (!p)
@@ -293,16 +291,12 @@ pcpu_alloc_test(void)
                return -1;
 
        for (i = 0; i < 35000; i++) {
-               unsigned int r;
-
-               r = prandom_u32();
-               size = (r % (PAGE_SIZE / 4)) + 1;
+               size = prandom_u32_max(PAGE_SIZE / 4) + 1;
 
                /*
                 * Maximum PAGE_SIZE
                 */
-               r = prandom_u32();
-               align = 1 << ((r % 11) + 1);
+               align = 1 << (prandom_u32_max(11) + 1);
 
                pcpu[i] = __alloc_percpu(size, align);
                if (!pcpu[i])
@@ -393,14 +387,11 @@ static struct test_driver {
 
 static void shuffle_array(int *arr, int n)
 {
-       unsigned int rnd;
        int i, j;
 
        for (i = n - 1; i > 0; i--)  {
-               rnd = prandom_u32();
-
                /* Cut the range. */
-               j = rnd % i;
+               j = prandom_u32_max(i);
 
                /* Swap indexes. */
                swap(arr[i], arr[j]);
-- 
2.37.3
