groups_sort() can take quite a long time if a user loads a large gid table.

This is because GROUP_AT(group_info, some_integer) performs an integer divide,
so having to do XXX thousand divides during one syscall can lead to very high latencies (NGROUPS_MAX=65536).
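For reference, GROUP_AT() is defined in include/linux/sched.h roughly as follows (the two-dimensional indexing is the part that matters here):

        #define GROUP_AT(gi, i) \
                ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])

Both the divide and the modulus use NGROUPS_PER_BLOCK, which is currently cast to int, so the arithmetic is done signed.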

In the past (25 Mar 2006), an analogous problem was found in groups_search()
(commit d74beb9f33a5f16d2965f11b275e401f225c949d), and at that time I changed some variables to unsigned int.

I believe a more generic fix is to make NGROUPS_PER_BLOCK itself unsigned. Since PAGE_SIZE / sizeof(gid_t) is a power of two, the compiler can then turn the divide into a shift and the modulus into a mask.
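A minimal user-space sketch of the effect (the divisor 1024 stands in for NGROUPS_PER_BLOCK on a system with 4 KB pages and a 4-byte gid_t; function names are illustrative only):

        /* Signed divide: the compiler must preserve C's round-toward-zero
         * semantics for negative dividends, so it emits extra fixup
         * instructions, or on some compilers/architectures a hardware
         * divide. */
        static int index_signed(int i)
        {
                return i / 1024;
        }

        /* Unsigned divide by a power of two reduces to a plain shift
         * (i >> 10), and the matching modulus to a mask (i & 1023). */
        static unsigned int index_unsigned(unsigned int i)
        {
                return i / 1024;
        }

Once NGROUPS_PER_BLOCK is unsigned, the usual arithmetic conversions make the whole GROUP_AT() expression unsigned as well. It also means min(NGROUPS_PER_BLOCK, count) in kernel/sys.c would now compare an unsigned int against an int, which the kernel's type-checking min() macro warns about, hence the switch to min_t(int, ...) below.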

Signed-off-by: Eric Dumazet <[EMAIL PROTECTED]>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ac3d496..725a491 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -775,7 +775,7 @@ static inline int above_background_load(void)
 
 struct io_context;                     /* See blkdev.h */
 #define NGROUPS_SMALL          32
-#define NGROUPS_PER_BLOCK      ((int)(PAGE_SIZE / sizeof(gid_t)))
+#define NGROUPS_PER_BLOCK      ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
 struct group_info {
        int ngroups;
        atomic_t usage;
diff --git a/kernel/sys.c b/kernel/sys.c
index d1fe71e..091e58f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1148,7 +1148,7 @@ static int groups_to_user(gid_t __user *grouplist,
        int count = group_info->ngroups;
 
        for (i = 0; i < group_info->nblocks; i++) {
-               int cp_count = min(NGROUPS_PER_BLOCK, count);
+               int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);
 
@@ -1168,7 +1168,7 @@ static int groups_from_user(struct group_info *group_info,
        int count = group_info->ngroups;
 
        for (i = 0; i < group_info->nblocks; i++) {
-               int cp_count = min(NGROUPS_PER_BLOCK, count);
+               int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);
 
