[PATCH v2 02/16] userns: use new hashtable implementation

From: Sasha Levin
Date: 2012-08-18
Switch to using the new hashtable implementation to store user structs.
This reduces the amount of generic unrelated code in kernel/user.c.
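
For context, the hashtable API this patch switches to is a small set of
macros layered on top of hlist. A minimal sketch of the calling convention,
matching how the patch below uses it (the struct object / object_table names
are hypothetical, for illustration only):

	#include <linux/hashtable.h>

	struct object {
		int key;
		struct hlist_node node;
	};

	/* A table with 2^3 = 8 buckets. */
	static DEFINE_HASHTABLE(object_table, 3);

	static void object_add(struct object *obj)
	{
		/* Hash the key and link obj into the matching bucket. */
		hash_add(object_table, &obj->node, obj->key);
	}

	static struct object *object_find(int key)
	{
		struct object *obj;
		struct hlist_node *h;

		/* Walk only the bucket that the key hashes to. */
		hash_for_each_possible(object_table, obj, h, node, key) {
			if (obj->key == key)
				return obj;
		}
		return NULL;
	}

hash_del(&obj->node) unlinks an entry again, and hash_init() initializes the
buckets at run time. Locking stays with the caller, exactly as uidhash_lock
is kept in kernel/user.c.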

Signed-off-by: Sasha Levin <levinsasha...@gmail.com>
---
 kernel/user.c |   33 +++++++++++++--------------------
 1 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/kernel/user.c b/kernel/user.c
index b815fef..d10c484 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <linux/user_namespace.h>
+#include <linux/hashtable.h>
 
 /*
  * userns count is 1 for root user, 1 for init_uts_ns,
@@ -52,13 +53,9 @@ EXPORT_SYMBOL_GPL(init_user_ns);
  */
 
 #define UIDHASH_BITS   (CONFIG_BASE_SMALL ? 3 : 7)
-#define UIDHASH_SZ (1 << UIDHASH_BITS)
-#define UIDHASH_MASK   (UIDHASH_SZ - 1)
-#define __uidhashfn(uid)   (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(uid)  (uidhash_table + __uidhashfn((__kuid_val(uid))))
 
 static struct kmem_cache *uid_cachep;
-struct hlist_head uidhash_table[UIDHASH_SZ];
+static DEFINE_HASHTABLE(uidhash_table, UIDHASH_BITS);
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
@@ -84,22 +81,22 @@ struct user_struct root_user = {
 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up)
 {
-   hlist_add_head(&up->uidhash_node, hashent);
+   hash_add(uidhash_table, &up->uidhash_node, __kuid_val(up->uid));
 }
 
 static void uid_hash_remove(struct user_struct *up)
 {
-   hlist_del_init(&up->uidhash_node);
+   hash_del(&up->uidhash_node);
 }
 
-static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid)
 {
struct user_struct *user;
struct hlist_node *h;
 
-   hlist_for_each_entry(user, h, hashent, uidhash_node) {
+   hash_for_each_possible(uidhash_table, user, h, uidhash_node, __kuid_val(uid)) {
if (uid_eq(user->uid, uid)) {
atomic_inc(&user->__count);
return user;
@@ -135,7 +132,7 @@ struct user_struct *find_user(kuid_t uid)
unsigned long flags;
 
spin_lock_irqsave(&uidhash_lock, flags);
-   ret = uid_hash_find(uid, uidhashentry(uid));
+   ret = uid_hash_find(uid);
spin_unlock_irqrestore(&uidhash_lock, flags);
return ret;
 }
@@ -156,11 +153,10 @@ void free_uid(struct user_struct *up)
 
 struct user_struct *alloc_uid(kuid_t uid)
 {
-   struct hlist_head *hashent = uidhashentry(uid);
struct user_struct *up, *new;
 
spin_lock_irq(&uidhash_lock);
-   up = uid_hash_find(uid, hashent);
+   up = uid_hash_find(uid);
spin_unlock_irq(&uidhash_lock);
 
if (!up) {
@@ -176,13 +172,13 @@ struct user_struct *alloc_uid(kuid_t uid)
 * on adding the same user already..
 */
spin_lock_irq(&uidhash_lock);
-   up = uid_hash_find(uid, hashent);
+   up = uid_hash_find(uid);
if (up) {
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
} else {
-   uid_hash_insert(new, hashent);
+   uid_hash_insert(new);
up = new;
}
spin_unlock_irq(&uidhash_lock);
@@ -196,17 +192,14 @@ out_unlock:
 
 static int __init uid_cache_init(void)
 {
-   int n;
-
uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
-   for(n = 0; n < UIDHASH_SZ; ++n)
-   INIT_HLIST_HEAD(uidhash_table + n);
+   hash_init(uidhash_table);
 
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
-   uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
+   uid_hash_insert(&root_user);
spin_unlock_irq(&uidhash_lock);
 
return 0;
-- 
1.7.8.6
