[PATCH v3 7/9] slab: destroy a slab without holding any alien cache lock

2014-07-01, Joonsoo Kim
I haven't heard that this alien cache lock is contended, but reducing the
chance of contention is generally better. With this change we can also
simplify the complex lockdep annotation in the slab code; that
simplification is implemented in the following patch.
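
To make the pattern concrete outside the kernel, here is a minimal
userspace sketch (plain C with pthreads) of what this patch does: detach
the to-be-destroyed pages onto a local list while the lock is held, then
perform the expensive destruction after the lock is dropped. struct
cache, struct page_node and destroy_pages() below are hypothetical
stand-ins, not the kernel's real slab internals.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the slab structures touched by the patch. */
struct page_node {
	struct page_node *next;
	void *mem;
};

struct cache {
	pthread_spinlock_t lock;
	struct page_node *free_pages;	/* pages that became empty */
};

/* Expensive teardown; the point of the patch is to run this unlocked. */
static void destroy_pages(struct page_node *list)
{
	while (list) {
		struct page_node *next = list->next;

		free(list->mem);
		free(list);
		list = next;
	}
}

/* Before the patch: destruction happens inside the critical section. */
static void drain_locked(struct cache *c)
{
	pthread_spin_lock(&c->lock);
	destroy_pages(c->free_pages);	/* slow work under the lock */
	c->free_pages = NULL;
	pthread_spin_unlock(&c->lock);
}

/* After the patch: O(1) detach under the lock, destruction afterwards. */
static void drain_unlocked(struct cache *c)
{
	struct page_node *list;

	pthread_spin_lock(&c->lock);
	list = c->free_pages;		/* detach the whole list */
	c->free_pages = NULL;
	pthread_spin_unlock(&c->lock);

	destroy_pages(list);		/* no lock held here */
}

int main(void)
{
	struct cache c = { .free_pages = NULL };

	pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
	drain_locked(&c);		/* both drains are no-ops here */
	drain_unlocked(&c);
	pthread_spin_destroy(&c.lock);
	return 0;
}

Besides shortening the critical section to a pointer swap, running
slabs_destroy() with no alien cache lock held removes the lock nesting
that the lockdep annotation mentioned above has to describe.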

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/slab.c |   20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 1c319ad..854dfa0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
-   struct array_cache *ac, int node)
+   struct array_cache *ac, int node,
+   struct list_head *list)
 {
struct kmem_cache_node *n = get_node(cachep, node);
-   LIST_HEAD(list);
 
if (ac->avail) {
		spin_lock(&n->list_lock);
@@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
if (n->shared)
transfer_objects(n->shared, ac, ac->limit);
 
-		free_block(cachep, ac->entry, ac->avail, node, &list);
+		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
-		slabs_destroy(cachep, &list);
}
 }
 
@@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
if (alc) {
		ac = &alc->ac;
		if (ac->avail && spin_trylock_irq(&alc->lock)) {
-   __drain_alien_cache(cachep, ac, node);
+   LIST_HEAD(list);
+
+			__drain_alien_cache(cachep, ac, node, &list);
			spin_unlock_irq(&alc->lock);
+			slabs_destroy(cachep, &list);
}
}
}
@@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
for_each_online_node(i) {
alc = alien[i];
if (alc) {
+   LIST_HEAD(list);
+
			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
-   __drain_alien_cache(cachep, ac, i);
+			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
+			slabs_destroy(cachep, &list);
}
}
 }
@@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
		spin_lock(&alien->lock);
if (unlikely(ac->avail == ac->limit)) {
STATS_INC_ACOVERFLOW(cachep);
-   __drain_alien_cache(cachep, ac, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid, &list);
}
ac_put_obj(cachep, ac, objp);
		spin_unlock(&alien->lock);
+		slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, nodeid);
		spin_lock(&n->list_lock);
-- 
1.7.9.5
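
A footnote on the reap_alien() hunk: it takes the alien lock with
spin_trylock_irq(), so a contended cache is simply skipped and retried on
the next periodic reap, and with this patch even a successful drain does
its slab destruction unlocked. In the same hypothetical userspace terms
as the sketch above (reusing struct cache and destroy_pages()):

/* Opportunistic drain in the style of reap_alien(): skip on contention. */
static void reap_one(struct cache *c)
{
	struct page_node *list;

	if (pthread_spin_trylock(&c->lock) != 0)
		return;			/* contended: retry on a later reap */

	list = c->free_pages;		/* O(1) detach, as in drain_unlocked() */
	c->free_pages = NULL;
	pthread_spin_unlock(&c->lock);

	destroy_pages(list);		/* destruction with no lock held */
}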
