This is a note to let you know that I've just added the patch titled

    mempolicy: fix a race in shared_policy_replace()

to the 3.5-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mempolicy-fix-a-race-in-shared_policy_replace.patch
and it can be found in the queue-3.5 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.


From b22d127a39ddd10d93deee3d96e643657ad53a49 Mon Sep 17 00:00:00 2001
From: Mel Gorman <mgor...@suse.de>
Date: Mon, 8 Oct 2012 16:29:17 -0700
Subject: mempolicy: fix a race in shared_policy_replace()

From: Mel Gorman <mgor...@suse.de>

commit b22d127a39ddd10d93deee3d96e643657ad53a49 upstream.

shared_policy_replace()'s use of sp_alloc() is unsafe: 1) an sp_node cannot
be dereferenced unless sp->lock is held, and 2) another thread can modify
the sp_node between the spin_unlock done to allocate a new sp node and the
subsequent spin_lock.  The bug was introduced before 2.6.12-rc2.
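
To make the window concrete, the old code followed roughly this pattern
(a simplified sketch for illustration only; the exact hunks are in the
diff below):

	/* Old, racy pattern (simplified): the lock is dropped to allocate */
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	...
	if (n->end > end && !new2) {
		spin_unlock(&sp->lock);
		/*
		 * n->policy is dereferenced here without sp->lock held,
		 * and another thread may modify or free n before the
		 * lock is retaken below.
		 */
		new2 = sp_alloc(end, n->end, n->policy);
		if (!new2)
			return -ENOMEM;
		goto restart;	/* retake sp->lock and redo the lookup */
	}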

Kosaki's original patch for this problem was to allocate an sp node and
policy within shared_policy_replace() and initialise it when the lock is
reacquired.  I was not keen on this approach because it partially
duplicates sp_alloc().  As the paths where sp->lock is taken are not that
performance critical, this patch converts sp->lock to sp->mutex so the
code can sleep when calling sp_alloc().
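
With the mutex held across the allocation, no other thread can observe or
modify the tree in the window, and the failure path simply unlocks and
returns.  Again a simplified sketch of the resulting flow (the exact hunks
are in the diff below):

	/* New pattern (simplified): sp_alloc() may sleep under sp->mutex */
	mutex_lock(&sp->mutex);
	n = sp_lookup(sp, start, end);
	...
	if (n->end > end) {
		struct sp_node *new2 = sp_alloc(end, n->end, n->policy);
		if (!new2) {
			ret = -ENOMEM;
			goto out;	/* unlock and report the failure */
		}
		n->end = start;
		sp_insert(sp, new2);
	}
	...
 out:
	mutex_unlock(&sp->mutex);
	return ret;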

[kosaki.motoh...@jp.fujitsu.com: Original patch]
Signed-off-by: Mel Gorman <mgor...@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motoh...@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <c...@linux.com>
Cc: Josh Boyer <jwbo...@gmail.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>

---
 include/linux/mempolicy.h |    2 +-
 mm/mempolicy.c            |   37 ++++++++++++++++---------------------
 2 files changed, 17 insertions(+), 22 deletions(-)

--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -188,7 +188,7 @@ struct sp_node {
 
 struct shared_policy {
        struct rb_root root;
-       spinlock_t lock;
+       struct mutex mutex;
 };
 
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2077,7 +2077,7 @@ bool __mpol_equal(struct mempolicy *a, s
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2141,13 +2141,13 @@ mpol_shared_policy_lookup(struct shared_
 
        if (!sp->root.rb_node)
                return NULL;
-       spin_lock(&sp->lock);
+       mutex_lock(&sp->mutex);
        sn = sp_lookup(sp, idx, idx+1);
        if (sn) {
                mpol_get(sn->policy);
                pol = sn->policy;
        }
-       spin_unlock(&sp->lock);
+       mutex_unlock(&sp->mutex);
        return pol;
 }
 
@@ -2187,10 +2187,10 @@ static struct sp_node *sp_alloc(unsigned
 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
                                 unsigned long end, struct sp_node *new)
 {
-       struct sp_node *n, *new2 = NULL;
+       struct sp_node *n;
+       int ret = 0;
 
-restart:
-       spin_lock(&sp->lock);
+       mutex_lock(&sp->mutex);
        n = sp_lookup(sp, start, end);
        /* Take care of old policies in the same range. */
        while (n && n->start < end) {
@@ -2203,16 +2203,14 @@ restart:
                } else {
                        /* Old policy spanning whole new range. */
                        if (n->end > end) {
+                               struct sp_node *new2;
+                               new2 = sp_alloc(end, n->end, n->policy);
                                if (!new2) {
-                                       spin_unlock(&sp->lock);
-                                       new2 = sp_alloc(end, n->end, n->policy);
-                                       if (!new2)
-                                               return -ENOMEM;
-                                       goto restart;
+                                       ret = -ENOMEM;
+                                       goto out;
                                }
                                n->end = start;
                                sp_insert(sp, new2);
-                               new2 = NULL;
                                break;
                        } else
                                n->end = start;
@@ -2223,12 +2221,9 @@ restart:
        }
        if (new)
                sp_insert(sp, new);
-       spin_unlock(&sp->lock);
-       if (new2) {
-               mpol_put(new2->policy);
-               kmem_cache_free(sn_cache, new2);
-       }
-       return 0;
+out:
+       mutex_unlock(&sp->mutex);
+       return ret;
 }
 
 /**
@@ -2246,7 +2241,7 @@ void mpol_shared_policy_init(struct shar
        int ret;
 
        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
-       spin_lock_init(&sp->lock);
+       mutex_init(&sp->mutex);
 
        if (mpol) {
                struct vm_area_struct pvma;
@@ -2312,7 +2307,7 @@ void mpol_free_shared_policy(struct shar
 
        if (!p->root.rb_node)
                return;
-       spin_lock(&p->lock);
+       mutex_lock(&p->mutex);
        next = rb_first(&p->root);
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
@@ -2321,7 +2316,7 @@ void mpol_free_shared_policy(struct shar
                mpol_put(n->policy);
                kmem_cache_free(sn_cache, n);
        }
-       spin_unlock(&p->lock);
+       mutex_unlock(&p->mutex);
 }
 
 /* assumes fs == KERNEL_DS */


Patches currently in stable-queue which might be from mgor...@suse.de are

queue-3.5/mempolicy-fix-a-memory-corruption-by-refcount-imbalance-in-alloc_pages_vma.patch
queue-3.5/mempolicy-fix-a-race-in-shared_policy_replace.patch
queue-3.5/mempolicy-remove-mempolicy-sharing.patch
queue-3.5/revert-mm-mempolicy-let-vma_merge-and-vma_split-handle-vma-vm_policy-linkages.patch
queue-3.5/mm-thp-fix-pmd_present-for-split_huge_page-and-prot_none-with-thp.patch
queue-3.5/mempolicy-fix-refcount-leak-in-mpol_set_shared_policy.patch
