Signed-off-by: Rakesh Pandit <rak...@tuxera.com>
---
 fs/aio.c | 55 +++++++++++++++++++++++--------------------------------
 1 file changed, 23 insertions(+), 32 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index d3d55dc..20d690a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -278,32 +278,46 @@ static int aio_set_page_dirty(struct page *page)
 }
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-                       struct page *old, enum migrate_mode mode)
+static int aio_migratepage_helper(struct address_space *mapping,
+                       struct page *new, struct page *old, bool move)
 {
        struct kioctx *ctx;
        unsigned long flags;
-       int rc;
-
-       rc = 0;
+       int rc = 0;
 
-       /* Make sure the old page hasn't already been changed */
+       /* We can potentially race against kioctx teardown here.  Use the
+        * address_space's private data lock to protect the mapping's
+        * private_data.
+        */
        spin_lock(&mapping->private_lock);
        ctx = mapping->private_data;
        if (ctx) {
                pgoff_t idx;
                spin_lock_irqsave(&ctx->completion_lock, flags);
+               if (move)
+                       migrate_page_copy(new, old);
                idx = old->index;
                if (idx < (pgoff_t)ctx->nr_pages) {
                        if (ctx->ring_pages[idx] != old)
                                rc = -EAGAIN;
+                       else if (move)
+                               ctx->ring_pages[idx] = new;
                } else
                        rc = -EINVAL;
                spin_unlock_irqrestore(&ctx->completion_lock, flags);
        } else
-               rc = -EINVAL;
+               rc = move ? -EBUSY : -EINVAL;
        spin_unlock(&mapping->private_lock);
+       return rc;
+}
 
+static int aio_migratepage(struct address_space *mapping, struct page *new,
+                       struct page *old, enum migrate_mode mode)
+{
+       int rc = 0;
+
+       /* Make sure the old page hasn't already been changed */
+       rc = aio_migratepage_helper(mapping, new, old, false);
        if (rc != 0)
                return rc;
 
@@ -317,31 +331,8 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                return rc;
        }
 
-       /* We can potentially race against kioctx teardown here.  Use the
-        * address_space's private data lock to protect the mapping's
-        * private_data.
-        */
-       spin_lock(&mapping->private_lock);
-       ctx = mapping->private_data;
-       if (ctx) {
-               pgoff_t idx;
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               migrate_page_copy(new, old);
-               idx = old->index;
-               if (idx < (pgoff_t)ctx->nr_pages) {
-                       /* And only do the move if things haven't changed */
-                       if (ctx->ring_pages[idx] == old)
-                               ctx->ring_pages[idx] = new;
-                       else
-                               rc = -EAGAIN;
-               } else
-                       rc = -EINVAL;
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       } else
-               rc = -EBUSY;
-       spin_unlock(&mapping->private_lock);
-
-       if (rc == MIGRATEPAGE_SUCCESS)
+       rc = aio_migratepage_helper(mapping, new, old, true);
+       if (rc == 0)
                put_page(old);
        else
                put_page(new);
-- 
1.8.3.1
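
For reviewers skimming the thread, a rough sketch of the call pattern that results from the hunks above (paraphrased from the diff itself, not copied from the tree; the function and helper names, the move flag, and the -EINVAL/-EBUSY split are from the patch, the comments and the elided middle are mine):

/*
 * Sketch only: how aio_migratepage() reads once the duplicated
 * locked checks are folded into aio_migratepage_helper().
 */
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	int rc;

	/* Pass 1, move == false: only validate ring_pages[old->index]
	 * under private_lock/completion_lock; a vanished kioctx is
	 * reported as -EINVAL, matching the old first pass. */
	rc = aio_migratepage_helper(mapping, new, old, false);
	if (rc != 0)
		return rc;

	/* ... unchanged middle of the function, elided here exactly as
	 * it is elided from the diff context above ... */

	/* Pass 2, move == true: migrate_page_copy() and the
	 * ring_pages[idx] = new update happen under the same locks; a
	 * vanished kioctx is reported as -EBUSY, matching the old
	 * second pass. */
	rc = aio_migratepage_helper(mapping, new, old, true);
	if (rc == 0)
		put_page(old);
	else
		put_page(new);

	return rc;
}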
