From: Josef Bacik <jba...@fb.com>

Since we're allocating under atomic we could very easily hit ENOMEM, so if
that's the case and we can block then loop around and try to allocate the
prealloc not under a lock.

We also saw this happen during try_to_release_page in production, in
which case it's completely valid to return ENOMEM so we can tell
try_to_release_page that we can't release this page.

Signed-off-by: Josef Bacik <jba...@fb.com>
---
v1->v2:
- this time with my local changes committed!

 fs/btrfs/extent_io.c | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index dd941885b9c3..fb0c1636c1c4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -590,8 +590,9 @@ static int __clear_extent_bit(struct extent_io_tree *tree, 
u64 start, u64 end,
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        u64 last_end;
-       int err;
+       int err = 0;
        int clear = 0;
+       bool need_prealloc = false;
 
        btrfs_debug_check_extent_io_range(tree, start, end);
 
@@ -614,6 +615,9 @@ static int __clear_extent_bit(struct extent_io_tree *tree, 
u64 start, u64 end,
                 * If we end up needing a new extent state we allocate it later.
                 */
                prealloc = alloc_extent_state(mask);
+               if (!prealloc && need_prealloc)
+                       return -ENOMEM;
+               need_prealloc = false;
        }
 
        spin_lock(&tree->lock);
@@ -673,7 +677,15 @@ static int __clear_extent_bit(struct extent_io_tree *tree, 
u64 start, u64 end,
 
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               BUG_ON(!prealloc);
+               if (!prealloc) {
+                       if (gfpflags_allow_blocking(mask)) {
+                               need_prealloc = true;
+                               spin_unlock(&tree->lock);
+                               goto again;
+                       }
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, start);
                if (err)
                        extent_io_tree_panic(tree, err);
@@ -696,7 +708,15 @@ static int __clear_extent_bit(struct extent_io_tree *tree, 
u64 start, u64 end,
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               BUG_ON(!prealloc);
+               if (!prealloc) {
+                       if (gfpflags_allow_blocking(mask)) {
+                               need_prealloc = true;
+                               spin_unlock(&tree->lock);
+                               goto again;
+                       }
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, end + 1);
                if (err)
                        extent_io_tree_panic(tree, err);
@@ -731,7 +751,7 @@ static int __clear_extent_bit(struct extent_io_tree *tree, 
u64 start, u64 end,
        if (prealloc)
                free_extent_state(prealloc);
 
-       return 0;
+       return err;
 
 }
 
-- 
2.7.5

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to