Extract the unclustered extent allocation code into a new helper,
find_free_extent_unclustered().

The helper uses its return value to tell the caller what to do
next.

This should make find_free_extent() a little easier to read.
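
For reference, the dispatch the caller performs on that return value
looks roughly like the sketch below. This is simplified pseudocode of
the calling pattern, not the kernel source; next_block_group() and
use_found_extent() are hypothetical stand-ins for the surrounding loop
logic in find_free_extent():

	while (bg) {
		ret = find_free_extent_unclustered(bg, last_ptr, &ffe_ctl);
		if (ret == -EAGAIN)
			continue;	/* waited for caching progress, re-search this bg */
		if (ret > 0) {
			/* nothing found here, move on to the next block group */
			bg = next_block_group(bg);
			continue;
		}
		/* ret == 0: ffe_ctl.found_offset is set, do the final checks */
		use_found_extent(ffe_ctl.found_offset);
		break;
	}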

Signed-off-by: Qu Wenruo <w...@suse.com>
Reviewed-by: Su Yue <suy.f...@cn.fujitsu.com>
Reviewed-by: Josef Bacik <jo...@toxicpanda.com>
[Updated to resolve a merge conflict with fb5c39d7a887 ("btrfs: don't use ctl->free_space for max_extent_size")]
---
 fs/btrfs/extent-tree.c | 114 ++++++++++++++++++++++++-----------------
 1 file changed, 68 insertions(+), 46 deletions(-)

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b0c93cf280e..2c9f00cb8f26 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7400,6 +7400,69 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
        return 1;
 }
 
+/*
+ * Return >0 to inform the caller that we found nothing
+ * Return 0 when we found a free extent and set ffe_ctl->found_offset
+ * Return -EAGAIN to inform the caller that we need to re-search this block group
+ */
+static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
+               struct btrfs_free_cluster *last_ptr,
+               struct find_free_extent_ctl *ffe_ctl)
+{
+       u64 offset;
+
+       /*
+        * We are doing an unclustered alloc, set the fragmented flag so we
+        * don't bother trying to setup a cluster again until we get more space.
+        */
+       if (unlikely(last_ptr)) {
+               spin_lock(&last_ptr->lock);
+               last_ptr->fragmented = 1;
+               spin_unlock(&last_ptr->lock);
+       }
+       if (ffe_ctl->cached) {
+               struct btrfs_free_space_ctl *free_space_ctl;
+
+               free_space_ctl = bg->free_space_ctl;
+               spin_lock(&free_space_ctl->tree_lock);
+               if (free_space_ctl->free_space <
+                   ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
+                   ffe_ctl->empty_size) {
+                       ffe_ctl->total_free_space = max_t(u64,
+                                       ffe_ctl->total_free_space,
+                                       free_space_ctl->free_space);
+                       spin_unlock(&free_space_ctl->tree_lock);
+                       return 1;
+               }
+               spin_unlock(&free_space_ctl->tree_lock);
+       }
+
+       offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
+                       ffe_ctl->num_bytes, ffe_ctl->empty_size,
+                       &ffe_ctl->max_extent_size);
+
+       /*
+        * If we didn't find a chunk, and we haven't failed on this block group
+        * before, and this block group is in the middle of caching and we are
+        * ok with waiting, then go ahead and wait for progress to be made, and
+        * set @retry_unclustered to true.
+        *
+        * If @retry_unclustered is true then we've already waited on this block
+        * group once and should move on to the next block group.
+        */
+       if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+           ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
+               wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+                                               ffe_ctl->empty_size);
+               ffe_ctl->retry_unclustered = true;
+               return -EAGAIN;
+       } else if (!offset) {
+               return 1;
+       }
+       ffe_ctl->found_offset = offset;
+       return 0;
+}
+
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -7602,54 +7665,13 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
                        /* ret == -ENOENT case falls through */
                }
 
-               /*
-                * We are doing an unclustered alloc, set the fragmented flag so
-                * we don't bother trying to setup a cluster again until we get
-                * more space.
-                */
-               if (unlikely(last_ptr)) {
-                       spin_lock(&last_ptr->lock);
-                       last_ptr->fragmented = 1;
-                       spin_unlock(&last_ptr->lock);
-               }
-               if (ffe_ctl.cached) {
-                       struct btrfs_free_space_ctl *ctl =
-                               block_group->free_space_ctl;
-
-                       spin_lock(&ctl->tree_lock);
-                       if (ctl->free_space <
-                           num_bytes + ffe_ctl.empty_cluster + empty_size) {
-                               ffe_ctl.total_free_space = max(ctl->free_space,
-                                               ffe_ctl.total_free_space);
-                               spin_unlock(&ctl->tree_lock);
-                               goto loop;
-                       }
-                       spin_unlock(&ctl->tree_lock);
-               }
-
-               ffe_ctl.found_offset = btrfs_find_space_for_alloc(block_group,
-                               ffe_ctl.search_start, num_bytes, empty_size,
-                               &ffe_ctl.max_extent_size);
-               /*
-                * If we didn't find a chunk, and we haven't failed on this
-                * block group before, and this block group is in the middle of
-                * caching and we are ok with waiting, then go ahead and wait
-                * for progress to be made, and set ffe_ctl.retry_unclustered to
-                * true.
-                *
-                * If ffe_ctl.retry_unclustered is true then we've already
-                * waited on this block group once and should move on to the
-                * next block group.
-                */
-               if (!ffe_ctl.found_offset && !ffe_ctl.retry_unclustered &&
-                   !ffe_ctl.cached && ffe_ctl.loop > LOOP_CACHING_NOWAIT) {
-                       wait_block_group_cache_progress(block_group,
-                                               num_bytes + empty_size);
-                       ffe_ctl.retry_unclustered = true;
+               ret = find_free_extent_unclustered(block_group, last_ptr,
+                                                  &ffe_ctl);
+               if (ret == -EAGAIN)
                        goto have_block_group;
-               } else if (!ffe_ctl.found_offset) {
+               else if (ret > 0)
                        goto loop;
-               }
+               /* ret == 0 case falls through */
 checks:
                ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
                                             fs_info->stripesize);
-- 
2.19.1
