Re: [PATCH v3 2/4] btrfs: Refactor clustered extent allocation into find_free_extent_clustered()

2018-10-12 Thread Josef Bacik
On Fri, Oct 12, 2018 at 02:18:17PM +0800, Qu Wenruo wrote:
> We have two main methods to find free extents inside a block group:
> 1) clustered allocation
> 2) unclustered allocation
> 
> This patch will extract the clustered allocation into
> find_free_extent_clustered() to make it a little easier to read.
> 
> Instead of jumping between different labels in find_free_extent(), the
> helper function will use its return value to indicate the different behaviors.
> 
> Signed-off-by: Qu Wenruo 
> Reviewed-by: Su Yue 

Reviewed-by: Josef Bacik 

Thanks,

Josef


[PATCH v3 2/4] btrfs: Refactor clustered extent allocation into find_free_extent_clustered()

2018-10-12 Thread Qu Wenruo
We have two main methods to find free extents inside a block group:
1) clustered allocation
2) unclustered allocation

This patch will extract the clustered allocation into
find_free_extent_clustered() to make it a little easier to read.

Instead of jumping between different labels in find_free_extent(), the
helper function will use its return value to indicate the different behaviors.

Signed-off-by: Qu Wenruo 
Reviewed-by: Su Yue 
---
 fs/btrfs/extent-tree.c | 244 ++++++++++++++++++++++++------------------------
 1 file changed, 121 insertions(+), 123 deletions(-)
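
Note: for context, the caller side in find_free_extent() now dispatches on
the helper's return value instead of jumping between labels directly. The
snippet below is only an illustrative sketch of that dispatch, not part of
the hunk itself; checks, have_block_group and loop are the existing labels
in find_free_extent(), and block_group, last_ptr, ffe_ctl and delalloc are
its existing locals:

    struct btrfs_block_group_cache *cluster_bg = NULL;
    int ret;

    ret = find_free_extent_clustered(block_group, last_ptr,
                                     &ffe_ctl, &cluster_bg);
    if (ret == 0) {
        /* Space reserved, possibly from a different block group */
        if (cluster_bg && cluster_bg != block_group) {
            btrfs_release_block_group(block_group, delalloc);
            block_group = cluster_bg;
        }
        goto checks;
    } else if (ret == -EAGAIN) {
        /* The block group is still caching, re-search it */
        goto have_block_group;
    } else if (ret > 0) {
        /* Nothing usable in this block group, try the next one */
        goto loop;
    }
    /* ret == -ENOENT: fall through to unclustered allocation */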

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index dc10f6fd26af..896d54b3c554 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7261,6 +7261,115 @@ struct find_free_extent_ctl {
u64 found_offset;
 };
 
+
+/*
+ * Helper function for find_free_extent().
+ *
+ * Return -ENOENT to inform caller that we need fallback to unclustered mode.
+ * Return -EAGAIN to inform caller that we need to re-search this block group
+ * Return >0 to inform caller that we find nothing
+ * Return 0 means we have found a location and set ffe_ctl->found_offset.
+ */
+static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
+   struct btrfs_free_cluster *last_ptr,
+   struct find_free_extent_ctl *ffe_ctl,
+   struct btrfs_block_group_cache **cluster_bg_ret)
+{
+   struct btrfs_fs_info *fs_info = bg->fs_info;
+   struct btrfs_block_group_cache *cluster_bg;
+   u64 aligned_cluster;
+   u64 offset;
+   int ret;
+
+   cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
+   if (!cluster_bg)
+   goto refill_cluster;
+   if (cluster_bg != bg && (cluster_bg->ro ||
+   !block_group_bits(cluster_bg, ffe_ctl->flags)))
+   goto release_cluster;
+
+   offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
+   ffe_ctl->num_bytes, cluster_bg->key.objectid,
+   &ffe_ctl->max_extent_size);
+   if (offset) {
+   /* we have a block, we're done */
+   spin_unlock(&last_ptr->refill_lock);
+   trace_btrfs_reserve_extent_cluster(cluster_bg,
+   ffe_ctl->search_start, ffe_ctl->num_bytes);
+   *cluster_bg_ret = cluster_bg;
+   ffe_ctl->found_offset = offset;
+   return 0;
+   }
+   WARN_ON(last_ptr->block_group != cluster_bg);
+release_cluster:
+   /* If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new clusters, so
+    * lets just skip it and let the allocator find whatever block it can
+    * find. If we reach this point, we will have tried the cluster
+    * allocator plenty of times and not have found anything, so we are
+    * likely way too fragmented for the clustering stuff to find anything.
+    *
+    * However, if the cluster is taken from the current block group,
+    * release the cluster first, so that we stand a better chance of
+    * succeeding in the unclustered allocation.
+    */
+   if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
+   spin_unlock(&last_ptr->refill_lock);
+   btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
+   return -ENOENT;
+   }
+
+   /* This cluster didn't work out, free it and start over */
+   btrfs_return_cluster_to_free_space(NULL, last_ptr);
+
+   if (cluster_bg != bg)
+   btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
+
+refill_cluster:
+   if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
+   spin_unlock(&last_ptr->refill_lock);
+   return -ENOENT;
+   }
+
+   aligned_cluster = max_t(u64,
+   ffe_ctl->empty_cluster + ffe_ctl->empty_size,
+   bg->full_stripe_len);
+   ret = btrfs_find_space_cluster(fs_info, bg, last_ptr,
+   ffe_ctl->search_start, ffe_ctl->num_bytes,
+   aligned_cluster);
+   if (ret == 0) {
+   /* now pull our allocation out of this cluster */
+   offset = btrfs_alloc_from_cluster(bg, last_ptr,
+   ffe_ctl->num_bytes,
+   ffe_ctl->search_start,
+   &ffe_ctl->max_extent_size);
+   if (offset) {
+   /* we found one, proceed */
+   spin_unlock(&last_ptr->refill_lock);
+   trace_btrfs_reserve_extent_cluster(bg,
+   ffe_ctl->search_start, ffe_ctl->num_bytes);
+   ffe_ctl->found_offset = offset;
+   return 0;
+   }
+   } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
+  !ffe_ctl->retry_clustered) {
+   spin_unlock(&last_ptr->refill_lock);
+
+   ffe_ctl->retry_clustered =