If the priority-aware allocator is in use, bg->priority_tree->groups_sem should be taken (down_write) instead of space_info->groups_sem.
Signed-off-by: Su Yue <suy.f...@cn.fujitsu.com> --- fs/btrfs/extent-tree.c | 60 +++++++++++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6627bbe56ad5..5c9536609621 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -10944,6 +10944,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) struct btrfs_block_group_cache *block_group; struct btrfs_space_info *space_info; struct btrfs_trans_handle *trans; + bool use_priority = is_priority_alloc_enabled(fs_info); int ret = 0; if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) @@ -10953,6 +10954,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) while (!list_empty(&fs_info->unused_bgs)) { u64 start, end; int trimming; + struct btrfs_priority_tree *pt; block_group = list_first_entry(&fs_info->unused_bgs, struct btrfs_block_group_cache, @@ -10969,29 +10971,55 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) mutex_lock(&fs_info->delete_unused_bgs_mutex); - /* Don't want to race with allocators so take the groups_sem */ - down_write(&space_info->groups_sem); - spin_lock(&block_group->lock); - if (block_group->reserved || block_group->pinned || - btrfs_block_group_used(&block_group->item) || - block_group->ro || - list_is_singular(&block_group->list)) { + if (use_priority) { + spin_lock(&block_group->lock); + if (block_group->reserved || block_group->pinned || + btrfs_block_group_used(&block_group->item) || + block_group->ro || + block_group->priority != 0) { + trace_btrfs_skip_unused_block_group( + block_group); + spin_unlock(&block_group->lock); + goto next; + } + block_group->priority = PRIORITY_BG_BUSY; + spin_unlock(&block_group->lock); + pt = block_group->priority_tree; + down_write(&pt->groups_sem); + } else { /* - * We want to bail if we made new allocations or have - * outstanding allocations in this block group. 
We do - * the ro check in case balance is currently acting on - * this block group. + * Don't want to race with allocators so take the + * groups_sem */ - trace_btrfs_skip_unused_block_group(block_group); + down_write(&space_info->groups_sem); + spin_lock(&block_group->lock); + if (block_group->reserved || block_group->pinned || + btrfs_block_group_used(&block_group->item) || + block_group->ro || + list_is_singular(&block_group->list)) { + /* + * We want to bail if we made new allocations + * or have outstanding allocations in this + * block group. We do the ro check in case + * balance is currently acting on this block + * group. + */ + trace_btrfs_skip_unused_block_group( + block_group); + spin_unlock(&block_group->lock); + up_write(&space_info->groups_sem); + goto next; + } spin_unlock(&block_group->lock); - up_write(&space_info->groups_sem); - goto next; } - spin_unlock(&block_group->lock); /* We don't want to force the issue, only flip if it's ok. */ ret = inc_block_group_ro(block_group, 0); - up_write(&space_info->groups_sem); + if (use_priority) + up_write(&pt->groups_sem); + else + up_write(&space_info->groups_sem); + if (ret < 0) { ret = 0; goto next; -- 2.19.1