inc_block_group_ro() is used in a few logical parts of the block group code; temporarily export it (as __btrfs_inc_block_group_ro) so we can move things in pieces.
Signed-off-by: Josef Bacik <jo...@toxicpanda.com> --- fs/btrfs/block-group.h | 2 ++ fs/btrfs/extent-tree.c | 15 ++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 80b388ece277..26c5bf876737 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -185,4 +185,6 @@ static inline int btrfs_block_group_cache_done( cache->cached == BTRFS_CACHE_ERROR; } +int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, + int force); #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 90348105991d..54dc910eb6c8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6836,7 +6836,8 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) * data in this block group. That check should be done by relocation routine, * not this function. */ -static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) +int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, + int force) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -6946,14 +6947,14 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) goto out; } - ret = inc_block_group_ro(cache, 0); + ret = __btrfs_inc_block_group_ro(cache, 0); if (!ret) goto out; alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags); ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); if (ret < 0) goto out; - ret = inc_block_group_ro(cache, 0); + ret = __btrfs_inc_block_group_ro(cache, 0); out: if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { alloc_flags = update_block_group_flags(fs_info, cache->flags); @@ -7486,7 +7487,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) set_avail_alloc_bits(info, cache->flags); if (btrfs_chunk_readonly(info, cache->key.objectid)) { - inc_block_group_ro(cache, 1); + __btrfs_inc_block_group_ro(cache, 1); } else if 
(btrfs_block_group_used(&cache->item) == 0) { ASSERT(list_empty(&cache->bg_list)); btrfs_mark_bg_unused(cache); @@ -7507,11 +7508,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) list_for_each_entry(cache, &space_info->block_groups[BTRFS_RAID_RAID0], list) - inc_block_group_ro(cache, 1); + __btrfs_inc_block_group_ro(cache, 1); list_for_each_entry(cache, &space_info->block_groups[BTRFS_RAID_SINGLE], list) - inc_block_group_ro(cache, 1); + __btrfs_inc_block_group_ro(cache, 1); } btrfs_add_raid_kobjects(info); @@ -8051,7 +8052,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) spin_unlock(&block_group->lock); /* We don't want to force the issue, only flip if it's ok. */ - ret = inc_block_group_ro(block_group, 0); + ret = __btrfs_inc_block_group_ro(block_group, 0); up_write(&space_info->groups_sem); if (ret < 0) { ret = 0; -- 2.21.0