Migrate the nocow-writers helpers (btrfs_inc_nocow_writers, btrfs_dec_nocow_writers, btrfs_wait_nocow_writers) and the block group reservation helpers (btrfs_dec_block_group_reservations, btrfs_wait_block_group_reservations) from extent-tree.c into block-group.c, and move their prototypes from ctree.h into block-group.h. These are straightforward code moves with no functional change.

Signed-off-by: Josef Bacik <jo...@toxicpanda.com>
---
 fs/btrfs/block-group.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/block-group.h |  6 ++++
 fs/btrfs/ctree.h       |  6 ----
 fs/btrfs/extent-tree.c | 82 -------------------------------------------------
 4 files changed, 89 insertions(+), 88 deletions(-)

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index b15d7070bcfd..aeb2c806b2b0 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -4,6 +4,7 @@
  */
 #include "ctree.h"
 #include "block-group.h"
+#include "space-info.h"
 
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 {
@@ -121,3 +122,85 @@ btrfs_next_block_group(struct btrfs_block_group_cache *cache)
        spin_unlock(&fs_info->block_group_cache_lock);
        return cache;
 }
+
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+       struct btrfs_block_group_cache *bg;
+       bool ret = true;
+
+       bg = btrfs_lookup_block_group(fs_info, bytenr);
+       if (!bg)
+               return false;
+
+       spin_lock(&bg->lock);
+       if (bg->ro)
+               ret = false;
+       else
+               atomic_inc(&bg->nocow_writers);
+       spin_unlock(&bg->lock);
+
+       /* no put on block group, done by btrfs_dec_nocow_writers */
+       if (!ret)
+               btrfs_put_block_group(bg);
+
+       return ret;
+
+}
+
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+       struct btrfs_block_group_cache *bg;
+
+       bg = btrfs_lookup_block_group(fs_info, bytenr);
+       ASSERT(bg);
+       if (atomic_dec_and_test(&bg->nocow_writers))
+               wake_up_var(&bg->nocow_writers);
+       /*
+        * Once for our lookup and once for the lookup done by a previous call
+        * to btrfs_inc_nocow_writers()
+        */
+       btrfs_put_block_group(bg);
+       btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
+{
+       wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
+}
+
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+                                       const u64 start)
+{
+       struct btrfs_block_group_cache *bg;
+
+       bg = btrfs_lookup_block_group(fs_info, start);
+       ASSERT(bg);
+       if (atomic_dec_and_test(&bg->reservations))
+               wake_up_var(&bg->reservations);
+       btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
+{
+       struct btrfs_space_info *space_info = bg->space_info;
+
+       ASSERT(bg->ro);
+
+       if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
+               return;
+
+       /*
+        * Our block group is read only but before we set it to read only,
+        * some task might have had allocated an extent from it already, but it
+        * has not yet created a respective ordered extent (and added it to a
+        * root's list of ordered extents).
+        * Therefore wait for any task currently allocating extents, since the
+        * block group's reservations counter is incremented while a read lock
+        * on the groups' semaphore is held and decremented after releasing
+        * the read access on that semaphore and creating the ordered extent.
+        */
+       down_write(&space_info->groups_sem);
+       up_write(&space_info->groups_sem);
+
+       wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index ddd91c7ed44a..bc2ed52210a3 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -161,5 +161,11 @@ struct btrfs_block_group_cache *
 btrfs_next_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+                                        const u64 start);
+void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
 
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4c6e643bc65d..c4ae6714e3d4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2460,12 +2460,6 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
        return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
-void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
-                                        const u64 start);
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           unsigned long count);
 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 01a45674382e..63b594532b92 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3560,51 +3560,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
        return readonly;
 }
 
-bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
-       struct btrfs_block_group_cache *bg;
-       bool ret = true;
-
-       bg = btrfs_lookup_block_group(fs_info, bytenr);
-       if (!bg)
-               return false;
-
-       spin_lock(&bg->lock);
-       if (bg->ro)
-               ret = false;
-       else
-               atomic_inc(&bg->nocow_writers);
-       spin_unlock(&bg->lock);
-
-       /* no put on block group, done by btrfs_dec_nocow_writers */
-       if (!ret)
-               btrfs_put_block_group(bg);
-
-       return ret;
-
-}
-
-void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
-       struct btrfs_block_group_cache *bg;
-
-       bg = btrfs_lookup_block_group(fs_info, bytenr);
-       ASSERT(bg);
-       if (atomic_dec_and_test(&bg->nocow_writers))
-               wake_up_var(&bg->nocow_writers);
-       /*
-        * Once for our lookup and once for the lookup done by a previous call
-        * to btrfs_inc_nocow_writers()
-        */
-       btrfs_put_block_group(bg);
-       btrfs_put_block_group(bg);
-}
-
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
-{
-       wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
-}
-
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
        u64 extra_flags = chunk_to_extended(flags) &
@@ -4279,43 +4234,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
        atomic_inc(&bg->reservations);
 }
 
-void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
-                                       const u64 start)
-{
-       struct btrfs_block_group_cache *bg;
-
-       bg = btrfs_lookup_block_group(fs_info, start);
-       ASSERT(bg);
-       if (atomic_dec_and_test(&bg->reservations))
-               wake_up_var(&bg->reservations);
-       btrfs_put_block_group(bg);
-}
-
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
-{
-       struct btrfs_space_info *space_info = bg->space_info;
-
-       ASSERT(bg->ro);
-
-       if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
-               return;
-
-       /*
-        * Our block group is read only but before we set it to read only,
-        * some task might have had allocated an extent from it already, but it
-        * has not yet created a respective ordered extent (and added it to a
-        * root's list of ordered extents).
-        * Therefore wait for any task currently allocating extents, since the
-        * block group's reservations counter is incremented while a read lock
-        * on the groups' semaphore is held and decremented after releasing
-        * the read access on that semaphore and creating the ordered extent.
-        */
-       down_write(&space_info->groups_sem);
-       up_write(&space_info->groups_sem);
-
-       wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
-}
-
 /**
  * btrfs_add_reserved_bytes - update the block_group and space info counters
  * @cache:     The cache we are manipulating
-- 
2.14.3

Reply via email to