The helper scrub_free_wr_ctx is used only once, so fold its body into
scrub_free_ctx, where it continues the sctx shutdown sequence; there is
no need to keep it as a separate function.

Signed-off-by: David Sterba <dste...@suse.com>
---
 fs/btrfs/scrub.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e88edbeb7644..43c208dff67f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -289,7 +289,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                               u64 *extent_physical,
                               struct btrfs_device **extent_dev,
                               int *extent_mirror_num);
-static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                                    struct scrub_page *spage);
 static void scrub_wr_submit(struct scrub_ctx *sctx);
@@ -640,7 +639,10 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
        if (!sctx)
                return;
 
-       scrub_free_wr_ctx(&sctx->wr_ctx);
+       mutex_lock(&sctx->wr_ctx.wr_lock);
+       kfree(sctx->wr_ctx.wr_curr_bio);
+       sctx->wr_ctx.wr_curr_bio = NULL;
+       mutex_unlock(&sctx->wr_ctx.wr_lock);
 
        /* this can happen when scrub is cancelled */
        if (sctx->curr != -1) {
@@ -4337,14 +4339,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
        btrfs_put_bbio(bbio);
 }
 
-static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
-{
-       mutex_lock(&wr_ctx->wr_lock);
-       kfree(wr_ctx->wr_curr_bio);
-       wr_ctx->wr_curr_bio = NULL;
-       mutex_unlock(&wr_ctx->wr_lock);
-}
-
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace)
 {
-- 
2.12.0

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to