On 2019/06/13 23:24, Josef Bacik wrote:
> On Fri, Jun 07, 2019 at 10:10:20PM +0900, Naohiro Aota wrote:
>> Tree-manipulating operations like merging nodes often release
>> once-allocated tree nodes. Btrfs cleans such nodes so that pages in the
>> node are not uselessly written out. On HMZONED drives, however, this
>> optimization blocks subsequent IOs, as cancelling the write-out of the
>> freed blocks breaks the sequential write sequence expected by the
>> device.
>>
>> This patch introduces a list of clean extent buffers that have been
>> released in a transaction. Btrfs consults the list before writing out and
>> waiting for the IOs, and it redirties a buffer if 1) it's in a sequential BG,
>> 2) it's in the un-submitted range, and 3) it's not under IO. Thus, such buffers
>> are marked for IO in btrfs_write_and_wait_transaction() to send proper bios
>> to the disk.
>>
>> Signed-off-by: Naohiro Aota <naohiro.a...@wdc.com>
>> ---
>>   fs/btrfs/disk-io.c     | 27 ++++++++++++++++++++++++---
>>   fs/btrfs/extent_io.c   |  1 +
>>   fs/btrfs/extent_io.h   |  2 ++
>>   fs/btrfs/transaction.c | 35 +++++++++++++++++++++++++++++++++++
>>   fs/btrfs/transaction.h |  3 +++
>>   5 files changed, 65 insertions(+), 3 deletions(-)
>>
>> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
>> index 6651986da470..c6147fce648f 100644
>> --- a/fs/btrfs/disk-io.c
>> +++ b/fs/btrfs/disk-io.c
>> @@ -535,7 +535,9 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
>>      if (csum_tree_block(eb, result))
>>              return -EINVAL;
>>   
>> -    if (btrfs_header_level(eb))
>> +    if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags))
>> +            ret = 0;
>> +    else if (btrfs_header_level(eb))
>>              ret = btrfs_check_node(eb);
>>      else
>>              ret = btrfs_check_leaf_full(eb);
>> @@ -1115,10 +1117,20 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
>>   void btrfs_clean_tree_block(struct extent_buffer *buf)
>>   {
>>      struct btrfs_fs_info *fs_info = buf->fs_info;
>> -    if (btrfs_header_generation(buf) ==
>> -        fs_info->running_transaction->transid) {
>> +    struct btrfs_transaction *cur_trans = fs_info->running_transaction;
>> +
>> +    if (btrfs_header_generation(buf) == cur_trans->transid) {
>>              btrfs_assert_tree_locked(buf);
>>   
>> +            if (btrfs_fs_incompat(fs_info, HMZONED) &&
>> +                list_empty(&buf->release_list)) {
>> +                    atomic_inc(&buf->refs);
>> +                    spin_lock(&cur_trans->releasing_ebs_lock);
>> +                    list_add_tail(&buf->release_list,
>> +                                  &cur_trans->releasing_ebs);
>> +                    spin_unlock(&cur_trans->releasing_ebs_lock);
>> +            }
>> +
>>              if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
>>                      percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
>>                                               -buf->len,
>> @@ -4533,6 +4545,15 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
>>      btrfs_destroy_pinned_extent(fs_info,
>>                                  fs_info->pinned_extents);
>>   
>> +    while (!list_empty(&cur_trans->releasing_ebs)) {
>> +            struct extent_buffer *eb;
>> +
>> +            eb = list_first_entry(&cur_trans->releasing_ebs,
>> +                                  struct extent_buffer, release_list);
>> +            list_del_init(&eb->release_list);
>> +            free_extent_buffer(eb);
>> +    }
>> +
>>      cur_trans->state = TRANS_STATE_COMPLETED;
>>      wake_up(&cur_trans->commit_wait);
>>   }
>> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
>> index 13fca7bfc1f2..c73c69e2bef4 100644
>> --- a/fs/btrfs/extent_io.c
>> +++ b/fs/btrfs/extent_io.c
>> @@ -4816,6 +4816,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
>>      init_waitqueue_head(&eb->read_lock_wq);
>>   
>>      btrfs_leak_debug_add(&eb->leak_list, &buffers);
>> +    INIT_LIST_HEAD(&eb->release_list);
>>   
>>      spin_lock_init(&eb->refs_lock);
>>      atomic_set(&eb->refs, 1);
>> diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
>> index aa18a16a6ed7..2987a01f84f9 100644
>> --- a/fs/btrfs/extent_io.h
>> +++ b/fs/btrfs/extent_io.h
>> @@ -58,6 +58,7 @@ enum {
>>      EXTENT_BUFFER_IN_TREE,
>>      /* write IO error */
>>      EXTENT_BUFFER_WRITE_ERR,
>> +    EXTENT_BUFFER_NO_CHECK,
>>   };
>>   
>>   /* these are flags for __process_pages_contig */
>> @@ -186,6 +187,7 @@ struct extent_buffer {
>>       */
>>      wait_queue_head_t read_lock_wq;
>>      struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
>> +    struct list_head release_list;
>>   #ifdef CONFIG_BTRFS_DEBUG
>>      atomic_t spinning_writers;
>>      atomic_t spinning_readers;
>> diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
>> index 3f6811cdf803..ded40ad75419 100644
>> --- a/fs/btrfs/transaction.c
>> +++ b/fs/btrfs/transaction.c
>> @@ -236,6 +236,8 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
>>      spin_lock_init(&cur_trans->dirty_bgs_lock);
>>      INIT_LIST_HEAD(&cur_trans->deleted_bgs);
>>      spin_lock_init(&cur_trans->dropped_roots_lock);
>> +    INIT_LIST_HEAD(&cur_trans->releasing_ebs);
>> +    spin_lock_init(&cur_trans->releasing_ebs_lock);
>>      list_add_tail(&cur_trans->list, &fs_info->trans_list);
>>      extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
>>                      IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
>> @@ -2219,7 +2221,31 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
>>   
>>      wake_up(&fs_info->transaction_wait);
>>   
>> +    if (btrfs_fs_incompat(fs_info, HMZONED)) {
>> +            struct extent_buffer *eb;
>> +
>> +            list_for_each_entry(eb, &cur_trans->releasing_ebs,
>> +                                release_list) {
>> +                    struct btrfs_block_group_cache *cache;
>> +
>> +                    cache = btrfs_lookup_block_group(fs_info, eb->start);
>> +                    if (!cache)
>> +                            continue;
>> +                    mutex_lock(&cache->submit_lock);
>> +                    if (cache->alloc_type == BTRFS_ALLOC_SEQ &&
>> +                        cache->submit_offset <= eb->start &&
>> +                        !extent_buffer_under_io(eb)) {
>> +                            set_extent_buffer_dirty(eb);
>> +                            cache->space_info->bytes_readonly += eb->len;
> 
> Huh?
> 

I'm tracking once-allocated-then-freed regions in "space_info->bytes_readonly".
As I wrote in the other reply, I can add and use
"space_info->bytes_zone_unavailable" instead.

>> +                            set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
>> +                    }
>> +                    mutex_unlock(&cache->submit_lock);
>> +                    btrfs_put_block_group(cache);
>> +            }
>> +    }
>> +
> 
> Helper here please.
>>      ret = btrfs_write_and_wait_transaction(trans);
>> +
>>      if (ret) {
>>              btrfs_handle_fs_error(fs_info, ret,
>>                                    "Error while writing out transaction");
>> @@ -2227,6 +2253,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
>>              goto scrub_continue;
>>      }
>>   
>> +    while (!list_empty(&cur_trans->releasing_ebs)) {
>> +            struct extent_buffer *eb;
>> +
>> +            eb = list_first_entry(&cur_trans->releasing_ebs,
>> +                                  struct extent_buffer, release_list);
>> +            list_del_init(&eb->release_list);
>> +            free_extent_buffer(eb);
>> +    }
>> +
> 
> Another helper, and also can't we release eb's above that we didn't need to
> re-mark dirty?  Thanks,
> 
> Josef
> 

Hm, we can do so. I'll change the code in the next version.
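
For reference, a rough sketch of how those two loops in
btrfs_commit_transaction() could become helpers in fs/btrfs/transaction.c,
also dropping buffers we did not redirty right after the check. The helper
names are hypothetical, and the space_info accounting is left out pending the
discussion above:

    /*
     * Redirty released extent buffers that sit in the not-yet-submitted part
     * of a sequential (HMZONED) block group, so the following write-out stays
     * sequential. Buffers that do not need redirtying are released here.
     */
    static void btrfs_redirty_released_ebs(struct btrfs_fs_info *fs_info,
                                           struct btrfs_transaction *cur_trans)
    {
            struct extent_buffer *eb, *tmp;

            if (!btrfs_fs_incompat(fs_info, HMZONED))
                    return;

            list_for_each_entry_safe(eb, tmp, &cur_trans->releasing_ebs,
                                     release_list) {
                    struct btrfs_block_group_cache *cache;
                    bool redirtied = false;

                    cache = btrfs_lookup_block_group(fs_info, eb->start);
                    if (cache) {
                            mutex_lock(&cache->submit_lock);
                            if (cache->alloc_type == BTRFS_ALLOC_SEQ &&
                                cache->submit_offset <= eb->start &&
                                !extent_buffer_under_io(eb)) {
                                    set_extent_buffer_dirty(eb);
                                    set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
                                    redirtied = true;
                            }
                            mutex_unlock(&cache->submit_lock);
                            btrfs_put_block_group(cache);
                    }

                    /* Buffers we did not redirty can be dropped right away. */
                    if (!redirtied) {
                            spin_lock(&cur_trans->releasing_ebs_lock);
                            list_del_init(&eb->release_list);
                            spin_unlock(&cur_trans->releasing_ebs_lock);
                            free_extent_buffer(eb);
                    }
            }
    }

    /* Drop the references taken in btrfs_clean_tree_block(). */
    static void btrfs_free_released_ebs(struct btrfs_transaction *cur_trans)
    {
            while (!list_empty(&cur_trans->releasing_ebs)) {
                    struct extent_buffer *eb;

                    eb = list_first_entry(&cur_trans->releasing_ebs,
                                          struct extent_buffer, release_list);
                    list_del_init(&eb->release_list);
                    free_extent_buffer(eb);
            }
    }

With this split, only the redirtied buffers stay on releasing_ebs until after
btrfs_write_and_wait_transaction(), and the commit path itself just calls the
two helpers.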
Thanks,
