On Fri, Jun 20, 2025 at 03:58:26AM +0800, Alan Huang wrote:
> Signed-off-by: Alan Huang <[email protected]>
> ---
>  fs/bcachefs/btree_iter.c | 65 ++++++++++++++++++++--------------------
>  fs/bcachefs/btree_iter.h | 14 +++++++++
>  2 files changed, 46 insertions(+), 33 deletions(-)
> 
> diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
> index 352f9cd2634f..0168c4023c74 100644
> --- a/fs/bcachefs/btree_iter.c
> +++ b/fs/bcachefs/btree_iter.c
> @@ -3238,32 +3238,30 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long
>       }
>  
>       EBUG_ON(trans->mem);
> +     EBUG_ON(trans->mem_bytes);
> +     EBUG_ON(trans->mem_top);

??? What are these asserts supposed to catch?

> +     EBUG_ON(new_bytes > BTREE_TRANS_MEM_MAX);

This can't be a BUG_ON(); it's not something we can prove can't happen
by static analysis. It needs to be a warning (with debug info, like the
previous warning provided!)
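
Something roughly like this (untested sketch; the exact message, and
whether we bail with an error or recover some other way, is up to you,
and the old debug dump can go in the same branch) would at least keep
the offending size around when it fires:

        /* sketch: warn with the size that blew the limit instead of BUGging */
        if (WARN(new_bytes > BTREE_TRANS_MEM_MAX,
                 "%s(): %lu bytes > BTREE_TRANS_MEM_MAX (%lu)",
                 __func__,
                 (unsigned long) new_bytes,
                 (unsigned long) BTREE_TRANS_MEM_MAX))
                return ERR_PTR(-ENOMEM);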

> +     
> +     bool lock_dropped = false;
> +     new_mem = allocate_dropping_locks_norelock(trans, lock_dropped, kmalloc(new_bytes, _gfp));
> +     if (!new_mem) {
> +             new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
> +             new_bytes = BTREE_TRANS_MEM_MAX;
> +             trans->used_mempool = true;
> +     }
>  
> -     new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
> -     if (unlikely(!new_mem)) {
> -             bch2_trans_unlock(trans);
> -
> -             new_mem = kmalloc(new_bytes, GFP_KERNEL);
> -             if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
> -                     new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
> -                     new_bytes = BTREE_TRANS_MEM_MAX;
> -                     trans->used_mempool = true;
> -             }
> -
> -             EBUG_ON(!new_mem);
> +     EBUG_ON(!new_mem);
>  
> -             trans->mem = new_mem;
> -             trans->mem_bytes = new_bytes;
> +     trans->mem = new_mem;
> +     trans->mem_bytes = new_bytes;
>  
> +     if (unlikely(lock_dropped)) {
>               ret = bch2_trans_relock(trans);
>               if (ret)
>                       return ERR_PTR(ret);
>       }
>  
> -     trans->mem = new_mem;
> -     trans->mem_bytes = new_bytes;
> -
> -     p = trans->mem + trans->mem_top;
> +     p = trans->mem;
>       trans->mem_top += size;
>       memset(p, 0, size);
>       return p;
> @@ -3324,22 +3322,23 @@ u32 bch2_trans_begin(struct btree_trans *trans)
>       trans->mem_top                  = 0;
>  
>       if (trans->restarted == BCH_ERR_transaction_restart_mem_realloced) {
> -             EBUG_ON(!trans->mem || !trans->mem_bytes);
>               unsigned new_bytes = trans->realloc_bytes_required;
> -             void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
> -             if (unlikely(!new_mem)) {
> -                     bch2_trans_unlock(trans);
> -                     new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
> -
> -                     EBUG_ON(new_bytes > BTREE_TRANS_MEM_MAX);
> -
> -                     if (!new_mem) {
> -                             new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
> -                             new_bytes = BTREE_TRANS_MEM_MAX;
> -                             trans->used_mempool = true;
> -                             kfree(trans->mem);
> -                     }
> -                }
> +             EBUG_ON(new_bytes > BTREE_TRANS_MEM_MAX);
> +             EBUG_ON(!trans->mem);
> +             EBUG_ON(!trans->mem_bytes);

Not sure what the purpose of these assertions even is

> +
> +             bool lock_dropped = false;
> +             void *new_mem = allocate_dropping_locks_norelock(trans, lock_dropped,
> +                                     krealloc(trans->mem, new_bytes, _gfp));
> +             if (!new_mem) {
> +                     new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
> +                     new_bytes = BTREE_TRANS_MEM_MAX;
> +                     trans->used_mempool = true;
> +                     kfree(trans->mem);
> +             }
> +
> +             EBUG_ON(!new_mem);
> +
>               trans->mem = new_mem;
>               trans->mem_bytes = new_bytes;
>       }
> diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
> index 09dd3e52622e..cc2c6bb6b6a8 100644
> --- a/fs/bcachefs/btree_iter.h
> +++ b/fs/bcachefs/btree_iter.h
> @@ -963,6 +963,20 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
>       _p;                                                             \
>  })
>  
> +#define allocate_dropping_locks_norelock(_trans, _lock_dropped, _do) \
> +({                                                                   \
> +     gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;                           \
> +     typeof(_do) _p = _do;                                           \
> +     _lock_dropped = false;                                          \
> +     if (unlikely(!_p)) {                                            \
> +             bch2_trans_unlock(_trans);                              \
> +             _lock_dropped = true;                                   \
> +             _gfp = GFP_KERNEL;                                      \
> +             _p = _do;                                               \
> +     }                                                               \
> +     _p;                                                             \
> +})

This helper is fine
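
Deferring the relock to the caller also means the mempool fallback can
run before we pay for bch2_trans_relock(), so the usage pattern ends up
as (sketch, matching your callers above):

        bool lock_dropped = false;
        void *p = allocate_dropping_locks_norelock(trans, lock_dropped,
                                kmalloc(size, _gfp));
        /* fallback allocation can also happen here, still unlocked */
        if (unlikely(lock_dropped)) {
                ret = bch2_trans_relock(trans);
                if (ret)
                        return ERR_PTR(ret);
        }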

> +
>  struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
>  void bch2_trans_put(struct btree_trans *);
>  
> -- 
> 2.48.1
> 
