GFP_NOWAIT already includes __GFP_NOWARN, so passing __GFP_NOWARN alongside it is redundant; drop the explicit __GFP_NOWARN.
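For reference, current kernels define GFP_NOWAIT in include/linux/gfp_types.h as:

  #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

(the exact location of the define may vary between kernel versions, but
__GFP_NOWARN has been part of GFP_NOWAIT since the nowait warning
suppression was folded into the flag).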
Signed-off-by: Qianfeng Rong <[email protected]>
---
 fs/bcachefs/btree_cache.c        | 4 ++--
 fs/bcachefs/btree_io.c           | 2 +-
 fs/bcachefs/btree_iter.h         | 6 +++---
 fs/bcachefs/btree_trans_commit.c | 2 +-
 fs/bcachefs/fs.c                 | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 2f7c384a8c81..3750b297dc76 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -788,7 +788,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 		goto got_node;
 	}
 
-	b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
+	b = __btree_node_mem_alloc(c, GFP_NOWAIT);
 	if (b) {
 		bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT);
 	} else {
@@ -826,7 +826,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 	mutex_unlock(&bc->lock);
 
-	if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+	if (btree_node_data_alloc(c, b, GFP_NOWAIT)) {
 		bch2_trans_unlock(trans);
 
 		if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
 			goto err;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 8a03cd75a64f..276cf088539e 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -131,7 +131,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
 	BUG_ON(size > c->opts.btree_node_size);
 
 	*used_mempool = false;
-	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+	p = kvmalloc(size, GFP_NOWAIT);
 	if (!p) {
 		*used_mempool = true;
 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index b117cb5d7f94..c8fc6ee01d96 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -954,7 +954,7 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
 
 #define allocate_dropping_locks_errcode(_trans, _do)			\
 ({									\
-	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
+	gfp_t _gfp = GFP_NOWAIT;					\
 	int _ret = _do;							\
 									\
 	if (bch2_err_matches(_ret, ENOMEM)) {				\
@@ -966,7 +966,7 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
 
 #define allocate_dropping_locks(_trans, _ret, _do)			\
 ({									\
-	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
+	gfp_t _gfp = GFP_NOWAIT;					\
 	typeof(_do) _p = _do;						\
 									\
 	_ret = 0;							\
@@ -979,7 +979,7 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
 
 #define allocate_dropping_locks_norelock(_trans, _lock_dropped, _do)	\
 ({									\
-	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
+	gfp_t _gfp = GFP_NOWAIT;					\
 	typeof(_do) _p = _do;						\
 	_lock_dropped = false;						\
 	if (unlikely(!_p)) {						\
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 8b94a8156fbf..4d58bdb233e9 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -449,7 +449,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
 		return 0;
 
 	new_u64s	= roundup_pow_of_two(u64s);
-	new_k		= krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
+	new_k		= krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT);
 	if (unlikely(!new_k))
 		return btree_key_can_insert_cached_slowpath(trans, flags, path,
 							    new_u64s);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index d768a7e7a204..0ff56ccd581a 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -268,7 +268,7 @@ int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p)
 		rht_for_each_entry_rcu_from(inode, he, rht_ptr_rcu(bkt), tbl, hash, hash) {
 			if (inode->ei_inum.inum == inum) {
 				ret = darray_push_gfp(&subvols, inode->ei_inum.subvol,
-						      GFP_NOWAIT|__GFP_NOWARN);
+						      GFP_NOWAIT);
 				if (ret) {
 					rcu_read_unlock();
 					ret = darray_make_room(&subvols, 1);
-- 
2.34.1
