Replace open-coded kmalloc(n * sizeof(...)) calls with kmalloc_array(),
which checks the size multiplication for overflow and returns NULL
instead of silently allocating an undersized buffer.
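
For reference, kmalloc_array() behaves roughly like the following
wrapper (a simplified sketch modeled on include/linux/slab.h and
include/linux/overflow.h, not the exact in-tree definition):

	/*
	 * Sketch: allocate an array of n elements of the given size.
	 * check_mul_overflow() comes from <linux/overflow.h> and
	 * returns true if n * size would wrap around.
	 */
	static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
	{
		size_t bytes;

		/* Fail the allocation instead of wrapping n * size. */
		if (unlikely(check_mul_overflow(n, size, &bytes)))
			return NULL;
		return kmalloc(bytes, flags);
	}

With plain kmalloc(key_u64s * sizeof(u64), gfp), a sufficiently large
count could wrap the multiplication and return a buffer smaller than
requested; kmalloc_array() turns that case into a NULL return, which
both call sites below already handle.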

Signed-off-by: Liao Yuanhong <[email protected]>
---
 fs/bcachefs/btree_key_cache.c    | 2 +-
 fs/bcachefs/btree_trans_commit.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 4890cbc88e7c..8dd70024e513 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -136,7 +136,7 @@ static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
        struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
        if (unlikely(!ck))
                return NULL;
-       ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
+       ck->k = kmalloc_array(key_u64s, sizeof(u64), gfp);
        if (unlikely(!ck->k)) {
                kmem_cache_free(bch2_key_cache, ck);
                return NULL;
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 4d58bdb233e9..4102a3cb2410 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -396,7 +396,7 @@ btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
        bch2_trans_unlock_updates_write(trans);
        bch2_trans_unlock(trans);
 
-       new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
+       new_k = kmalloc_array(new_u64s, sizeof(u64), GFP_KERNEL);
        if (!new_k) {
                struct bch_fs *c = trans->c;
        bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
-- 
2.34.1

