Signed-off-by: Jul Lang <[email protected]>
---
 fs/bcachefs/alloc/accounting.c            |  2 +-
 fs/bcachefs/alloc/buckets.c               |  2 +-
 fs/bcachefs/alloc/check.c                 |  2 +-
 fs/bcachefs/alloc/foreground.c            |  6 +++---
 fs/bcachefs/bcachefs.h                    |  2 +-
 fs/bcachefs/bcachefs_format.h             | 10 +++++-----
 fs/bcachefs/bcachefs_ioctl.h              |  4 ++--
 fs/bcachefs/btree/bkey_methods.c          |  4 ++--
 fs/bcachefs/btree/bkey_methods.h          |  2 +-
 fs/bcachefs/btree/bset.h                  |  4 ++--
 fs/bcachefs/btree/cache.c                 |  2 +-
 fs/bcachefs/btree/interior.h              |  2 +-
 fs/bcachefs/btree/iter.c                  |  4 ++--
 fs/bcachefs/btree/update.c                |  4 ++--
 fs/bcachefs/data/checksum.c               |  2 +-
 fs/bcachefs/data/reconcile/work.c         |  2 +-
 fs/bcachefs/debug/async_objs.c            |  2 +-
 fs/bcachefs/fs/check.c                    |  2 +-
 fs/bcachefs/fs/inode.c                    |  2 +-
 fs/bcachefs/init/passes.c                 |  2 +-
 fs/bcachefs/init/progress.h               |  2 +-
 fs/bcachefs/journal/journal.c             |  4 ++--
 fs/bcachefs/journal/types.h               |  4 ++--
 fs/bcachefs/journal/write.c               |  2 +-
 fs/bcachefs/sb/io.c                       |  2 +-
 fs/bcachefs/sb/members_format.h           |  2 +-
 fs/bcachefs/snapshots/format.h            |  4 ++--
 fs/bcachefs/util/eytzinger.h              |  2 +-
 fs/bcachefs/util/mean_and_variance.h      |  4 ++--
 fs/bcachefs/util/mean_and_variance_test.c |  2 +-
 fs/bcachefs/util/printbuf.c               |  2 +-
 fs/bcachefs/util/printbuf.h               |  4 ++--
 fs/bcachefs/util/rcu_pending.c            |  2 +-
 fs/bcachefs/util/siphash.h                |  2 +-
 fs/bcachefs/util/six.c                    |  6 +++---
 fs/bcachefs/util/six.h                    |  4 ++--
 fs/bcachefs/util/time_stats.h             |  2 +-
 fs/bcachefs/util/util.c                   |  2 +-
 fs/bcachefs/vfs/io.c                      |  2 +-
 39 files changed, 58 insertions(+), 58 deletions(-)

diff --git a/fs/bcachefs/alloc/accounting.c b/fs/bcachefs/alloc/accounting.c
index c86a01410..0e3de80b5 100644
--- a/fs/bcachefs/alloc/accounting.c
+++ b/fs/bcachefs/alloc/accounting.c
@@ -525,7 +525,7 @@ void bch2_accounting_mem_gc(struct bch_fs *c)
  * Read out accounting keys for replicas entries, as an array of
  * bch_replicas_usage entries.
  *
- * Note: this may be deprecated/removed at smoe point in the future and replaced
+ * Note: this may be deprecated/removed at some point in the future and replaced
  * with something more general, it exists to support the ioctl used by the
  * 'bcachefs fs usage' command.
  */
diff --git a/fs/bcachefs/alloc/buckets.c b/fs/bcachefs/alloc/buckets.c
index e6261a6cc..4a3373572 100644
--- a/fs/bcachefs/alloc/buckets.c
+++ b/fs/bcachefs/alloc/buckets.c
@@ -353,7 +353,7 @@ int bch2_check_fix_ptrs(struct btree_trans *trans,
 
                        /*
                         * no locking, we're single threaded and not rw yet, see
-                        * the big assertino above that we repeat here:
+                        * the big assertion above that we repeat here:
                         */
                        BUG_ON(test_bit(BCH_FS_rw, &c->flags));
 
diff --git a/fs/bcachefs/alloc/check.c b/fs/bcachefs/alloc/check.c
index eef2d8df3..db91af741 100644
--- a/fs/bcachefs/alloc/check.c
+++ b/fs/bcachefs/alloc/check.c
@@ -397,7 +397,7 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i
        if (!bch2_dev_bucket_exists(c, bucket)) {
                if (__fsck_err(trans, fsck_flags,
                               need_discard_freespace_key_to_invalid_dev_bucket,
-                              "entry in %s btree for nonexistant dev:bucket %llu:%llu",
+                              "entry in %s btree for nonexistent dev:bucket %llu:%llu",
                               bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
                        ret = delete_discard_freespace_key(trans, iter, async_repair);
                else
diff --git a/fs/bcachefs/alloc/foreground.c b/fs/bcachefs/alloc/foreground.c
index 7f4a0674d..c7bf0c3f9 100644
--- a/fs/bcachefs/alloc/foreground.c
+++ b/fs/bcachefs/alloc/foreground.c
@@ -388,7 +388,7 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
                                         POS(ca->dev_idx, U64_MAX),
                                         0, k, ret) {
                /*
-                * peek normally dosen't trim extents - they can span iter.pos,
+                * peek normally doesn't trim extents - they can span iter.pos,
                 * which is not what we want here:
                 */
                iter.k.size = iter.k.p.offset - iter.pos.offset;
@@ -633,7 +633,7 @@ static noinline void bch2_stripe_state_rescale(struct dev_stripe_state *stripe)
         * to the whole filesystem.
         */
        u64 scale_max = U64_MAX;        /* maximum we can subtract without underflow */
-       u64 scale_min = 0;              /* minumum we must subtract to avoid overflow */
+       u64 scale_min = 0;              /* minimum we must subtract to avoid overflow */
 
        for (u64 *v = stripe->next_alloc;
             v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++) {
@@ -1348,7 +1348,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
        mutex_init(&a->write_points_hash_lock);
        a->write_points_nr = ARRAY_SIZE(a->write_points);
 
-       /* open bucket 0 is a sentinal NULL: */
+       /* open bucket 0 is a sentinel NULL: */
        spin_lock_init(&a->open_buckets[0].lock);
 
        for (ob = a->open_buckets + 1;
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 8b877099a..d3cfce3a4 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -778,7 +778,7 @@ struct bch_fs {
        struct enumerated_ref   writes;
 
        /*
-        * Analagous to c->writes, for asynchronous ops that don't necessarily
+        * Analogous to c->writes, for asynchronous ops that don't necessarily
         * need fs to be read-write
         */
        refcount_t              ro_ref;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index aa3ffe695..10368565d 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -238,7 +238,7 @@ struct bkey {
  *
  * Specifically, when i was designing bkey, I wanted the header to be no
 * bigger than necessary so that bkey_packed could use the rest. That means that
- * decently offten extent keys will fit into only 8 bytes, instead of spilling over
+ * decently often extent keys will fit into only 8 bytes, instead of spilling over
  * to 16.
  *
  * But packed_bkey treats the part after the header - the packed section -
@@ -250,7 +250,7 @@ struct bkey {
  * So that constrains the key part of a bkig endian bkey to start right
  * after the header.
  *
- * If we ever do a bkey_v2 and need to expand the hedaer by another byte for
+ * If we ever do a bkey_v2 and need to expand the header by another byte for
  * some reason - that will clean up this wart.
  */
 __aligned(8)
@@ -844,7 +844,7 @@ struct bch_sb_field_ext {
        x(btree_node_accounting,        BCH_VERSION(1, 31))             \
        x(sb_field_extent_type_u64s,    BCH_VERSION(1, 32))             \
        x(reconcile,                    BCH_VERSION(1, 33))             \
-       x(extented_key_type_error,      BCH_VERSION(1, 34))             \
+       x(extended_key_type_error,      BCH_VERSION(1, 34))             \
        x(bucket_stripe_index,          BCH_VERSION(1, 35))
 
 enum bcachefs_metadata_version {
@@ -925,7 +925,7 @@ struct bch_sb {
 
 /*
  * Flags:
- * BCH_SB_INITALIZED   - set on first mount
+ * BCH_SB_INITIALIZED  - set on first mount
 * BCH_SB_CLEAN                - did we shut down cleanly? Just a hint, doesn't affect
  *                       behaviour of mount/recovery path:
  * BCH_SB_INODE_32BIT  - limit inode numbers to 32 bits
@@ -1523,7 +1523,7 @@ static inline bool btree_id_can_reconstruct(enum btree_id btree)
 }
 
 /*
- * We can reconstruct BTREE_ID_alloc, but reconstucting it from scratch is not
+ * We can reconstruct BTREE_ID_alloc, but reconstructing it from scratch is not
  * so cheap and OOMs on huge filesystems (until we have online
  * check_allocations)
  */
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
index dd926db42..05e170f06 100644
--- a/fs/bcachefs/bcachefs_ioctl.h
+++ b/fs/bcachefs/bcachefs_ioctl.h
@@ -113,7 +113,7 @@ struct bch_ioctl_query_uuid {
  * may be either offline or offline.
  *
  * Will fail removing @dev would leave us with insufficient read write devices
- * or degraded/unavailable data, unless the approprate BCH_FORCE_IF_* flags are
+ * or degraded/unavailable data, unless the appropriate BCH_FORCE_IF_* flags are
  * set.
  */
 
@@ -136,7 +136,7 @@ struct bch_ioctl_query_uuid {
  *
  * Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would
  * leave us with insufficient read write devices or degraded/unavailable data,
- * unless the approprate BCH_FORCE_IF_* flags are set.
+ * unless the appropriate BCH_FORCE_IF_* flags are set.
  */
 
 struct bch_ioctl_disk {
diff --git a/fs/bcachefs/btree/bkey_methods.c b/fs/bcachefs/btree/bkey_methods.c
index a694adcca..95a92696a 100644
--- a/fs/bcachefs/btree/bkey_methods.c
+++ b/fs/bcachefs/btree/bkey_methods.c
@@ -89,7 +89,7 @@ void bch2_set_bkey_error(struct bch_fs *c, struct bkey_i *k, enum bch_key_type_e
 {
        k->k.type = KEY_TYPE_error;
 
-       if (!bch2_request_incompat_feature(c, bcachefs_metadata_version_extented_key_type_error)) {
+       if (!bch2_request_incompat_feature(c, bcachefs_metadata_version_extended_key_type_error)) {
                set_bkey_val_bytes(&k->k, sizeof(struct bch_error));
 
                struct bkey_i_error *e = bkey_i_to_error(k);
@@ -389,7 +389,7 @@ bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
        const struct bkey_ops *ops = bch2_bkey_type_ops(l.k->type);
 
        return ops->key_merge &&
-               bch2_bkey_maybe_mergable(l.k, r.k) &&
+               bch2_bkey_maybe_mergeable(l.k, r.k) &&
                (u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
                !static_branch_unlikely(&bch2_key_merging_disabled) &&
                ops->key_merge(c, l, r);
diff --git a/fs/bcachefs/btree/bkey_methods.h b/fs/bcachefs/btree/bkey_methods.h
index 3af1f35b4..08e74d8be 100644
--- a/fs/bcachefs/btree/bkey_methods.h
+++ b/fs/bcachefs/btree/bkey_methods.h
@@ -65,7 +65,7 @@ void bch2_bkey_val_to_text(struct printbuf *, struct bch_fs *,
 
 void bch2_bkey_swab_val(const struct bch_fs *c, struct bkey_s);
 
-static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r)
+static inline bool bch2_bkey_maybe_mergeable(const struct bkey *l, const struct bkey *r)
 {
        return l->type == r->type &&
                !bversion_cmp(l->bversion, r->bversion) &&
diff --git a/fs/bcachefs/btree/bset.h b/fs/bcachefs/btree/bset.h
index f626d2ff8..b51144c7f 100644
--- a/fs/bcachefs/btree/bset.h
+++ b/fs/bcachefs/btree/bset.h
@@ -45,7 +45,7 @@
  * 4 in memory - we lazily resort as needed.
  *
  * We implement code here for creating and maintaining auxiliary search trees
- * (described below) for searching an individial bset, and on top of that we
+ * (described below) for searching an individual bset, and on top of that we
  * implement a btree iterator.
  *
  * BTREE ITERATOR:
@@ -179,7 +179,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree
  * memory if it was 128.
  *
  * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
+ * the auxiliary search tree - when we're done searching the bset_float tree we
  * have this many bytes left that we do a linear search over.
  *
  * Since (after level 5) every level of the bset_tree is on a new cacheline,
diff --git a/fs/bcachefs/btree/cache.c b/fs/bcachefs/btree/cache.c
index ce2799114..2a7196782 100644
--- a/fs/bcachefs/btree/cache.c
+++ b/fs/bcachefs/btree/cache.c
@@ -1212,7 +1212,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
        /*
         * Check b->hash_val _before_ calling btree_node_lock() - this might not
         * be the node we want anymore, and trying to lock the wrong node could
-        * cause an unneccessary transaction restart:
+        * cause an unnecessary transaction restart:
         */
        if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
                     !b ||
diff --git a/fs/bcachefs/btree/interior.h b/fs/bcachefs/btree/interior.h
index 952a8cd45..1996395f8 100644
--- a/fs/bcachefs/btree/interior.h
+++ b/fs/bcachefs/btree/interior.h
@@ -117,7 +117,7 @@ struct btree_update {
        struct keylist                  parent_keys;
        /*
         * Enough room for btree_split's keys without realloc - btree node
-        * pointers never have crc/compression info, so we only need to acount
+        * pointers never have crc/compression info, so we only need to account
         * for the pointers for three keys
         */
        u64                             inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
diff --git a/fs/bcachefs/btree/iter.c b/fs/bcachefs/btree/iter.c
index f763449db..2a6b8908a 100644
--- a/fs/bcachefs/btree/iter.c
+++ b/fs/bcachefs/btree/iter.c
@@ -3479,7 +3479,7 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 
                /*
                 * If the transaction wasn't restarted, we're presuming to be
-                * doing something new: dont keep iterators excpt the ones that
+                * doing something new: don't keep iterators except the ones that
                 * are in use - except for the subvolumes btree:
                 */
                if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
@@ -3623,7 +3623,7 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 
        *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
 
-       /* Reserve path 0 for our sentinal value */
+       /* Reserve path 0 for our sentinel value */
        trans->paths_allocated[0] = 1;
 
        static struct lock_class_key lockdep_key;
diff --git a/fs/bcachefs/btree/update.c b/fs/bcachefs/btree/update.c
index 5f2de66b4..f03c24b2e 100644
--- a/fs/bcachefs/btree/update.c
+++ b/fs/bcachefs/btree/update.c
@@ -250,7 +250,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
                goto out;
 
        if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
-               if (bch2_bkey_maybe_mergable(k.k, &insert->k))
+               if (bch2_bkey_maybe_mergeable(k.k, &insert->k))
                        try(extent_front_merge(trans, &iter, k, &insert, flags));
 
                goto next;
@@ -293,7 +293,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
                        goto out;
        }
 
-       if (bch2_bkey_maybe_mergable(&insert->k, k.k))
+       if (bch2_bkey_maybe_mergeable(&insert->k, k.k))
                try(extent_back_merge(trans, &iter, insert, k));
 out:
        return !bkey_deleted(&insert->k)
diff --git a/fs/bcachefs/data/checksum.c b/fs/bcachefs/data/checksum.c
index 999024202..9b587887f 100644
--- a/fs/bcachefs/data/checksum.c
+++ b/fs/bcachefs/data/checksum.c
@@ -20,7 +20,7 @@
 /*
 * bch2_checksum state is an abstraction of the checksum state calculated over different pages.
 * it features page merging without having the checksum algorithm lose its state.
- * for native checksum aglorithms (like crc), a default seed value will do.
+ * for native checksum algorithms (like crc), a default seed value will do.
  * for hash-like algorithms, a state needs to be stored
  */
 
diff --git a/fs/bcachefs/data/reconcile/work.c b/fs/bcachefs/data/reconcile/work.c
index 1abe4354b..a761eb56d 100644
--- a/fs/bcachefs/data/reconcile/work.c
+++ b/fs/bcachefs/data/reconcile/work.c
@@ -1322,7 +1322,7 @@ void bch2_reconcile_stop(struct bch_fs *c)
        c->reconcile.thread = NULL;
 
        if (p) {
-               /* for sychronizing with bch2_reconcile_wakeup() */
+               /* for synchronizing with bch2_reconcile_wakeup() */
                synchronize_rcu();
 
                kthread_stop(p);
diff --git a/fs/bcachefs/debug/async_objs.c b/fs/bcachefs/debug/async_objs.c
index 6717deadd..9bec6c9e1 100644
--- a/fs/bcachefs/debug/async_objs.c
+++ b/fs/bcachefs/debug/async_objs.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Async obj debugging: keep asynchronous objects on (very fast) lists, make
- * them visibile in debugfs:
+ * them visible in debugfs:
  */
 
 #include "bcachefs.h"
diff --git a/fs/bcachefs/fs/check.c b/fs/bcachefs/fs/check.c
index 6b6da8406..c32fef694 100644
--- a/fs/bcachefs/fs/check.c
+++ b/fs/bcachefs/fs/check.c
@@ -1886,7 +1886,7 @@ int bch2_fix_reflink_p(struct bch_fs *c)
                        fix_reflink_p_key(trans, &iter, k));
 }
 
-/* translate to return code of fsck commad - man(8) fsck */
+/* translate to return code of fsck command - man(8) fsck */
 int bch2_fs_fsck_errcode(struct bch_fs *c, struct printbuf *msg)
 {
        int ret = 0;
diff --git a/fs/bcachefs/fs/inode.c b/fs/bcachefs/fs/inode.c
index 5a0925edd..ef1105811 100644
--- a/fs/bcachefs/fs/inode.c
+++ b/fs/bcachefs/fs/inode.c
@@ -1367,7 +1367,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
        /*
         * if we ran check_inodes() unlinked inodes will have already been
         * cleaned up but the write buffer will be out of sync; therefore we
-        * alway need a write buffer flush
+        * always need a write buffer flush
         *
         * Weird transaction restart handling here because on successful delete,
         * bch2_inode_rm_snapshot() will return a nested transaction restart,
diff --git a/fs/bcachefs/init/passes.c b/fs/bcachefs/init/passes.c
index 5e49ca218..1a88cecf9 100644
--- a/fs/bcachefs/init/passes.c
+++ b/fs/bcachefs/init/passes.c
@@ -554,7 +554,7 @@ int bch2_run_recovery_passes(struct bch_fs *c, u64 orig_passes_to_run, bool fail
 
                if (r->rewound_to) {
                        r->rewound_from = max(r->rewound_from, pass);
-                       /* Restore r->current_passses up to and including r->rewound_to */
+                       /* Restore r->current_passes up to and including r->rewound_to */
                        r->current_passes |= orig_passes_to_run & (~0ULL << r->rewound_to);
                        r->rewound_to = 0;
                } else if (!ret2) {
diff --git a/fs/bcachefs/init/progress.h b/fs/bcachefs/init/progress.h
index 877b0e311..74dbee395 100644
--- a/fs/bcachefs/init/progress.h
+++ b/fs/bcachefs/init/progress.h
@@ -8,7 +8,7 @@
  * Lame progress indicators
  *
  * We don't like to use these because they print to the dmesg console, which is
- * spammy - we much prefer to be wired up to a userspace programm (e.g. via
+ * spammy - we much prefer to be wired up to a userspace program (e.g. via
  * thread_with_file) and have it print the progress indicator.
  *
  * But some code is old and doesn't support that, or runs in a context where
diff --git a/fs/bcachefs/journal/journal.c b/fs/bcachefs/journal/journal.c
index 1a680c9a7..2242bdb89 100644
--- a/fs/bcachefs/journal/journal.c
+++ b/fs/bcachefs/journal/journal.c
@@ -123,7 +123,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
        CLASS(bch_log_msg, msg)(c);
        msg.m.suppress = true; /* only print once, when we go ERO */
 
-       prt_printf(&msg.m, "Journal stuck! Hava a pre-reservation but journal full (error %s)",
+       prt_printf(&msg.m, "Journal stuck! Have a pre-reservation but journal full (error %s)",
                   bch2_err_str(error));
        bch2_journal_debug_to_text(&msg.m, j);
 
@@ -879,7 +879,7 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end)
             unwritten_seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
 
-               /* journal flush already in flight, or flush requseted */
+               /* journal flush already in flight, or flush requested */
                if (buf->must_flush)
                        return false;
 
diff --git a/fs/bcachefs/journal/types.h b/fs/bcachefs/journal/types.h
index 0d2ec3e04..5a43f619a 100644
--- a/fs/bcachefs/journal/types.h
+++ b/fs/bcachefs/journal/types.h
@@ -130,7 +130,7 @@ union journal_res_state {
 #define JOURNAL_ENTRY_SIZE_MAX         (4U  << 20) /* 4M */
 
 /*
- * We stash some journal state as sentinal values in cur_entry_offset:
+ * We stash some journal state as sentinel values in cur_entry_offset:
  * note - cur_entry_offset is in units of u64s
  */
 #define JOURNAL_ENTRY_OFFSET_MAX       ((1U << 22) - 1)
@@ -212,7 +212,7 @@ struct journal {
        darray_u64              early_journal_entries;
 
        /*
-        * Protects journal_buf->data, when accessing without a jorunal
+        * Protects journal_buf->data, when accessing without a journal
         * reservation: for synchronization between the btree write buffer code
         * and the journal write path:
         */
diff --git a/fs/bcachefs/journal/write.c b/fs/bcachefs/journal/write.c
index 4d1449a3a..8e9ee9349 100644
--- a/fs/bcachefs/journal/write.c
+++ b/fs/bcachefs/journal/write.c
@@ -289,7 +289,7 @@ static CLOSURE_CALLBACK(journal_write_done)
                                 * can return an error if appending to
                                 * replicas_refs failed, but we don't
                                 * care - it's a preallocated darray so
-                                * it'll allways be able to do some
+                                * it'll always be able to do some
                                 * work, and we have to retry anyways,
                                 * because we have to drop j->lock to
                                 * put the replicas refs before updating
diff --git a/fs/bcachefs/sb/io.c b/fs/bcachefs/sb/io.c
index 849d71bc5..80743a474 100644
--- a/fs/bcachefs/sb/io.c
+++ b/fs/bcachefs/sb/io.c
@@ -479,7 +479,7 @@ int bch2_sb_validate(struct bch_sb *sb, struct bch_opts *opts, u64 read_offset,
                /*
                 * Been seeing a bug where these are getting inexplicably
                 * zeroed, so we're now validating them, but we have to be
-                * careful not to preven people's filesystems from mounting:
+                * careful not to prevent people's filesystems from mounting:
                 */
                if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
                        SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
diff --git a/fs/bcachefs/sb/members_format.h b/fs/bcachefs/sb/members_format.h
index 2c9f72653..941507d63 100644
--- a/fs/bcachefs/sb/members_format.h
+++ b/fs/bcachefs/sb/members_format.h
@@ -9,7 +9,7 @@
 #define BCH_SB_MEMBERS_MAX             64
 
 /*
- * Sentinal value - indicates a device that does not exist
+ * Sentinel value - indicates a device that does not exist
  */
 #define BCH_SB_MEMBER_INVALID          255
 
diff --git a/fs/bcachefs/snapshots/format.h b/fs/bcachefs/snapshots/format.h
index 72c28bd9c..a6e4fd33f 100644
--- a/fs/bcachefs/snapshots/format.h
+++ b/fs/bcachefs/snapshots/format.h
@@ -58,7 +58,7 @@ struct bch_snapshot {
  * something, key should be deleted)
  *
  * NO_KEYS: we don't remove interior snapshot nodes from snapshot trees at
- * runtime, since we can't do the adjustements for the depth/skiplist field
+ * runtime, since we can't do the adjustments for the depth/skiplist field
  * atomically - and that breaks e.g. is_ancestor(). Instead, we mark it to be
 * deleted at the next remount; this tells us that we don't need to run the full
  * delete_dead_snapshots().
@@ -78,7 +78,7 @@ LE32_BITMASK(BCH_SNAPSHOT_NO_KEYS,    struct bch_snapshot, flags,  3,  4)
 /*
  * Snapshot trees:
  *
- * The snapshot_trees btree gives us persistent indentifier for each tree of
+ * The snapshot_trees btree gives us a persistent identifier for each tree of
  * bch_snapshot nodes, and allow us to record and easily find the root/master
  * subvolume that other snapshots were created from:
  */
diff --git a/fs/bcachefs/util/eytzinger.h b/fs/bcachefs/util/eytzinger.h
index b14ae1ff7..43a66cab5 100644
--- a/fs/bcachefs/util/eytzinger.h
+++ b/fs/bcachefs/util/eytzinger.h
@@ -13,7 +13,7 @@
 #endif
 
 /*
- * Traversal for trees in eytzinger layout - a full binary tree layed out in an
+ * Traversal for trees in eytzinger layout - a full binary tree laid out in an
  * array.
  *
 * Consider using an eytzinger tree any time you would otherwise be doing binary
diff --git a/fs/bcachefs/util/mean_and_variance.h b/fs/bcachefs/util/mean_and_variance.h
index 47e4a3c3d..281d6f9d1 100644
--- a/fs/bcachefs/util/mean_and_variance.h
+++ b/fs/bcachefs/util/mean_and_variance.h
@@ -152,7 +152,7 @@ struct mean_and_variance {
        u128_u  sum_squares;
 };
 
-/* expontentially weighted variant */
+/* exponentially weighted variant */
 struct mean_and_variance_weighted {
        s64     mean;
        u64     variance;
@@ -200,4 +200,4 @@ u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s,
 u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s,
                u8 weight);
 
-#endif // MEAN_AND_VAIRANCE_H_
+#endif // MEAN_AND_VARIANCE_H_
diff --git a/fs/bcachefs/util/mean_and_variance_test.c b/fs/bcachefs/util/mean_and_variance_test.c
index e9d9c0212..86f38db11 100644
--- a/fs/bcachefs/util/mean_and_variance_test.c
+++ b/fs/bcachefs/util/mean_and_variance_test.c
@@ -25,7 +25,7 @@ static void mean_and_variance_basic_test(struct kunit *test)
 }
 
 /*
- * Test values computed using a spreadsheet from the psuedocode at the bottom:
+ * Test values computed using a spreadsheet from the pseudocode at the bottom:
  * https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
  */
 
diff --git a/fs/bcachefs/util/printbuf.c b/fs/bcachefs/util/printbuf.c
index 0a6f44fd5..80e101915 100644
--- a/fs/bcachefs/util/printbuf.c
+++ b/fs/bcachefs/util/printbuf.c
@@ -417,7 +417,7 @@ void bch2_prt_tab_rjust(struct printbuf *buf)
  * @str:       string to print
  * @count:     number of bytes to print
  *
- * The following contol characters are handled as so:
+ * The following control characters are handled as so:
  *   \n: prt_newline   newline that obeys current indent level
  *   \t: prt_tab       advance to next tabstop
  *   \r: prt_tab_rjust advance to next tabstop, with right justification
diff --git a/fs/bcachefs/util/printbuf.h b/fs/bcachefs/util/printbuf.h
index 41e8d71d9..fe41736bd 100644
--- a/fs/bcachefs/util/printbuf.h
+++ b/fs/bcachefs/util/printbuf.h
@@ -25,7 +25,7 @@
  * everything to the kernel log buffer, and then those pretty-printers can be
  * used by other code that outputs to kernel log, sysfs, debugfs, etc.
  *
- * Memory allocation: Outputing to a printbuf may allocate memory. This
+ * Memory allocation: Outputting to a printbuf may allocate memory. This
  * allocation is done with GFP_KERNEL, by default: use the newer
  * memalloc_*_(save|restore) functions as needed.
  *
@@ -56,7 +56,7 @@
  * next tabstop - right justifying it.
  *
 * Make sure you use prt_newline() instead of \n in the format string for indent
- * level and tabstops to work corretly.
+ * level and tabstops to work correctly.
  *
  * Output units: printbuf->units exists to tell pretty-printers how to output
 * numbers: a raw value (e.g. directly from a superblock field), as bytes, or as
diff --git a/fs/bcachefs/util/rcu_pending.c b/fs/bcachefs/util/rcu_pending.c
index 964ed493c..794f2d985 100644
--- a/fs/bcachefs/util/rcu_pending.c
+++ b/fs/bcachefs/util/rcu_pending.c
@@ -349,7 +349,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq,
                        /*
                         * kvfree_rcu_mightsleep(): we weren't passed an
                         * rcu_head, but we need one: use the low bit of the
-                        * ponter to free to flag that the head needs to be
+                        * pointer to free to flag that the head needs to be
                         * freed as well:
                         */
                        ptr = (void *)(((unsigned long) ptr)|1UL);
diff --git a/fs/bcachefs/util/siphash.h b/fs/bcachefs/util/siphash.h
index 3dfaf34a4..7744b4fe0 100644
--- a/fs/bcachefs/util/siphash.h
+++ b/fs/bcachefs/util/siphash.h
@@ -36,7 +36,7 @@
  * optimized for speed on short messages returning a 64bit hash/digest value.
  *
  * The number of rounds is defined during the initialization:
- *  SipHash24_Init() for the fast and resonable strong version
+ *  SipHash24_Init() for the fast and reasonably strong version
  *  SipHash48_Init() for the strong version (half as fast)
  *
  * struct SIPHASH_CTX ctx;
diff --git a/fs/bcachefs/util/six.c b/fs/bcachefs/util/six.c
index 08083d6ca..cdf7efb0c 100644
--- a/fs/bcachefs/util/six.c
+++ b/fs/bcachefs/util/six.c
@@ -111,7 +111,7 @@ static inline unsigned pcpu_read_count(struct six_lock *lock)
  * Returns 1 on success, 0 on failure
  *
  * In percpu reader mode, a failed trylock may cause a spurious trylock failure
- * for anoter thread taking the competing lock type, and we may havve to do a
+ * for another thread taking the competing lock type, and we may have to do a
  * wakeup: when a wakeup is required, we return -1 - wakeup_type.
  */
 static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
@@ -591,7 +591,7 @@ static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
  * @type:      SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
  * @ip:                ip parameter for lockdep/lockstat, i.e. _THIS_IP_
  *
- * When a lock is held multiple times (because six_lock_incement()) was used),
+ * When a lock is held multiple times (because six_lock_increment() was used),
  * this decrements the 'lock held' counter by one.
  *
  * For example:
@@ -632,7 +632,7 @@ EXPORT_SYMBOL_GPL(six_unlock_ip);
 
 /**
  * six_lock_downgrade - convert an intent lock to a read lock
- * @lock:      lock to dowgrade
+ * @lock:      lock to downgrade
  *
  * @lock will have read count incremented and intent count decremented
  */
diff --git a/fs/bcachefs/util/six.h b/fs/bcachefs/util/six.h
index 59b851cf8..2f55de935 100644
--- a/fs/bcachefs/util/six.h
+++ b/fs/bcachefs/util/six.h
@@ -79,7 +79,7 @@
  *     six_unlock_read(&foo->lock);
  *   foo->lock is now fully unlocked.
  *
- *   Since the intent state supercedes read, it's legal to increment the read
+ *   Since the intent state supersedes read, it's legal to increment the read
  *   counter when holding an intent lock, but not the reverse.
  *
 *   A lock may only be held once for write: six_lock_increment(.., SIX_LOCK_write)
@@ -298,7 +298,7 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
  * @lock:      lock to unlock
  * @type:      SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
  *
- * When a lock is held multiple times (because six_lock_incement()) was used),
+ * When a lock is held multiple times (because six_lock_increment() was used),
  * this decrements the 'lock held' counter by one.
  *
  * For example:
diff --git a/fs/bcachefs/util/time_stats.h b/fs/bcachefs/util/time_stats.h
index 7c7069673..37cf1a286 100644
--- a/fs/bcachefs/util/time_stats.h
+++ b/fs/bcachefs/util/time_stats.h
@@ -12,7 +12,7 @@
  *  - sum of all event durations
  *  - average event duration, standard and weighted
  *  - standard deviation of event durations, standard and weighted
- * and analagous statistics for the frequency of events
+ * and analogous statistics for the frequency of events
  *
 * We provide both mean and weighted mean (exponentially weighted), and standard
  * deviation and weighted standard deviation, to give an efficient-to-compute
diff --git a/fs/bcachefs/util/util.c b/fs/bcachefs/util/util.c
index d538758b0..a444ec7cd 100644
--- a/fs/bcachefs/util/util.c
+++ b/fs/bcachefs/util/util.c
@@ -501,7 +501,7 @@ void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
 /* pd controller: */
 
 /*
- * Updates pd_controller. Attempts to scale inputed values to units per second.
+ * Updates pd_controller. Attempts to scale inputted values to units per second.
  * @target: desired value
  * @actual: current value
  *
diff --git a/fs/bcachefs/vfs/io.c b/fs/bcachefs/vfs/io.c
index 8ea29763d..91d0922a1 100644
--- a/fs/bcachefs/vfs/io.c
+++ b/fs/bcachefs/vfs/io.c
@@ -945,7 +945,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
                goto err;
 
        /*
-        * due to alignment, we might have remapped slightly more than requsted
+        * due to alignment, we might have remapped slightly more than requested
         */
        ret = min((u64) ret << 9, (u64) len);
 
-- 
2.52.0