Introduce per-entry locks, like with struct bucket - the stripes heap is
going away.

Signed-off-by: Kent Overstreet <[email protected]>
---
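(Reviewer note, not part of the commit: a minimal sketch of how the new
per-entry lock is meant to be used, mirroring the existing
bucket_lock()/bucket_unlock() pattern. The idx, block and sectors variables
below are illustrative only; bch2_trigger_stripe_ptr() in this patch is the
real caller.)

	struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
	if (!m)
		return -BCH_ERR_ENOMEM_mark_stripe_ptr;

	gc_stripe_lock(m);	/* per-entry bit lock; no global lock needed */
	if (m->alive)
		m->block_sectors[block] += sectors;
	gc_stripe_unlock(m);	/* clears the lock bit and wakes waiters */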
 fs/bcachefs/buckets.c       |  6 +++---
 fs/bcachefs/buckets.h       | 27 ---------------------------
 fs/bcachefs/buckets_types.h | 27 +++++++++++++++++++++++++++
 fs/bcachefs/ec.h            | 14 ++++++++++++++
 fs/bcachefs/ec_types.h      |  5 ++---
 5 files changed, 46 insertions(+), 33 deletions(-)

diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 345b117a4a4a..88af61bc799d 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -674,10 +674,10 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
                        return -BCH_ERR_ENOMEM_mark_stripe_ptr;
                }
 
-               mutex_lock(&c->ec_stripes_heap_lock);
+               gc_stripe_lock(m);
 
                if (!m || !m->alive) {
-                       mutex_unlock(&c->ec_stripes_heap_lock);
+                       gc_stripe_unlock(m);
                        struct printbuf buf = PRINTBUF;
                        bch2_bkey_val_to_text(&buf, c, k);
                        bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n  while marking %s",
@@ -693,7 +693,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
                        .type = BCH_DISK_ACCOUNTING_replicas,
                };
                memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
-               mutex_unlock(&c->ec_stripes_heap_lock);
+               gc_stripe_unlock(m);
 
                acc.replicas.data_type = data_type;
                int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index a9acdd6c0c86..6aeec1c0973c 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -39,33 +39,6 @@ static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
 
-/*
- * Ugly hack alert:
- *
- * We need to cram a spinlock in a single byte, because that's what we have left
- * in struct bucket, and we care about the size of these - during fsck, we need
- * in memory state for every single bucket on every device.
- *
- * We used to do
- *   while (xchg(&b->lock, 1) cpu_relax();
- * but, it turns out not all architectures support xchg on a single byte.
- *
- * So now we use bit_spin_lock(), with fun games since we can't burn a whole
- * ulong for this - we just need to make sure the lock bit always ends up in the
- * first byte.
- */
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define BUCKET_LOCK_BITNR      0
-#else
-#define BUCKET_LOCK_BITNR      (BITS_PER_LONG - 1)
-#endif
-
-union ulong_byte_assert {
-       ulong   ulong;
-       u8      byte;
-};
-
 static inline void bucket_unlock(struct bucket *b)
 {
        BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 7174047b8e92..900b8680c8b5 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -7,6 +7,33 @@
 
 #define BUCKET_JOURNAL_SEQ_BITS                16
 
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in memory state for every single bucket on every device.
+ *
+ * We used to do
+ *   while (xchg(&b->lock, 1)) cpu_relax();
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR      0
+#else
+#define BUCKET_LOCK_BITNR      (BITS_PER_LONG - 1)
+#endif
+
+union ulong_byte_assert {
+       ulong   ulong;
+       u8      byte;
+};
+
 struct bucket {
        u8                      lock;
        u8                      gen_valid:1;
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 583ca6a226da..4c9511887655 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -132,6 +132,20 @@ static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
                                         m->sectors);
 }
 
+static inline void gc_stripe_unlock(struct gc_stripe *s)
+{
+       BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+
+       clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock);
+       wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR);
+}
+
+static inline void gc_stripe_lock(struct gc_stripe *s)
+{
+       wait_on_bit_lock((void *) &s->lock, BUCKET_LOCK_BITNR,
+                        TASK_UNINTERRUPTIBLE);
+}
+
 struct bch_read_bio;
 
 struct ec_stripe_buf {
diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
index 8d1e70e830ac..37558cc2d89f 100644
--- a/fs/bcachefs/ec_types.h
+++ b/fs/bcachefs/ec_types.h
@@ -20,12 +20,11 @@ struct stripe {
 };
 
 struct gc_stripe {
+       u8                      lock;
+       unsigned                alive:1; /* does a corresponding key exist in stripes btree? */
        u16                     sectors;
-
        u8                      nr_blocks;
        u8                      nr_redundant;
-
-       unsigned                alive:1; /* does a corresponding key exist in stripes btree? */
        u16                     block_sectors[BCH_BKEY_PTRS_MAX];
        struct bch_extent_ptr   ptrs[BCH_BKEY_PTRS_MAX];
 
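(Also not part of the patch: for anyone puzzling over the BUCKET_LOCK_BITNR
games in the moved comment, a small standalone userspace sketch of why the
bit number differs by endianness. The ulong handed to the bitops only
overlaps the u8 lock field in its first byte, so the lock bit must be the
one stored there: bit 0 (the LSB) on little endian, the MSB on big endian.)

	#include <stdio.h>

	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	#define LOCK_BITNR	0			/* LSB lives in byte 0 */
	#else
	#define LOCK_BITNR	(sizeof(long) * 8 - 1)	/* MSB lives in byte 0 */
	#endif

	union ulong_byte {
		unsigned long	ul;
		unsigned char	byte;	/* aliases byte 0 of ul */
	};

	int main(void)
	{
		union ulong_byte u = { .ul = 1UL << LOCK_BITNR };

		/* prints 1 on either endianness: the lock bit is visible in
		 * the first byte, where the u8 lock field sits */
		printf("%d\n", u.byte != 0);
		return 0;
	}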
-- 
2.45.2

