Rework the read path a bit so that BCH_READ_NODECODE reads now also
self-heal after a read error and a successful retry - a prerequisite
for scrub.

Signed-off-by: Kent Overstreet <[email protected]>
---
 fs/bcachefs/io_read.c | 59 +++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 28 deletions(-)
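
A condensed sketch of the reworked decision flow in __bch2_read_extent(),
paraphrased from the hunks below - the elided branches are unchanged and
this is not a compilable excerpt, just the shape of the change:

	if (!(flags & BCH_READ_NODECODE)) {
		/* normal read: the existing clone/narrow-crcs/bounce
		 * decisions, now inside this branch instead of after an
		 * early "goto get_bio" */
	} else {
		/* NODECODE read: always read the full compressed extent */
		read_full = true;
		iter.bi_size = pick.crc.compressed_size << 9;
	}

	/*
	 * promote_alloc() now also runs when a previous attempt failed
	 * (failed->nr), including for NODECODE reads, so a successful
	 * retry rewrites the bad copy - the self-heal path scrub relies on.
	 */
	if ((orig->opts.promote_target && !(flags & BCH_READ_NODECODE)) ||
	    (failed && failed->nr))
		promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
					&rbio, &bounce, &read_full, failed);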

diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 17a6a3159917..e13766a73300 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -740,7 +740,7 @@ static void __bch2_read_endio(struct work_struct *work)
                        bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
                }
        }
-
+nodecode:
        if (rbio->promote) {
                /*
                 * Re encrypt data we decrypted, so it's consistent with
@@ -753,7 +753,7 @@ static void __bch2_read_endio(struct work_struct *work)
                promote_start(rbio->promote, rbio);
                rbio->promote = NULL;
        }
-nodecode:
+
        if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
                rbio = bch2_rbio_free(rbio);
                bch2_rbio_done(rbio);
@@ -888,6 +888,8 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
        struct bpos data_pos = bkey_start_pos(k.k);
        int pick_ret;
 
+       //BUG_ON(failed && failed->nr);
+
        if (bkey_extent_is_inline_data(k.k)) {
                unsigned bytes = min_t(unsigned, iter.bi_size,
                                       bkey_inline_data_bytes(k.k));
@@ -952,7 +954,30 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
         */
        bch2_trans_unlock(trans);
 
-       if (flags & BCH_READ_NODECODE) {
+       if (!(flags & BCH_READ_NODECODE)) {
+               if (!(flags & BCH_READ_LAST_FRAGMENT) ||
+                   bio_flagged(&orig->bio, BIO_CHAIN))
+                       flags |= BCH_READ_MUST_CLONE;
+
+               narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
+                       bch2_can_narrow_extent_crcs(k, pick.crc);
+
+               if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
+                       flags |= BCH_READ_MUST_BOUNCE;
+
+               EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
+
+               if (crc_is_compressed(pick.crc) ||
+                   (pick.crc.csum_type != BCH_CSUM_none &&
+                    (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
+                     (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
+                      (flags & BCH_READ_USER_MAPPED)) ||
+                     (flags & BCH_READ_MUST_BOUNCE)))) {
+                       read_full = true;
+                       bounce = true;
+               }
+       } else {
+               read_full = true;
                /*
                 * can happen if we retry, and the extent we were going to read
                 * has been merged in the meantime:
@@ -964,32 +989,10 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                }
 
                iter.bi_size    = pick.crc.compressed_size << 9;
-               goto get_bio;
        }
 
-       if (!(flags & BCH_READ_LAST_FRAGMENT) ||
-           bio_flagged(&orig->bio, BIO_CHAIN))
-               flags |= BCH_READ_MUST_CLONE;
-
-       narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
-               bch2_can_narrow_extent_crcs(k, pick.crc);
-
-       if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
-               flags |= BCH_READ_MUST_BOUNCE;
-
-       EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
-
-       if (crc_is_compressed(pick.crc) ||
-           (pick.crc.csum_type != BCH_CSUM_none &&
-            (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
-             (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
-              (flags & BCH_READ_USER_MAPPED)) ||
-             (flags & BCH_READ_MUST_BOUNCE)))) {
-               read_full = true;
-               bounce = true;
-       }
-
-       if (orig->opts.promote_target)// || failed)
+       if ((orig->opts.promote_target && !(flags & BCH_READ_NODECODE)) ||
+           (failed && failed->nr))
                promote = promote_alloc(trans, iter, k, &pick, orig->opts, 
flags,
                                        &rbio, &bounce, &read_full, failed);
 
@@ -1010,7 +1013,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                pick.crc.offset                 = 0;
                pick.crc.live_size              = bvec_iter_sectors(iter);
        }
-get_bio:
+
        if (rbio) {
                /*
                 * promote already allocated bounce rbio:
-- 
2.45.2

