Passthrough bio handling should be the same as normal bio handling,
except that we need to take hardware limitations into account.  Thus
use the common __bio_try_merge_page implementation after checking the
hardware limits.  This changes behavior in that we now also check the
segment and DMA boundary settings for same-page merges, which is a
little more work but has no effect, as those limits must be at least
as large as the page size.
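
As an aside, here is a minimal userspace sketch (not part of the patch;
the helper name crosses_boundary() and the example values are made up
for illustration) showing why the segment boundary check cannot fail
for a same-page merge once the boundary mask covers at least a page:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE	4096UL

    /* Same comparison as in bio_try_merge_pc_page(): both ends of the
     * merged range must fall inside one segment boundary window. */
    static bool crosses_boundary(uint64_t addr1, uint64_t addr2,
    		uint64_t mask)
    {
    	return (addr1 | mask) != (addr2 | mask);
    }

    int main(void)
    {
    	uint64_t mask = PAGE_SIZE - 1;	/* smallest legal boundary mask */
    	uint64_t page = 0x100000;	/* arbitrary page-aligned address */
    	uint64_t addr1 = page + 512;		/* start of existing bvec */
    	uint64_t addr2 = page + 512 + 1024 - 1;	/* last byte after merge */

    	/* Prints 0: both addresses share one boundary window, so a
    	 * same-page merge never trips the check. */
    	printf("crosses boundary: %d\n",
    		crosses_boundary(addr1, addr2, mask));
    	return 0;
    }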

Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 block/bio.c | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 6be22b8477ce..1db626f99bcb 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -645,25 +645,20 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
        return true;
 }
 
-/*
- * Check if the @page can be added to the current segment(@bv), and make
- * sure to call it only if page_is_mergeable(@bv, @page) is true
- */
-static bool can_add_page_to_seg(struct request_queue *q,
-               struct bio_vec *bv, struct page *page, unsigned len,
-               unsigned offset)
+static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
+               struct page *page, unsigned len, unsigned offset,
+               bool *same_page)
 {
+       struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
        unsigned long mask = queue_segment_boundary(q);
        phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
        phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 
        if ((addr1 | mask) != (addr2 | mask))
                return false;
-
        if (bv->bv_len + len > queue_max_segment_size(q))
                return false;
-
-       return true;
+       return __bio_try_merge_page(bio, page, len, offset, same_page);
 }
 
 /**
@@ -699,26 +694,18 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
                return 0;
 
        if (bio->bi_vcnt > 0) {
-               bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
-
-               if (page == bvec->bv_page &&
-                   offset == bvec->bv_offset + bvec->bv_len) {
-                       if (put_same_page)
+               if (bio_try_merge_pc_page(q, bio, page, len, offset,
+                               &same_page)) {
+                       if (put_same_page && same_page)
                                put_page(page);
-                       bvec->bv_len += len;
-                       goto done;
-               }
-
-               if (page_is_mergeable(bvec, page, len, offset, &same_page) &&
-                   can_add_page_to_seg(q, bvec, page, len, offset)) {
-                       bvec->bv_len += len;
-                       goto done;
+                       return len;
                }
 
                /*
                 * If the queue doesn't support SG gaps and adding this segment
                 * would create a gap, disallow it.
                 */
+               bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
                if (bvec_gap_to_prev(q, bvec, offset))
                        return 0;
        }
@@ -734,7 +721,6 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
        bvec->bv_len = len;
        bvec->bv_offset = offset;
        bio->bi_vcnt++;
- done:
        bio->bi_iter.bi_size += len;
        return len;
 }
-- 
2.20.1
