Pull the trigger for multi-page bvecs: switch bio_add_page() from
__bio_try_merge_page() to the new bio_try_merge_segment() helper,
which grows the last bvec whenever the incoming page is physically
contiguous with the end of that segment, instead of only when the
data lands in the same page. Two physically adjacent pages can now
share one bvec, with bv_len spanning both, rather than consuming two
bvec slots.

__bio_try_merge_page() keeps its same-page semantics for the
sub-page block size optimisation, but now checks against the last
single-page segment obtained with bvec_last_segment(). The iomap
read path and the XFS writeback path are switched from
__bio_add_page() to bio_add_page() so that they benefit from
segment merging.

Signed-off-by: Ming Lei <ming....@redhat.com>
---
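Note for reviewers, not intended for the commit log: below is a
minimal userspace sketch of the physical-contiguity merge performed
by the new bio_try_merge_segment(). PAGE_SIZE, the phys field
standing in for page_to_phys(), and all addresses are made up for
illustration; this is not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct seg {
	unsigned long phys;	/* stands in for page_to_phys(bv_page) */
	unsigned int off;	/* bv_offset */
	unsigned int len;	/* bv_len */
};

/*
 * Merge [phys + off, phys + off + len) into *s if it starts exactly
 * where the segment ends, mirroring the check in
 * bio_try_merge_segment().
 */
static bool try_merge_segment(struct seg *s, unsigned long phys,
			      unsigned int off, unsigned int len)
{
	if (s->phys + s->off + s->len != phys + off)
		return false;
	s->len += len;
	return true;
}

int main(void)
{
	/* one full page at a fabricated physical address */
	struct seg s = { .phys = 0x1000, .off = 0, .len = PAGE_SIZE };

	/* the next page is physically contiguous: the segment grows */
	if (try_merge_segment(&s, 0x1000 + PAGE_SIZE, 0, PAGE_SIZE))
		printf("merged: len=%u\n", s.len);	/* len=8192 */

	/* a non-contiguous page would need a new bvec slot */
	if (!try_merge_segment(&s, 0x100000, 0, PAGE_SIZE))
		printf("not contiguous: new segment needed\n");

	return 0;
}
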
 block/bio.c       | 32 +++++++++++++++++++++++++++-----
 fs/iomap.c        |  2 +-
 fs/xfs/xfs_aops.c |  2 +-
 3 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 0f1635b9ec50..854676edc438 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -823,7 +823,7 @@ EXPORT_SYMBOL(bio_add_pc_page);
  * @len: length of the data to add
  * @off: offset of the data in @page
  *
- * Try to add the data at @page + @off to the last bvec of @bio.  This is a
+ * Try to add the data at @page + @off to the last page of @bio.  This is
  * a useful optimisation for file systems with a block size smaller than the
  * page size.
  *
@@ -836,10 +836,13 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
                return false;
 
        if (bio->bi_vcnt > 0) {
-               struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+               struct bio_vec bv;
+               struct bio_vec *seg = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
-               if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
-                       bv->bv_len += len;
+               bvec_last_segment(seg, &bv);
+
+               if (page == bv.bv_page && off == bv.bv_offset + bv.bv_len) {
+                       seg->bv_len += len;
                        bio->bi_iter.bi_size += len;
                        return true;
                }
@@ -848,6 +851,25 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 }
 EXPORT_SYMBOL_GPL(__bio_try_merge_page);
 
+static bool bio_try_merge_segment(struct bio *bio, struct page *page,
+                                 unsigned int len, unsigned int off)
+{
+       if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+               return false;
+
+       if (bio->bi_vcnt > 0) {
+               struct bio_vec *seg = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+               if (page_to_phys(seg->bv_page) + seg->bv_offset + seg->bv_len ==
+                   page_to_phys(page) + off) {
+                       seg->bv_len += len;
+                       bio->bi_iter.bi_size += len;
+                       return true;
+               }
+       }
+       return false;
+}
+
 /**
  * __bio_add_page - add page to a bio in a new segment
  * @bio: destination bio
@@ -888,7 +910,7 @@ EXPORT_SYMBOL_GPL(__bio_add_page);
 int bio_add_page(struct bio *bio, struct page *page,
                 unsigned int len, unsigned int offset)
 {
-       if (!__bio_try_merge_page(bio, page, len, offset)) {
+       if (!bio_try_merge_segment(bio, page, len, offset)) {
                if (bio_full(bio))
                        return 0;
                __bio_add_page(bio, page, len, offset);
diff --git a/fs/iomap.c b/fs/iomap.c
index f5fb8bf75cc8..ccc2ba115f4d 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -344,7 +344,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                ctx->bio->bi_end_io = iomap_read_end_io;
        }
 
-       __bio_add_page(ctx->bio, page, plen, poff);
+       bio_add_page(ctx->bio, page, plen, poff);
 done:
        /*
         * Move the caller beyond our range so that it keeps making progress.
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 1f1829e506e8..5c2190216614 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -621,7 +621,7 @@ xfs_add_to_ioend(
                        atomic_inc(&iop->write_count);
                if (bio_full(wpc->ioend->io_bio))
                        xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
-               __bio_add_page(wpc->ioend->io_bio, page, len, poff);
+               bio_add_page(wpc->ioend->io_bio, page, len, poff);
        }
 
        wpc->ioend->io_size += len;
-- 
2.9.5
