There is no need to use dynamic bio allocation for BDI_CAP_SYNC
devices. They can use an on-stack bio, without any concern about
waiting for a bio allocation from the mempool under heavy memory
pressure.

Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 fs/mpage.c | 44 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
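
As a quick reference for reviewers, here is the on-stack bio pattern
that both hunks below open-code, condensed into one place. This is
only an illustrative sketch: bdev, sector and page stand for the
values the surrounding mpage code already computes, and the bi_bdev
assignment follows the pre-bio_set_dev() style of the tree this patch
is based on.

	/*
	 * Single-segment bio built on the stack, so nothing is taken
	 * from the fs_bio_set mempool. Because the backing device
	 * reports synchronous completion (bdi_cap_synchronous_io()),
	 * the I/O is expected to be finished by the time submit_bio()
	 * returns, which is what makes a stack-allocated bio safe here.
	 */
	struct bio sbio;
	struct bio_vec bvec;

	bio_init(&sbio, &bvec, 1);	/* one inline bio_vec, no allocation */
	sbio.bi_bdev = bdev;
	sbio.bi_iter.bi_sector = sector;
	sbio.bi_end_io = on_stack_page_end_io;
	bio_add_page(&sbio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&sbio, REQ_OP_READ, 0);	/* REQ_OP_WRITE in the writepage hunk */
	submit_bio(&sbio);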

diff --git a/fs/mpage.c b/fs/mpage.c
index 2e4c41ccb5c9..eaeaef27d693 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -31,6 +31,14 @@
 #include <linux/cleancache.h>
 #include "internal.h"
 
+static void on_stack_page_end_io(struct bio *bio)
+{
+       struct page *page = bio->bi_io_vec->bv_page;
+
+       page_endio(page, op_is_write(bio_op(bio)),
+               blk_status_to_errno(bio->bi_status));
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -278,6 +286,22 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 alloc_new:
        if (bio == NULL) {
                if (first_hole == blocks_per_page) {
+                       if (bdi_cap_synchronous_io(inode_to_bdi(inode))) {
+                               /* on-stack-bio */
+                               struct bio sbio;
+                               struct bio_vec bvec;
+
+                               bio_init(&sbio, &bvec, 1);
+                               sbio.bi_bdev = bdev;
+                               sbio.bi_iter.bi_sector =
+                                       blocks[0] << (blkbits - 9);
+                               sbio.bi_end_io = on_stack_page_end_io;
+                               bio_add_page(&sbio, page, PAGE_SIZE, 0);
+                               bio_set_op_attrs(&sbio, REQ_OP_READ, 0);
+                               submit_bio(&sbio);
+                               goto out;
+                       }
+
                        if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
                                                                page))
                                goto out;
@@ -604,6 +628,26 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 alloc_new:
        if (bio == NULL) {
                if (first_unmapped == blocks_per_page) {
+                       if (bdi_cap_synchronous_io(inode_to_bdi(inode))) {
+                               /* on-stack-bio */
+                               struct bio sbio;
+                               struct bio_vec bvec;
+
+                               bio_init(&sbio, &bvec, 1);
+                               sbio.bi_bdev = bdev;
+                               sbio.bi_iter.bi_sector =
+                                       blocks[0] << (blkbits - 9);
+                               sbio.bi_end_io = on_stack_page_end_io;
+                               bio_add_page(&sbio, page, PAGE_SIZE, 0);
+                               bio_set_op_attrs(&sbio, REQ_OP_WRITE, op_flags);
+                               WARN_ON_ONCE(PageWriteback(page));
+                               set_page_writeback(page);
+                               unlock_page(page);
+                               submit_bio(&sbio);
+                               clean_buffers(page, first_unmapped);
+                               goto out;
+                       }
+
                        if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
                                                                page, wbc)) {
                                clean_buffers(page, first_unmapped);
-- 
2.7.4
