Re: [PATCH 11/33] iomap: add an iomap-based readpage and readpages implementation

2018-05-09 Thread Christoph Hellwig
On Thu, May 10, 2018 at 11:17:58AM +1000, Dave Chinner wrote:
> > +   if (ret <= 0)
> > +   break;
> > +   pos += ret;
> > +   length -= ret;
> > +   }
> > +
> > +   ret = 0;
> 
> This means the function will always return zero, regardless of
> whether iomap_apply returned an error or not.
> 
> > +   if (ctx.bio)
> > +   submit_bio(ctx.bio);
> > +   if (ctx.cur_page) {
> > +   if (!ctx.cur_page_in_bio)
> > +   unlock_page(ctx.cur_page);
> > +   put_page(ctx.cur_page);
> > +   }
> > +   WARN_ON_ONCE(ret && !list_empty(ctx.pages));
> 
> And this warning will never trigger. Was this intended behaviour?
> If it is, it needs a comment, because it looks wrong.

Yes, the break should have been a goto out which jumps after the
ret.


Re: [PATCH 11/33] iomap: add an iomap-based readpage and readpages implementation

2018-05-09 Thread Dave Chinner
On Wed, May 09, 2018 at 09:48:08AM +0200, Christoph Hellwig wrote:
> Simply use iomap_apply to iterate over the file and submit a bio for
> each non-uptodate but mapped region and zero everything else.  Note that
> as-is this can not be used for file systems with a blocksize smaller than
> the page size, but that support will be added later.
> 
> Signed-off-by: Christoph Hellwig 
.
> +int
> +iomap_readpages(struct address_space *mapping, struct list_head *pages,
> + unsigned nr_pages, const struct iomap_ops *ops)
> +{
> + struct iomap_readpage_ctx ctx = { .pages = pages };
> + loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
> + loff_t last = page_offset(list_entry(pages->next, struct page, lru));
> + loff_t length = last - pos + PAGE_SIZE, ret = 0;
> +
> + while (length > 0) {
> + ret = iomap_apply(mapping->host, pos, length, 0, ops,
> + &ctx, iomap_readpages_actor);
> + if (ret <= 0)
> + break;
> + pos += ret;
> + length -= ret;
> + }
> +
> + ret = 0;

This means the function will always return zero, regardless of
whether iomap_apply returned an error or not.

> + if (ctx.bio)
> + submit_bio(ctx.bio);
> + if (ctx.cur_page) {
> + if (!ctx.cur_page_in_bio)
> + unlock_page(ctx.cur_page);
> + put_page(ctx.cur_page);
> + }
> + WARN_ON_ONCE(ret && !list_empty(ctx.pages));

And this warning will never trigger. Was this intended behaviour?
If it is, it needs a comment, because it looks wrong.

Cheers,

Dave.
-- 
Dave Chinner
da...@fromorbit.com


[PATCH 11/33] iomap: add an iomap-based readpage and readpages implementation

2018-05-09 Thread Christoph Hellwig
Simply use iomap_apply to iterate over the file and submit a bio for
each non-uptodate but mapped region and zero everything else.  Note that
as-is this can not be used for file systems with a blocksize smaller than
the page size, but that support will be added later.

Signed-off-by: Christoph Hellwig 
---
 fs/iomap.c| 195 +-
 include/linux/iomap.h |   4 +
 2 files changed, 198 insertions(+), 1 deletion(-)

diff --git a/fs/iomap.c b/fs/iomap.c
index 049e0c4aacac..967bd31540fe 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016 Christoph Hellwig.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -18,6 +18,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -102,6 +103,198 @@ iomap_sector(struct iomap *iomap, loff_t pos)
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
 }
 
+static inline bool
+iomap_block_needs_zeroing(struct inode *inode, loff_t pos, struct iomap *iomap)
+{
+   return iomap->type != IOMAP_MAPPED || pos > i_size_read(inode);
+}
+
+static void
+iomap_read_end_io(struct bio *bio)
+{
+   int error = blk_status_to_errno(bio->bi_status);
+   struct bio_vec *bvec;
+   int i;
+
+   bio_for_each_segment_all(bvec, bio, i)
+   page_endio(bvec->bv_page, false, error);
+   bio_put(bio);
+}
+
+static struct bio *
+iomap_read_bio_alloc(struct iomap *iomap, sector_t sector, loff_t length)
+{
+   int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+   struct bio *bio = bio_alloc(GFP_NOFS, min(BIO_MAX_PAGES, nr_vecs));
+
+   bio->bi_opf = REQ_OP_READ;
+   bio->bi_iter.bi_sector = sector;
+   bio_set_dev(bio, iomap->bdev);
+   bio->bi_end_io = iomap_read_end_io;
+   return bio;
+}
+
+struct iomap_readpage_ctx {
+   struct page *cur_page;
+   boolcur_page_in_bio;
+   struct bio  *bio;
+   struct list_head*pages;
+};
+
+static loff_t
+iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+   struct iomap *iomap)
+{
+   struct iomap_readpage_ctx *ctx = data;
+   struct page *page = ctx->cur_page;
+   unsigned poff = pos & (PAGE_SIZE - 1);
+   unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+   bool is_contig = false;
+   sector_t sector;
+
+   /* we don't support blocksize < PAGE_SIZE quite yet: */
+   WARN_ON_ONCE(pos != page_offset(page));
+   WARN_ON_ONCE(plen != PAGE_SIZE);
+
+   if (iomap_block_needs_zeroing(inode, pos, iomap)) {
+   zero_user(page, poff, plen);
+   SetPageUptodate(page);
+   goto done;
+   }
+
+   ctx->cur_page_in_bio = true;
+
+   /*
+* Try to merge into a previous segment if we can.
+*/
+   sector = iomap_sector(iomap, pos);
+   if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+   if (__bio_try_merge_page(ctx->bio, page, plen, poff))
+   goto done;
+   is_contig = true;
+   }
+
+   if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
+   if (ctx->bio)
+   submit_bio(ctx->bio);
+   ctx->bio = iomap_read_bio_alloc(iomap, sector, length);
+   }
+
+   __bio_add_page(ctx->bio, page, plen, poff);
+done:
+   return plen;
+}
+
+int
+iomap_readpage(struct page *page, const struct iomap_ops *ops)
+{
+   struct iomap_readpage_ctx ctx = { .cur_page = page };
+   struct inode *inode = page->mapping->host;
+   unsigned poff;
+   loff_t ret;
+
+   WARN_ON_ONCE(page_has_buffers(page));
+
+   for (poff = 0; poff < PAGE_SIZE; poff += ret) {
+   ret = iomap_apply(inode, page_offset(page) + poff,
+   PAGE_SIZE - poff, 0, ops, &ctx,
+   iomap_readpage_actor);
+   if (ret <= 0) {
+   SetPageError(page);
+   break;
+   }
+   }
+
+   if (ctx.bio)
+   submit_bio(ctx.bio);
+   else
+   unlock_page(page);
+   return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_readpage);
+
+static struct page *
+iomap_next_page(struct inode *inode, struct list_head *pages, loff_t end,
+   loff_t *done)
+{
+   while (!list_empty(pages)) {
+   struct page *page = lru_to_page(pages);
+
+   if (page_offset(page) >= end)
+   break;
+
+   list_del(&page->lru);
+   if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
+   GFP_NOFS))
+   return page;
+
+   *done += PAGE_SIZE;
+