With the traditional page-based writes, blocks are allocated separately
for each page written to.  With iomap writes, we can allocate a lot more
blocks at once, with a fraction of the allocation overhead for each
page.

Split the calculation of the number of blocks that can be allocated at a
given position (gfs2_alloc_size) out of gfs2_iomap_alloc: that size
determines the number of blocks to allocate and reserve in the journal.

In gfs2_iomap_alloc, set the type of newly allocated extents to
IOMAP_UNWRITTEN so that iomap_to_bh will set the buffer states
correctly: otherwise, if the type is left as IOMAP_HOLE, the buffers are
not marked as Mapped, which would confuse __mpage_writepage.  For
unwritten mappings, iomap_to_bh will set the Unwritten buffer flag as
well, but that's okay.  In fallocate_chunk, we need to check for
IOMAP_UNWRITTEN instead of IOMAP_HOLE now.

Signed-off-by: Andreas Gruenbacher <agrue...@redhat.com>
---
 fs/gfs2/aops.c |  20 +--
 fs/gfs2/aops.h |  22 ++++
 fs/gfs2/bmap.c | 345 ++++++++++++++++++++++++++++++++++++++++++++-----
 fs/gfs2/file.c |  46 ++++++-
 4 files changed, 387 insertions(+), 46 deletions(-)
 create mode 100644 fs/gfs2/aops.h

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index a1c6b5de9200..3d9633175aa8 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -22,6 +22,7 @@
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 #include <trace/events/writeback.h>
+#include <linux/sched/signal.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -36,10 +37,11 @@
 #include "super.h"
 #include "util.h"
 #include "glops.h"
+#include "aops.h"
 
 
-static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
-                                  unsigned int from, unsigned int len)
+void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+                           unsigned int from, unsigned int len)
 {
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
@@ -462,7 +464,7 @@ static int gfs2_jdata_writepages(struct address_space 
*mapping,
  * Returns: errno
  */
 
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 {
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
@@ -773,7 +775,7 @@ static int gfs2_write_begin(struct file *file, struct 
address_space *mapping,
  * adjust_fs_space - Adjusts the free space available due to gfs2_grow
  * @inode: the rindex inode
  */
-static void adjust_fs_space(struct inode *inode)
+void adjust_fs_space(struct inode *inode)
 {
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -819,11 +821,11 @@ static void adjust_fs_space(struct inode *inode)
  * This copies the data from the page into the inode block after
  * the inode data structure itself.
  *
- * Returns: errno
+ * Returns: copied bytes or errno
  */
-static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head 
*dibh,
-                                 loff_t pos, unsigned copied,
-                                 struct page *page)
+int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
+                          loff_t pos, unsigned copied,
+                          struct page *page)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        u64 to = pos + copied;
@@ -862,7 +864,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, 
struct buffer_head *dibh,
  * The main write_end function for GFS2. We just put our locking around the VFS
  * provided functions.
  *
- * Returns: errno
+ * Returns: copied bytes or errno
  */
 
 static int gfs2_write_end(struct file *file, struct address_space *mapping,
diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
new file mode 100644
index 000000000000..976bb32dd405
--- /dev/null
+++ b/fs/gfs2/aops.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#ifndef __AOPS_DOT_H__
+#define __AOPS_DOT_H__
+
+#include "incore.h"
+
+extern int stuffed_readpage(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head 
*dibh,
+                                 loff_t pos, unsigned copied,
+                                 struct page *page);
+extern void adjust_fs_space(struct inode *inode);
+extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+                                  unsigned int from, unsigned int len);
+
+#endif /* __AOPS_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 725a2736681b..a549d3493f66 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -28,6 +28,7 @@
 #include "trans.h"
 #include "dir.h"
 #include "util.h"
+#include "aops.h"
 #include "trace_gfs2.h"
 
 /* This doesn't need to be that large as max 64 bit pointers in a 4k
@@ -41,6 +42,8 @@ struct metapath {
        int mp_aheight; /* actual height (lookup height) */
 };
 
+static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
+
 /**
  * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
  * @ip: the inode
@@ -389,7 +392,7 @@ static int fillup_metapath(struct gfs2_inode *ip, struct 
metapath *mp, int h)
        return mp->mp_aheight - x - 1;
 }
 
-static inline void release_metapath(struct metapath *mp)
+static void release_metapath(struct metapath *mp)
 {
        int i;
 
@@ -397,6 +400,7 @@ static inline void release_metapath(struct metapath *mp)
                if (mp->mp_bh[i] == NULL)
                        break;
                brelse(mp->mp_bh[i]);
+               mp->mp_bh[i] = NULL;
        }
 }
 
@@ -473,11 +477,13 @@ enum alloc_state {
  *  ii) Indirect blocks to fill in lower part of the metadata tree
  * iii) Data blocks
  *
- * The function is in two parts. The first part works out the total
- * number of blocks which we need. The second part does the actual
- * allocation asking for an extent at a time (if enough contiguous free
- * blocks are available, there will only be one request per bmap call)
- * and uses the state machine to initialise the blocks in order.
+ * This function is called after gfs2_iomap_get, which works out the
+ * total number of blocks which we need via gfs2_alloc_size.
+ *
+ * We then do the actual allocation asking for an extent at a time (if
+ * enough contiguous free blocks are available, there will only be one
+ * allocation request per call) and uses the state machine to initialise
+ * the blocks in order.
  *
  * Right now, this function will allocate at most one indirect block
  * worth of data -- with a default block size of 4K, that's slightly
@@ -497,39 +503,26 @@ static int gfs2_iomap_alloc(struct inode *inode, struct 
iomap *iomap,
        struct buffer_head *dibh = mp->mp_bh[0];
        u64 bn;
        unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
-       unsigned dblks = 0;
-       unsigned ptrs_per_blk;
+       size_t dblks = iomap->length >> inode->i_blkbits;
        const unsigned end_of_metadata = mp->mp_fheight - 1;
        int ret;
        enum alloc_state state;
        __be64 *ptr;
        __be64 zero_bn = 0;
-       size_t maxlen = iomap->length >> inode->i_blkbits;
 
        BUG_ON(mp->mp_aheight < 1);
        BUG_ON(dibh == NULL);
+       BUG_ON(dblks < 1);
 
        gfs2_trans_add_meta(ip->i_gl, dibh);
 
        down_write(&ip->i_rw_mutex);
 
        if (mp->mp_fheight == mp->mp_aheight) {
-               struct buffer_head *bh;
-               int eob;
-
-               /* Bottom indirect block exists, find unalloced extent size */
-               ptr = metapointer(end_of_metadata, mp);
-               bh = mp->mp_bh[end_of_metadata];
-               dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
-                                          maxlen, &eob);
-               BUG_ON(dblks < 1);
+               /* Bottom indirect block exists */
                state = ALLOC_DATA;
        } else {
                /* Need to allocate indirect blocks */
-               ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
-                       sdp->sd_diptrs;
-               dblks = min(maxlen, (size_t)(ptrs_per_blk -
-                                            mp->mp_list[end_of_metadata]));
                if (mp->mp_fheight == ip->i_height) {
                        /* Writing into existing tree, extend tree down */
                        iblks = mp->mp_fheight - mp->mp_aheight;
@@ -614,6 +607,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct 
iomap *iomap,
                }
        } while (iomap->addr == IOMAP_NULL_ADDR);
 
+       iomap->type = IOMAP_UNWRITTEN;
        iomap->length = (u64)dblks << inode->i_blkbits;
        ip->i_height = mp->mp_fheight;
        gfs2_add_inode_blocks(&ip->i_inode, alloced);
@@ -759,14 +753,59 @@ static int gfs2_hole_size(struct inode *inode, sector_t 
lblock, u64 len,
        return ret;
 }
 
-static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
+/**
+ * gfs2_alloc_size - Compute the maximum allocation size
+ * @inode: The inode
+ * @mp: The metapath
+ * @size: Requested size in blocks
+ *
+ * Compute the maximum size of the next allocation at @mp.
+ *
+ * Returns: size in blocks
+ */
+static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       const __be64 *first, *ptr, *end;
+
+       /*
+        * For writes to stuffed files, this function is called twice via
+        * gfs2_iomap_get, before and after unstuffing. The size we return the
+        * first time needs to be large enough to get the reservation and
+        * allocation sizes right.  The size we return the second time must
+        * be exact or else gfs2_iomap_alloc won't do the right thing.
+        */
+
+       if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
+               unsigned maxsize = mp->mp_fheight > 1 ?
+                       sdp->sd_inptrs : sdp->sd_diptrs;
+               maxsize -= mp->mp_list[mp->mp_fheight - 1];
+               if (size > maxsize)
+                       size = maxsize;
+               return size;
+       }
+
+       first = metapointer(ip->i_height - 1, mp);
+       end = metaend(ip->i_height - 1, mp);
+       if (end - first > size)
+               end = first + size;
+       for (ptr = first; ptr < end; ptr++) {
+               if (*ptr)
+                       break;
+       }
+       return ptr - first;
+}
+
+static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap,
+                              u64 length)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
 
        iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
                      sizeof(struct gfs2_dinode);
        iomap->offset = 0;
-       iomap->length = i_size_read(inode);
+       iomap->length = length;
        iomap->type = IOMAP_MAPPED;
        iomap->flags = IOMAP_F_DATA_INLINE;
 }
@@ -804,10 +843,15 @@ static int gfs2_iomap_get(struct inode *inode, loff_t 
pos, loff_t length,
                if (flags & IOMAP_REPORT) {
                        if (pos >= i_size_read(inode))
                                return -ENOENT;
-                       gfs2_stuffed_iomap(inode, iomap);
+                       gfs2_stuffed_iomap(inode, iomap,
+                                          i_size_read(inode));
+                       return 0;
+               }
+               if (pos + length <= gfs2_max_stuffed_size(ip)) {
+                       gfs2_stuffed_iomap(inode, iomap,
+                                          gfs2_max_stuffed_size(ip));
                        return 0;
                }
-               BUG_ON(!(flags & IOMAP_WRITE));
        }
        lblock = pos >> inode->i_blkbits;
        iomap->offset = lblock << inode->i_blkbits;
@@ -867,10 +911,147 @@ static int gfs2_iomap_get(struct inode *inode, loff_t 
pos, loff_t length,
                        ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
                else
                        iomap->length = size - pos;
+       } else if (flags & IOMAP_WRITE) {
+               u64 size;
+               size = gfs2_alloc_size(inode, mp, len) << inode->i_blkbits;
+               if (size < iomap->length)
+                       iomap->length = size;
+       } else {
+               if (height == ip->i_height)
+                       ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
        }
        goto out;
 }
 
+static int gfs2_write_lock(struct inode *inode)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       int error;
+
+       gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+       error = gfs2_glock_nq(&ip->i_gh);
+       if (error)
+               goto out_uninit;
+       if (&ip->i_inode == sdp->sd_rindex) {
+               struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+               error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+                                          GL_NOCACHE, &m_ip->i_gh);
+               if (error)
+                       goto out_unlock;
+       }
+       return 0;
+
+out_unlock:
+       gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+       gfs2_holder_uninit(&ip->i_gh);
+       return error;
+}
+
+static void gfs2_write_unlock(struct inode *inode)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+
+       if (&ip->i_inode == sdp->sd_rindex) {
+               struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+               gfs2_glock_dq_uninit(&m_ip->i_gh);
+       }
+       gfs2_glock_dq_uninit(&ip->i_gh);
+}
+
+static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, loff_t 
length,
+                                 unsigned flags, struct iomap *iomap)
+{
+       struct metapath mp = { .mp_aheight = 1, };
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+       bool unstuff, alloc_required;
+       int ret;
+
+       ret = gfs2_write_lock(inode);
+       if (ret)
+               return ret;
+
+       unstuff = gfs2_is_stuffed(ip) &&
+                 pos + length > gfs2_max_stuffed_size(ip);
+
+       ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+       if (ret)
+               goto out_release;
+
+       alloc_required = unstuff || iomap->type != IOMAP_MAPPED;
+
+       if (alloc_required || gfs2_is_jdata(ip))
+               gfs2_write_calc_reserv(ip, iomap->length, &data_blocks, 
&ind_blocks);
+
+       if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .target = data_blocks + 
ind_blocks };
+               ret = gfs2_quota_lock_check(ip, &ap);
+               if (ret)
+                       goto out_release;
+
+               ret = gfs2_inplace_reserve(ip, &ap);
+               if (ret)
+                       goto out_qunlock;
+       }
+
+       rblocks = RES_DINODE + ind_blocks;
+       if (gfs2_is_jdata(ip))
+               rblocks += data_blocks;
+       if (ind_blocks || data_blocks)
+               rblocks += RES_STATFS + RES_QUOTA;
+       if (inode == sdp->sd_rindex)
+               rblocks += 2 * RES_STATFS;
+       if (alloc_required)
+               rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
+
+       ret = gfs2_trans_begin(sdp, rblocks, iomap->length >> inode->i_blkbits);
+       if (ret)
+               goto out_trans_fail;
+
+       if (unstuff) {
+               ret = gfs2_unstuff_dinode(ip, NULL);
+               if (ret)
+                       goto out_trans_end;
+               release_metapath(&mp);
+               ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
+                                    flags, iomap, &mp);
+               if (ret)
+                       goto out_trans_end;
+       }
+
+       if (iomap->type != IOMAP_MAPPED) {
+               ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+               if (ret) {
+                       gfs2_trans_end(sdp);
+                       if (alloc_required)
+                               gfs2_inplace_release(ip);
+                       punch_hole(ip, iomap->offset, iomap->length);
+                       goto out_qunlock;
+               }
+       }
+       release_metapath(&mp);
+       return 0;
+
+out_trans_end:
+       gfs2_trans_end(sdp);
+out_trans_fail:
+       if (alloc_required)
+               gfs2_inplace_release(ip);
+out_qunlock:
+       if (alloc_required)
+               gfs2_quota_unlock(ip);
+out_release:
+       release_metapath(&mp);
+       gfs2_write_unlock(inode);
+       return ret;
+}
+
 int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                     unsigned flags, struct iomap *iomap)
 {
@@ -880,10 +1061,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, 
loff_t length,
 
        trace_gfs2_iomap_start(ip, pos, length, flags);
        if (flags & IOMAP_WRITE) {
-               ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
-               if (!ret && iomap->type == IOMAP_HOLE)
-                       ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
-               release_metapath(&mp);
+               ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap);
        } else {
                ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
                release_metapath(&mp);
@@ -892,8 +1070,115 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, 
loff_t length,
        return ret;
 }
 
+static int
+gfs2_iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
+                      unsigned flags, struct page **pagep, struct iomap *iomap)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct page *page;
+       int ret;
+
+       if (gfs2_is_stuffed(ip)) {
+               BUG_ON(pos + len > gfs2_max_stuffed_size(ip));
+
+               page = grab_cache_page_write_begin(inode->i_mapping, 0, flags);
+               if (!page)
+                       return -ENOMEM;
+
+               if (!PageUptodate(page)) {
+                       ret = stuffed_readpage(ip, page);
+                       if (ret) {
+                               unlock_page(page);
+                               put_page(page);
+                               return ret;
+                       }
+               }
+               *pagep = page;
+               return 0;
+       }
+
+       return iomap_write_begin(inode, pos, len, flags, pagep, iomap);
+}
+
+static int gfs2_iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+                               unsigned copied, struct page *page)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct buffer_head *dibh;
+       int ret;
+
+       if (gfs2_is_stuffed(ip)) {
+               ret = gfs2_meta_inode_buffer(ip, &dibh);
+               if (ret) {
+                       unlock_page(page);
+                       put_page(page);
+                       return ret;
+               }
+               ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
+               brelse(dibh);
+               return ret;
+       }
+
+       if (gfs2_is_jdata(ip))
+               gfs2_page_add_databufs(ip, page, offset_in_page(pos), len);
+
+       return iomap_write_end(inode, pos, len, copied, page);
+}
+
+static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+                         ssize_t written, unsigned flags, struct iomap *iomap)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_trans *tr = current->journal_info;
+
+       if (!(flags & IOMAP_WRITE))
+               return 0;
+
+       gfs2_ordered_add_inode(ip);
+
+       if (tr->tr_num_buf_new)
+               __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+       else {
+               struct buffer_head *dibh;
+               int ret;
+
+               ret = gfs2_meta_inode_buffer(ip, &dibh);
+               if (unlikely(ret))
+                       return ret;
+               gfs2_trans_add_meta(ip->i_gl, dibh);
+               brelse(dibh);
+       }
+
+       if (inode == sdp->sd_rindex) {
+               adjust_fs_space(inode);
+               sdp->sd_rindex_uptodate = 0;
+       }
+
+       gfs2_trans_end(sdp);
+       gfs2_inplace_release(ip);
+
+       if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+               /* Deallocate blocks that were just allocated. */
+               loff_t end = round_down(pos + length, PAGE_SIZE);
+               pos = round_up(pos, PAGE_SIZE);
+               if (pos < end) {
+                       truncate_pagecache_range(inode, pos, end - 1);
+                       punch_hole(ip, pos, end - pos);
+               }
+       }
+
+       if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+               gfs2_quota_unlock(ip);
+       gfs2_write_unlock(inode);
+       return 0;
+}
+
 const struct iomap_ops gfs2_iomap_ops = {
        .iomap_begin = gfs2_iomap_begin,
+       .write_begin = gfs2_iomap_write_begin,
+       .write_end = gfs2_iomap_write_end,
+       .iomap_end = gfs2_iomap_end,
 };
 
 /**
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 1a168e4eac97..c2467988f96f 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -26,10 +26,12 @@
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
 #include <linux/delay.h>
+#include <linux/backing-dev.h>
 
 #include "gfs2.h"
 #include "incore.h"
 #include "bmap.h"
+#include "aops.h"
 #include "dir.h"
 #include "glock.h"
 #include "glops.h"
@@ -691,9 +693,7 @@ static int gfs2_fsync(struct file *file, loff_t start, 
loff_t end,
 /**
  * gfs2_file_write_iter - Perform a write to a file
  * @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
- * @pos: The file position
+ * @from: The data to write
  *
  * We have to do a lock/unlock here to refresh the inode size for
  * O_APPEND writes, otherwise we can land up writing at the wrong
@@ -705,8 +705,9 @@ static int gfs2_fsync(struct file *file, loff_t start, 
loff_t end,
 static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       struct gfs2_inode *ip = GFS2_I(file_inode(file));
-       int ret;
+       struct inode *inode = file_inode(file);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       ssize_t ret;
 
        ret = gfs2_rsqa_alloc(ip);
        if (ret)
@@ -723,7 +724,38 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, 
struct iov_iter *from)
                gfs2_glock_dq_uninit(&gh);
        }
 
-       return generic_file_write_iter(iocb, from);
+       if (iocb->ki_flags & IOCB_DIRECT)
+               return generic_file_write_iter(iocb, from);
+
+       inode_lock(inode);
+       ret = generic_write_checks(iocb, from);
+       if (ret <= 0)
+               goto out;
+
+       /* We can write back this queue in page reclaim */
+       current->backing_dev_info = inode_to_bdi(inode);
+
+       ret = file_remove_privs(file);
+       if (ret)
+               goto out2;
+
+       ret = file_update_time(file);
+       if (ret)
+               goto out2;
+
+       ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+
+out2:
+       current->backing_dev_info = NULL;
+out:
+       inode_unlock(inode);
+       if (likely(ret > 0)) {
+               iocb->ki_pos += ret;
+
+               /* Handle various SYNC-type writes */
+               ret = generic_write_sync(iocb, ret);
+       }
+       return ret;
 }
 
 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -754,7 +786,7 @@ static int fallocate_chunk(struct inode *inode, loff_t 
offset, loff_t len,
                if (error)
                        goto out;
                offset = iomap.offset + iomap.length;
-               if (iomap.type != IOMAP_HOLE)
+               if (iomap.type != IOMAP_UNWRITTEN)
                        continue;
                error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
                                         iomap.length >> inode->i_blkbits,
-- 
2.17.0

Reply via email to