Re: [PATCH v4] Btrfs: add support for fallocate's zero range operation

2018-01-05 Thread David Sterba
On Sat, Nov 04, 2017 at 04:07:47AM +0000, fdman...@kernel.org wrote:
> From: Filipe Manana 
> 
> This implements support for the zero range operation of fallocate. For now
> at least it's as simple as possible while reusing most of the existing
> fallocate and hole punching infrastructure.
> 
> Signed-off-by: Filipe Manana 

FYI, I've added this patch to the rest of the 4.16 queue.
--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v4] Btrfs: add support for fallocate's zero range operation

2017-11-10 Thread Nikolay Borisov


On  4.11.2017 06:07, fdman...@kernel.org wrote:
> From: Filipe Manana 
> 
> This implements support for the zero range operation of fallocate. For now
> at least it's as simple as possible while reusing most of the existing
> fallocate and hole punching infrastructure.
> 
> Signed-off-by: Filipe Manana 
> ---
> 
> V2: Removed double inode unlock on error path from failure to lock range.
> V3: Factored common code to update isize and inode item into a helper
> function, plus some minor cleanup.
> V4: Removed the no longer needed lock_inode parameter as of V3.
> 

When this gets merged into Linus' tree we'd need to update fallocate's
man page as well
--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4] Btrfs: add support for fallocate's zero range operation

2017-11-07 Thread fdmanana
From: Filipe Manana 

This implements support for the zero range operation of fallocate. For now
at least it's as simple as possible while reusing most of the existing
fallocate and hole punching infrastructure.

Signed-off-by: Filipe Manana 
---

V2: Removed double inode unlock on error path from failure to lock range.
V3: Factored common code to update isize and inode item into a helper
function, plus some minor cleanup.
V4: Removed the no longer needed lock_inode parameter as of V3.

 fs/btrfs/file.c | 338 +---
 1 file changed, 276 insertions(+), 62 deletions(-)

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index aafcc785f840..ea2e863eb540 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2448,6 +2448,46 @@ static int find_first_non_hole(struct inode *inode, u64 
*start, u64 *len)
return ret;
 }
 
+static int btrfs_punch_hole_lock_range(struct inode *inode,
+  const u64 lockstart,
+  const u64 lockend,
+  struct extent_state **cached_state)
+{
+   while (1) {
+   struct btrfs_ordered_extent *ordered;
+   int ret;
+
+   truncate_pagecache_range(inode, lockstart, lockend);
+
+   lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+cached_state);
+   ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
+
+   /*
+* We need to make sure we have no ordered extents in this range
+* and nobody raced in and read a page in this range, if we did
+* we need to try again.
+*/
+   if ((!ordered ||
+   (ordered->file_offset + ordered->len <= lockstart ||
+ordered->file_offset > lockend)) &&
+!btrfs_page_exists_in_range(inode, lockstart, lockend)) {
+   if (ordered)
+   btrfs_put_ordered_extent(ordered);
+   break;
+   }
+   if (ordered)
+   btrfs_put_ordered_extent(ordered);
+   unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+lockend, cached_state, GFP_NOFS);
+   ret = btrfs_wait_ordered_range(inode, lockstart,
+  lockend - lockstart + 1);
+   if (ret)
+   return ret;
+   }
+   return 0;
+}
+
 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2564,38 +2604,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t 
offset, loff_t len)
goto out_only_mutex;
}
 
-   while (1) {
-   struct btrfs_ordered_extent *ordered;
-
-   truncate_pagecache_range(inode, lockstart, lockend);
-
-   lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-&cached_state);
-   ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
-
-   /*
-* We need to make sure we have no ordered extents in this range
-* and nobody raced in and read a page in this range, if we did
-* we need to try again.
-*/
-   if ((!ordered ||
-   (ordered->file_offset + ordered->len <= lockstart ||
-ordered->file_offset > lockend)) &&
-!btrfs_page_exists_in_range(inode, lockstart, lockend)) {
-   if (ordered)
-   btrfs_put_ordered_extent(ordered);
-   break;
-   }
-   if (ordered)
-   btrfs_put_ordered_extent(ordered);
-   unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
-lockend, &cached_state, GFP_NOFS);
-   ret = btrfs_wait_ordered_range(inode, lockstart,
-  lockend - lockstart + 1);
-   if (ret) {
-   inode_unlock(inode);
-   return ret;
-   }
+   ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+   if (ret) {
+   inode_unlock(inode);
+   goto out_only_mutex;
}
 
path = btrfs_alloc_path();
@@ -2804,6 +2817,217 @@ static int add_falloc_range(struct list_head *head, u64 
start, u64 len)
return 0;
 }
 
+static int btrfs_fallocate_update_isize(struct inode *inode,
+   const u64 end,
+   const int mode)
+{
+   struct btrfs_trans_handle *trans;
+   s