Don't bother zeroing the page if there is already a hole underneath it.
This lets us avoid one unnecessary block allocation.

Signed-off-by: Li Dongyang <jerry87...@gmail.com>
---
 fs/btrfs/inode.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 85a1e50..017052e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3481,6 +3481,7 @@ int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
+       struct extent_map *em = NULL;
        char *kaddr;
        u32 blocksize = root->sectorsize;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
@@ -3538,6 +3539,20 @@ again:
                goto again;
        }
 
+       em = btrfs_get_extent_fiemap(inode, NULL, 0, page_start,
+                                    PAGE_CACHE_SIZE, 0);
+       if (!IS_ERR_OR_NULL(em) && em->block_start == EXTENT_MAP_HOLE) {
+               u64 em_end = extent_map_end(em);
+               if (em->start <= page_start &&
+                   em_end >= page_start + PAGE_CACHE_SIZE) {
+                       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
+                       unlock_extent_cached(io_tree, page_start, page_end,
+                                            &cached_state, GFP_NOFS);
+                       ret = 0;
+                       goto out_unlock;
+               }
+       }
+
        clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
@@ -3574,6 +3589,8 @@ out_unlock:
        unlock_page(page);
        page_cache_release(page);
 out:
+       if (em)
+               free_extent_map(em);
        return ret;
 }
 
-- 
1.7.12.3

--
To unsubscribe from this list: send the line "unsubscribe linux-btrfs" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to