[PATCH 3/4] btrfs: Introduce extent_read_full_page_nolock()

2013-08-06 Thread Mark Fasheh
We want this for btrfs_extent_same. Basically readpage and friends do their
own extent locking but for the purposes of dedupe, we want to have both
files locked down across a set of readpage operations (so that we can
compare data). Introduce this variant and a flag which can be set for
extent_read_full_page() to indicate that we are already locked.
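
To make the calling convention concrete, here is a minimal sketch of a
dedupe-style caller (a hypothetical helper using the era's page-cache and
extent-lock APIs; the real logic lives in btrfs_extent_same elsewhere in
this series):

/* Hypothetical sketch: the caller takes the extent lock once for the whole
 * range, then reads each page with the nolock variant so that readpage
 * does not try to take the same lock again. */
static int read_range_locked(struct inode *inode, u64 off, u64 len)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	unsigned long i, nr_pages = len >> PAGE_CACHE_SHIFT;
	struct page *page;
	int ret = 0;

	lock_extent(tree, off, off + len - 1);	/* parent holds the lock */
	for (i = 0; i < nr_pages; i++) {
		page = grab_cache_page(inode->i_mapping,
				       (off >> PAGE_CACHE_SHIFT) + i);
		if (!page) {
			ret = -ENOMEM;
			break;
		}
		/* grab_cache_page() returns the page locked; the read
		 * completion unlocks it.  Wait for it, compare data,
		 * then page_cache_release(). */
		ret = extent_read_full_page_nolock(tree, page,
						   btrfs_get_extent, 0);
		if (ret)
			break;
	}
	unlock_extent(tree, off, off + len - 1);
	return ret;
}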

Partial credit for this patch goes to Gabriel de Perthuis <g2p.c...@gmail.com>
as I have included a fix from him to the original patch which avoids a
deadlock on compressed extents.

Signed-off-by: Mark Fasheh <mfas...@suse.de>
---
 fs/btrfs/compression.c |  6 +++++-
 fs/btrfs/extent_io.c   | 41 +++++++++++++++++++++++++++++++----------
 fs/btrfs/extent_io.h   |  3 +++
 3 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 15b9408..05819c3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -636,7 +636,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	faili = nr_pages - 1;
 	cb->nr_pages = nr_pages;
 
-	add_ra_bio_pages(inode, em_start + em_len, cb);
+	/* In the parent-locked case, we only locked the range we are
+	 * interested in.  In all other cases, we can opportunistically
+	 * cache decompressed data that goes beyond the requested range. */
+	if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
+		add_ra_bio_pages(inode, em_start + em_len, cb);
 
 	/* include any pages we added in add_ra-bio_pages */
 	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cdee391..80ce106 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2643,11 +2643,12 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	struct btrfs_ordered_extent *ordered;
 	int ret;
 	int nr = 0;
+	int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
 	size_t pg_offset = 0;
 	size_t iosize;
 	size_t disk_io_size;
 	size_t blocksize = inode->i_sb->s_blocksize;
-	unsigned long this_bio_flag = 0;
+	unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
 
 	set_page_extent_mapped(page);
 
@@ -2659,7 +2660,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	}
 
 	end = page_end;
-	while (1) {
+	while (!parent_locked) {
 		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_extent(inode, start);
 		if (!ordered)
@@ -2695,15 +2696,18 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
-			unlock_extent_cached(tree, cur, cur + iosize - 1,
-					     &cached, GFP_NOFS);
+			if (!parent_locked)
+				unlock_extent_cached(tree, cur,
+						     cur + iosize - 1,
+						     &cached, GFP_NOFS);
 			break;
 		}
 		em = get_extent(inode, page, pg_offset, cur,
 				end - cur + 1, 0);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
-			unlock_extent(tree, cur, end);
+			if (!parent_locked)
+				unlock_extent(tree, cur, end);
 			break;
 		}
 		extent_offset = cur - em->start;
@@ -2711,7 +2715,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		BUG_ON(end < cur);
 
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
-			this_bio_flag = EXTENT_BIO_COMPRESSED;
+			this_bio_flag |= EXTENT_BIO_COMPRESSED;
 			extent_set_compress_type(&this_bio_flag,
 						 em->compress_type);
 		}
@@ -2755,7 +2759,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (test_range_bit(tree, cur, cur_end,
 				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
-			unlock_extent(tree, cur, cur + iosize - 1);
+			if (!parent_locked)
+				unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2765,7 +2770,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		 */
 		if (block_start == EXTENT_MAP_INLINE) {
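
(The archive truncates the diff here.  Per the diffstat, the remaining
three added lines land in fs/btrfs/extent_io.h; presumably the new bio
flag and the prototype, along these lines -- a reconstruction, not the
verbatim hunk:)

#define EXTENT_BIO_PARENT_LOCKED 4

int extent_read_full_page_nolock(struct extent_io_tree *tree,
				 struct page *page,
				 get_extent_t *get_extent, int mirror_num);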

Re: [PATCH 3/4] btrfs: Introduce extent_read_full_page_nolock()

2013-05-09 Thread Gabriel de Perthuis
> We want this for btrfs_extent_same. Basically readpage and friends do their
> own extent locking but for the purposes of dedupe, we want to have both
> files locked down across a set of readpage operations (so that we can
> compare data). Introduce this variant and a flag which can be set for
> extent_read_full_page() to indicate that we are already locked.
>
> This one can get stuck in TASK_UNINTERRUPTIBLE:
>
> [32129.522257] SysRq : Show Blocked State
> [32129.524337]   task                        PC stack   pid father
> [32129.526515] python          D ffff88021f394280     0 16281      1 0x00000004
> [32129.528656]  ffff88020e079a48 0000000000000082 ffff88013d3cdd40 ffff88020e079fd8
> [32129.530840]  ffff88020e079fd8 ffff88020e079fd8 ffff8802138dc5f0 ffff88013d3cdd40
> [32129.533044]  0000000000000000 0000000000001fff ffff88015286f440 0000000000000008
> [32129.535285] Call Trace:
> [32129.537522]  [<ffffffff816dcca9>] schedule+0x29/0x70
> [32129.539829]  [<ffffffffa02b4908>] wait_extent_bit+0xf8/0x150 [btrfs]
> [32129.542130]  [<ffffffff8107ea00>] ? finish_wait+0x80/0x80
> [32129.544463]  [<ffffffffa02b4f84>] lock_extent_bits+0x44/0xa0 [btrfs]
> [32129.546824]  [<ffffffffa02b4ff3>] lock_extent+0x13/0x20 [btrfs]
> [32129.549198]  [<ffffffffa02dc0cf>] add_ra_bio_pages.isra.8+0x17f/0x2d0 [btrfs]
> [32129.551602]  [<ffffffffa02dccfc>] btrfs_submit_compressed_read+0x25c/0x4c0 [btrfs]
> [32129.554028]  [<ffffffffa029d131>] btrfs_submit_bio_hook+0x1d1/0x1e0 [btrfs]
> [32129.556457]  [<ffffffffa02b2d07>] submit_one_bio+0x67/0xa0 [btrfs]
> [32129.558899]  [<ffffffffa02b7ecd>] extent_read_full_page_nolock+0x4d/0x60 [btrfs]
> [32129.561290]  [<ffffffffa02c8052>] fill_data+0xb2/0x230 [btrfs]
> [32129.563623]  [<ffffffffa02cd57e>] btrfs_ioctl+0x1f7e/0x2560 [btrfs]
> [32129.565924]  [<ffffffff816ddbae>] ? _raw_spin_lock+0xe/0x20
> [32129.568207]  [<ffffffff8119b907>] ? inode_get_bytes+0x47/0x60
> [32129.570472]  [<ffffffff811a8297>] do_vfs_ioctl+0x97/0x560
> [32129.572700]  [<ffffffff8119bb5a>] ? sys_newfstat+0x2a/0x40
> [32129.574882]  [<ffffffff811a87f1>] sys_ioctl+0x91/0xb0
> [32129.577008]  [<ffffffff816e64dd>] system_call_fastpath+0x1a/0x1f

For anyone trying those patches, there's a fix here:
https://github.com/g2p/linux/tree/v3.9%2Bbtrfs-extent-same
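
The hang is a self-deadlock: the dedupe ioctl takes the extent lock up
front, then the compressed-read path tries to take it again while adding
readahead pages.  Condensed from the trace above (the guard at the end is
the compression.c hunk from the newer revision at the top of this thread):

/* btrfs_ioctl()                          dedupe ioctl; the extent range
 *   lock_extent(tree, start, end)        is already locked before reading
 *   fill_data()
 *     extent_read_full_page_nolock()
 *       submit_one_bio()
 *         btrfs_submit_bio_hook()
 *           btrfs_submit_compressed_read()
 *             add_ra_bio_pages()
 *               lock_extent(...)         <- same range: sleeps forever
 *
 * Fix: skip readahead when the caller already holds the extent lock.
 */
if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
	add_ra_bio_pages(inode, em_start + em_len, cb);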



Re: [PATCH 3/4] btrfs: Introduce extent_read_full_page_nolock()

2013-05-07 Thread Gabriel de Perthuis
> We want this for btrfs_extent_same. Basically readpage and friends do their
> own extent locking but for the purposes of dedupe, we want to have both
> files locked down across a set of readpage operations (so that we can
> compare data). Introduce this variant and a flag which can be set for
> extent_read_full_page() to indicate that we are already locked.

This one can get stuck in TASK_UNINTERRUPTIBLE:

[32129.522257] SysRq : Show Blocked State
[32129.524337]   task                        PC stack   pid father
[32129.526515] python          D ffff88021f394280     0 16281      1 0x00000004
[32129.528656]  ffff88020e079a48 0000000000000082 ffff88013d3cdd40 ffff88020e079fd8
[32129.530840]  ffff88020e079fd8 ffff88020e079fd8 ffff8802138dc5f0 ffff88013d3cdd40
[32129.533044]  0000000000000000 0000000000001fff ffff88015286f440 0000000000000008
[32129.535285] Call Trace:
[32129.537522]  [<ffffffff816dcca9>] schedule+0x29/0x70
[32129.539829]  [<ffffffffa02b4908>] wait_extent_bit+0xf8/0x150 [btrfs]
[32129.542130]  [<ffffffff8107ea00>] ? finish_wait+0x80/0x80
[32129.544463]  [<ffffffffa02b4f84>] lock_extent_bits+0x44/0xa0 [btrfs]
[32129.546824]  [<ffffffffa02b4ff3>] lock_extent+0x13/0x20 [btrfs]
[32129.549198]  [<ffffffffa02dc0cf>] add_ra_bio_pages.isra.8+0x17f/0x2d0 [btrfs]
[32129.551602]  [<ffffffffa02dccfc>] btrfs_submit_compressed_read+0x25c/0x4c0 [btrfs]
[32129.554028]  [<ffffffffa029d131>] btrfs_submit_bio_hook+0x1d1/0x1e0 [btrfs]
[32129.556457]  [<ffffffffa02b2d07>] submit_one_bio+0x67/0xa0 [btrfs]
[32129.558899]  [<ffffffffa02b7ecd>] extent_read_full_page_nolock+0x4d/0x60 [btrfs]
[32129.561290]  [<ffffffffa02c8052>] fill_data+0xb2/0x230 [btrfs]
[32129.563623]  [<ffffffffa02cd57e>] btrfs_ioctl+0x1f7e/0x2560 [btrfs]
[32129.565924]  [<ffffffff816ddbae>] ? _raw_spin_lock+0xe/0x20
[32129.568207]  [<ffffffff8119b907>] ? inode_get_bytes+0x47/0x60
[32129.570472]  [<ffffffff811a8297>] do_vfs_ioctl+0x97/0x560
[32129.572700]  [<ffffffff8119bb5a>] ? sys_newfstat+0x2a/0x40
[32129.574882]  [<ffffffff811a87f1>] sys_ioctl+0x91/0xb0
[32129.577008]  [<ffffffff816e64dd>] system_call_fastpath+0x1a/0x1f

Side note, I wish btrfs used TASK_KILLABLE[1] instead.

[1]: https://lwn.net/Articles/288056/
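
For illustration, the difference in kernel terms (a generic sketch, not
btrfs code; the wait queue and the done condition are hypothetical):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int done;	/* hypothetical wake-up condition */

static int demo_wait(void)
{
	/* TASK_UNINTERRUPTIBLE: immune to every signal, shows up as
	 * state "D" in the blocked-task dump above; only the wake-up
	 * can get the task unstuck. */
	wait_event(demo_wq, done);

	/* TASK_KILLABLE: still ignores ordinary signals, but a fatal
	 * signal (SIGKILL) breaks the wait; returns -ERESTARTSYS. */
	return wait_event_killable(demo_wq, done);
}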



Re: [PATCH 3/4] btrfs: Introduce extent_read_full_page_nolock()

2013-05-06 Thread David Sterba
On Tue, Apr 16, 2013 at 03:15:34PM -0700, Mark Fasheh wrote:
> @@ -2625,7 +2625,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
> 	}
>  
> 	end = page_end;
> -	while (1) {
> +	while (1 && !parent_locked) {

the patch is ok, just this caught my eye :)


[PATCH 3/4] btrfs: Introduce extent_read_full_page_nolock()

2013-04-16 Thread Mark Fasheh
We want this for btrfs_extent_same. Basically readpage and friends do their
own extent locking but for the purposes of dedupe, we want to have both
files locked down across a set of readpage operations (so that we can
compare data). Introduce this variant and a flag which can be set for
extent_read_full_page() to indicate that we are already locked.

Signed-off-by: Mark Fasheh <mfas...@suse.de>
---
 fs/btrfs/extent_io.c |   44 ++++++++++++++++++++++++++++++++------------
 fs/btrfs/extent_io.h |    2 ++
 2 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1b319df..9256503 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2592,7 +2592,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			   struct page *page,
 			   get_extent_t *get_extent,
 			   struct bio **bio, int mirror_num,
-			   unsigned long *bio_flags)
+			   unsigned long *bio_flags, int parent_locked)
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
@@ -2625,7 +2625,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	}
 
 	end = page_end;
-	while (1) {
+	while (1 && !parent_locked) {
 		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_extent(inode, start);
 		if (!ordered)
@@ -2659,15 +2659,18 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
-			unlock_extent_cached(tree, cur, cur + iosize - 1,
-					     &cached, GFP_NOFS);
+			if (!parent_locked)
+				unlock_extent_cached(tree, cur,
+						     cur + iosize - 1,
+						     &cached, GFP_NOFS);
 			break;
 		}
 		em = get_extent(inode, page, pg_offset, cur,
 				end - cur + 1, 0);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
-			unlock_extent(tree, cur, end);
+			if (!parent_locked)
+				unlock_extent(tree, cur, end);
 			break;
 		}
 		extent_offset = cur - em->start;
@@ -2719,7 +2722,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (test_range_bit(tree, cur, cur_end,
 				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
-			unlock_extent(tree, cur, cur + iosize - 1);
+			if (!parent_locked)
+				unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2729,7 +2733,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		 */
 		if (block_start == EXTENT_MAP_INLINE) {
 			SetPageError(page);
-			unlock_extent(tree, cur, cur + iosize - 1);
+			if (!parent_locked)
+				unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2756,7 +2761,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		}
 		if (ret) {
 			SetPageError(page);
-			unlock_extent(tree, cur, cur + iosize - 1);
+			if (!parent_locked)
+				unlock_extent(tree, cur, cur + iosize - 1);
 		}
 		cur = cur + iosize;
 		pg_offset += iosize;
@@ -2778,7 +2784,21 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 	int ret;
 
 	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
-				      &bio_flags);
+				      &bio_flags, 0);
+	if (bio)
+		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+	return ret;
+}
+
+int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
+				 get_extent_t *get_extent, int mirror_num)
+{
+	struct bio *bio = NULL;
+	unsigned long bio_flags = 0;
+	int ret;
+
+	ret = __extent_read_full_page(tree, page, get_extent, &bio,
+				      mirror_num, &bio_flags, 1);
 	if (bio)
 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 	return ret;
 }
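
For comparison with the newer revision at the top of this thread: the extra
int parent_locked argument introduced here was later folded into the bio
flags, so __extent_read_full_page() recovers it from *bio_flags instead of
growing its signature.  Roughly (both shapes taken from the diffs in this
thread):

/* v1 (this posting): parent_locked passed as an extra argument */
ret = __extent_read_full_page(tree, page, get_extent, &bio,
			      mirror_num, &bio_flags, 1);

/* Newer revision: the caller sets EXTENT_BIO_PARENT_LOCKED in bio_flags,
 * and __extent_read_full_page() derives the same information with:
 *	int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
 */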