[PATCH v6 90/99] btrfs: Convert delayed_nodes_tree to XArray

Date: 2018-01-17
From: Matthew Wilcox

Rename it to just 'delayed_nodes' and remove it from the protection of
btrfs_root->inode_lock.

Signed-off-by: Matthew Wilcox 
---
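
Notes: the heart of the conversion is replacing the radix tree's
preload + insert-under-spinlock sequence with a single compare-and-
exchange under the XArray's internal lock.  Below is a minimal,
self-contained sketch of that insert-or-retry idiom; the names
(my_node, my_cache, my_get_or_create) are hypothetical, and it assumes
the documented xa_cmpxchg() semantics, where the previous entry is
returned, or an xa_err() entry if the internal allocation failed.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical node type and cache; not btrfs code. */
struct my_node {
	unsigned long index;
};

static DEFINE_XARRAY(my_cache);

static struct my_node *my_get_or_create(unsigned long index)
{
	struct my_node *node;
	void *exists;

again:
	node = xa_load(&my_cache, index);	/* lockless, RCU-safe lookup */
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	node->index = index;

	/*
	 * Taking xa_lock() explicitly (instead of calling xa_cmpxchg())
	 * lets a caller publish other pointers to the node atomically
	 * with the store, as the patch does with ->delayed_node.
	 */
	xa_lock(&my_cache);
	exists = __xa_cmpxchg(&my_cache, index, NULL, node, GFP_NOFS);
	xa_unlock(&my_cache);

	if (exists) {
		kfree(node);
		if (xa_is_err(exists))		/* allocation failure */
			return ERR_PTR(xa_err(exists));
		goto again;	/* lost the race; reload the winner */
	}
	return node;
}

Unlike radix_tree_preload(), no separate preallocation step is needed:
the XArray allocates its internal nodes during the store itself and
reports failure through the returned error entry.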
 fs/btrfs/ctree.h         |  8 +++-----
 fs/btrfs/delayed-inode.c | 65 ++++++++++++++++++++++-------------------------------------
 fs/btrfs/disk-io.c       |  2 +-
 fs/btrfs/inode.c         |  2 +-
 4 files changed, 27 insertions(+), 50 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 87984ce3a4c2..9acfdc623d15 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1219,11 +1219,9 @@ struct btrfs_root {
 	/* red-black tree that keeps track of in-memory inodes */
 	struct rb_root inode_tree;
 
-	/*
-	 * radix tree that keeps track of delayed nodes of every inode,
-	 * protected by inode_lock
-	 */
-	struct radix_tree_root delayed_nodes_tree;
+	/* track delayed nodes of every inode */
+	struct xarray delayed_nodes;
+
 	/*
 	 * right now this just gets used so that a root has its own devid
 	 * for stat.  It may be used for more later
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 056276101c63..156a762f3809 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -86,7 +86,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 	}
 
 	spin_lock(&root->inode_lock);
-	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+	node = xa_load(&root->delayed_nodes, ino);
 
 	if (node) {
 		if (btrfs_inode->delayed_node) {
@@ -131,10 +131,9 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 		struct btrfs_inode *btrfs_inode)
 {
-	struct btrfs_delayed_node *node;
+	struct btrfs_delayed_node *node, *exists;
 	struct btrfs_root *root = btrfs_inode->root;
 	u64 ino = btrfs_ino(btrfs_inode);
-	int ret;
 
 again:
 	node = btrfs_get_delayed_node(btrfs_inode);
@@ -149,23 +148,18 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 	/* cached in the btrfs inode and can be accessed */
 	refcount_set(&node->refs, 2);
 
-	ret = radix_tree_preload(GFP_NOFS);
-	if (ret) {
+	xa_lock(&root->delayed_nodes);
+	exists = __xa_cmpxchg(&root->delayed_nodes, ino, NULL, node, GFP_NOFS);
+	if (unlikely(exists)) {
+		int ret = xa_err(exists);
+		xa_unlock(&root->delayed_nodes);
 		kmem_cache_free(delayed_node_cache, node);
+		if (ret == -EEXIST)
+			goto again;
 		return ERR_PTR(ret);
 	}
-
-	spin_lock(&root->inode_lock);
-	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
-	if (ret == -EEXIST) {
-		spin_unlock(&root->inode_lock);
-		kmem_cache_free(delayed_node_cache, node);
-		radix_tree_preload_end();
-		goto again;
-	}
 	btrfs_inode->delayed_node = node;
-	spin_unlock(&root->inode_lock);
-	radix_tree_preload_end();
+	xa_unlock(&root->delayed_nodes);
 
 	return node;
 }
@@ -278,15 +272,12 @@ static void __btrfs_release_delayed_node(
 	if (refcount_dec_and_test(&delayed_node->refs)) {
 		struct btrfs_root *root = delayed_node->root;
 
-		spin_lock(&root->inode_lock);
 		/*
 		 * Once our refcount goes to zero, nobody is allowed to bump it
 		 * back up.  We can delete it now.
 		 */
 		ASSERT(refcount_read(&delayed_node->refs) == 0);
-		radix_tree_delete(&root->delayed_nodes_tree,
-				  delayed_node->inode_id);
-		spin_unlock(&root->inode_lock);
+		xa_erase(&root->delayed_nodes, delayed_node->inode_id);
 		kmem_cache_free(delayed_node_cache, delayed_node);
 	}
 }
@@ -1926,31 +1917,19 @@ void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
 
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 {
-	u64 inode_id = 0;
-	struct btrfs_delayed_node *delayed_nodes[8];
-	int i, n;
-
-	while (1) {
-		spin_lock(&root->inode_lock);
-		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
-					   (void **)delayed_nodes, inode_id,
-					   ARRAY_SIZE(delayed_nodes));
-		if (!n) {
-			spin_unlock(&root->inode_lock);
-			break;
-		}
-
-		inode_id = delayed_nodes[n - 1]->inode_id + 1;
-
-		for (i = 0; i < n; i++)
-			refcount_inc(&delayed_nodes[i]->refs);
-		spin_unlock(&root->inode_lock);
+	struct btrfs_delayed_node *node;
+	unsigned long inode_id = 0;
 
-		for (i = 0; i < n; i++) {
-			__btrfs_kill_delayed_node(delayed_nodes[i]);
-			btrfs_release_delayed_node(delayed_nodes[i]);
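
In btrfs_kill_all_delayed_nodes(), the conversion drops the
radix_tree_gang_lookup() batching, the fixed-size delayed_nodes[8]
array and the inode_id resume arithmetic in favour of iterating the
XArray directly.  Against today's API, where xa_for_each() takes only
the array, an index cursor and an entry pointer (the v6-era macro also
took max and filter arguments), a teardown loop over the hypothetical
cache sketched earlier might look like this:

#include <linux/slab.h>
#include <linux/xarray.h>

/* Drain my_cache from the sketch above; hypothetical, not btrfs code. */
static void my_kill_all(void)
{
	struct my_node *node;
	unsigned long index;

	/*
	 * xa_for_each() resumes from 'index' on every iteration, so it
	 * is safe to erase the current entry while walking.
	 */
	xa_for_each(&my_cache, index, node) {
		xa_erase(&my_cache, index);
		kfree(node);
	}
}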