From: Matthew Wilcox <mawil...@microsoft.com>

Remove the last mentions of radix tree from various comments.

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 mm/shmem.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 4dbcfb436bd1..5110848885d4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -743,7 +743,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 }
 
 /*
- * Remove range of pages and swap entries from radix tree, and free them.
+ * Remove range of pages and swap entries from page cache, and free them.
  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
  */
 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
@@ -1118,10 +1118,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
                 * We needed to drop mutex to make that restrictive page
                 * allocation, but the inode might have been freed while we
                 * dropped it: although a racing shmem_evict_inode() cannot
-                * complete without emptying the radix_tree, our page lock
+                * complete without emptying the page cache, our page lock
                 * on this swapcache page is not enough to prevent that -
                 * free_swap_and_cache() of our swap entry will only
-                * trylock_page(), removing swap from radix_tree whatever.
+                * trylock_page(), removing swap from page cache whatever.
                 *
                 * We must not proceed to shmem_add_to_page_cache() if the
                 * inode has been freed, but of course we cannot rely on
@@ -1187,7 +1187,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
                        false);
        if (error)
                goto out;
-       /* No radix_tree_preload: swap entry keeps a place for page in tree */
+       /* No memory allocation: swap entry occupies the slot for the page */
        error = -EAGAIN;
 
        mutex_lock(&shmem_swaplist_mutex);
@@ -1863,7 +1863,7 @@ alloc_nohuge:             page = shmem_alloc_and_acct_page(gfp, inode,
                spin_unlock_irq(&info->lock);
                goto repeat;
        }
-       if (error == -EEXIST)   /* from above or from radix_tree_insert */
+       if (error == -EEXIST)
                goto repeat;
        return error;
 }
@@ -2475,7 +2475,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }
 
 /*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ * llseek SEEK_DATA or SEEK_HOLE through the page cache.
  */
 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
                                    pgoff_t index, pgoff_t end, int whence)
@@ -2563,7 +2563,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 }
 
 /*
- * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
+ * We need a tag: a new tag would expand every xa_node by 8 bytes,
  * so reuse a tag which we firmly believe is never set or cleared on shmem.
  */
 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
-- 
2.15.1

Reply via email to