[PATCH 0/6] Remove page_mapping()

2024-04-23 Thread Matthew Wilcox (Oracle)
There are only a few users left.  Convert them all to either call
folio_mapping() or just use folio->mapping directly.
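
The conversion pattern is the same everywhere; as a rough illustration
(a sketch only, not taken from any single patch in the series):

	static struct address_space *example_mapping(struct page *page)
	{
		/* Old style: page_mapping() hides a compound_head() call */
		/* return page_mapping(page); */

		/* New style: resolve the folio once and reuse it */
		struct folio *folio = page_folio(page);

		/*
		 * folio_mapping() copes with anon and swap-cache folios;
		 * callers that know they hold a plain page-cache folio can
		 * read folio->mapping directly instead.
		 */
		return folio_mapping(folio);
	}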

Matthew Wilcox (Oracle) (6):
  fscrypt: Convert bh_get_inode_and_lblk_num to use a folio
  f2fs: Convert f2fs_clear_page_cache_dirty_tag to use a folio
  memory-failure: Remove calls to page_mapping()
  migrate: Expand the use of folio in __migrate_device_pages()
  userfault: Expand folio use in mfill_atomic_install_pte()
  mm: Remove page_mapping()

 fs/crypto/inline_crypt.c |  6 +++---
 fs/f2fs/data.c           |  5 +++--
 include/linux/pagemap.h  |  1 -
 mm/folio-compat.c        |  6 ------
 mm/memory-failure.c      |  6 ++++--
 mm/migrate_device.c      | 13 +++++--------
 mm/userfaultfd.c         |  5 ++---
 7 files changed, 17 insertions(+), 25 deletions(-)

-- 
2.43.0




[PATCH 5/6] userfault: Expand folio use in mfill_atomic_install_pte()

2024-04-23 Thread Matthew Wilcox (Oracle)
Call page_folio() a little earlier so we can use folio_mapping()
instead of page_mapping(), saving a call to compound_head().
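
For context, the compatibility wrapper being removed in patch 6/6 is just:

	struct address_space *page_mapping(struct page *page)
	{
		return folio_mapping(page_folio(page));
	}

so resolving the folio once at the top of mfill_atomic_install_pte() lets
the rest of the function reuse it instead of doing a second
compound_head() lookup later on.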

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/userfaultfd.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index d9dcc7d71a39..e6486923263c 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -180,9 +180,9 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
pte_t _dst_pte, *dst_pte;
bool writable = dst_vma->vm_flags & VM_WRITE;
bool vm_shared = dst_vma->vm_flags & VM_SHARED;
-   bool page_in_cache = page_mapping(page);
spinlock_t *ptl;
-   struct folio *folio;
+   struct folio *folio = page_folio(page);
+   bool page_in_cache = folio_mapping(folio);
 
_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
_dst_pte = pte_mkdirty(_dst_pte);
@@ -212,7 +212,6 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
if (!pte_none_mostly(ptep_get(dst_pte)))
goto out_unlock;
 
-   folio = page_folio(page);
if (page_in_cache) {
/* Usually, cache pages are already added to LRU */
if (newly_allocated)
-- 
2.43.0




[PATCH 2/6] f2fs: Convert f2fs_clear_page_cache_dirty_tag to use a folio

2024-04-23 Thread Matthew Wilcox (Oracle)
Removes uses of page_mapping() and page_index().

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/f2fs/data.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5d641fac02ba..9f74c867d790 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -4100,11 +4100,12 @@ const struct address_space_operations f2fs_dblock_aops = {
 
 void f2fs_clear_page_cache_dirty_tag(struct page *page)
 {
-   struct address_space *mapping = page_mapping(page);
+   struct folio *folio = page_folio(page);
+   struct address_space *mapping = folio->mapping;
unsigned long flags;
 
xa_lock_irqsave(&mapping->i_pages, flags);
-   __xa_clear_mark(&mapping->i_pages, page_index(page),
+   __xa_clear_mark(&mapping->i_pages, folio->index,
PAGECACHE_TAG_DIRTY);
xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
-- 
2.43.0




[PATCH 6/6] mm: Remove page_mapping()

2024-04-23 Thread Matthew Wilcox (Oracle)
All callers are now converted, delete this compatibility wrapper.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 include/linux/pagemap.h | 1 -
 mm/folio-compat.c   | 6 --
 2 files changed, 7 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b6f14e9a2d98..941f7ed714b9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,7 +399,6 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
 #endif
 }
 
-struct address_space *page_mapping(struct page *);
 struct address_space *folio_mapping(struct folio *);
 struct address_space *swapcache_mapping(struct folio *);
 
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index f31e0ce65b11..f05906006b3c 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -10,12 +10,6 @@
 #include 
 #include "internal.h"
 
-struct address_space *page_mapping(struct page *page)
-{
-   return folio_mapping(page_folio(page));
-}
-EXPORT_SYMBOL(page_mapping);
-
 void unlock_page(struct page *page)
 {
return folio_unlock(page_folio(page));
-- 
2.43.0




[PATCH 1/6] fscrypt: Convert bh_get_inode_and_lblk_num to use a folio

2024-04-23 Thread Matthew Wilcox (Oracle)
Remove uses of page->index, page_mapping() and b_page.  Saves a call
to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/crypto/inline_crypt.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index b4002aea7cdb..40de69860dcf 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -284,7 +284,7 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
  const struct inode **inode_ret,
  u64 *lblk_num_ret)
 {
-   struct page *page = bh->b_page;
+   struct folio *folio = bh->b_folio;
const struct address_space *mapping;
const struct inode *inode;
 
@@ -292,13 +292,13 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
 * The ext4 journal (jbd2) can submit a buffer_head it directly created
 * for a non-pagecache page.  fscrypt doesn't care about these.
 */
-   mapping = page_mapping(page);
+   mapping = folio_mapping(folio);
if (!mapping)
return false;
inode = mapping->host;
 
*inode_ret = inode;
-   *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
+   *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
(bh_offset(bh) >> inode->i_blkbits);
return true;
 }
-- 
2.43.0




[PATCH 3/6] memory-failure: Remove calls to page_mapping()

2024-04-23 Thread Matthew Wilcox (Oracle)
This is mostly just inlining page_mapping() into the two callers.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/memory-failure.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e065dd9be21e..62aa3db17854 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -216,6 +216,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
 
 static int hwpoison_filter_dev(struct page *p)
 {
+   struct folio *folio = page_folio(p);
struct address_space *mapping;
dev_t dev;
 
@@ -223,7 +224,7 @@ static int hwpoison_filter_dev(struct page *p)
hwpoison_filter_dev_minor == ~0U)
return 0;
 
-   mapping = page_mapping(p);
+   mapping = folio_mapping(folio);
if (mapping == NULL || mapping->host == NULL)
return -EINVAL;
 
@@ -1090,7 +1091,8 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
  */
 static int me_pagecache_dirty(struct page_state *ps, struct page *p)
 {
-   struct address_space *mapping = page_mapping(p);
+   struct folio *folio = page_folio(p);
+   struct address_space *mapping = folio_mapping(folio);
 
/* TBD: print more information about the file. */
if (mapping) {
-- 
2.43.0




[PATCH 4/6] migrate: Expand the use of folio in __migrate_device_pages()

2024-04-23 Thread Matthew Wilcox (Oracle)
Removes a few calls to compound_head() and a call to page_mapping().

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/migrate_device.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index a68616c1965f..aecc71972a87 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -692,6 +692,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
struct page *page = migrate_pfn_to_page(src_pfns[i]);
struct address_space *mapping;
+   struct folio *folio;
int r;
 
if (!newpage) {
@@ -726,15 +727,12 @@ static void __migrate_device_pages(unsigned long *src_pfns,
continue;
}
 
-   mapping = page_mapping(page);
+   folio = page_folio(page);
+   mapping = folio_mapping(folio);
 
if (is_device_private_page(newpage) ||
is_device_coherent_page(newpage)) {
if (mapping) {
-   struct folio *folio;
-
-   folio = page_folio(page);
-
/*
 * For now only support anonymous memory migrating to
 * device private or coherent memory.
@@ -757,11 +755,10 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 
if (migrate && migrate->fault_page == page)
r = migrate_folio_extra(mapping, page_folio(newpage),
-   page_folio(page),
-   MIGRATE_SYNC_NO_COPY, 1);
+   folio, MIGRATE_SYNC_NO_COPY, 1);
else
r = migrate_folio(mapping, page_folio(newpage),
-   page_folio(page), MIGRATE_SYNC_NO_COPY);
+   folio, MIGRATE_SYNC_NO_COPY);
if (r != MIGRATEPAGE_SUCCESS)
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
}
-- 
2.43.0




[f2fs-dev] [PATCH] fscrypt: Copy the memcg information to the ciphertext page

2023-01-29 Thread Matthew Wilcox (Oracle)
Both f2fs and ext4 end up passing the ciphertext page to
wbc_account_cgroup_owner().  At the moment, the ciphertext page appears
to belong to no cgroup, so it is accounted to the root_mem_cgroup instead
of whatever cgroup the original page was in.

It's hard to say how far back this is a bug.  The crypto code shared
between ext4 & f2fs was created in May 2015 with commit 0b81d0779072,
but neither filesystem did anything with memcg_data before then.  memcg
writeback accounting was added to ext4 in July 2015 in commit 001e4a8775f6
and it wasn't added to f2fs until January 2018 (commit 578c647879f7).

I'm going with the ext4 commit since this is the first commit where
there was a difference in behaviour between encrypted and unencrypted
filesystems.

Fixes: 001e4a8775f6 ("ext4: implement cgroup writeback support")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/crypto/crypto.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index e78be66bbf01..a4e76f96f291 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -205,6 +205,9 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
}
SetPagePrivate(ciphertext_page);
set_page_private(ciphertext_page, (unsigned long)page);
+#ifdef CONFIG_MEMCG
+   ciphertext_page->memcg_data = page->memcg_data;
+#endif
return ciphertext_page;
 }
 EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
-- 
2.35.1





[f2fs-dev] [PATCH v2 03/19] fs: Add aops->migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
Provide a folio-based replacement for aops->migratepage.  Update the
documentation to document migrate_folio instead of migratepage.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 Documentation/filesystems/locking.rst |  5 ++--
 Documentation/filesystems/vfs.rst | 13 ++-
 Documentation/vm/page_migration.rst   | 33 ++-
 include/linux/fs.h|  4 +++-
 mm/compaction.c   |  4 +++-
 mm/migrate.c  | 11 +
 6 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index c0fe711f14d3..3d28b23676bd 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -253,7 +253,8 @@ prototypes::
void (*free_folio)(struct folio *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
bool (*isolate_page) (struct page *, isolate_mode_t);
-   int (*migratepage)(struct address_space *, struct page *, struct page *);
+   int (*migrate_folio)(struct address_space *, struct folio *dst,
+   struct folio *src, enum migrate_mode);
void (*putback_page) (struct page *);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
@@ -281,7 +282,7 @@ release_folio:  yes
 free_folio:yes
 direct_IO:
 isolate_page:  yes
-migratepage:   yes (both)
+migrate_folio: yes (both)
 putback_page:  yes
 launder_folio: yes
 is_partially_uptodate: yes
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index a08c652467d7..3ae1b039b03f 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -740,7 +740,8 @@ cache in your filesystem.  The following members are defined:
/* isolate a page for migration */
bool (*isolate_page) (struct page *, isolate_mode_t);
/* migrate the contents of a page to the specified target */
-   int (*migratepage) (struct page *, struct page *);
+   int (*migrate_folio)(struct mapping *, struct folio *dst,
+   struct folio *src, enum migrate_mode);
/* put migration-failed page back to right list */
void (*putback_page) (struct page *);
int (*launder_folio) (struct folio *);
@@ -935,12 +936,12 @@ cache in your filesystem.  The following members are defined:
is successfully isolated, VM marks the page as PG_isolated via
__SetPageIsolated.
 
-``migrate_page``
+``migrate_folio``
This is used to compact the physical memory usage.  If the VM
-   wants to relocate a page (maybe off a memory card that is
-   signalling imminent failure) it will pass a new page and an old
-   page to this function.  migrate_page should transfer any private
-   data across and update any references that it has to the page.
+   wants to relocate a folio (maybe from a memory device that is
+   signalling imminent failure) it will pass a new folio and an old
+   folio to this function.  migrate_folio should transfer any private
+   data across and update any references that it has to the folio.
 
 ``putback_page``
Called by the VM when isolated page's migration fails.
diff --git a/Documentation/vm/page_migration.rst b/Documentation/vm/page_migration.rst
index 8c5cb8147e55..e0f73ddfabb1 100644
--- a/Documentation/vm/page_migration.rst
+++ b/Documentation/vm/page_migration.rst
@@ -181,22 +181,23 @@ which are function pointers of struct address_space_operations.
Once page is successfully isolated, VM uses page.lru fields so driver
shouldn't expect to preserve values in those fields.
 
-2. ``int (*migratepage) (struct address_space *mapping,``
-|  ``struct page *newpage, struct page *oldpage, enum migrate_mode);``
-
-   After isolation, VM calls migratepage() of driver with the isolated page.
-   The function of migratepage() is to move the contents of the old page to the
-   new page
-   and set up fields of struct page newpage. Keep in mind that you should
-   indicate to the VM the oldpage is no longer movable via __ClearPageMovable()
-   under page_lock if you migrated the oldpage successfully and returned
-   MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver
-   can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time
-   because VM interprets -EAGAIN as "temporary migration failure". On returning
-   any error except -EAGAIN, VM will give up the page migration without
-   retrying.
-
-   Driver shouldn't touch the page.lru field while in the migratepage() function.
+2. ``int (*migrate_folio) (struct address_space *mapping,``
+|  ``struct f

[f2fs-dev] [PATCH v2 12/19] btrfs: Convert btrfs_migratepage to migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
Use filemap_migrate_folio() to do the bulk of the work, and then copy
the ordered flag across if needed.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 fs/btrfs/inode.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 81737eff92f3..5f41d869c648 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8255,30 +8255,24 @@ static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 }
 
 #ifdef CONFIG_MIGRATION
-static int btrfs_migratepage(struct address_space *mapping,
-struct page *newpage, struct page *page,
+static int btrfs_migrate_folio(struct address_space *mapping,
+struct folio *dst, struct folio *src,
 enum migrate_mode mode)
 {
-   int ret;
+   int ret = filemap_migrate_folio(mapping, dst, src, mode);
 
-   ret = migrate_page_move_mapping(mapping, newpage, page, 0);
if (ret != MIGRATEPAGE_SUCCESS)
return ret;
 
-   if (page_has_private(page))
-   attach_page_private(newpage, detach_page_private(page));
-
-   if (PageOrdered(page)) {
-   ClearPageOrdered(page);
-   SetPageOrdered(newpage);
+   if (folio_test_ordered(src)) {
+   folio_clear_ordered(src);
+   folio_set_ordered(dst);
}
 
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
-   else
-   migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define btrfs_migrate_folio NULL
 #endif
 
 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
@@ -11422,9 +11416,7 @@ static const struct address_space_operations btrfs_aops = {
.direct_IO  = noop_direct_IO,
.invalidate_folio = btrfs_invalidate_folio,
.release_folio  = btrfs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= btrfs_migratepage,
-#endif
+   .migrate_folio  = btrfs_migrate_folio,
.dirty_folio= filemap_dirty_folio,
.error_remove_page = generic_error_remove_page,
.swap_activate  = btrfs_swap_activate,
-- 
2.35.1





[f2fs-dev] [PATCH v2 05/19] mm/migrate: Convert writeout() to take a folio

2022-06-08 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 mm/migrate.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 1878de817a01..6b6fec26f4d0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -761,11 +761,10 @@ int buffer_migrate_page_norefs(struct address_space *mapping,
 #endif
 
 /*
- * Writeback a page to clean the dirty state
+ * Writeback a folio to clean the dirty state
  */
-static int writeout(struct address_space *mapping, struct page *page)
+static int writeout(struct address_space *mapping, struct folio *folio)
 {
-   struct folio *folio = page_folio(page);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
@@ -779,25 +778,25 @@ static int writeout(struct address_space *mapping, struct page *page)
/* No write method for the address space */
return -EINVAL;
 
-   if (!clear_page_dirty_for_io(page))
+   if (!folio_clear_dirty_for_io(folio))
/* Someone else already triggered a write */
return -EAGAIN;
 
/*
-* A dirty page may imply that the underlying filesystem has
-* the page on some queue. So the page must be clean for
-* migration. Writeout may mean we loose the lock and the
-* page state is no longer what we checked for earlier.
+* A dirty folio may imply that the underlying filesystem has
+* the folio on some queue. So the folio must be clean for
+* migration. Writeout may mean we lose the lock and the
+* folio state is no longer what we checked for earlier.
 * At this point we know that the migration attempt cannot
 * be successful.
 */
remove_migration_ptes(folio, folio, false);
 
-   rc = mapping->a_ops->writepage(page, &wbc);
+   rc = mapping->a_ops->writepage(&folio->page, &wbc);
 
if (rc != AOP_WRITEPAGE_ACTIVATE)
/* unlocked. Relock */
-   lock_page(page);
+   folio_lock(folio);
 
return (rc < 0) ? -EIO : -EAGAIN;
 }
@@ -817,7 +816,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
default:
return -EBUSY;
}
-   return writeout(mapping, &src->page);
+   return writeout(mapping, src);
}
 
/*
-- 
2.35.1





[f2fs-dev] [PATCH v2 06/19] mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()

2022-06-08 Thread Matthew Wilcox (Oracle)
Use a folio throughout __buffer_migrate_folio(), add kernel-doc for
buffer_migrate_folio() and buffer_migrate_folio_norefs(), move their
declarations to buffer.h and switch all filesystems that have wired
them up.
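
The moved declarations presumably end up looking something like this in
buffer.h (a sketch inferred from how the aops below are wired up, not
quoted from the patch):

	#ifdef CONFIG_MIGRATION
	int buffer_migrate_folio(struct address_space *mapping,
			struct folio *dst, struct folio *src, enum migrate_mode mode);
	int buffer_migrate_folio_norefs(struct address_space *mapping,
			struct folio *dst, struct folio *src, enum migrate_mode mode);
	#else
	/* Assumed fallback so the aops initialisers still compile without MIGRATION */
	#define buffer_migrate_folio NULL
	#define buffer_migrate_folio_norefs NULL
	#endif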

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 block/fops.c|  2 +-
 fs/ext2/inode.c |  4 +-
 fs/ext4/inode.c |  4 +-
 fs/ntfs/aops.c  |  6 +--
 fs/ocfs2/aops.c |  2 +-
 include/linux/buffer_head.h | 10 +
 include/linux/fs.h  | 12 --
 mm/migrate.c| 76 ++---
 8 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/block/fops.c b/block/fops.c
index d6b3276a6c68..743fc46d0aad 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -417,7 +417,7 @@ const struct address_space_operations def_blk_aops = {
.write_end  = blkdev_write_end,
.writepages = blkdev_writepages,
.direct_IO  = blkdev_direct_IO,
-   .migratepage= buffer_migrate_page_norefs,
+   .migrate_folio  = buffer_migrate_folio_norefs,
.is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 360ce3604a2d..84570c6265aa 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -973,7 +973,7 @@ const struct address_space_operations ext2_aops = {
.bmap   = ext2_bmap,
.direct_IO  = ext2_direct_IO,
.writepages = ext2_writepages,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
 };
@@ -989,7 +989,7 @@ const struct address_space_operations ext2_nobh_aops = {
.bmap   = ext2_bmap,
.direct_IO  = ext2_direct_IO,
.writepages = ext2_writepages,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.error_remove_page  = generic_error_remove_page,
 };
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1aaea53e67b5..53877ffe3c41 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3633,7 +3633,7 @@ static const struct address_space_operations ext4_aops = {
.invalidate_folio   = ext4_invalidate_folio,
.release_folio  = ext4_release_folio,
.direct_IO  = noop_direct_IO,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.swap_activate  = ext4_iomap_swap_activate,
@@ -3668,7 +3668,7 @@ static const struct address_space_operations ext4_da_aops = {
.invalidate_folio   = ext4_invalidate_folio,
.release_folio  = ext4_release_folio,
.direct_IO  = noop_direct_IO,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.swap_activate  = ext4_iomap_swap_activate,
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 9e3964ea2ea0..5f4fb6ca6f2e 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1659,7 +1659,7 @@ const struct address_space_operations ntfs_normal_aops = {
.dirty_folio= block_dirty_folio,
 #endif /* NTFS_RW */
.bmap   = ntfs_bmap,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
@@ -1673,7 +1673,7 @@ const struct address_space_operations ntfs_compressed_aops = {
.writepage  = ntfs_writepage,
.dirty_folio= block_dirty_folio,
 #endif /* NTFS_RW */
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
@@ -1688,7 +1688,7 @@ const struct address_space_operations ntfs_mst_aops = {
.writepage  = ntfs_writepage,   /* Write dirty page to disk. */
.dirty_folio= filemap_dirty_folio,
 #endif /* NTFS_RW */
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 767df51f8657..1d489003f99d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2462,7 +2462,7 @@ const struct address_space_operations ocfs2_aops = {
.direct_IO

[f2fs-dev] [PATCH v2 19/19] mm/folio-compat: Remove migration compatibility functions

2022-06-08 Thread Matthew Wilcox (Oracle)
migrate_page_move_mapping(), migrate_page_copy() and migrate_page_states()
are all now unused after converting all the filesystems from
aops->migratepage() to aops->migrate_folio().

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 include/linux/migrate.h | 11 -----------
 mm/folio-compat.c       | 22 ----------------------
 mm/ksm.c                |  2 +-
 3 files changed, 1 insertion(+), 34 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 59d64a1e6b4b..3e18c7048506 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -40,12 +40,8 @@ extern int migrate_pages(struct list_head *l, new_page_t 
new, free_page_t free,
 extern struct page *alloc_migration_target(struct page *page, unsigned long 
private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 
-extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
-extern int migrate_page_move_mapping(struct address_space *mapping,
-   struct page *newpage, struct page *page, int extra_count);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
spinlock_t *ptl);
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
@@ -66,13 +62,6 @@ static inline struct page *alloc_migration_target(struct 
page *page,
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return -EBUSY; }
 
-static inline void migrate_page_states(struct page *newpage, struct page *page)
-{
-}
-
-static inline void migrate_page_copy(struct page *newpage,
-struct page *page) {}
-
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
  struct folio *dst, struct folio *src)
 {
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 20bc15b57d93..458618c7302c 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -51,28 +51,6 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-#ifdef CONFIG_MIGRATION
-int migrate_page_move_mapping(struct address_space *mapping,
-   struct page *newpage, struct page *page, int extra_count)
-{
-   return folio_migrate_mapping(mapping, page_folio(newpage),
-   page_folio(page), extra_count);
-}
-EXPORT_SYMBOL(migrate_page_move_mapping);
-
-void migrate_page_states(struct page *newpage, struct page *page)
-{
-   folio_migrate_flags(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_states);
-
-void migrate_page_copy(struct page *newpage, struct page *page)
-{
-   folio_migrate_copy(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_copy);
-#endif
-
 bool set_page_writeback(struct page *page)
 {
return folio_start_writeback(page_folio(page));
diff --git a/mm/ksm.c b/mm/ksm.c
index 54f78c9eecae..e8f8c1a2bb39 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static struct page *get_ksm_page(struct stable_node 
*stable_node,
 * however, it might mean that the page is under page_ref_freeze().
 * The __remove_mapping() case is easy, again the node is now stale;
 * the same is in reuse_ksm_page() case; but if page is swapcache
-* in migrate_page_move_mapping(), it might still be our page,
+* in folio_migrate_mapping(), it might still be our page,
 * in which case it's essential to keep the node.
 */
while (!get_page_unless_zero(page)) {
-- 
2.35.1





[f2fs-dev] [PATCH v2 16/19] hugetlb: Convert to migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
This involves converting migrate_huge_page_move_mapping().  We also need a
folio variant of hugetlb_set_page_subpool(), but that's for a later patch.
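
Such a folio variant would presumably be a thin wrapper along these lines
(hypothetical sketch; the name and the helper itself are not part of this
series):

	/* Hypothetical helper, not added by this patch */
	static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
	{
		hugetlb_set_page_subpool(&folio->page, subpool);
	}

which would let hugetlbfs_migrate_folio() below drop its remaining
&src->page / &dst->page conversions.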

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/hugetlbfs/inode.c| 23 ++-
 include/linux/migrate.h |  6 +++---
 mm/migrate.c| 18 +-
 3 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 14d33f725e05..eca1d0fabd7e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -954,28 +954,33 @@ static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
return error;
 }
 
-static int hugetlbfs_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page,
+#ifdef CONFIG_MIGRATION
+static int hugetlbfs_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src,
enum migrate_mode mode)
 {
int rc;
 
-   rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+   rc = migrate_huge_page_move_mapping(mapping, dst, src);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
 
-   if (hugetlb_page_subpool(page)) {
-   hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
-   hugetlb_set_page_subpool(page, NULL);
+   if (hugetlb_page_subpool(&src->page)) {
+   hugetlb_set_page_subpool(&dst->page,
+   hugetlb_page_subpool(&src->page));
+   hugetlb_set_page_subpool(&src->page, NULL);
}
 
if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
+   folio_migrate_copy(dst, src);
else
-   migrate_page_states(newpage, page);
+   folio_migrate_flags(dst, src);
 
return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define hugetlbfs_migrate_folio NULL
+#endif
 
 static int hugetlbfs_error_remove_page(struct address_space *mapping,
struct page *page)
@@ -1142,7 +1147,7 @@ static const struct address_space_operations hugetlbfs_aops = {
.write_begin= hugetlbfs_write_begin,
.write_end  = hugetlbfs_write_end,
.dirty_folio= noop_dirty_folio,
-   .migratepage= hugetlbfs_migrate_page,
+   .migrate_folio  = hugetlbfs_migrate_folio,
.error_remove_page  = hugetlbfs_error_remove_page,
 };
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 82f00ad69a54..59d64a1e6b4b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -42,8 +42,8 @@ extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 extern void migrate_page_states(struct page *newpage, struct page *page);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+   struct folio *dst, struct folio *src);
 extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
@@ -74,7 +74,7 @@ static inline void migrate_page_copy(struct page *newpage,
 struct page *page) {}
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct folio *dst, struct folio *src)
 {
return -ENOSYS;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 4d8115ca93bb..bed0de86f3ae 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -474,26 +474,26 @@ EXPORT_SYMBOL(folio_migrate_mapping);
  * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
-  struct page *newpage, struct page *page)
+  struct folio *dst, struct folio *src)
 {
-   XA_STATE(xas, &mapping->i_pages, page_index(page));
+   XA_STATE(xas, &mapping->i_pages, folio_index(src));
int expected_count;
 
xas_lock_irq(&xas);
-   expected_count = 2 + page_has_private(page);
-   if (!page_ref_freeze(page, expected_count)) {
+   expected_count = 2 + folio_has_private(src);
+   if (!folio_ref_freeze(src, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;
}
 
-   newpage->index = page->index;
-   newpage->mapping = page->mapping;
+   dst->index = src->index;
+   dst->mapping = src->mapping;
 
-   get_page(newpage);
+   

[f2fs-dev] [PATCH v2 14/19] f2fs: Convert to filemap_migrate_folio()

2022-06-08 Thread Matthew Wilcox (Oracle)
filemap_migrate_folio() fits f2fs's needs perfectly.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/f2fs/checkpoint.c |  4 +---
 fs/f2fs/data.c   | 40 +---
 fs/f2fs/f2fs.h   |  4 
 fs/f2fs/node.c   |  4 +---
 4 files changed, 3 insertions(+), 49 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6d8b2bf14de0..8259e0fa97e1 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -463,9 +463,7 @@ const struct address_space_operations f2fs_meta_aops = {
.dirty_folio= f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio  = f2fs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= f2fs_migrate_page,
-#endif
+   .migrate_folio  = filemap_migrate_folio,
 };
 
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7fcbcf979737..318a3f91ad74 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3751,42 +3751,6 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return blknr;
 }
 
-#ifdef CONFIG_MIGRATION
-#include 
-
-int f2fs_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-   int rc, extra_count = 0;
-
-   BUG_ON(PageWriteback(page));
-
-   rc = migrate_page_move_mapping(mapping, newpage,
-   page, extra_count);
-   if (rc != MIGRATEPAGE_SUCCESS)
-   return rc;
-
-   /* guarantee to start from no stale private field */
-   set_page_private(newpage, 0);
-   if (PagePrivate(page)) {
-   set_page_private(newpage, page_private(page));
-   SetPagePrivate(newpage);
-   get_page(newpage);
-
-   set_page_private(page, 0);
-   ClearPagePrivate(page);
-   put_page(page);
-   }
-
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
-   else
-   migrate_page_states(newpage, page);
-
-   return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 #ifdef CONFIG_SWAP
 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
unsigned int blkcnt)
@@ -4018,15 +3982,13 @@ const struct address_space_operations f2fs_dblock_aops = {
.write_begin= f2fs_write_begin,
.write_end  = f2fs_write_end,
.dirty_folio= f2fs_dirty_data_folio,
+   .migrate_folio  = filemap_migrate_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio  = f2fs_release_folio,
.direct_IO  = noop_direct_IO,
.bmap   = f2fs_bmap,
.swap_activate  = f2fs_swap_activate,
.swap_deactivate = f2fs_swap_deactivate,
-#ifdef CONFIG_MIGRATION
-   .migratepage= f2fs_migrate_page,
-#endif
 };
 
 void f2fs_clear_page_cache_dirty_tag(struct page *page)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d9bbecd008d2..f258a1b6faed 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3764,10 +3764,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 void f2fs_write_failed(struct inode *inode, loff_t to);
 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
-#ifdef CONFIG_MIGRATION
-int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode);
-#endif
 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
 void f2fs_clear_page_cache_dirty_tag(struct page *page);
 int f2fs_init_post_read_processing(void);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 836c79a20afc..ed1cbfb0345f 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2163,9 +2163,7 @@ const struct address_space_operations f2fs_node_aops = {
.dirty_folio= f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio  = f2fs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= f2fs_migrate_page,
-#endif
+   .migrate_folio  = filemap_migrate_folio,
 };
 
 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
-- 
2.35.1





[f2fs-dev] [PATCH v2 00/19] Convert aops->migratepage to aops->migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
We're getting to the last aops that take a struct page.  The only
remaining ones are ->writepage, ->write_begin, ->write_end and
->error_remove_page.
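
The signature change at the heart of the series (as the locking.rst update
in patch 03/19 documents) is:

	/* Old, page-based operation: */
	int (*migratepage)(struct address_space *, struct page *, struct page *);

	/* New, folio-based operation with dst/src named explicitly: */
	int (*migrate_folio)(struct address_space *, struct folio *dst,
			struct folio *src, enum migrate_mode);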

Changes from v1:
 - Remove ->isolate_page from secretmem
 - Split the movable_operations from address_space_operations
 - Drop the conversions of balloon, zsmalloc and z3fold
 - Fix the build errors with hugetlbfs
 - Fix the kerneldoc errors
 - Fix the ;; typo

Matthew Wilcox (Oracle) (19):
  secretmem: Remove isolate_page
  mm: Convert all PageMovable users to movable_operations
  fs: Add aops->migrate_folio
  mm/migrate: Convert fallback_migrate_page() to
fallback_migrate_folio()
  mm/migrate: Convert writeout() to take a folio
  mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()
  mm/migrate: Convert expected_page_refs() to folio_expected_refs()
  btrfs: Convert btree_migratepage to migrate_folio
  nfs: Convert to migrate_folio
  mm/migrate: Convert migrate_page() to migrate_folio()
  mm/migrate: Add filemap_migrate_folio()
  btrfs: Convert btrfs_migratepage to migrate_folio
  ubifs: Convert to filemap_migrate_folio()
  f2fs: Convert to filemap_migrate_folio()
  aio: Convert to migrate_folio
  hugetlb: Convert to migrate_folio
  secretmem: Convert to migrate_folio
  fs: Remove aops->migratepage()
  mm/folio-compat: Remove migration compatibility functions

 Documentation/filesystems/locking.rst   |   5 +-
 Documentation/filesystems/vfs.rst   |  13 +-
 Documentation/vm/page_migration.rst |  33 +--
 arch/powerpc/platforms/pseries/cmm.c|  60 +
 block/fops.c|   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c |   4 +-
 drivers/misc/vmw_balloon.c  |  61 +
 drivers/virtio/virtio_balloon.c |  47 +---
 fs/aio.c|  36 +--
 fs/btrfs/disk-io.c  |  22 +-
 fs/btrfs/inode.c|  26 +--
 fs/ext2/inode.c |   4 +-
 fs/ext4/inode.c |   4 +-
 fs/f2fs/checkpoint.c|   4 +-
 fs/f2fs/data.c  |  40 +---
 fs/f2fs/f2fs.h  |   4 -
 fs/f2fs/node.c  |   4 +-
 fs/gfs2/aops.c  |   2 +-
 fs/hugetlbfs/inode.c|  23 +-
 fs/iomap/buffered-io.c  |  25 --
 fs/nfs/file.c   |   4 +-
 fs/nfs/internal.h   |   6 +-
 fs/nfs/write.c  |  16 +-
 fs/ntfs/aops.c  |   6 +-
 fs/ocfs2/aops.c |   2 +-
 fs/ubifs/file.c |  29 +--
 fs/xfs/xfs_aops.c   |   2 +-
 fs/zonefs/super.c   |   2 +-
 include/linux/balloon_compaction.h  |   6 +-
 include/linux/buffer_head.h |  10 +
 include/linux/fs.h  |  20 +-
 include/linux/iomap.h   |   6 -
 include/linux/migrate.h |  48 ++--
 include/linux/page-flags.h  |   2 +-
 include/linux/pagemap.h |   6 +
 include/uapi/linux/magic.h  |   4 -
 mm/balloon_compaction.c |  10 +-
 mm/compaction.c |  34 ++-
 mm/folio-compat.c   |  22 --
 mm/ksm.c|   2 +-
 mm/migrate.c| 238 
 mm/migrate_device.c |   3 +-
 mm/secretmem.c  |  13 +-
 mm/shmem.c  |   2 +-
 mm/swap_state.c |   2 +-
 mm/util.c   |   4 +-
 mm/z3fold.c |  82 +--
 mm/zsmalloc.c   | 102 ++---
 48 files changed, 367 insertions(+), 735 deletions(-)

-- 
2.35.1





[f2fs-dev] [PATCH v2 09/19] nfs: Convert to migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.  migrate_page() will be converted
later.

Signed-off-by: Matthew Wilcox (Oracle) 
Acked-by: Anna Schumaker 
Reviewed-by: Christoph Hellwig 
---
 fs/nfs/file.c     |  4 +---
 fs/nfs/internal.h |  6 ++++--
 fs/nfs/write.c    | 16 ++++++++--------
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2d72b1b7ed74..549baed76351 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -533,9 +533,7 @@ const struct address_space_operations nfs_file_aops = {
.write_end = nfs_write_end,
.invalidate_folio = nfs_invalidate_folio,
.release_folio = nfs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage = nfs_migrate_page,
-#endif
+   .migrate_folio = nfs_migrate_folio,
.launder_folio = nfs_launder_folio,
.is_dirty_writeback = nfs_check_dirty_writeback,
.error_remove_page = generic_error_remove_page,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8f8cd6e2d4db..437ebe544aaf 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -578,8 +578,10 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
 #endif
 
 #ifdef CONFIG_MIGRATION
-extern int nfs_migrate_page(struct address_space *,
-   struct page *, struct page *, enum migrate_mode);
+int nfs_migrate_folio(struct address_space *, struct folio *dst,
+   struct folio *src, enum migrate_mode);
+#else
+#define nfs_migrate_folio NULL
 #endif
 
 static inline int
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1c706465d090..649b9e633459 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2119,27 +2119,27 @@ int nfs_wb_page(struct inode *inode, struct page *page)
 }
 
 #ifdef CONFIG_MIGRATION
-int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode)
+int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode)
 {
/*
-* If PagePrivate is set, then the page is currently associated with
+* If the private flag is set, the folio is currently associated with
 * an in-progress read or write request. Don't try to migrate it.
 *
 * FIXME: we could do this in principle, but we'll need a way to ensure
 *that we can safely release the inode reference while holding
-*the page lock.
+*the folio lock.
 */
-   if (PagePrivate(page))
+   if (folio_test_private(src))
return -EBUSY;
 
-   if (PageFsCache(page)) {
+   if (folio_test_fscache(src)) {
if (mode == MIGRATE_ASYNC)
return -EBUSY;
-   wait_on_page_fscache(page);
+   folio_wait_fscache(src);
}
 
-   return migrate_page(mapping, newpage, page, mode);
+   return migrate_page(mapping, &dst->page, &src->page, mode);
 }
 #endif
 
-- 
2.35.1





[f2fs-dev] [PATCH v2 02/19] mm: Convert all PageMovable users to movable_operations

2022-06-08 Thread Matthew Wilcox (Oracle)
These drivers are rather uncomfortably hammered into the
address_space_operations hole.  They aren't filesystems and don't behave
like filesystems.  They just need their own movable_operations structure,
which we can point to directly from page->mapping.
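
Roughly, that structure ends up looking like this (a sketch based on the
three operations these drivers already implement; treat the exact member
names and signatures as assumptions):

	struct movable_operations {
		bool (*isolate_page)(struct page *, isolate_mode_t);
		int (*migrate_page)(struct page *dst, struct page *src,
				enum migrate_mode);
		void (*putback_page)(struct page *);
	};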

Signed-off-by: Matthew Wilcox (Oracle) 
---
 arch/powerpc/platforms/pseries/cmm.c |  60 +---
 drivers/misc/vmw_balloon.c   |  61 +---
 drivers/virtio/virtio_balloon.c  |  47 +---
 include/linux/balloon_compaction.h   |   6 +-
 include/linux/fs.h   |   2 -
 include/linux/migrate.h  |  26 +--
 include/linux/page-flags.h   |   2 +-
 include/uapi/linux/magic.h   |   4 --
 mm/balloon_compaction.c  |  10 ++-
 mm/compaction.c  |  29 
 mm/migrate.c |  24 +++
 mm/util.c|   4 +-
 mm/z3fold.c  |  82 +++--
 mm/zsmalloc.c| 102 ++-
 14 files changed, 94 insertions(+), 365 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 15ed8206c463..5f4037c1d7fe 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -19,9 +19,6 @@
 #include 
 #include 
 #include 
-#include 
-#include 
-#include 
 #include 
 #include 
 #include 
@@ -500,19 +497,6 @@ static struct notifier_block cmm_mem_nb = {
 };
 
 #ifdef CONFIG_BALLOON_COMPACTION
-static struct vfsmount *balloon_mnt;
-
-static int cmm_init_fs_context(struct fs_context *fc)
-{
-   return init_pseudo(fc, PPC_CMM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type balloon_fs = {
-   .name = "ppc-cmm",
-   .init_fs_context = cmm_init_fs_context,
-   .kill_sb = kill_anon_super,
-};
-
 static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
   struct page *newpage, struct page *page,
   enum migrate_mode mode)
@@ -564,47 +548,13 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
return MIGRATEPAGE_SUCCESS;
 }
 
-static int cmm_balloon_compaction_init(void)
+static void cmm_balloon_compaction_init(void)
 {
-   int rc;
-
balloon_devinfo_init(&b_dev_info);
b_dev_info.migratepage = cmm_migratepage;
-
-   balloon_mnt = kern_mount(&balloon_fs);
-   if (IS_ERR(balloon_mnt)) {
-   rc = PTR_ERR(balloon_mnt);
-   balloon_mnt = NULL;
-   return rc;
-   }
-
-   b_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
-   if (IS_ERR(b_dev_info.inode)) {
-   rc = PTR_ERR(b_dev_info.inode);
-   b_dev_info.inode = NULL;
-   kern_unmount(balloon_mnt);
-   balloon_mnt = NULL;
-   return rc;
-   }
-
-   b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
-   return 0;
-}
-static void cmm_balloon_compaction_deinit(void)
-{
-   if (b_dev_info.inode)
-   iput(b_dev_info.inode);
-   b_dev_info.inode = NULL;
-   kern_unmount(balloon_mnt);
-   balloon_mnt = NULL;
 }
 #else /* CONFIG_BALLOON_COMPACTION */
-static int cmm_balloon_compaction_init(void)
-{
-   return 0;
-}
-
-static void cmm_balloon_compaction_deinit(void)
+static void cmm_balloon_compaction_init(void)
 {
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
@@ -622,9 +572,7 @@ static int cmm_init(void)
if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
 
-   rc = cmm_balloon_compaction_init();
-   if (rc)
-   return rc;
+   cmm_balloon_compaction_init();
 
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
@@ -658,7 +606,6 @@ static int cmm_init(void)
 out_oom_notifier:
unregister_oom_notifier(&cmm_oom_nb);
 out_balloon_compaction:
-   cmm_balloon_compaction_deinit();
return rc;
 }
 
@@ -677,7 +624,6 @@ static void cmm_exit(void)
unregister_memory_notifier(&cmm_mem_nb);
cmm_free_pages(atomic_long_read(&loaned_pages));
cmm_unregister_sysfs(&cmm_dev);
-   cmm_balloon_compaction_deinit();
 }
 
 /**
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 086ce77d9074..85dd6aa33df6 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -29,8 +29,6 @@
 #include 
 #include 
 #include 
-#include 
-#include 
 #include 
 #include 
 #include 
@@ -1730,20 +1728,6 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
 
 
 #ifdef CONFIG_BALLOON_COMPACTION
-
-static int vmballoon_init_fs_context(struct fs_context *fc)
-{
-   return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type vmballoon_fs = {
-   .name   = "balloon-vmware

[f2fs-dev] [PATCH v2 08/19] btrfs: Convert btree_migratepage to migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.  migrate_page() will be converted
later.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 fs/btrfs/disk-io.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 12b11e645c14..9ceb73f683af 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -952,28 +952,28 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
 }
 
 #ifdef CONFIG_MIGRATION
-static int btree_migratepage(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode)
+static int btree_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
/*
 * we can't safely write a btree page from here,
 * we haven't done the locking hook
 */
-   if (PageDirty(page))
+   if (folio_test_dirty(src))
return -EAGAIN;
/*
 * Buffers may be managed in a filesystem specific way.
 * We must have no buffers or drop them.
 */
-   if (page_has_private(page) &&
-   !try_to_release_page(page, GFP_KERNEL))
+   if (folio_get_private(src) &&
+   !filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN;
-   return migrate_page(mapping, newpage, page, mode);
+   return migrate_page(mapping, &dst->page, &src->page, mode);
 }
+#else
+#define btree_migrate_folio NULL
 #endif
 
-
 static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
 {
@@ -1073,10 +1073,8 @@ static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
.release_folio  = btree_release_folio,
.invalidate_folio = btree_invalidate_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= btree_migratepage,
-#endif
-   .dirty_folio = btree_dirty_folio,
+   .migrate_folio  = btree_migrate_folio,
+   .dirty_folio= btree_dirty_folio,
 };
 
 struct extent_buffer *btrfs_find_create_tree_block(
-- 
2.35.1





[f2fs-dev] [PATCH v2 07/19] mm/migrate: Convert expected_page_refs() to folio_expected_refs()

2022-06-08 Thread Matthew Wilcox (Oracle)
Now that both callers have a folio, convert this function to
take a folio & rename it.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 mm/migrate.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 2975f0c4d7cf..2e2f41572066 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -336,13 +336,18 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 }
 #endif
 
-static int expected_page_refs(struct address_space *mapping, struct page *page)
+static int folio_expected_refs(struct address_space *mapping,
+   struct folio *folio)
 {
-   int expected_count = 1;
+   int refs = 1;
+   if (!mapping)
+   return refs;
 
-   if (mapping)
-   expected_count += compound_nr(page) + page_has_private(page);
-   return expected_count;
+   refs += folio_nr_pages(folio);
+   if (folio_get_private(folio))
+   refs++;
+
+   return refs;
 }
 
 /*
@@ -359,7 +364,7 @@ int folio_migrate_mapping(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
struct zone *oldzone, *newzone;
int dirty;
-   int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
+   int expected_count = folio_expected_refs(mapping, folio) + extra_count;
long nr = folio_nr_pages(folio);
 
if (!mapping) {
@@ -669,7 +674,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
return migrate_page(mapping, &dst->page, &src->page, mode);
 
/* Check whether page does not have extra refs before we do more work */
-   expected_count = expected_page_refs(mapping, &src->page);
+   expected_count = folio_expected_refs(mapping, src);
if (folio_ref_count(src) != expected_count)
return -EAGAIN;
 
-- 
2.35.1





[f2fs-dev] [PATCH v2 13/19] ubifs: Convert to filemap_migrate_folio()

2022-06-08 Thread Matthew Wilcox (Oracle)
filemap_migrate_folio() is a little more general than ubifs really needs,
but it's better to share the code.
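
For reference, filemap_migrate_folio() (added in patch 11/19, which is not
included in this digest) boils down to roughly the steps the removed ubifs
code did by hand; the following is a sketch of the assumed behaviour, not a
quote of the helper:

	int filemap_migrate_folio(struct address_space *mapping,
			struct folio *dst, struct folio *src, enum migrate_mode mode)
	{
		int ret = folio_migrate_mapping(mapping, dst, src, 0);

		if (ret != MIGRATEPAGE_SUCCESS)
			return ret;

		/* carry the filesystem's private data across */
		if (folio_get_private(src))
			folio_attach_private(dst, folio_detach_private(src));

		if (mode != MIGRATE_SYNC_NO_COPY)
			folio_migrate_copy(dst, src);
		else
			folio_migrate_flags(dst, src);
		return MIGRATEPAGE_SUCCESS;
	}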

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/ubifs/file.c | 29 ++---------------------------
 1 file changed, 2 insertions(+), 27 deletions(-)

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 04ced154960f..f2353dd676ef 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1461,29 +1461,6 @@ static bool ubifs_dirty_folio(struct address_space *mapping,
return ret;
 }
 
-#ifdef CONFIG_MIGRATION
-static int ubifs_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-   int rc;
-
-   rc = migrate_page_move_mapping(mapping, newpage, page, 0);
-   if (rc != MIGRATEPAGE_SUCCESS)
-   return rc;
-
-   if (PagePrivate(page)) {
-   detach_page_private(page);
-   attach_page_private(newpage, (void *)1);
-   }
-
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
-   else
-   migrate_page_states(newpage, page);
-   return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
 {
struct inode *inode = folio->mapping->host;
@@ -1649,10 +1626,8 @@ const struct address_space_operations ubifs_file_address_operations = {
.write_end  = ubifs_write_end,
.invalidate_folio = ubifs_invalidate_folio,
.dirty_folio= ubifs_dirty_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= ubifs_migrate_page,
-#endif
-   .release_folio= ubifs_release_folio,
+   .migrate_folio  = filemap_migrate_folio,
+   .release_folio  = ubifs_release_folio,
 };
 
 const struct inode_operations ubifs_file_inode_operations = {
-- 
2.35.1





[f2fs-dev] [PATCH v2 04/19] mm/migrate: Convert fallback_migrate_page() to fallback_migrate_folio()

2022-06-08 Thread Matthew Wilcox (Oracle)
Use a folio throughout.  migrate_page() will be converted to
migrate_folio() later.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 mm/migrate.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index e064b998ead0..1878de817a01 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -805,11 +805,11 @@ static int writeout(struct address_space *mapping, struct page *page)
 /*
  * Default handling if a filesystem does not provide a migration function.
  */
-static int fallback_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page, enum migrate_mode mode)
+static int fallback_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-   if (PageDirty(page)) {
-   /* Only writeback pages in full synchronous migration */
+   if (folio_test_dirty(src)) {
+   /* Only writeback folios in full synchronous migration */
switch (mode) {
case MIGRATE_SYNC:
case MIGRATE_SYNC_NO_COPY:
@@ -817,18 +817,18 @@ static int fallback_migrate_page(struct address_space *mapping,
default:
return -EBUSY;
}
-   return writeout(mapping, page);
+   return writeout(mapping, &src->page);
}
 
/*
 * Buffers may be managed in a filesystem specific way.
 * We must have no buffers or drop them.
 */
-   if (page_has_private(page) &&
-   !try_to_release_page(page, GFP_KERNEL))
+   if (folio_test_private(src) &&
+   !filemap_release_folio(src, GFP_KERNEL))
return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
-   return migrate_page(mapping, newpage, page, mode);
+   return migrate_page(mapping, &dst->page, &src->page, mode);
 }
 
 /*
@@ -870,8 +870,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
rc = mapping->a_ops->migratepage(mapping, &dst->page,
&src->page, mode);
else
-   rc = fallback_migrate_page(mapping, &dst->page,
-   &src->page, mode);
+   rc = fallback_migrate_folio(mapping, dst, src, mode);
} else {
const struct movable_operations *mops;
 
-- 
2.35.1





[f2fs-dev] [PATCH v2 10/19] mm/migrate: Convert migrate_page() to migrate_folio()

2022-06-08 Thread Matthew Wilcox (Oracle)
Convert all callers to pass a folio.  Most have the folio
already available.  Switch all users from aops->migratepage to
aops->migrate_folio.  Also turn the documentation into kerneldoc.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c |  4 +--
 fs/btrfs/disk-io.c  |  2 +-
 fs/nfs/write.c  |  2 +-
 include/linux/migrate.h |  5 ++-
 mm/migrate.c| 37 +++--
 mm/migrate_device.c |  3 +-
 mm/shmem.c  |  2 +-
 mm/swap_state.c |  2 +-
 8 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b4ce33..8423df021b71 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 * However...!
 *
 * The mmu-notifier can be invalidated for a
-* migrate_page, that is alreadying holding the lock
-* on the page. Such a try_to_unmap() will result
+* migrate_folio, that is alreadying holding the lock
+* on the folio. Such a try_to_unmap() will result
 * in us calling put_pages() and so recursively try
 * to lock the page. We avoid that deadlock with
 * a trylock_page() and in exchange we risk missing
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9ceb73f683af..8e5f1fa1e972 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -968,7 +968,7 @@ static int btree_migrate_folio(struct address_space *mapping,
if (folio_get_private(src) &&
!filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN;
-   return migrate_page(mapping, &dst->page, &src->page, mode);
+   return migrate_folio(mapping, dst, src, mode);
 }
 #else
 #define btree_migrate_folio NULL
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 649b9e633459..69569696dde0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2139,7 +2139,7 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
folio_wait_fscache(src);
}
 
-   return migrate_page(mapping, &dst->page, &src->page, mode);
+   return migrate_folio(mapping, dst, src, mode);
 }
 #endif
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 48aa4be04108..82f00ad69a54 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -32,9 +32,8 @@ extern const char *migrate_reason_names[MR_TYPES];
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
diff --git a/mm/migrate.c b/mm/migrate.c
index 2e2f41572066..785e32d0cf1b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -593,34 +593,37 @@ EXPORT_SYMBOL(folio_migrate_copy);
  *Migration functions
  ***/
 
-/*
- * Common logic to directly migrate a single LRU page suitable for
- * pages that do not use PagePrivate/PagePrivate2.
+/**
+ * migrate_folio() - Simple folio migration.
+ * @mapping: The address_space containing the folio.
+ * @dst: The folio to migrate the data to.
+ * @src: The folio containing the current data.
+ * @mode: How to migrate the page.
  *
- * Pages are locked upon entry and exit.
+ * Common logic to directly migrate a single LRU folio suitable for
+ * folios that do not use PagePrivate/PagePrivate2.
+ *
+ * Folios are locked upon entry and exit.
  */
-int migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode)
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode)
 {
-   struct folio *newfolio = page_folio(newpage);
-   struct folio *folio = page_folio(page);
int rc;
 
-   BUG_ON(folio_test_writeback(folio));/* Writeback must be complete */
+   BUG_ON(folio_test_writeback(src));  /* Writeback must be complete */
 
-   rc = folio_migrate_mappin

[f2fs-dev] [PATCH v2 17/19] secretmem: Convert to migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
This is little more than changing the types over; there's no real work
being done in this function.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/secretmem.c | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/secretmem.c b/mm/secretmem.c
index 1c7f1775b56e..658a7486efa9 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -133,9 +133,8 @@ static const struct file_operations secretmem_fops = {
.mmap   = secretmem_mmap,
 };
 
-static int secretmem_migratepage(struct address_space *mapping,
-struct page *newpage, struct page *page,
-enum migrate_mode mode)
+static int secretmem_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
return -EBUSY;
 }
@@ -149,7 +148,7 @@ static void secretmem_free_folio(struct folio *folio)
 const struct address_space_operations secretmem_aops = {
.dirty_folio= noop_dirty_folio,
.free_folio = secretmem_free_folio,
-   .migratepage= secretmem_migratepage,
+   .migrate_folio  = secretmem_migrate_folio,
 };
 
 static int secretmem_setattr(struct user_namespace *mnt_userns,
-- 
2.35.1





[f2fs-dev] [PATCH v2 15/19] aio: Convert to migrate_folio

2022-06-08 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 fs/aio.c | 36 ++--
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 3c249b938632..a1911e86859c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-   struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode)
 {
struct kioctx *ctx;
unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, 
struct page *new,
goto out;
}
 
-   idx = old->index;
+   idx = src->index;
if (idx < (pgoff_t)ctx->nr_pages) {
-   /* Make sure the old page hasn't already been changed */
-   if (ctx->ring_pages[idx] != old)
+   /* Make sure the old folio hasn't already been changed */
+   if (ctx->ring_pages[idx] != &src->page)
rc = -EAGAIN;
} else
rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, 
struct page *new,
goto out_unlock;
 
/* Writeback must be complete */
-   BUG_ON(PageWriteback(old));
-   get_page(new);
+   BUG_ON(folio_test_writeback(src));
+   folio_get(dst);
 
-   rc = migrate_page_move_mapping(mapping, new, old, 1);
+   rc = folio_migrate_mapping(mapping, dst, src, 1);
if (rc != MIGRATEPAGE_SUCCESS) {
-   put_page(new);
+   folio_put(dst);
goto out_unlock;
}
 
/* Take completion_lock to prevent other writes to the ring buffer
-* while the old page is copied to the new.  This prevents new
+* while the old folio is copied to the new.  This prevents new
 * events from being lost.
 */
spin_lock_irqsave(&ctx->completion_lock, flags);
-   migrate_page_copy(new, old);
-   BUG_ON(ctx->ring_pages[idx] != old);
-   ctx->ring_pages[idx] = new;
+   folio_migrate_copy(dst, src);
+   BUG_ON(ctx->ring_pages[idx] != &src->page);
+   ctx->ring_pages[idx] = &dst->page;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-   /* The old page is no longer accessible. */
-   put_page(old);
+   /* The old folio is no longer accessible. */
+   folio_put(src);
 
 out_unlock:
mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ static int aio_migratepage(struct address_space *mapping, 
struct page *new,
spin_unlock(&mapping->private_lock);
return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
.dirty_folio= noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-   .migratepage= aio_migratepage,
-#endif
+   .migrate_folio  = aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
-- 
2.35.1





[f2fs-dev] [PATCH v2 11/19] mm/migrate: Add filemap_migrate_folio()

2022-06-08 Thread Matthew Wilcox (Oracle)
There is nothing iomap-specific about iomap_migrate_page(), and it fits
a pattern used by several other filesystems, so move it to mm/migrate.c,
convert it to be filemap_migrate_folio() and convert the iomap filesystems
to use it.
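
The shared helper is essentially the removed iomap code below with the
page_folio() lookups dropped; a sketch of what lands in mm/migrate.c (the
hunk adding it is cut off in this archive):

	int filemap_migrate_folio(struct address_space *mapping,
			struct folio *dst, struct folio *src, enum migrate_mode mode)
	{
		int ret;

		ret = folio_migrate_mapping(mapping, dst, src, 0);
		if (ret != MIGRATEPAGE_SUCCESS)
			return ret;

		if (folio_test_private(src))
			folio_attach_private(dst, folio_detach_private(src));

		if (mode != MIGRATE_SYNC_NO_COPY)
			folio_migrate_copy(dst, src);
		else
			folio_migrate_flags(dst, src);
		return MIGRATEPAGE_SUCCESS;
	}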

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 fs/gfs2/aops.c  |  2 +-
 fs/iomap/buffered-io.c  | 25 -
 fs/xfs/xfs_aops.c   |  2 +-
 fs/zonefs/super.c   |  2 +-
 include/linux/iomap.h   |  6 --
 include/linux/pagemap.h |  6 ++
 mm/migrate.c| 20 
 7 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 106e90a36583..57ff883d432c 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -774,7 +774,7 @@ static const struct address_space_operations gfs2_aops = {
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
.direct_IO = noop_direct_IO,
-   .migratepage = iomap_migrate_page,
+   .migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 66278a14bfa7..5a91aa1db945 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -489,31 +489,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t 
offset, size_t len)
 }
 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
 
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode)
-{
-   struct folio *folio = page_folio(page);
-   struct folio *newfolio = page_folio(newpage);
-   int ret;
-
-   ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
-   if (ret != MIGRATEPAGE_SUCCESS)
-   return ret;
-
-   if (folio_test_private(folio))
-   folio_attach_private(newfolio, folio_detach_private(folio));
-
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   folio_migrate_copy(newfolio, folio);
-   else
-   folio_migrate_flags(newfolio, folio);
-   return MIGRATEPAGE_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
-
 static void
 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 {
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8ec38b25187b..5d1a995b15f8 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -570,7 +570,7 @@ const struct address_space_operations 
xfs_address_space_operations = {
.invalidate_folio   = iomap_invalidate_folio,
.bmap   = xfs_vm_bmap,
.direct_IO  = noop_direct_IO,
-   .migratepage= iomap_migrate_page,
+   .migrate_folio  = filemap_migrate_folio,
.is_partially_uptodate  = iomap_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.swap_activate  = xfs_iomap_swapfile_activate,
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index bcb21aea990a..d4c3f28f34ee 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -237,7 +237,7 @@ static const struct address_space_operations 
zonefs_file_aops = {
.dirty_folio= filemap_dirty_folio,
.release_folio  = iomap_release_folio,
.invalidate_folio   = iomap_invalidate_folio,
-   .migratepage= iomap_migrate_page,
+   .migrate_folio  = filemap_migrate_folio,
.is_partially_uptodate  = iomap_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.direct_IO  = noop_direct_IO,
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e552097c67e0..758a1125e72f 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -231,12 +231,6 @@ void iomap_readahead(struct readahead_control *, const 
struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1caccb9f99aa..2a67c0ad7348 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1078,6 +1078,12 @@ static inline int __must_check write_one_page(struct 
page *page)
 int __set_page_dirty_nobuffers(struct page *page);
 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio

[f2fs-dev] [PATCH v2 01/19] secretmem: Remove isolate_page

2022-06-08 Thread Matthew Wilcox (Oracle)
The isolate_page operation is never called for filesystems, only
for device drivers which call SetPageMovable.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/secretmem.c | 6 --
 1 file changed, 6 deletions(-)

diff --git a/mm/secretmem.c b/mm/secretmem.c
index 206ed6b40c1d..1c7f1775b56e 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -133,11 +133,6 @@ static const struct file_operations secretmem_fops = {
.mmap   = secretmem_mmap,
 };
 
-static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
-{
-   return false;
-}
-
 static int secretmem_migratepage(struct address_space *mapping,
 struct page *newpage, struct page *page,
 enum migrate_mode mode)
@@ -155,7 +150,6 @@ const struct address_space_operations secretmem_aops = {
.dirty_folio= noop_dirty_folio,
.free_folio = secretmem_free_folio,
.migratepage= secretmem_migratepage,
-   .isolate_page   = secretmem_isolate_page,
 };
 
 static int secretmem_setattr(struct user_namespace *mnt_userns,
-- 
2.35.1





[f2fs-dev] [PATCH v2 18/19] fs: Remove aops->migratepage()

2022-06-08 Thread Matthew Wilcox (Oracle)
With all users converted to migrate_folio(), remove this operation.

Signed-off-by: Matthew Wilcox (Oracle) 
Reviewed-by: Christoph Hellwig 
---
 include/linux/fs.h | 2 --
 mm/compaction.c| 5 ++---
 mm/migrate.c   | 3 ---
 3 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9e6b17da4e11..7e06919b8f60 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -367,8 +367,6 @@ struct address_space_operations {
 */
int (*migrate_folio)(struct address_space *, struct folio *dst,
struct folio *src, enum migrate_mode);
-   int (*migratepage) (struct address_space *,
-   struct page *, struct page *, enum migrate_mode);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
diff --git a/mm/compaction.c b/mm/compaction.c
index 458f49f9ab09..a2c53fcf933e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1031,7 +1031,7 @@ isolate_migratepages_block(struct compact_control *cc, 
unsigned long low_pfn,
 
/*
 * Only pages without mappings or that have a
-* ->migratepage callback are possible to migrate
+* ->migrate_folio callback are possible to migrate
 * without blocking. However, we can be racing with
 * truncation so it's necessary to lock the page
 * to stabilise the mapping as truncation holds
@@ -1043,8 +1043,7 @@ isolate_migratepages_block(struct compact_control *cc, 
unsigned long low_pfn,
 
mapping = page_mapping(page);
migrate_dirty = !mapping ||
-   mapping->a_ops->migrate_folio ||
-   mapping->a_ops->migratepage;
+   mapping->a_ops->migrate_folio;
unlock_page(page);
if (!migrate_dirty)
goto isolate_fail_put;
diff --git a/mm/migrate.c b/mm/migrate.c
index bed0de86f3ae..767e41800d15 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -909,9 +909,6 @@ static int move_to_new_folio(struct folio *dst, struct 
folio *src,
 */
rc = mapping->a_ops->migrate_folio(mapping, dst, src,
mode);
-   else if (mapping->a_ops->migratepage)
-   rc = mapping->a_ops->migratepage(mapping, &dst->page,
-   &src->page, mode);
else
rc = fallback_migrate_folio(mapping, dst, src, mode);
} else {
-- 
2.35.1





[f2fs-dev] [PATCH 07/20] nfs: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.  migrate_page() will be converted
later.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/nfs/file.c |  4 +---
 fs/nfs/internal.h |  6 --
 fs/nfs/write.c| 16 
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2d72b1b7ed74..549baed76351 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -533,9 +533,7 @@ const struct address_space_operations nfs_file_aops = {
.write_end = nfs_write_end,
.invalidate_folio = nfs_invalidate_folio,
.release_folio = nfs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage = nfs_migrate_page,
-#endif
+   .migrate_folio = nfs_migrate_folio,
.launder_folio = nfs_launder_folio,
.is_dirty_writeback = nfs_check_dirty_writeback,
.error_remove_page = generic_error_remove_page,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8f8cd6e2d4db..437ebe544aaf 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -578,8 +578,10 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct 
pnfs_ds_commit_info *cinfo)
 #endif
 
 #ifdef CONFIG_MIGRATION
-extern int nfs_migrate_page(struct address_space *,
-   struct page *, struct page *, enum migrate_mode);
+int nfs_migrate_folio(struct address_space *, struct folio *dst,
+   struct folio *src, enum migrate_mode);
+#else
+#define nfs_migrate_folio NULL
 #endif
 
 static inline int
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1c706465d090..649b9e633459 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2119,27 +2119,27 @@ int nfs_wb_page(struct inode *inode, struct page *page)
 }
 
 #ifdef CONFIG_MIGRATION
-int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode)
+int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode)
 {
/*
-* If PagePrivate is set, then the page is currently associated with
+* If the private flag is set, the folio is currently associated with
 * an in-progress read or write request. Don't try to migrate it.
 *
 * FIXME: we could do this in principle, but we'll need a way to ensure
 *that we can safely release the inode reference while holding
-*the page lock.
+*the folio lock.
 */
-   if (PagePrivate(page))
+   if (folio_test_private(src))
return -EBUSY;
 
-   if (PageFsCache(page)) {
+   if (folio_test_fscache(src)) {
if (mode == MIGRATE_ASYNC)
return -EBUSY;
-   wait_on_page_fscache(page);
+   folio_wait_fscache(src);
}
 
-   return migrate_page(mapping, newpage, page, mode);
+   return migrate_page(mapping, &dst->page, &src->page, mode);
 }
 #endif
 
-- 
2.35.1





[f2fs-dev] [PATCH 00/20] Convert aops->migratepage to aops->migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
I plan to submit these patches through my pagecache tree in the upcoming
merge window.  I'm pretty happy that most filesystems are now using
common code for ->migrate_folio; it's not something that most filesystem
people want to care about.  I'm running xfstests using xfs against it now,
but it's little more than compile tested for other filesystems.
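
For most filesystems the end result is a one-line aops change to one of the
shared helpers; an illustrative hunk (example_migrate_page is a placeholder
name, not a real function):

	-	.migratepage	= example_migrate_page,
	+	.migrate_folio	= filemap_migrate_folio,

Filesystems with no private data can point straight at migrate_folio(), and
buffer-head based filesystems use buffer_migrate_folio().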

Matthew Wilcox (Oracle) (20):
  fs: Add aops->migrate_folio
  mm/migrate: Convert fallback_migrate_page() to
fallback_migrate_folio()
  mm/migrate: Convert writeout() to take a folio
  mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()
  mm/migrate: Convert expected_page_refs() to folio_expected_refs()
  btrfs: Convert btree_migratepage to migrate_folio
  nfs: Convert to migrate_folio
  mm/migrate: Convert migrate_page() to migrate_folio()
  mm/migrate: Add filemap_migrate_folio()
  btrfs: Convert btrfs_migratepage to migrate_folio
  ubifs: Convert to filemap_migrate_folio()
  f2fs: Convert to filemap_migrate_folio()
  aio: Convert to migrate_folio
  hugetlb: Convert to migrate_folio
  balloon: Convert to migrate_folio
  secretmem: Convert to migrate_folio
  z3fold: Convert to migrate_folio
  zsmalloc: Convert to migrate_folio
  fs: Remove aops->migratepage()
  mm/folio-compat: Remove migration compatibility functions

 Documentation/filesystems/locking.rst   |   5 +-
 Documentation/filesystems/vfs.rst   |  13 +-
 Documentation/vm/page_migration.rst |  33 +--
 block/fops.c|   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c |   4 +-
 fs/aio.c|  36 ++--
 fs/btrfs/disk-io.c  |  22 +-
 fs/btrfs/inode.c|  26 +--
 fs/ext2/inode.c |   4 +-
 fs/ext4/inode.c |   4 +-
 fs/f2fs/checkpoint.c|   4 +-
 fs/f2fs/data.c  |  40 +---
 fs/f2fs/f2fs.h  |   4 -
 fs/f2fs/node.c  |   4 +-
 fs/gfs2/aops.c  |   2 +-
 fs/hugetlbfs/inode.c|  19 +-
 fs/iomap/buffered-io.c  |  25 ---
 fs/nfs/file.c   |   4 +-
 fs/nfs/internal.h   |   6 +-
 fs/nfs/write.c  |  16 +-
 fs/ntfs/aops.c  |   6 +-
 fs/ocfs2/aops.c |   2 +-
 fs/ubifs/file.c |  29 +--
 fs/xfs/xfs_aops.c   |   2 +-
 fs/zonefs/super.c   |   2 +-
 include/linux/buffer_head.h |  10 +
 include/linux/fs.h  |  18 +-
 include/linux/iomap.h   |   6 -
 include/linux/migrate.h |  22 +-
 include/linux/pagemap.h |   6 +
 mm/balloon_compaction.c |  15 +-
 mm/compaction.c |   5 +-
 mm/folio-compat.c   |  22 --
 mm/ksm.c|   2 +-
 mm/migrate.c| 217 
 mm/migrate_device.c |   3 +-
 mm/secretmem.c  |   6 +-
 mm/shmem.c  |   2 +-
 mm/swap_state.c |   2 +-
 mm/z3fold.c |   8 +-
 mm/zsmalloc.c   |   8 +-
 41 files changed, 287 insertions(+), 379 deletions(-)

-- 
2.35.1





[f2fs-dev] [PATCH 08/20] mm/migrate: Convert migrate_page() to migrate_folio()

2022-06-06 Thread Matthew Wilcox (Oracle)
Convert all callers to pass a folio.  Most have the folio
already available.  Switch all users from aops->migratepage to
aops->migrate_folio.  Also turn the documentation into kerneldoc.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c |  4 +--
 fs/btrfs/disk-io.c  |  2 +-
 fs/nfs/write.c  |  2 +-
 include/linux/migrate.h |  5 ++-
 mm/migrate.c| 37 +++--
 mm/migrate_device.c |  3 +-
 mm/shmem.c  |  2 +-
 mm/swap_state.c |  2 +-
 8 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c 
b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b4ce33..8423df021b71 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 * However...!
 *
 * The mmu-notifier can be invalidated for a
-* migrate_page, that is alreadying holding the lock
-* on the page. Such a try_to_unmap() will result
+* migrate_folio, that is already holding the lock
+* on the folio. Such a try_to_unmap() will result
 * in us calling put_pages() and so recursively try
 * to lock the page. We avoid that deadlock with
 * a trylock_page() and in exchange we risk missing
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9ceb73f683af..8e5f1fa1e972 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -968,7 +968,7 @@ static int btree_migrate_folio(struct address_space 
*mapping,
if (folio_get_private(src) &&
!filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN;
-   return migrate_page(mapping, &dst->page, &src->page, mode);
+   return migrate_folio(mapping, dst, src, mode);
 }
 #else
 #define btree_migrate_folio NULL
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 649b9e633459..69569696dde0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2139,7 +2139,7 @@ int nfs_migrate_folio(struct address_space *mapping, 
struct folio *dst,
folio_wait_fscache(src);
}
 
-   return migrate_page(mapping, &dst->page, &src->page, mode);
+   return migrate_folio(mapping, dst, src, mode);
 }
 #endif
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 069a89e847f3..4ef22806cd8e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -25,9 +25,8 @@ extern const char *migrate_reason_names[MR_TYPES];
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
diff --git a/mm/migrate.c b/mm/migrate.c
index e0a593e5b5f9..6232c291fdb9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -594,34 +594,37 @@ EXPORT_SYMBOL(folio_migrate_copy);
  *Migration functions
  ***/
 
-/*
- * Common logic to directly migrate a single LRU page suitable for
- * pages that do not use PagePrivate/PagePrivate2.
+/**
+ * migrate_folio() - Simple folio migration.
+ * @mapping: The address_space containing the folio.
+ * @dst: The folio to migrate the data to.
+ * @src: The folio containing the current data.
+ * @mode: How to migrate the page.
  *
- * Pages are locked upon entry and exit.
+ * Common logic to directly migrate a single LRU folio suitable for
+ * folios that do not use PagePrivate/PagePrivate2.
+ *
+ * Folios are locked upon entry and exit.
  */
-int migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode)
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode)
 {
-   struct folio *newfolio = page_folio(newpage);
-   struct folio *folio = page_folio(page);
int rc;
 
-   BUG_ON(folio_test_writeback(folio));/* Writeback must be complete */
+   BUG_ON(folio_test_writeback(src));  /* Writeback must be complete */
 
-   rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
+

[f2fs-dev] [PATCH 20/20] mm/folio-compat: Remove migration compatibility functions

2022-06-06 Thread Matthew Wilcox (Oracle)
migrate_page_move_mapping(), migrate_page_copy() and migrate_page_states()
are all now unused after converting all the filesystems from
aops->migratepage() to aops->migrate_folio().
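
Any remaining caller (e.g. out of tree) switches to the folio calls that the
wrappers below simply forwarded to:

	migrate_page_move_mapping(mapping, newpage, page, extra_count)
		-> folio_migrate_mapping(mapping, page_folio(newpage),
					 page_folio(page), extra_count);
	migrate_page_states(newpage, page)
		-> folio_migrate_flags(page_folio(newpage), page_folio(page));
	migrate_page_copy(newpage, page)
		-> folio_migrate_copy(page_folio(newpage), page_folio(page));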

Signed-off-by: Matthew Wilcox (Oracle) 
---
 include/linux/migrate.h | 11 ---
 mm/folio-compat.c   | 22 --
 mm/ksm.c|  2 +-
 3 files changed, 1 insertion(+), 34 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 088749471485..4670f3aec232 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -33,12 +33,8 @@ extern int migrate_pages(struct list_head *l, new_page_t 
new, free_page_t free,
 extern struct page *alloc_migration_target(struct page *page, unsigned long 
private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 
-extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
-extern int migrate_page_move_mapping(struct address_space *mapping,
-   struct page *newpage, struct page *page, int extra_count);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
spinlock_t *ptl);
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
@@ -59,13 +55,6 @@ static inline struct page *alloc_migration_target(struct 
page *page,
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return -EBUSY; }
 
-static inline void migrate_page_states(struct page *newpage, struct page *page)
-{
-}
-
-static inline void migrate_page_copy(struct page *newpage,
-struct page *page) {}
-
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
  struct folio *dst, struct folio *src)
 {
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 20bc15b57d93..458618c7302c 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -51,28 +51,6 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-#ifdef CONFIG_MIGRATION
-int migrate_page_move_mapping(struct address_space *mapping,
-   struct page *newpage, struct page *page, int extra_count)
-{
-   return folio_migrate_mapping(mapping, page_folio(newpage),
-   page_folio(page), extra_count);
-}
-EXPORT_SYMBOL(migrate_page_move_mapping);
-
-void migrate_page_states(struct page *newpage, struct page *page)
-{
-   folio_migrate_flags(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_states);
-
-void migrate_page_copy(struct page *newpage, struct page *page)
-{
-   folio_migrate_copy(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_copy);
-#endif
-
 bool set_page_writeback(struct page *page)
 {
return folio_start_writeback(page_folio(page));
diff --git a/mm/ksm.c b/mm/ksm.c
index 54f78c9eecae..e8f8c1a2bb39 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static struct page *get_ksm_page(struct stable_node 
*stable_node,
 * however, it might mean that the page is under page_ref_freeze().
 * The __remove_mapping() case is easy, again the node is now stale;
 * the same is in reuse_ksm_page() case; but if page is swapcache
-* in migrate_page_move_mapping(), it might still be our page,
+* in folio_migrate_mapping(), it might still be our page,
 * in which case it's essential to keep the node.
 */
while (!get_page_unless_zero(page)) {
-- 
2.35.1





[f2fs-dev] [PATCH 11/20] ubifs: Convert to filemap_migrate_folio()

2022-06-06 Thread Matthew Wilcox (Oracle)
filemap_migrate_folio() is a little more general than ubifs really needs,
but it's better to share the code.
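ubifs only ever stores (void *)1 as private data, so letting the shared
helper transfer the old folio's private value is equivalent to the removed
code re-attaching (void *)1 by hand.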

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/ubifs/file.c | 29 ++---
 1 file changed, 2 insertions(+), 27 deletions(-)

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 04ced154960f..f2353dd676ef 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1461,29 +1461,6 @@ static bool ubifs_dirty_folio(struct address_space 
*mapping,
return ret;
 }
 
-#ifdef CONFIG_MIGRATION
-static int ubifs_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-   int rc;
-
-   rc = migrate_page_move_mapping(mapping, newpage, page, 0);
-   if (rc != MIGRATEPAGE_SUCCESS)
-   return rc;
-
-   if (PagePrivate(page)) {
-   detach_page_private(page);
-   attach_page_private(newpage, (void *)1);
-   }
-
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
-   else
-   migrate_page_states(newpage, page);
-   return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
 {
struct inode *inode = folio->mapping->host;
@@ -1649,10 +1626,8 @@ const struct address_space_operations 
ubifs_file_address_operations = {
.write_end  = ubifs_write_end,
.invalidate_folio = ubifs_invalidate_folio,
.dirty_folio= ubifs_dirty_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= ubifs_migrate_page,
-#endif
-   .release_folio= ubifs_release_folio,
+   .migrate_folio  = filemap_migrate_folio,
+   .release_folio  = ubifs_release_folio,
 };
 
 const struct inode_operations ubifs_file_inode_operations = {
-- 
2.35.1





[f2fs-dev] [PATCH 12/20] f2fs: Convert to filemap_migrate_folio()

2022-06-06 Thread Matthew Wilcox (Oracle)
filemap_migrate_folio() fits f2fs's needs perfectly.
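The open-coded private-data juggling removed below (set_page_private(),
SetPagePrivate(), get_page()/put_page()) is what folio_attach_private() and
folio_detach_private() do inside the shared helper, and extra_count was
always zero, so there should be no change in behaviour.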

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/f2fs/checkpoint.c |  4 +---
 fs/f2fs/data.c   | 40 +---
 fs/f2fs/f2fs.h   |  4 
 fs/f2fs/node.c   |  4 +---
 4 files changed, 3 insertions(+), 49 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6d8b2bf14de0..8259e0fa97e1 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -463,9 +463,7 @@ const struct address_space_operations f2fs_meta_aops = {
.dirty_folio= f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio  = f2fs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= f2fs_migrate_page,
-#endif
+   .migrate_folio  = filemap_migrate_folio,
 };
 
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7fcbcf979737..318a3f91ad74 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3751,42 +3751,6 @@ static sector_t f2fs_bmap(struct address_space *mapping, 
sector_t block)
return blknr;
 }
 
-#ifdef CONFIG_MIGRATION
-#include 
-
-int f2fs_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-   int rc, extra_count = 0;
-
-   BUG_ON(PageWriteback(page));
-
-   rc = migrate_page_move_mapping(mapping, newpage,
-   page, extra_count);
-   if (rc != MIGRATEPAGE_SUCCESS)
-   return rc;
-
-   /* guarantee to start from no stale private field */
-   set_page_private(newpage, 0);
-   if (PagePrivate(page)) {
-   set_page_private(newpage, page_private(page));
-   SetPagePrivate(newpage);
-   get_page(newpage);
-
-   set_page_private(page, 0);
-   ClearPagePrivate(page);
-   put_page(page);
-   }
-
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
-   else
-   migrate_page_states(newpage, page);
-
-   return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 #ifdef CONFIG_SWAP
 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
unsigned int blkcnt)
@@ -4018,15 +3982,13 @@ const struct address_space_operations f2fs_dblock_aops 
= {
.write_begin= f2fs_write_begin,
.write_end  = f2fs_write_end,
.dirty_folio= f2fs_dirty_data_folio,
+   .migrate_folio  = filemap_migrate_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio  = f2fs_release_folio,
.direct_IO  = noop_direct_IO,
.bmap   = f2fs_bmap,
.swap_activate  = f2fs_swap_activate,
.swap_deactivate = f2fs_swap_deactivate,
-#ifdef CONFIG_MIGRATION
-   .migratepage= f2fs_migrate_page,
-#endif
 };
 
 void f2fs_clear_page_cache_dirty_tag(struct page *page)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d9bbecd008d2..f258a1b6faed 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3764,10 +3764,6 @@ int f2fs_write_single_data_page(struct page *page, int 
*submitted,
 void f2fs_write_failed(struct inode *inode, loff_t to);
 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
-#ifdef CONFIG_MIGRATION
-int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode);
-#endif
 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
 void f2fs_clear_page_cache_dirty_tag(struct page *page);
 int f2fs_init_post_read_processing(void);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 836c79a20afc..ed1cbfb0345f 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2163,9 +2163,7 @@ const struct address_space_operations f2fs_node_aops = {
.dirty_folio= f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio,
.release_folio  = f2fs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= f2fs_migrate_page,
-#endif
+   .migrate_folio  = filemap_migrate_folio,
 };
 
 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
-- 
2.35.1





[f2fs-dev] [PATCH 05/20] mm/migrate: Convert expected_page_refs() to folio_expected_refs()

2022-06-06 Thread Matthew Wilcox (Oracle)
Now that both callers have a folio, convert this function to
take a folio & rename it.
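As a worked example of the renamed helper: a 16-page folio in the page cache
with private data attached is expected to hold 1 + 16 + 1 = 18 references,
while a folio with no mapping is expected to hold just one.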

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/migrate.c | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 77b8c662c9ca..e0a593e5b5f9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -337,13 +337,18 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t 
*pmd)
 }
 #endif
 
-static int expected_page_refs(struct address_space *mapping, struct page *page)
+static int folio_expected_refs(struct address_space *mapping,
+   struct folio *folio)
 {
-   int expected_count = 1;
+   int refs = 1;
+   if (!mapping)
+   return refs;
 
-   if (mapping)
-   expected_count += compound_nr(page) + page_has_private(page);
-   return expected_count;
+   refs += folio_nr_pages(folio);
+   if (folio_get_private(folio))
+   refs++;
+
+   return refs;
 }
 
 /*
@@ -360,7 +365,7 @@ int folio_migrate_mapping(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
struct zone *oldzone, *newzone;
int dirty;
-   int expected_count = expected_page_refs(mapping, &folio->page) + 
extra_count;
+   int expected_count = folio_expected_refs(mapping, folio) + extra_count;
long nr = folio_nr_pages(folio);
 
if (!mapping) {
@@ -670,7 +675,7 @@ static int __buffer_migrate_folio(struct address_space 
*mapping,
return migrate_page(mapping, &dst->page, &src->page, mode);
 
/* Check whether page does not have extra refs before we do more work */
-   expected_count = expected_page_refs(mapping, &src->page);
+   expected_count = folio_expected_refs(mapping, src);
if (folio_ref_count(src) != expected_count)
return -EAGAIN;
 
-- 
2.35.1





[f2fs-dev] [PATCH 03/20] mm/migrate: Convert writeout() to take a folio

2022-06-06 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/migrate.c | 21 ++---
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index d772ce63d7e2..f19246c12fe9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -762,11 +762,10 @@ int buffer_migrate_page_norefs(struct address_space 
*mapping,
 #endif
 
 /*
- * Writeback a page to clean the dirty state
+ * Writeback a folio to clean the dirty state
  */
-static int writeout(struct address_space *mapping, struct page *page)
+static int writeout(struct address_space *mapping, struct folio *folio)
 {
-   struct folio *folio = page_folio(page);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
@@ -780,25 +779,25 @@ static int writeout(struct address_space *mapping, struct 
page *page)
/* No write method for the address space */
return -EINVAL;
 
-   if (!clear_page_dirty_for_io(page))
+   if (!folio_clear_dirty_for_io(folio))
/* Someone else already triggered a write */
return -EAGAIN;
 
/*
-* A dirty page may imply that the underlying filesystem has
-* the page on some queue. So the page must be clean for
-* migration. Writeout may mean we loose the lock and the
-* page state is no longer what we checked for earlier.
+* A dirty folio may imply that the underlying filesystem has
+* the folio on some queue. So the folio must be clean for
+* migration. Writeout may mean we lose the lock and the
+* folio state is no longer what we checked for earlier.
 * At this point we know that the migration attempt cannot
 * be successful.
 */
remove_migration_ptes(folio, folio, false);
 
-   rc = mapping->a_ops->writepage(page, &wbc);
+   rc = mapping->a_ops->writepage(&folio->page, &wbc);
 
if (rc != AOP_WRITEPAGE_ACTIVATE)
/* unlocked. Relock */
-   lock_page(page);
+   folio_lock(folio);
 
return (rc < 0) ? -EIO : -EAGAIN;
 }
@@ -818,7 +817,7 @@ static int fallback_migrate_folio(struct address_space 
*mapping,
default:
return -EBUSY;
}
-   return writeout(mapping, &src->page);
+   return writeout(mapping, src);
}
 
/*
-- 
2.35.1





[f2fs-dev] [PATCH 16/20] secretmem: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
This is little more than changing the types over; there's no real work
being done in this function.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/secretmem.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/secretmem.c b/mm/secretmem.c
index 206ed6b40c1d..9c7f6e3bf3e1 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -138,8 +138,8 @@ static bool secretmem_isolate_page(struct page *page, 
isolate_mode_t mode)
return false;
 }
 
-static int secretmem_migratepage(struct address_space *mapping,
-struct page *newpage, struct page *page,
+static int secretmem_migrate_folio(struct address_space *mapping,
+struct folio *dst, struct folio *src,
 enum migrate_mode mode)
 {
return -EBUSY;
@@ -154,7 +154,7 @@ static void secretmem_free_folio(struct folio *folio)
 const struct address_space_operations secretmem_aops = {
.dirty_folio= noop_dirty_folio,
.free_folio = secretmem_free_folio,
-   .migratepage= secretmem_migratepage,
+   .migrate_folio  = secretmem_migrate_folio,
.isolate_page   = secretmem_isolate_page,
 };
 
-- 
2.35.1





[f2fs-dev] [PATCH 14/20] hugetlb: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
This involves converting migrate_huge_page_move_mapping().  We also need a
folio variant of hugetlb_set_page_subpool(), but that's for a later patch.
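
A hypothetical sketch of what that later folio variant could look like
(hugetlb_set_folio_subpool() is not part of this series; the name and shape
are assumptions):

	/* Hypothetical wrapper; this series keeps calling
	 * hugetlb_set_page_subpool(&folio->page, ...) directly for now.
	 */
	static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
	{
		hugetlb_set_page_subpool(&folio->page, subpool);
	}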

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/hugetlbfs/inode.c| 19 ++-
 include/linux/migrate.h |  6 +++---
 mm/migrate.c| 18 +-
 3 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 14d33f725e05..583ca3f52c04 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -954,25 +954,26 @@ static int hugetlbfs_symlink(struct user_namespace 
*mnt_userns,
return error;
 }
 
-static int hugetlbfs_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page,
+static int hugetlbfs_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src,
enum migrate_mode mode)
 {
int rc;
 
-   rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+   rc = migrate_huge_page_move_mapping(mapping, dst, src);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
 
-   if (hugetlb_page_subpool(page)) {
-   hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
-   hugetlb_set_page_subpool(page, NULL);
+   if (hugetlb_page_subpool(&src->page)) {
+   hugetlb_set_page_subpool(&dst->page,
+   hugetlb_page_subpool(&src->page));
+   hugetlb_set_page_subpool(&src->page, NULL);
}
 
if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
+   folio_migrate_copy(dst, src);
else
-   migrate_page_states(newpage, page);
+   folio_migrate_flags(dst, src);
 
return MIGRATEPAGE_SUCCESS;
 }
@@ -1142,7 +1143,7 @@ static const struct address_space_operations 
hugetlbfs_aops = {
.write_begin= hugetlbfs_write_begin,
.write_end  = hugetlbfs_write_end,
.dirty_folio= noop_dirty_folio,
-   .migratepage= hugetlbfs_migrate_page,
+   .migrate_folio  = hugetlbfs_migrate_folio,
.error_remove_page  = hugetlbfs_error_remove_page,
 };
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4ef22806cd8e..088749471485 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -35,8 +35,8 @@ extern int isolate_movable_page(struct page *page, 
isolate_mode_t mode);
 
 extern void migrate_page_states(struct page *newpage, struct page *page);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+   struct folio *dst, struct folio *src);
 extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
@@ -67,7 +67,7 @@ static inline void migrate_page_copy(struct page *newpage,
 struct page *page) {}
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct folio *dst, struct folio *src)
 {
return -ENOSYS;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 148dd0463dec..a8edd226c72d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -475,26 +475,26 @@ EXPORT_SYMBOL(folio_migrate_mapping);
  * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
-  struct page *newpage, struct page *page)
+  struct folio *dst, struct folio *src)
 {
-   XA_STATE(xas, &mapping->i_pages, page_index(page));
+   XA_STATE(xas, &mapping->i_pages, folio_index(src));
int expected_count;
 
xas_lock_irq(&xas);
-   expected_count = 2 + page_has_private(page);
-   if (!page_ref_freeze(page, expected_count)) {
+   expected_count = 2 + folio_has_private(src);
+   if (!folio_ref_freeze(src, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;
}
 
-   newpage->index = page->index;
-   newpage->mapping = page->mapping;
+   dst->index = src->index;
+   dst->mapping = src->mapping;
 
-   get_page(newpage);
+   folio_get(dst);
 
-   xas_store(&xas, newpage);
+   xas_store(&xas, dst);
 
-   page_ref_unfreeze(page, expected_count - 1);
+   folio_ref_unfreeze(src, expected_count - 1);
 
xas_unlock_irq(

[f2fs-dev] [PATCH 09/20] mm/migrate: Add filemap_migrate_folio()

2022-06-06 Thread Matthew Wilcox (Oracle)
There is nothing iomap-specific about iomap_migrate_page(), and it fits
a pattern used by several other filesystems, so move it to mm/migrate.c,
convert it to be filemap_migrate_folio() and convert the iomap filesystems
to use it.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/gfs2/aops.c  |  2 +-
 fs/iomap/buffered-io.c  | 25 -
 fs/xfs/xfs_aops.c   |  2 +-
 fs/zonefs/super.c   |  2 +-
 include/linux/iomap.h   |  6 --
 include/linux/pagemap.h |  6 ++
 mm/migrate.c| 20 
 7 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 106e90a36583..57ff883d432c 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -774,7 +774,7 @@ static const struct address_space_operations gfs2_aops = {
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
.direct_IO = noop_direct_IO,
-   .migratepage = iomap_migrate_page,
+   .migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 66278a14bfa7..5a91aa1db945 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -489,31 +489,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t 
offset, size_t len)
 }
 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
 
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode)
-{
-   struct folio *folio = page_folio(page);
-   struct folio *newfolio = page_folio(newpage);
-   int ret;
-
-   ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
-   if (ret != MIGRATEPAGE_SUCCESS)
-   return ret;
-
-   if (folio_test_private(folio))
-   folio_attach_private(newfolio, folio_detach_private(folio));
-
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   folio_migrate_copy(newfolio, folio);
-   else
-   folio_migrate_flags(newfolio, folio);
-   return MIGRATEPAGE_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
-
 static void
 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 {
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8ec38b25187b..5d1a995b15f8 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -570,7 +570,7 @@ const struct address_space_operations 
xfs_address_space_operations = {
.invalidate_folio   = iomap_invalidate_folio,
.bmap   = xfs_vm_bmap,
.direct_IO  = noop_direct_IO,
-   .migratepage= iomap_migrate_page,
+   .migrate_folio  = filemap_migrate_folio,
.is_partially_uptodate  = iomap_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.swap_activate  = xfs_iomap_swapfile_activate,
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index bcb21aea990a..d4c3f28f34ee 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -237,7 +237,7 @@ static const struct address_space_operations 
zonefs_file_aops = {
.dirty_folio= filemap_dirty_folio,
.release_folio  = iomap_release_folio,
.invalidate_folio   = iomap_invalidate_folio,
-   .migratepage= iomap_migrate_page,
+   .migrate_folio  = filemap_migrate_folio,
.is_partially_uptodate  = iomap_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.direct_IO  = noop_direct_IO,
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e552097c67e0..758a1125e72f 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -231,12 +231,6 @@ void iomap_readahead(struct readahead_control *, const 
struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1caccb9f99aa..2a67c0ad7348 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1078,6 +1078,12 @@ static inline int __must_check write_one_page(struct 
page *page)
 int __set_page_dirty_nobuffers(struct page *page);
 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
 
+#ifdef CONFIG_MIGRATION
+int

[f2fs-dev] [PATCH 01/20] fs: Add aops->migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
Provide a folio-based replacement for aops->migratepage.  Update the
documentation to document migrate_folio instead of migratepage.
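
The contract carried over from ->migratepage (restated in the documentation
hunks below, which are partly truncated in this archive): both folios arrive
locked, the implementation returns MIGRATEPAGE_SUCCESS once the data and any
private state have been moved to dst, returns -EAGAIN for a temporary
failure the VM should retry, and any other error makes the VM give up on
migrating that folio.  The new operation's prototype:

	int (*migrate_folio)(struct address_space *mapping, struct folio *dst,
			struct folio *src, enum migrate_mode mode);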

Signed-off-by: Matthew Wilcox (Oracle) 
---
 Documentation/filesystems/locking.rst |  5 ++--
 Documentation/filesystems/vfs.rst | 13 ++-
 Documentation/vm/page_migration.rst   | 33 ++-
 include/linux/fs.h|  4 +++-
 mm/compaction.c   |  4 +++-
 mm/migrate.c  | 19 ++-
 6 files changed, 46 insertions(+), 32 deletions(-)

diff --git a/Documentation/filesystems/locking.rst 
b/Documentation/filesystems/locking.rst
index c0fe711f14d3..3d28b23676bd 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -253,7 +253,8 @@ prototypes::
void (*free_folio)(struct folio *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
bool (*isolate_page) (struct page *, isolate_mode_t);
-   int (*migratepage)(struct address_space *, struct page *, struct page 
*);
+   int (*migrate_folio)(struct address_space *, struct folio *dst,
+   struct folio *src, enum migrate_mode);
void (*putback_page) (struct page *);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t 
count);
@@ -281,7 +282,7 @@ release_folio:  yes
 free_folio:yes
 direct_IO:
 isolate_page:  yes
-migratepage:   yes (both)
+migrate_folio: yes (both)
 putback_page:  yes
 launder_folio: yes
 is_partially_uptodate: yes
diff --git a/Documentation/filesystems/vfs.rst 
b/Documentation/filesystems/vfs.rst
index a08c652467d7..3ae1b039b03f 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -740,7 +740,8 @@ cache in your filesystem.  The following members are 
defined:
/* isolate a page for migration */
bool (*isolate_page) (struct page *, isolate_mode_t);
/* migrate the contents of a page to the specified target */
-   int (*migratepage) (struct page *, struct page *);
+   int (*migrate_folio)(struct mapping *, struct folio *dst,
+   struct folio *src, enum migrate_mode);
/* put migration-failed page back to right list */
void (*putback_page) (struct page *);
int (*launder_folio) (struct folio *);
@@ -935,12 +936,12 @@ cache in your filesystem.  The following members are 
defined:
is successfully isolated, VM marks the page as PG_isolated via
__SetPageIsolated.
 
-``migrate_page``
+``migrate_folio``
This is used to compact the physical memory usage.  If the VM
-   wants to relocate a page (maybe off a memory card that is
-   signalling imminent failure) it will pass a new page and an old
-   page to this function.  migrate_page should transfer any private
-   data across and update any references that it has to the page.
+   wants to relocate a folio (maybe from a memory device that is
+   signalling imminent failure) it will pass a new folio and an old
+   folio to this function.  migrate_folio should transfer any private
+   data across and update any references that it has to the folio.
 
 ``putback_page``
Called by the VM when isolated page's migration fails.
diff --git a/Documentation/vm/page_migration.rst 
b/Documentation/vm/page_migration.rst
index 8c5cb8147e55..e0f73ddfabb1 100644
--- a/Documentation/vm/page_migration.rst
+++ b/Documentation/vm/page_migration.rst
@@ -181,22 +181,23 @@ which are function pointers of struct 
address_space_operations.
Once page is successfully isolated, VM uses page.lru fields so driver
shouldn't expect to preserve values in those fields.
 
-2. ``int (*migratepage) (struct address_space *mapping,``
-|  ``struct page *newpage, struct page *oldpage, enum migrate_mode);``
-
-   After isolation, VM calls migratepage() of driver with the isolated page.
-   The function of migratepage() is to move the contents of the old page to the
-   new page
-   and set up fields of struct page newpage. Keep in mind that you should
-   indicate to the VM the oldpage is no longer movable via __ClearPageMovable()
-   under page_lock if you migrated the oldpage successfully and returned
-   MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver
-   can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time
-   because VM interprets -EAGAIN as "temporary migration failure". On returning
-   any error except -EAGAIN, VM will give up the page migration without
-   retrying.
-
-   Driver shouldn't touch the page.lru field while in the migratepage() 
function.
+2. ``int (*migrate_folio) (struct address_space *mapping,``
+|  ``struct folio *dst, struct f

[f2fs-dev] [PATCH 06/20] btrfs: Convert btree_migratepage to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.  migrate_page() will be converted
later.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/btrfs/disk-io.c | 22 ++
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 12b11e645c14..9ceb73f683af 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -952,28 +952,28 @@ void btrfs_submit_metadata_bio(struct inode *inode, 
struct bio *bio, int mirror_
 }
 
 #ifdef CONFIG_MIGRATION
-static int btree_migratepage(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode)
+static int btree_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
/*
 * we can't safely write a btree page from here,
 * we haven't done the locking hook
 */
-   if (PageDirty(page))
+   if (folio_test_dirty(src))
return -EAGAIN;
/*
 * Buffers may be managed in a filesystem specific way.
 * We must have no buffers or drop them.
 */
-   if (page_has_private(page) &&
-   !try_to_release_page(page, GFP_KERNEL))
+   if (folio_get_private(src) &&
+   !filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN;
-   return migrate_page(mapping, newpage, page, mode);
+   return migrate_page(mapping, &dst->page, &src->page, mode);
 }
+#else
+#define btree_migrate_folio NULL
 #endif
 
-
 static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
 {
@@ -1073,10 +1073,8 @@ static const struct address_space_operations btree_aops 
= {
.writepages = btree_writepages,
.release_folio  = btree_release_folio,
.invalidate_folio = btree_invalidate_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= btree_migratepage,
-#endif
-   .dirty_folio = btree_dirty_folio,
+   .migrate_folio  = btree_migrate_folio,
+   .dirty_folio= btree_dirty_folio,
 };
 
 struct extent_buffer *btrfs_find_create_tree_block(
-- 
2.35.1





[f2fs-dev] [PATCH 13/20] aio: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/aio.c | 36 ++--
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 3c249b938632..a1911e86859c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-   struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+   struct folio *src, enum migrate_mode mode)
 {
struct kioctx *ctx;
unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, 
struct page *new,
goto out;
}
 
-   idx = old->index;
+   idx = src->index;
if (idx < (pgoff_t)ctx->nr_pages) {
-   /* Make sure the old page hasn't already been changed */
-   if (ctx->ring_pages[idx] != old)
+   /* Make sure the old folio hasn't already been changed */
+   if (ctx->ring_pages[idx] != &src->page)
rc = -EAGAIN;
} else
rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, 
struct page *new,
goto out_unlock;
 
/* Writeback must be complete */
-   BUG_ON(PageWriteback(old));
-   get_page(new);
+   BUG_ON(folio_test_writeback(src));
+   folio_get(dst);
 
-   rc = migrate_page_move_mapping(mapping, new, old, 1);
+   rc = folio_migrate_mapping(mapping, dst, src, 1);
if (rc != MIGRATEPAGE_SUCCESS) {
-   put_page(new);
+   folio_put(dst);
goto out_unlock;
}
 
/* Take completion_lock to prevent other writes to the ring buffer
-* while the old page is copied to the new.  This prevents new
+* while the old folio is copied to the new.  This prevents new
 * events from being lost.
 */
spin_lock_irqsave(&ctx->completion_lock, flags);
-   migrate_page_copy(new, old);
-   BUG_ON(ctx->ring_pages[idx] != old);
-   ctx->ring_pages[idx] = new;
+   folio_migrate_copy(dst, src);
+   BUG_ON(ctx->ring_pages[idx] != &src->page);
+   ctx->ring_pages[idx] = &dst->page;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-   /* The old page is no longer accessible. */
-   put_page(old);
+   /* The old folio is no longer accessible. */
+   folio_put(src);
 
 out_unlock:
mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ static int aio_migratepage(struct address_space *mapping, 
struct page *new,
spin_unlock(&mapping->private_lock);
return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
.dirty_folio= noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-   .migratepage= aio_migratepage,
-#endif
+   .migrate_folio  = aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
-- 
2.35.1





[f2fs-dev] [PATCH 15/20] balloon: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
This is little more than changing the types over; there's no real work
being done in this function.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/balloon_compaction.c | 15 +++
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 4b8eab4b3f45..3f75b876ad76 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -230,11 +230,10 @@ static void balloon_page_putback(struct page *page)
 
 
 /* move_to_new_page() counterpart for a ballooned page */
-static int balloon_page_migrate(struct address_space *mapping,
-   struct page *newpage, struct page *page,
-   enum migrate_mode mode)
+static int balloon_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-   struct balloon_dev_info *balloon = balloon_page_device(page);
+   struct balloon_dev_info *balloon = balloon_page_device(&src->page);
 
/*
 * We can not easily support the no copy case here so ignore it as it
@@ -244,14 +243,14 @@ static int balloon_page_migrate(struct address_space 
*mapping,
if (mode == MIGRATE_SYNC_NO_COPY)
return -EINVAL;
 
-   VM_BUG_ON_PAGE(!PageLocked(page), page);
-   VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+   VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
+   VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 
-   return balloon->migratepage(balloon, newpage, page, mode);
+   return balloon->migratepage(balloon, &dst->page, &src->page, mode);
 }
 
 const struct address_space_operations balloon_aops = {
-   .migratepage = balloon_page_migrate,
+   .migrate_folio = balloon_migrate_folio,
.isolate_page = balloon_page_isolate,
.putback_page = balloon_page_putback,
 };
-- 
2.35.1





[f2fs-dev] [PATCH 19/20] fs: Remove aops->migratepage()

2022-06-06 Thread Matthew Wilcox (Oracle)
With all users converted to migrate_folio(), remove this operation.
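
For reference, every conversion in this series ends up with the same shape.
A minimal sketch, assembled from the btrfs and aio patches elsewhere in this
thread (the foo_* names are placeholders, not code from this patch):

#ifdef CONFIG_MIGRATION
static int foo_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/* filesystem-specific checks go here, then hand off to a helper */
	return filemap_migrate_folio(mapping, dst, src, mode);
}
#else
/* keeps the address_space_operations initialiser free of #ifdef */
#define foo_migrate_folio NULL
#endif

static const struct address_space_operations foo_aops = {
	.migrate_folio	= foo_migrate_folio,
};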

Signed-off-by: Matthew Wilcox (Oracle) 
---
 include/linux/fs.h |  2 --
 mm/compaction.c|  5 ++---
 mm/migrate.c   | 10 +-
 3 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5737c92ed286..95347cc035ae 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -367,8 +367,6 @@ struct address_space_operations {
 */
int (*migrate_folio)(struct address_space *, struct folio *dst,
struct folio *src, enum migrate_mode);
-   int (*migratepage) (struct address_space *,
-   struct page *, struct page *, enum migrate_mode);
bool (*isolate_page)(struct page *, isolate_mode_t);
void (*putback_page)(struct page *);
int (*launder_folio)(struct folio *);
diff --git a/mm/compaction.c b/mm/compaction.c
index db34b459e5d9..f0dc62159c0e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1034,7 +1034,7 @@ isolate_migratepages_block(struct compact_control *cc, 
unsigned long low_pfn,
 
/*
 * Only pages without mappings or that have a
-* ->migratepage callback are possible to migrate
+* ->migrate_folio callback are possible to migrate
 * without blocking. However, we can be racing with
 * truncation so it's necessary to lock the page
 * to stabilise the mapping as truncation holds
@@ -1046,8 +1046,7 @@ isolate_migratepages_block(struct compact_control *cc, 
unsigned long low_pfn,
 
mapping = page_mapping(page);
migrate_dirty = !mapping ||
-   mapping->a_ops->migrate_folio ||
-   mapping->a_ops->migratepage;
+   mapping->a_ops->migrate_folio;
unlock_page(page);
if (!migrate_dirty)
goto isolate_fail_put;
diff --git a/mm/migrate.c b/mm/migrate.c
index a8edd226c72d..c5560430dce4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -911,9 +911,6 @@ static int move_to_new_folio(struct folio *dst, struct 
folio *src,
 */
rc = mapping->a_ops->migrate_folio(mapping, dst, src,
mode);
-   else if (mapping->a_ops->migratepage)
-   rc = mapping->a_ops->migratepage(mapping, &dst->page,
-   &src->page, mode);
else
rc = fallback_migrate_folio(mapping, dst, src, mode);
} else {
@@ -928,12 +925,7 @@ static int move_to_new_folio(struct folio *dst, struct 
folio *src,
goto out;
}
 
-   if (mapping->a_ops->migrate_folio)
-   rc = mapping->a_ops->migrate_folio(mapping, dst, src,
-   mode);
-   else
-   rc = mapping->a_ops->migratepage(mapping, &dst->page,
-   &src->page, mode);
+   rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);
WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
!folio_test_isolated(src));
}
-- 
2.35.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 10/20] btrfs: Convert btrfs_migratepage to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
Use filemap_migrate_folio() to do the bulk of the work, and then copy
the ordered flag across if needed.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/btrfs/inode.c | 26 +-
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 81737eff92f3..5f41d869c648 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8255,30 +8255,24 @@ static bool btrfs_release_folio(struct folio *folio, 
gfp_t gfp_flags)
 }
 
 #ifdef CONFIG_MIGRATION
-static int btrfs_migratepage(struct address_space *mapping,
-struct page *newpage, struct page *page,
+static int btrfs_migrate_folio(struct address_space *mapping,
+struct folio *dst, struct folio *src,
 enum migrate_mode mode)
 {
-   int ret;
+   int ret = filemap_migrate_folio(mapping, dst, src, mode);
 
-   ret = migrate_page_move_mapping(mapping, newpage, page, 0);
if (ret != MIGRATEPAGE_SUCCESS)
return ret;
 
-   if (page_has_private(page))
-   attach_page_private(newpage, detach_page_private(page));
-
-   if (PageOrdered(page)) {
-   ClearPageOrdered(page);
-   SetPageOrdered(newpage);
+   if (folio_test_ordered(src)) {
+   folio_clear_ordered(src);
+   folio_set_ordered(dst);
}
 
-   if (mode != MIGRATE_SYNC_NO_COPY)
-   migrate_page_copy(newpage, page);
-   else
-   migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define btrfs_migrate_folio NULL
 #endif
 
 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
@@ -11422,9 +11416,7 @@ static const struct address_space_operations btrfs_aops 
= {
.direct_IO  = noop_direct_IO,
.invalidate_folio = btrfs_invalidate_folio,
.release_folio  = btrfs_release_folio,
-#ifdef CONFIG_MIGRATION
-   .migratepage= btrfs_migratepage,
-#endif
+   .migrate_folio  = btrfs_migrate_folio,
.dirty_folio= filemap_dirty_folio,
.error_remove_page = generic_error_remove_page,
.swap_activate  = btrfs_swap_activate,
-- 
2.35.1





[f2fs-dev] [PATCH 18/20] zsmalloc: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
zsmalloc doesn't really use folios, but it needs to be called like this
in order to migrate an individual page.  Convert from a folio back to
a page until we decide how to handle migration better for zsmalloc.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/zsmalloc.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5d5fc04385b8..8ed79121195a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1865,9 +1865,11 @@ static bool zs_page_isolate(struct page *page, 
isolate_mode_t mode)
return true;
 }
 
-static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
-   struct page *page, enum migrate_mode mode)
+static int zs_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
+   struct page *newpage = &dst->page;
+   struct page *page = &src->page;
struct zs_pool *pool;
struct size_class *class;
struct zspage *zspage;
@@ -1966,7 +1968,7 @@ static void zs_page_putback(struct page *page)
 
 static const struct address_space_operations zsmalloc_aops = {
.isolate_page = zs_page_isolate,
-   .migratepage = zs_page_migrate,
+   .migrate_folio = zs_migrate_folio,
.putback_page = zs_page_putback,
 };
 
-- 
2.35.1





[f2fs-dev] [PATCH 17/20] z3fold: Convert to migrate_folio

2022-06-06 Thread Matthew Wilcox (Oracle)
z3fold doesn't really use folios, but it needs to be called like this
in order to migrate an individual page.  Convert from a folio back to
a page until we decide how to handle migration better for z3fold.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/z3fold.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index f41f8b0d9e9a..5d091c41fb35 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -1554,9 +1554,11 @@ static bool z3fold_page_isolate(struct page *page, 
isolate_mode_t mode)
return false;
 }
 
-static int z3fold_page_migrate(struct address_space *mapping, struct page 
*newpage,
-  struct page *page, enum migrate_mode mode)
+static int z3fold_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
+   struct page *newpage = &dst->page;
+   struct page *page = &src->page;
struct z3fold_header *zhdr, *new_zhdr;
struct z3fold_pool *pool;
struct address_space *new_mapping;
@@ -1644,7 +1646,7 @@ static void z3fold_page_putback(struct page *page)
 
 static const struct address_space_operations z3fold_aops = {
.isolate_page = z3fold_page_isolate,
-   .migratepage = z3fold_page_migrate,
+   .migrate_folio = z3fold_migrate_folio,
.putback_page = z3fold_page_putback,
 };
 
-- 
2.35.1





[f2fs-dev] [PATCH 04/20] mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()

2022-06-06 Thread Matthew Wilcox (Oracle)
Use a folio throughout __buffer_migrate_folio(), add kernel-doc for
buffer_migrate_folio() and buffer_migrate_folio_norefs(), move their
declarations to buffer.h and switch all filesystems that have wired
them up.
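
As a rough summary of the two helpers (the authoritative text is the
kernel-doc this patch adds to mm/migrate.c, which is not quoted in this
excerpt):

/*
 * buffer_migrate_folio()        - migrate a folio together with its
 *                                 buffer_heads; suitable for filesystems
 *                                 that only touch their buffers while
 *                                 holding the folio lock.
 * buffer_migrate_folio_norefs() - as above, but the migration fails if
 *                                 any buffer_head has an elevated
 *                                 reference count, which block device
 *                                 mappings need because their buffers
 *                                 can be looked up and used without the
 *                                 folio being locked.
 */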

Signed-off-by: Matthew Wilcox (Oracle) 
---
 block/fops.c|  2 +-
 fs/ext2/inode.c |  4 +-
 fs/ext4/inode.c |  4 +-
 fs/ntfs/aops.c  |  6 +--
 fs/ocfs2/aops.c |  2 +-
 include/linux/buffer_head.h | 10 +
 include/linux/fs.h  | 12 --
 mm/migrate.c| 76 ++---
 8 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/block/fops.c b/block/fops.c
index d6b3276a6c68..743fc46d0aad 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -417,7 +417,7 @@ const struct address_space_operations def_blk_aops = {
.write_end  = blkdev_write_end,
.writepages = blkdev_writepages,
.direct_IO  = blkdev_direct_IO,
-   .migratepage= buffer_migrate_page_norefs,
+   .migrate_folio  = buffer_migrate_folio_norefs,
.is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 360ce3604a2d..84570c6265aa 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -973,7 +973,7 @@ const struct address_space_operations ext2_aops = {
.bmap   = ext2_bmap,
.direct_IO  = ext2_direct_IO,
.writepages = ext2_writepages,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
 };
@@ -989,7 +989,7 @@ const struct address_space_operations ext2_nobh_aops = {
.bmap   = ext2_bmap,
.direct_IO  = ext2_direct_IO,
.writepages = ext2_writepages,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.error_remove_page  = generic_error_remove_page,
 };
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1aaea53e67b5..53877ffe3c41 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3633,7 +3633,7 @@ static const struct address_space_operations ext4_aops = {
.invalidate_folio   = ext4_invalidate_folio,
.release_folio  = ext4_release_folio,
.direct_IO  = noop_direct_IO,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.swap_activate  = ext4_iomap_swap_activate,
@@ -3668,7 +3668,7 @@ static const struct address_space_operations ext4_da_aops 
= {
.invalidate_folio   = ext4_invalidate_folio,
.release_folio  = ext4_release_folio,
.direct_IO  = noop_direct_IO,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page  = generic_error_remove_page,
.swap_activate  = ext4_iomap_swap_activate,
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 9e3964ea2ea0..5f4fb6ca6f2e 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1659,7 +1659,7 @@ const struct address_space_operations ntfs_normal_aops = {
.dirty_folio= block_dirty_folio,
 #endif /* NTFS_RW */
.bmap   = ntfs_bmap,
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
@@ -1673,7 +1673,7 @@ const struct address_space_operations 
ntfs_compressed_aops = {
.writepage  = ntfs_writepage,
.dirty_folio= block_dirty_folio,
 #endif /* NTFS_RW */
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
@@ -1688,7 +1688,7 @@ const struct address_space_operations ntfs_mst_aops = {
.writepage  = ntfs_writepage,   /* Write dirty page to disk. */
.dirty_folio= filemap_dirty_folio,
 #endif /* NTFS_RW */
-   .migratepage= buffer_migrate_page,
+   .migrate_folio  = buffer_migrate_folio,
.is_partially_uptodate  = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
 };
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 767df51f8657..1d489003f99d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2462,7 +2462,7 @@ const struct address_space_operations ocfs2_aops = {
.direct_IO  = ocfs2_direct_IO

[f2fs-dev] [PATCH 02/20] mm/migrate: Convert fallback_migrate_page() to fallback_migrate_folio()

2022-06-06 Thread Matthew Wilcox (Oracle)
Use a folio throughout.  migrate_page() will be converted to
migrate_folio() later.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/migrate.c | 19 +--
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 75cb6aa38988..d772ce63d7e2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -806,11 +806,11 @@ static int writeout(struct address_space *mapping, struct 
page *page)
 /*
  * Default handling if a filesystem does not provide a migration function.
  */
-static int fallback_migrate_page(struct address_space *mapping,
-   struct page *newpage, struct page *page, enum migrate_mode mode)
+static int fallback_migrate_folio(struct address_space *mapping,
+   struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-   if (PageDirty(page)) {
-   /* Only writeback pages in full synchronous migration */
+   if (folio_test_dirty(src)) {
+   /* Only writeback folios in full synchronous migration */
switch (mode) {
case MIGRATE_SYNC:
case MIGRATE_SYNC_NO_COPY:
@@ -818,18 +818,18 @@ static int fallback_migrate_page(struct address_space 
*mapping,
default:
return -EBUSY;
}
-   return writeout(mapping, page);
+   return writeout(mapping, &src->page);
}
 
/*
 * Buffers may be managed in a filesystem specific way.
 * We must have no buffers or drop them.
 */
-   if (page_has_private(page) &&
-   !try_to_release_page(page, GFP_KERNEL))
+   if (folio_test_private(src) &&
+   !filemap_release_folio(src, GFP_KERNEL))
return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
-   return migrate_page(mapping, newpage, page, mode);
+   return migrate_page(mapping, &dst->page, &src->page, mode);
 }
 
 /*
@@ -872,8 +872,7 @@ static int move_to_new_folio(struct folio *dst, struct 
folio *src,
rc = mapping->a_ops->migratepage(mapping, &dst->page,
&src->page, mode);
else
-   rc = fallback_migrate_page(mapping, &dst->page,
-   &src->page, mode);
+   rc = fallback_migrate_folio(mapping, dst, src, mode);
} else {
/*
 * In case of non-lru page, it could be released after
-- 
2.35.1





[f2fs-dev] [PATCH 05/10] f2fs: Convert f2fs_invalidate_compress_pages() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
Convert this function to use folios throughout.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/f2fs/compress.c | 35 +++
 1 file changed, 15 insertions(+), 20 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 24824cd96f36..009e6c519e98 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1832,45 +1832,40 @@ bool f2fs_load_compressed_page(struct f2fs_sb_info 
*sbi, struct page *page,
 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
 {
struct address_space *mapping = sbi->compress_inode->i_mapping;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
pgoff_t index = 0;
pgoff_t end = MAX_BLKADDR(sbi);
 
if (!mapping->nrpages)
return;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
do {
-   unsigned int nr_pages;
-   int i;
+   unsigned int nr, i;
 
-   nr_pages = pagevec_lookup_range(&pvec, mapping,
-   &index, end - 1);
-   if (!nr_pages)
+   nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
+   if (!nr)
break;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
-
-   if (page->index > end)
-   break;
+   for (i = 0; i < nr; i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   lock_page(page);
-   if (page->mapping != mapping) {
-   unlock_page(page);
+   folio_lock(folio);
+   if (folio->mapping != mapping) {
+   folio_unlock(folio);
continue;
}
 
-   if (ino != get_page_private_data(page)) {
-   unlock_page(page);
+   if (ino != get_page_private_data(&folio->page)) {
+   folio_unlock(folio);
continue;
}
 
-   generic_error_remove_page(mapping, page);
-   unlock_page(page);
+   generic_error_remove_page(mapping, &folio->page);
+   folio_unlock(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
} while (index < end);
 }
-- 
2.35.1





[f2fs-dev] [PATCH 04/10] ext4: Convert mpage_map_and_submit_buffers() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
The called functions all use pages, so just convert back to a page.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/ext4/inode.c | 19 +--
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 32a7f5e024d6..1aaea53e67b5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2314,8 +2314,8 @@ static int mpage_process_page(struct mpage_da_data *mpd, 
struct page *page,
  */
 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 {
-   struct pagevec pvec;
-   int nr_pages, i;
+   struct folio_batch fbatch;
+   unsigned nr, i;
struct inode *inode = mpd->inode;
int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
@@ -2329,14 +2329,13 @@ static int mpage_map_and_submit_buffers(struct 
mpage_da_data *mpd)
lblk = start << bpp_bits;
pblock = mpd->map.m_pblk;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
while (start <= end) {
-   nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
-   &start, end);
-   if (nr_pages == 0)
+   nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
+   if (nr == 0)
break;
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr; i++) {
+   struct page *page = &fbatch.folios[i]->page;
 
err = mpage_process_page(mpd, page, &lblk, &pblock,
 &map_bh);
@@ -2352,14 +2351,14 @@ static int mpage_map_and_submit_buffers(struct 
mpage_da_data *mpd)
if (err < 0)
goto out;
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
}
/* Extent fully mapped and matches with page boundary. We are done. */
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
return 0;
 out:
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
return err;
 }
 
-- 
2.35.1





[f2fs-dev] [PATCH 02/10] buffer: Convert clean_bdev_aliases() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
Use a folio throughout this function.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/buffer.c | 26 +-
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 898c7f301b1b..276769d3715a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1604,7 +1604,7 @@ void clean_bdev_aliases(struct block_device *bdev, 
sector_t block, sector_t len)
 {
struct inode *bd_inode = bdev->bd_inode;
struct address_space *bd_mapping = bd_inode->i_mapping;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
pgoff_t end;
int i, count;
@@ -1612,24 +1612,24 @@ void clean_bdev_aliases(struct block_device *bdev, 
sector_t block, sector_t len)
struct buffer_head *head;
 
end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
-   pagevec_init(&pvec);
-   while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
-   count = pagevec_count(&pvec);
+   folio_batch_init(&fbatch);
+   while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
+   count = folio_batch_count(&fbatch);
for (i = 0; i < count; i++) {
-   struct page *page = pvec.pages[i];
+   struct folio *folio = fbatch.folios[i];
 
-   if (!page_has_buffers(page))
+   if (!folio_buffers(folio))
continue;
/*
-* We use page lock instead of bd_mapping->private_lock
+* We use folio lock instead of bd_mapping->private_lock
 * to pin buffers here since we can afford to sleep and
 * it scales better than a global spinlock lock.
 */
-   lock_page(page);
-   /* Recheck when the page is locked which pins bhs */
-   if (!page_has_buffers(page))
+   folio_lock(folio);
+   /* Recheck when the folio is locked which pins bhs */
+   head = folio_buffers(folio);
+   if (!head)
goto unlock_page;
-   head = page_buffers(page);
bh = head;
do {
if (!buffer_mapped(bh) || (bh->b_blocknr < 
block))
@@ -1643,9 +1643,9 @@ void clean_bdev_aliases(struct block_device *bdev, 
sector_t block, sector_t len)
bh = bh->b_this_page;
} while (bh != head);
 unlock_page:
-   unlock_page(page);
+   folio_unlock(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
/* End of range already reached? */
if (index > end || !index)
-- 
2.35.1





[f2fs-dev] [PATCH 10/10] filemap: Remove find_get_pages_range() and associated functions

2022-06-05 Thread Matthew Wilcox (Oracle)
All callers of find_get_pages_range(), pagevec_lookup_range() and
pagevec_lookup() have now been converted to filemap_get_folios(), so
these functions can be removed.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 include/linux/pagemap.h |  3 --
 include/linux/pagevec.h | 10 --
 mm/filemap.c| 67 -
 mm/swap.c   | 29 --
 4 files changed, 109 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 50e57b2d845f..1caccb9f99aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -720,9 +720,6 @@ static inline struct page *find_subpage(struct page *head, 
pgoff_t index)
 
 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch);
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
-   pgoff_t end, unsigned int nr_pages,
-   struct page **pages);
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
   unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t 
*index,
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 67b1246f136b..6649154a2115 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -27,16 +27,6 @@ struct pagevec {
 
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_range(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start, pgoff_t end);
-static inline unsigned pagevec_lookup(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start)
-{
-   return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
-}
-
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
xa_mark_t tag);
diff --git a/mm/filemap.c b/mm/filemap.c
index ea4145b7a84c..340ccb37f6b6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2192,73 +2192,6 @@ bool folio_more_pages(struct folio *folio, pgoff_t 
index, pgoff_t max)
return index < folio->index + folio_nr_pages(folio) - 1;
 }
 
-/**
- * find_get_pages_range - gang pagecache lookup
- * @mapping:   The address_space to search
- * @start: The starting page index
- * @end:   The final page index (inclusive)
- * @nr_pages:  The maximum number of pages
- * @pages: Where the resulting pages are placed
- *
- * find_get_pages_range() will search for and return a group of up to @nr_pages
- * pages in the mapping starting at index @start and up to index @end
- * (inclusive).  The pages are placed at @pages.  find_get_pages_range() takes
- * a reference against the returned pages.
- *
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes.  There may be holes in the indices due to not-present pages.
- * We also update @start to index the next page for the traversal.
- *
- * Return: the number of pages which were found. If this number is
- * smaller than @nr_pages, the end of specified range has been
- * reached.
- */
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
- pgoff_t end, unsigned int nr_pages,
- struct page **pages)
-{
-   XA_STATE(xas, &mapping->i_pages, *start);
-   struct folio *folio;
-   unsigned ret = 0;
-
-   if (unlikely(!nr_pages))
-   return 0;
-
-   rcu_read_lock();
-   while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
-   /* Skip over shadow, swap and DAX entries */
-   if (xa_is_value(folio))
-   continue;
-
-again:
-   pages[ret] = folio_file_page(folio, xas.xa_index);
-   if (++ret == nr_pages) {
-   *start = xas.xa_index + 1;
-   goto out;
-   }
-   if (folio_more_pages(folio, xas.xa_index, end)) {
-   xas.xa_index++;
-   folio_ref_inc(folio);
-   goto again;
-   }
-   }
-
-   /*
-* We come here when there is no page beyond @end. We take care to not
-* overflow the index @start as it confuses some of the callers. This
-* breaks the iteration when there is a page at index -1 but that is
-* already broken anyway.
-*/
-   if (end == (pgoff_t)-1)
-   *start = (pgoff_t)-1;
-   else
-   *start = end + 1;
-out:
-   rcu_read_unlock();
-
-   return ret;
-}
-
 /**
  * find_get_pages_contig - gang contiguous pagecache lookup
  * @mapping:   The address_space to search
diff --git a/mm/swap.c b/mm/swap.c
index f3922a96b2e9..f6

[f2fs-dev] [PATCH 01/10] filemap: Add filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
This is the equivalent of find_get_pages() but fills a folio_batch
instead of an array of pages.
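
A minimal usage sketch, mirroring the call sites converted later in the
series ('mapping' and the per-folio work are placeholders):

	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios(mapping, &index, (pgoff_t)-1,
					&fbatch)) != 0) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* each folio is returned with a reference held */
			folio_lock(folio);
			/* ... per-folio work ... */
			folio_unlock(folio);
		}
		/* drops the references taken by filemap_get_folios() */
		folio_batch_release(&fbatch);
		cond_resched();
	}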

Signed-off-by: Matthew Wilcox (Oracle) 
---
 include/linux/pagemap.h |  2 ++
 mm/filemap.c| 55 +
 2 files changed, 57 insertions(+)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 689ea809..50e57b2d845f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -718,6 +718,8 @@ static inline struct page *find_subpage(struct page *head, 
pgoff_t index)
return head + (index & (thp_nr_pages(head) - 1));
 }
 
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+   pgoff_t end, struct folio_batch *fbatch);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pgoff_t end, unsigned int nr_pages,
struct page **pages);
diff --git a/mm/filemap.c b/mm/filemap.c
index 1e66eea98a7e..ea4145b7a84c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2127,6 +2127,61 @@ unsigned find_lock_entries(struct address_space 
*mapping, pgoff_t start,
return folio_batch_count(fbatch);
 }
 
+/**
+ * filemap_get_folios - Get a batch of folios
+ * @mapping:   The address_space to search
+ * @start: The starting page index
+ * @end:   The final page index (inclusive)
+ * @fbatch:The batch to fill.
+ *
+ * Search for and return a batch of folios in the mapping starting at
+ * index @start and up to index @end (inclusive).  The folios are returned
+ * in @fbatch with an elevated reference count.
+ *
+ * The first folio may start before @start; if it does, it will contain
+ * @start.  The final folio may extend beyond @end; if it does, it will
+ * contain @end.  The folios have ascending indices.  There may be gaps
+ * between the folios if there are indices which have no folio in the
+ * page cache.  If folios are added to or removed from the page cache
+ * while this is running, they may or may not be found by this call.
+ *
+ * Return: The number of folios which were found.
+ * We also update @start to index the next folio for the traversal.
+ */
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+   pgoff_t end, struct folio_batch *fbatch)
+{
+   XA_STATE(xas, &mapping->i_pages, *start);
+   struct folio *folio;
+
+   rcu_read_lock();
+   while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
+   /* Skip over shadow, swap and DAX entries */
+   if (xa_is_value(folio))
+   continue;
+   if (!folio_batch_add(fbatch, folio)) {
+   *start = folio->index + folio_nr_pages(folio);
+   goto out;
+   }
+   }
+
+   /*
+* We come here when there is no page beyond @end. We take care to not
+* overflow the index @start as it confuses some of the callers. This
+* breaks the iteration when there is a page at index -1 but that is
+* already broken anyway.
+*/
+   if (end == (pgoff_t)-1)
+   *start = (pgoff_t)-1;
+   else
+   *start = end + 1;
+out:
+   rcu_read_unlock();
+
+   return folio_batch_count(fbatch);
+}
+EXPORT_SYMBOL(filemap_get_folios);
+
 static inline
 bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
 {
-- 
2.35.1





[f2fs-dev] [PATCH 07/10] nilfs2: Convert nilfs_copy_back_pages() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
Use folios throughout.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/nilfs2/page.c | 60 
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a8e88cc38e16..3267e96c256c 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -294,57 +294,57 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
 void nilfs_copy_back_pages(struct address_space *dmap,
   struct address_space *smap)
 {
-   struct pagevec pvec;
+   struct folio_batch fbatch;
unsigned int i, n;
-   pgoff_t index = 0;
+   pgoff_t start = 0;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 repeat:
-   n = pagevec_lookup(&pvec, smap, &index);
+   n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
if (!n)
return;
 
-   for (i = 0; i < pagevec_count(&pvec); i++) {
-   struct page *page = pvec.pages[i], *dpage;
-   pgoff_t offset = page->index;
-
-   lock_page(page);
-   dpage = find_lock_page(dmap, offset);
-   if (dpage) {
-   /* overwrite existing page in the destination cache */
-   WARN_ON(PageDirty(dpage));
-   nilfs_copy_page(dpage, page, 0);
-   unlock_page(dpage);
-   put_page(dpage);
-   /* Do we not need to remove page from smap here? */
+   for (i = 0; i < folio_batch_count(&fbatch); i++) {
+   struct folio *folio = fbatch.folios[i], *dfolio;
+   pgoff_t index = folio->index;
+
+   folio_lock(folio);
+   dfolio = filemap_lock_folio(dmap, index);
+   if (dfolio) {
+   /* overwrite existing folio in the destination cache */
+   WARN_ON(folio_test_dirty(dfolio));
+   nilfs_copy_page(&dfolio->page, &folio->page, 0);
+   folio_unlock(dfolio);
+   folio_put(dfolio);
+   /* Do we not need to remove folio from smap here? */
} else {
-   struct page *p;
+   struct folio *f;
 
-   /* move the page to the destination cache */
+   /* move the folio to the destination cache */
xa_lock_irq(&smap->i_pages);
-   p = __xa_erase(&smap->i_pages, offset);
-   WARN_ON(page != p);
+   f = __xa_erase(&smap->i_pages, index);
+   WARN_ON(folio != f);
smap->nrpages--;
xa_unlock_irq(&smap->i_pages);
 
xa_lock_irq(&dmap->i_pages);
-   p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
-   if (unlikely(p)) {
+   f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
+   if (unlikely(f)) {
/* Probably -ENOMEM */
-   page->mapping = NULL;
-   put_page(page);
+   folio->mapping = NULL;
+   folio_put(folio);
} else {
-   page->mapping = dmap;
+   folio->mapping = dmap;
dmap->nrpages++;
-   if (PageDirty(page))
-   __xa_set_mark(&dmap->i_pages, offset,
+   if (folio_test_dirty(folio))
+   __xa_set_mark(&dmap->i_pages, index,
PAGECACHE_TAG_DIRTY);
}
xa_unlock_irq(&dmap->i_pages);
}
-   unlock_page(page);
+   folio_unlock(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
 
goto repeat;
-- 
2.35.1





[f2fs-dev] [PATCH 06/10] hugetlbfs: Convert remove_inode_hugepages() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
Use folios throughout this function.  That removes the last caller of
huge_pagevec_release(), so delete that too.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/hugetlbfs/inode.c | 44 ++--
 1 file changed, 14 insertions(+), 30 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ae2524480f23..14d33f725e05 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,16 +108,6 @@ static inline void hugetlb_drop_vma_policy(struct 
vm_area_struct *vma)
 }
 #endif
 
-static void huge_pagevec_release(struct pagevec *pvec)
-{
-   int i;
-
-   for (i = 0; i < pagevec_count(pvec); ++i)
-   put_page(pvec->pages[i]);
-
-   pagevec_reinit(pvec);
-}
-
 /*
  * Mask used when checking the page offset value passed in via system
  * calls.  This value will be converted to a loff_t which is signed.
@@ -480,25 +470,19 @@ static void remove_inode_hugepages(struct inode *inode, 
loff_t lstart,
struct address_space *mapping = &inode->i_data;
const pgoff_t start = lstart >> huge_page_shift(h);
const pgoff_t end = lend >> huge_page_shift(h);
-   struct pagevec pvec;
+   struct folio_batch fbatch;
pgoff_t next, index;
int i, freed = 0;
bool truncate_op = (lend == LLONG_MAX);
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
next = start;
-   while (next < end) {
-   /*
-* When no more pages are found, we are done.
-*/
-   if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
-   break;
-
-   for (i = 0; i < pagevec_count(&pvec); ++i) {
-   struct page *page = pvec.pages[i];
+   while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
+   for (i = 0; i < folio_batch_count(&fbatch); ++i) {
+   struct folio *folio = fbatch.folios[i];
u32 hash = 0;
 
-   index = page->index;
+   index = folio->index;
if (!truncate_op) {
/*
 * Only need to hold the fault mutex in the
@@ -511,15 +495,15 @@ static void remove_inode_hugepages(struct inode *inode, 
loff_t lstart,
}
 
/*
-* If page is mapped, it was faulted in after being
+* If folio is mapped, it was faulted in after being
 * unmapped in caller.  Unmap (again) now after taking
 * the fault mutex.  The mutex will prevent faults
-* until we finish removing the page.
+* until we finish removing the folio.
 *
 * This race can only happen in the hole punch case.
 * Getting here in a truncate operation is a bug.
 */
-   if (unlikely(page_mapped(page))) {
+   if (unlikely(folio_mapped(folio))) {
BUG_ON(truncate_op);
 
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -532,7 +516,7 @@ static void remove_inode_hugepages(struct inode *inode, 
loff_t lstart,
i_mmap_unlock_write(mapping);
}
 
-   lock_page(page);
+   folio_lock(folio);
/*
 * We must free the huge page and remove from page
 * cache (remove_huge_page) BEFORE removing the
@@ -542,8 +526,8 @@ static void remove_inode_hugepages(struct inode *inode, 
loff_t lstart,
 * the subpool and global reserve usage count can need
 * to be adjusted.
 */
-   VM_BUG_ON(HPageRestoreReserve(page));
-   remove_huge_page(page);
+   VM_BUG_ON(HPageRestoreReserve(&folio->page));
+   remove_huge_page(&folio->page);
freed++;
if (!truncate_op) {
if (unlikely(hugetlb_unreserve_pages(inode,
@@ -551,11 +535,11 @@ static void remove_inode_hugepages(struct inode *inode, 
loff_t lstart,
hugetlb_fix_reserve_counts(inode);
}
 
-   unlock_page(page);
+   folio_unlock(folio);
if (!truncate_op)
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
-   huge_pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
 

[f2fs-dev] [PATCH 00/10] Convert to filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
This patch series removes find_get_pages_range(), pagevec_lookup()
and pagevec_lookup_range(), converting all callers to use the new
filemap_get_folios().  I've only run xfstests over ext4 ... some other
testing might be appropriate.
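
The mechanical change in each patch is roughly the following (illustrative,
condensed from the individual diffs):

	/* before */
	struct pagevec pvec;

	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
		/* ... work on pvec.pages[i] ... */
		pagevec_release(&pvec);
	}

	/* after */
	struct folio_batch fbatch;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
		/* ... work on fbatch.folios[i] ... */
		folio_batch_release(&fbatch);
	}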

Matthew Wilcox (Oracle) (10):
  filemap: Add filemap_get_folios()
  buffer: Convert clean_bdev_aliases() to use filemap_get_folios()
  ext4: Convert mpage_release_unused_pages() to use filemap_get_folios()
  ext4: Convert mpage_map_and_submit_buffers() to use
filemap_get_folios()
  f2fs: Convert f2fs_invalidate_compress_pages() to use
filemap_get_folios()
  hugetlbfs: Convert remove_inode_hugepages() to use
filemap_get_folios()
  nilfs2: Convert nilfs_copy_back_pages() to use filemap_get_folios()
  vmscan: Add check_move_unevictable_folios()
  shmem: Convert shmem_unlock_mapping() to use filemap_get_folios()
  filemap: Remove find_get_pages_range() and associated functions

 fs/buffer.c | 26 +++
 fs/ext4/inode.c | 40 ---
 fs/f2fs/compress.c  | 35 +---
 fs/hugetlbfs/inode.c| 44 -
 fs/nilfs2/page.c| 60 +-
 include/linux/pagemap.h |  5 ++-
 include/linux/pagevec.h | 10 --
 include/linux/swap.h|  3 +-
 mm/filemap.c| 72 +
 mm/shmem.c  | 13 
 mm/swap.c   | 29 -
 mm/vmscan.c | 55 ++-
 12 files changed, 166 insertions(+), 226 deletions(-)

-- 
2.35.1





[f2fs-dev] [PATCH 09/10] shmem: Convert shmem_unlock_mapping() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
This is a straightforward conversion.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 mm/shmem.c | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 60fdfc0208fd..313ae7df59d8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -867,18 +867,17 @@ unsigned long shmem_swap_usage(struct vm_area_struct *vma)
  */
 void shmem_unlock_mapping(struct address_space *mapping)
 {
-   struct pagevec pvec;
+   struct folio_batch fbatch;
pgoff_t index = 0;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
/*
 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
 */
-   while (!mapping_unevictable(mapping)) {
-   if (!pagevec_lookup(&pvec, mapping, &index))
-   break;
-   check_move_unevictable_pages(&pvec);
-   pagevec_release(&pvec);
+   while (!mapping_unevictable(mapping) &&
+  filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
+   check_move_unevictable_folios(&fbatch);
+   folio_batch_release(&fbatch);
cond_resched();
}
 }
-- 
2.35.1





[f2fs-dev] [PATCH 08/10] vmscan: Add check_move_unevictable_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
Change the guts of check_move_unevictable_pages() over to use folios,
renaming it to check_move_unevictable_folios(), and keep
check_move_unevictable_pages() as a wrapper that converts its pagevec
into a folio_batch.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 include/linux/swap.h |  3 ++-
 mm/vmscan.c  | 55 ++--
 2 files changed, 35 insertions(+), 23 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 0c0fed1b348f..8672a7123ccd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -438,7 +438,8 @@ static inline bool node_reclaim_enabled(void)
return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
 }
 
-extern void check_move_unevictable_pages(struct pagevec *pvec);
+void check_move_unevictable_folios(struct folio_batch *fbatch);
+void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern void kswapd_run(int nid);
 extern void kswapd_stop(int nid);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f7d9a683e3a7..5222c5ad600a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4790,45 +4790,56 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t 
gfp_mask, unsigned int order)
 }
 #endif
 
+void check_move_unevictable_pages(struct pagevec *pvec)
+{
+   struct folio_batch fbatch;
+   unsigned i;
+
+   for (i = 0; i < pvec->nr; i++) {
+   struct page *page = pvec->pages[i];
+
+   if (PageTransTail(page))
+   continue;
+   folio_batch_add(&fbatch, page_folio(page));
+   }
+   check_move_unevictable_folios(&fbatch);
+}
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
+
 /**
- * check_move_unevictable_pages - check pages for evictability and move to
- * appropriate zone lru list
- * @pvec: pagevec with lru pages to check
+ * check_move_unevictable_folios - Move evictable folios to appropriate zone
+ * lru list
+ * @fbatch: Batch of lru folios to check.
  *
- * Checks pages for evictability, if an evictable page is in the unevictable
+ * Checks folios for evictability, if an evictable folio is in the unevictable
  * lru list, moves it to the appropriate evictable lru list. This function
- * should be only used for lru pages.
+ * should be only used for lru folios.
  */
-void check_move_unevictable_pages(struct pagevec *pvec)
+void check_move_unevictable_folios(struct folio_batch *fbatch)
 {
struct lruvec *lruvec = NULL;
int pgscanned = 0;
int pgrescued = 0;
int i;
 
-   for (i = 0; i < pvec->nr; i++) {
-   struct page *page = pvec->pages[i];
-   struct folio *folio = page_folio(page);
-   int nr_pages;
-
-   if (PageTransTail(page))
-   continue;
+   for (i = 0; i < fbatch->nr; i++) {
+   struct folio *folio = fbatch->folios[i];
+   int nr_pages = folio_nr_pages(folio);
 
-   nr_pages = thp_nr_pages(page);
pgscanned += nr_pages;
 
-   /* block memcg migration during page moving between lru */
-   if (!TestClearPageLRU(page))
+   /* block memcg migration while the folio moves between lrus */
+   if (!folio_test_clear_lru(folio))
continue;
 
lruvec = folio_lruvec_relock_irq(folio, lruvec);
-   if (page_evictable(page) && PageUnevictable(page)) {
-   del_page_from_lru_list(page, lruvec);
-   ClearPageUnevictable(page);
-   add_page_to_lru_list(page, lruvec);
+   if (folio_evictable(folio) && folio_test_unevictable(folio)) {
+   lruvec_del_folio(lruvec, folio);
+   folio_clear_unevictable(folio);
+   lruvec_add_folio(lruvec, folio);
pgrescued += nr_pages;
}
-   SetPageLRU(page);
+   folio_set_lru(folio);
}
 
if (lruvec) {
@@ -4839,4 +4850,4 @@ void check_move_unevictable_pages(struct pagevec *pvec)
count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
}
 }
-EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
+EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
-- 
2.35.1





[f2fs-dev] [PATCH 03/10] ext4: Convert mpage_release_unused_pages() to use filemap_get_folios()

2022-06-05 Thread Matthew Wilcox (Oracle)
If the folio is large, it may overlap the beginning or end of the
unused range.  If it does, we need to avoid invalidating it.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/ext4/inode.c | 21 -
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3dce7d058985..32a7f5e024d6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1554,9 +1554,9 @@ struct mpage_da_data {
 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
   bool invalidate)
 {
-   int nr_pages, i;
+   unsigned nr, i;
pgoff_t index, end;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
 
@@ -1574,15 +1574,18 @@ static void mpage_release_unused_pages(struct 
mpage_da_data *mpd,
ext4_es_remove_extent(inode, start, last - start + 1);
}
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
while (index <= end) {
-   nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
-   if (nr_pages == 0)
+   nr = filemap_get_folios(mapping, &index, end, &fbatch);
+   if (nr == 0)
break;
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
-   struct folio *folio = page_folio(page);
+   for (i = 0; i < nr; i++) {
+   struct folio *folio = fbatch.folios[i];
 
+   if (folio->index < mpd->first_page)
+   continue;
+   if (folio->index + folio_nr_pages(folio) - 1 > end)
+   continue;
BUG_ON(!folio_test_locked(folio));
BUG_ON(folio_test_writeback(folio));
if (invalidate) {
@@ -1594,7 +1597,7 @@ static void mpage_release_unused_pages(struct 
mpage_da_data *mpd,
}
folio_unlock(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
}
 }
 
-- 
2.35.1





[f2fs-dev] [PATCH] f2fs: Remove readahead collision detection

2021-01-14 Thread Matthew Wilcox (Oracle)
With the new ->readahead operation, locked pages are added to the page
cache, preventing two threads from racing with each other to read the
same chunk of file, so this is dead code.
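
A simplified sketch of why the collision can no longer happen.  This only
illustrates the page cache insertion done on the ->readahead path; it is
not the actual mm/readahead.c code, and gfp, mapping and index stand in
for the readahead state (error handling elided):

	struct page *page = __page_cache_alloc(gfp);

	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
		/*
		 * Usually -EEXIST: another thread already inserted a
		 * (locked) page at this index and owns the read, so
		 * skip it instead of issuing duplicate I/O.
		 */
		put_page(page);
	} else {
		/* the page is locked; it will be passed to ->readahead */
	}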

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/f2fs/data.c  | 25 -
 fs/f2fs/f2fs.h  |  1 -
 fs/f2fs/super.c |  2 --
 3 files changed, 28 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 4d80f00e5e40..c18248d54020 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2265,11 +2265,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, 
struct bio **bio_ret,
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
- *
- * Note that the aops->readpages() function is ONLY used for read-ahead. If
- * this function ever deviates from doing just read-ahead, it should either
- * use ->readpage() or do the necessary surgery to decouple ->readpages()
- * from read-ahead.
  */
 static int f2fs_mpage_readpages(struct inode *inode,
struct readahead_control *rac, struct page *page)
@@ -2292,7 +2287,6 @@ static int f2fs_mpage_readpages(struct inode *inode,
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
int ret = 0;
-   bool drop_ra = false;
 
map.m_pblk = 0;
map.m_lblk = 0;
@@ -2303,26 +2297,10 @@ static int f2fs_mpage_readpages(struct inode *inode,
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
 
-   /*
-* Two readahead threads for same address range can cause race condition
-* which fragments sequential read IOs. So let's avoid each other.
-*/
-   if (rac && readahead_count(rac)) {
-   if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
-   drop_ra = true;
-   else
-   WRITE_ONCE(F2FS_I(inode)->ra_offset,
-   readahead_index(rac));
-   }
-
for (; nr_pages; nr_pages--) {
if (rac) {
page = readahead_page(rac);
prefetchw(&page->flags);
-   if (drop_ra) {
-   f2fs_put_page(page, 1);
-   continue;
-   }
}
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -2385,9 +2363,6 @@ static int f2fs_mpage_readpages(struct inode *inode,
}
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
-
-   if (rac && readahead_count(rac) && !drop_ra)
-   WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
return ret;
 }
 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 980e061f7968..114a72a99df7 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -718,7 +718,6 @@ struct f2fs_inode_info {
struct list_head inmem_pages;   /* inmemory pages managed by f2fs */
struct task_struct *inmem_task; /* store inmemory task */
struct mutex inmem_lock;/* lock for inmemory pages */
-   pgoff_t ra_offset;  /* ongoing readahead offset */
struct extent_tree *extent_tree;/* cached extent_tree entry */
 
/* avoid racing between foreground op and gc */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 1d42a59fb982..a25a2db273a3 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1156,8 +1156,6 @@ static struct inode *f2fs_alloc_inode(struct super_block 
*sb)
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
 
-   fi->ra_offset = -1;
-
return &fi->vfs_inode;
 }
 
-- 
2.29.2





[f2fs-dev] [PATCH] f2fs: Simplify SEEK_DATA implementation

2020-08-24 Thread Matthew Wilcox (Oracle)
Instead of finding the first dirty page and then seeing if it matches
the index of a block that is NEW_ADDR, delay the lookup of the dirty
bit until we've actually found a block that's NEW_ADDR.

Signed-off-by: Matthew Wilcox (Oracle) 
---
 fs/f2fs/file.c | 35 ---
 1 file changed, 8 insertions(+), 27 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 8a422400e824..14f478871698 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -376,32 +376,15 @@ int f2fs_sync_file(struct file *file, loff_t start, 
loff_t end, int datasync)
return f2fs_do_sync_file(file, start, end, datasync, false);
 }
 
-static pgoff_t __get_first_dirty_index(struct address_space *mapping,
-   pgoff_t pgofs, int whence)
-{
-   struct page *page;
-   int nr_pages;
-
-   if (whence != SEEK_DATA)
-   return 0;
-
-   /* find first dirty page index */
-   nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
- 1, &page);
-   if (!nr_pages)
-   return ULONG_MAX;
-   pgofs = page->index;
-   put_page(page);
-   return pgofs;
-}
-
-static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
-   pgoff_t dirty, pgoff_t pgofs, int whence)
+static bool __found_offset(struct address_space *mapping, block_t blkaddr,
+   pgoff_t index, int whence)
 {
switch (whence) {
case SEEK_DATA:
-   if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-   __is_valid_data_blkaddr(blkaddr))
+   if (__is_valid_data_blkaddr(blkaddr))
+   return true;
+   if (blkaddr == NEW_ADDR &&
+   xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
return true;
break;
case SEEK_HOLE:
@@ -417,7 +400,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t 
offset, int whence)
struct inode *inode = file->f_mapping->host;
loff_t maxbytes = inode->i_sb->s_maxbytes;
struct dnode_of_data dn;
-   pgoff_t pgofs, end_offset, dirty;
+   pgoff_t pgofs, end_offset;
loff_t data_ofs = offset;
loff_t isize;
int err = 0;
@@ -437,8 +420,6 @@ static loff_t f2fs_seek_block(struct file *file, loff_t 
offset, int whence)
 
pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 
-   dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
-
for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
@@ -471,7 +452,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t 
offset, int whence)
goto fail;
}
 
-   if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+   if (__found_offset(file->f_mapping, blkaddr,
pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
-- 
2.28.0


