[f2fs-dev] [PATCH 2/4] f2fs: no need to call ktime_get_real_seconds() if iostat is not enabled

2023-01-04 Thread Yangtao Li via Linux-f2fs-devel
When iostat is not enabled, it is meaningless to call
ktime_get_real_seconds() and assign its value to a variable.

Let's defer the call to ktime_get_real_seconds() until after the iostat_enable check.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/iostat.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index 8460989e9bab..e7d03c446994 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -29,12 +29,11 @@ int __maybe_unused iostat_info_seq_show(struct seq_file 
*seq, void *offset)
 {
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
-   time64_t now = ktime_get_real_seconds();
 
if (!sbi->iostat_enable)
return 0;
 
-   seq_printf(seq, "time:  %-16llu\n", now);
+   seq_printf(seq, "time:  %-16llu\n", ktime_get_real_seconds());
seq_printf(seq, "\t\t\t%-16s %-16s %-16s\n",
"io_bytes", "count", "avg_bytes");
 
-- 
2.25.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 1/4] f2fs: reset iostat_count in f2fs_reset_iostat()

2023-01-04 Thread Yangtao Li via Linux-f2fs-devel
Commit 8754b465c249 ("f2fs: support accounting iostat count and avg_bytes")
forgot to reset iostat count in f2fs_reset_iostat(), let's fix it.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/iostat.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index c53b62a7ca71..8460989e9bab 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -220,6 +220,7 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
 
spin_lock_irq(&sbi->iostat_lock);
for (i = 0; i < NR_IO_TYPE; i++) {
+   sbi->iostat_count[i] = 0;
sbi->rw_iostat[i] = 0;
sbi->prev_rw_iostat[i] = 0;
}
-- 
2.25.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 4/4] f2fs: rename rw_iostat to iostat_bytes

2023-01-04 Thread Yangtao Li via Linux-f2fs-devel
The contents stored in the rw_iostat and prev_rw_iostat arrays do not
quite match the meaning of their names. In fact, the arrays store not
only read and write IO, but also discard and flush. In addition, in order
to better distinguish them from the iostat_count array, it is more accurate
to say that IO bytes are stored in them. Also, the FS_DISCARD and FS_FLUSH
names are less harmonious than the others. Let's change to new names.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/f2fs.h  |  8 
 fs/f2fs/iostat.c| 20 ++--
 fs/f2fs/segment.c   |  4 ++--
 include/trace/events/f2fs.h |  2 +-
 4 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 0a24447472db..331c330ea31d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1191,8 +1191,8 @@ enum iostat_type {
FS_META_READ_IO,/* meta read IOs */
 
/* other */
-   FS_DISCARD, /* discard */
-   FS_FLUSH,   /* flush */
+   FS_DISCARD_IO,  /* discard */
+   FS_FLUSH_IO,/* flush */
NR_IO_TYPE,
 };
 
@@ -1856,8 +1856,8 @@ struct f2fs_sb_info {
/* For app/fs IO statistics */
spinlock_t iostat_lock;
unsigned long long iostat_count[NR_IO_TYPE];
-   unsigned long long rw_iostat[NR_IO_TYPE];
-   unsigned long long prev_rw_iostat[NR_IO_TYPE];
+   unsigned long long iostat_bytes[NR_IO_TYPE];
+   unsigned long long prev_iostat_bytes[NR_IO_TYPE];
bool iostat_enable;
unsigned long iostat_next_period;
unsigned int iostat_period_ms;
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index 991605fcfe0b..59c72f92191a 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -21,13 +21,13 @@ static mempool_t *bio_iostat_ctx_pool;
 static inline unsigned long long iostat_get_avg_bytes(struct f2fs_sb_info *sbi,
enum iostat_type type)
 {
-   return sbi->iostat_count[type] ? div64_u64(sbi->rw_iostat[type],
+   return sbi->iostat_count[type] ? div64_u64(sbi->iostat_bytes[type],
sbi->iostat_count[type]) : 0;
 }
 
 #define IOSTAT_INFO_SHOW(name, type)   \
seq_printf(seq, "%-23s %-16llu %-16llu %-16llu\n",  \
-   name":", sbi->rw_iostat[type],  
\
+   name":", sbi->iostat_bytes[type],   
\
sbi->iostat_count[type],
\
iostat_get_avg_bytes(sbi, type))
\
 
@@ -79,8 +79,8 @@ int __maybe_unused iostat_info_seq_show(struct seq_file *seq, 
void *offset)
 
/* print other IOs */
seq_puts(seq, "[OTHER]\n");
-   IOSTAT_INFO_SHOW("fs discard", FS_DISCARD);
-   IOSTAT_INFO_SHOW("fs flush", FS_FLUSH);
+   IOSTAT_INFO_SHOW("fs discard", FS_DISCARD_IO);
+   IOSTAT_INFO_SHOW("fs flush", FS_FLUSH_IO);
 
return 0;
 }
@@ -129,9 +129,9 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info 
*sbi)
msecs_to_jiffies(sbi->iostat_period_ms);
 
for (i = 0; i < NR_IO_TYPE; i++) {
-   iostat_diff[i] = sbi->rw_iostat[i] -
-   sbi->prev_rw_iostat[i];
-   sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
+   iostat_diff[i] = sbi->iostat_bytes[i] -
+   sbi->prev_iostat_bytes[i];
+   sbi->prev_iostat_bytes[i] = sbi->iostat_bytes[i];
}
spin_unlock_irqrestore(&sbi->iostat_lock, flags);
 
@@ -148,8 +148,8 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
spin_lock_irq(&sbi->iostat_lock);
for (i = 0; i < NR_IO_TYPE; i++) {
sbi->iostat_count[i] = 0;
-   sbi->rw_iostat[i] = 0;
-   sbi->prev_rw_iostat[i] = 0;
+   sbi->iostat_bytes[i] = 0;
+   sbi->prev_iostat_bytes[i] = 0;
}
spin_unlock_irq(&sbi->iostat_lock);
 
@@ -161,7 +161,7 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
 static inline void __f2fs_update_iostat(struct f2fs_sb_info *sbi,
enum iostat_type type, unsigned long long io_bytes)
 {
-   sbi->rw_iostat[type] += io_bytes;
+   sbi->iostat_bytes[type] += io_bytes;
sbi->iostat_count[type]++;
 }
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 34e9dc4df5bb..38bae9107a3b 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -504,7 +504,7 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi,
 {
int ret = blkdev_issue_flush(bdev);
if (!ret)
-   f2fs_update_iostat(sbi, NULL, FS_FLUSH, 0);
+   f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
 
trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
test_opt(sbi, FLUSH_MERGE), ret);
@@ -1184,7 +1184,7 @@ stati

[f2fs-dev] [PATCH 3/4] f2fs: introduce IOSTAT_INFO_SHOW macro

2023-01-04 Thread Yangtao Li via Linux-f2fs-devel
Define IOSTAT_INFO_SHOW macro and use it to simplify code.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/iostat.c | 136 +++
 1 file changed, 32 insertions(+), 104 deletions(-)

diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index e7d03c446994..991605fcfe0b 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -25,6 +25,12 @@ static inline unsigned long long iostat_get_avg_bytes(struct 
f2fs_sb_info *sbi,
sbi->iostat_count[type]) : 0;
 }
 
+#define IOSTAT_INFO_SHOW(name, type)   \
+   seq_printf(seq, "%-23s %-16llu %-16llu %-16llu\n",  \
+   name":", sbi->rw_iostat[type],  
\
+   sbi->iostat_count[type],
\
+   iostat_get_avg_bytes(sbi, type))
\
+
 int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
 {
struct super_block *sb = seq->private;
@@ -39,120 +45,42 @@ int __maybe_unused iostat_info_seq_show(struct seq_file 
*seq, void *offset)
 
/* print app write IOs */
seq_puts(seq, "[WRITE]\n");
-   seq_printf(seq, "app buffered data: %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[APP_BUFFERED_IO],
-   sbi->iostat_count[APP_BUFFERED_IO],
-   iostat_get_avg_bytes(sbi, APP_BUFFERED_IO));
-   seq_printf(seq, "app direct data:   %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[APP_DIRECT_IO],
-   sbi->iostat_count[APP_DIRECT_IO],
-   iostat_get_avg_bytes(sbi, APP_DIRECT_IO));
-   seq_printf(seq, "app mapped data:   %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[APP_MAPPED_IO],
-   sbi->iostat_count[APP_MAPPED_IO],
-   iostat_get_avg_bytes(sbi, APP_MAPPED_IO));
-   seq_printf(seq, "app buffered cdata:%-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[APP_BUFFERED_CDATA_IO],
-   sbi->iostat_count[APP_BUFFERED_CDATA_IO],
-   iostat_get_avg_bytes(sbi, 
APP_BUFFERED_CDATA_IO));
-   seq_printf(seq, "app mapped cdata:  %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[APP_MAPPED_CDATA_IO],
-   sbi->iostat_count[APP_MAPPED_CDATA_IO],
-   iostat_get_avg_bytes(sbi, APP_MAPPED_CDATA_IO));
+   IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_IO);
+   IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_IO);
+   IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_IO);
+   IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_IO);
+   IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_IO);
 
/* print fs write IOs */
-   seq_printf(seq, "fs data:   %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_DATA_IO],
-   sbi->iostat_count[FS_DATA_IO],
-   iostat_get_avg_bytes(sbi, FS_DATA_IO));
-   seq_printf(seq, "fs cdata:  %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_CDATA_IO],
-   sbi->iostat_count[FS_CDATA_IO],
-   iostat_get_avg_bytes(sbi, FS_CDATA_IO));
-   seq_printf(seq, "fs node:   %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_NODE_IO],
-   sbi->iostat_count[FS_NODE_IO],
-   iostat_get_avg_bytes(sbi, FS_NODE_IO));
-   seq_printf(seq, "fs meta:   %-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_META_IO],
-   sbi->iostat_count[FS_META_IO],
-   iostat_get_avg_bytes(sbi, FS_META_IO));
-   seq_printf(seq, "fs gc data:%-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_GC_DATA_IO],
-   sbi->iostat_count[FS_GC_DATA_IO],
-   iostat_get_avg_bytes(sbi, FS_GC_DATA_IO));
-   seq_printf(seq, "fs gc node:%-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_GC_NODE_IO],
-   sbi->iostat_count[FS_GC_NODE_IO],
-   iostat_get_avg_bytes(sbi, FS_GC_NODE_IO));
-   seq_printf(seq, "fs cp data:%-16llu %-16llu %-16llu\n",
-   sbi->rw_iostat[FS_CP_DATA_IO],
-   sbi->iostat_count[FS_CP_DATA_IO],
-   iostat_get_avg_bytes(sbi, FS_CP_DATA_IO));
-   seq_printf(seq, "fs cp node:%-16llu %-16llu %-16

[f2fs-dev] [PATCH v3] f2fs: introduce discard_io_aware_gran sysfs node

2023-01-04 Thread Yangtao Li via Linux-f2fs-devel
The current discard_io_aware_gran is a fixed value, change it to be
configurable through the sys node.

Signed-off-by: Yangtao Li 
---
v3: remove DEFAULT_IO_AWARE_DISCARD_GRANULARITY
 Documentation/ABI/testing/sysfs-fs-f2fs |  9 +
 fs/f2fs/f2fs.h  |  1 +
 fs/f2fs/segment.c   |  3 ++-
 fs/f2fs/sysfs.c | 13 +
 4 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs 
b/Documentation/ABI/testing/sysfs-fs-f2fs
index aaa379bb8a8f..75420c242cc4 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -708,3 +708,12 @@ Description:   Support configuring fault injection 
type, should be
FAULT_LOCK_OP0x2
FAULT_BLKADDR0x4
===  ===
+
+What:  /sys/fs/f2fs//discard_io_aware_gran
+Date:  January 2023
+Contact:   "Yangtao Li" 
Description:   Controls the background discard granularity of the inner discard
   thread when the device is not idle. The inner thread will not issue
   discards whose size is smaller than this granularity. The unit size
   is one block (4KB); currently only values in the range [0, 512] are
   supported.
   Default: 512
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 0a24447472db..cf60221d084e 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -409,6 +409,7 @@ struct discard_cmd_control {
unsigned int min_discard_issue_time;/* min. interval between 
discard issue */
unsigned int mid_discard_issue_time;/* mid. interval between 
discard issue */
unsigned int max_discard_issue_time;/* max. interval between 
discard issue */
+   unsigned int discard_io_aware_gran; /* minimum discard granularity not 
be aware of I/O */
unsigned int discard_urgent_util;   /* utilization which issue 
discard proactively */
unsigned int discard_granularity;   /* discard granularity */
unsigned int max_ordered_discard;   /* maximum discard granularity 
issued by lba order */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 34e9dc4df5bb..d988d83108b2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1059,7 +1059,7 @@ static void __init_discard_policy(struct f2fs_sb_info 
*sbi,
dpolicy->granularity = granularity;
 
dpolicy->max_requests = dcc->max_discard_request;
-   dpolicy->io_aware_gran = MAX_PLIST_NUM;
+   dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
dpolicy->timeout = false;
 
if (discard_type == DPOLICY_BG) {
@@ -2063,6 +2063,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info 
*sbi)
if (!dcc)
return -ENOMEM;
 
+   dcc->discard_io_aware_gran = MAX_PLIST_NUM;
dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 805b632a3af0..e396851a6dd1 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -473,6 +473,17 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
return count;
}
 
+   if (!strcmp(a->attr.name, "discard_io_aware_gran")) {
+   if (t > MAX_PLIST_NUM)
+   return -EINVAL;
+   if (!f2fs_block_unit_discard(sbi))
+   return -EINVAL;
+   if (t == *ui)
+   return count;
+   *ui = t;
+   return count;
+   }
+
if (!strcmp(a->attr.name, "discard_granularity")) {
if (t == 0 || t > MAX_PLIST_NUM)
return -EINVAL;
@@ -825,6 +836,7 @@ F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, 
max_discard_request, max_discard_req
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, min_discard_issue_time, 
min_discard_issue_time);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, mid_discard_issue_time, 
mid_discard_issue_time);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_discard_issue_time, 
max_discard_issue_time);
+F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_io_aware_gran, 
discard_io_aware_gran);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_urgent_util, 
discard_urgent_util);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, 
discard_granularity);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_ordered_discard, 
max_ordered_discard);
@@ -960,6 +972,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_discard_issue_time),
ATTR_LIST(mid_discard_issue_time),
ATTR_LIST(max_discard_issue_time),
+   ATTR_LIST(discard_io_aware_gran),
ATTR_LIST(discard_urgent_util),
ATTR_LIST(discard_granularity),
ATTR_LIST(max_ordered_discard),
-- 
2.25.1



__

[f2fs-dev] [GIT PULL] f2fs fix for 6.2-rc3

2023-01-04 Thread Jaegeuk Kim
Hi Linus,

Could you please consider this pull request?

Thanks,

The following changes since commit 69b41ac87e4a664de78a395ff97166f0b2943210:

  Merge tag 'for-6.2-rc2-tag' of 
git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux (2023-01-02 11:06:18 
-0800)

are available in the Git repository at:

  git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git 
tags/f2fs-fix-6.2-rc3

for you to fetch changes up to df9d44b645b83fffccfb4e28c1f93376585fdec8:

  f2fs: let's avoid panic if extent_tree is not created (2023-01-03 08:59:06 
-0800)


f2fs-fix-6.2-rc3

This series fixes the below three bugs introduced in 6.2-rc1.

- fix a null pointer dereference in f2fs_issue_flush, which occurs by the
combination of mount/remount options.

- fix a bug in per-block age-based extent_cache newly introduced in 6.2-rc1,
which reported a wrong age information in extent_cache.

- fix a kernel panic if extent_tree was not created, which was caught by a
wrong BUG_ON.


Chao Yu (1):
  f2fs: fix to avoid NULL pointer dereference in f2fs_issue_flush()

Jaegeuk Kim (4):
  f2fs: initialize extent_cache parameter
  f2fs: don't mix to use union values in extent_info
  f2fs: should use a temp extent_info for lookup
  f2fs: let's avoid panic if extent_tree is not created

 fs/f2fs/data.c |  2 +-
 fs/f2fs/extent_cache.c | 34 ++
 fs/f2fs/file.c |  2 +-
 fs/f2fs/segment.c  | 13 +
 4 files changed, 25 insertions(+), 26 deletions(-)


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH 1/4] f2fs: reset iostat_count in f2fs_reset_iostat()

2023-01-04 Thread Jaegeuk Kim
Hi Yangtao,

These are all in dev-test branch, which means you don't need to stack up more
patches on top of it. I just integrated most of them into two original patches.
Could you please take a look at this?

c1706cc0cd72 f2fs: add iostat support for flush
acd6f525e01c f2fs: support accounting iostat count and avg_bytes

https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test

Thanks,

On 01/04, Yangtao Li wrote:
> Commit 8754b465c249 ("f2fs: support accounting iostat count and avg_bytes")
> forgot to reset iostat count in f2fs_reset_iostat(), let's fix it.
> 
> Signed-off-by: Yangtao Li 
> ---
>  fs/f2fs/iostat.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
> index c53b62a7ca71..8460989e9bab 100644
> --- a/fs/f2fs/iostat.c
> +++ b/fs/f2fs/iostat.c
> @@ -220,6 +220,7 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
>  
>   spin_lock_irq(&sbi->iostat_lock);
>   for (i = 0; i < NR_IO_TYPE; i++) {
> + sbi->iostat_count[i] = 0;
>   sbi->rw_iostat[i] = 0;
>   sbi->prev_rw_iostat[i] = 0;
>   }
> -- 
> 2.25.1


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [syzbot] [f2fs?] KASAN: use-after-free Read in __update_extent_tree_range

2023-01-04 Thread syzbot
syzbot has bisected this issue to:

commit 3db1de0e582c358dd013f3703cd55b5fe4076436
Author: Daeho Jeong 
Date:   Thu Apr 28 18:18:09 2022 +

f2fs: change the current atomic write way

bisection log:  https://syzkaller.appspot.com/x/bisect.txt?x=15c77d3848
start commit:   1b929c02afd3 Linux 6.2-rc1
git tree:   upstream
final oops: https://syzkaller.appspot.com/x/report.txt?x=17c77d3848
console output: https://syzkaller.appspot.com/x/log.txt?x=13c77d3848
kernel config:  https://syzkaller.appspot.com/x/.config?x=2651619a26b4d687
dashboard link: https://syzkaller.appspot.com/bug?extid=823000d23b3400619f7c
syz repro:  https://syzkaller.appspot.com/x/repro.syz?x=1259723848
C reproducer:   https://syzkaller.appspot.com/x/repro.c?x=11ae9d7f88

Reported-by: syzbot+823000d23b3400619...@syzkaller.appspotmail.com
Fixes: 3db1de0e582c ("f2fs: change the current atomic write way")

For information about bisection process see: https://goo.gl/tpsmEJ#bisection


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [GIT PULL] f2fs fix for 6.2-rc3

2023-01-04 Thread pr-tracker-bot
The pull request you sent on Wed, 4 Jan 2023 10:39:41 -0800:

> git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git 
> tags/f2fs-fix-6.2-rc3

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/2ac44821a81612317f4451b765986d8b9695d5d5

Thank you!

-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 04/23] page-writeback: Convert write_cache_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 8 calls
to compound_head(), and the function now supports large folios.

Signed-off-by: Vishal Moola (Oracle) 
Reviewed-by: Matthew Wilcox (Oracle) 
---
 mm/page-writeback.c | 44 +++-
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ad608ef2a243..5d61fa9eecc0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2398,15 +2398,15 @@ int write_cache_pages(struct address_space *mapping,
int ret = 0;
int done = 0;
int error;
-   struct pagevec pvec;
-   int nr_pages;
+   struct folio_batch fbatch;
+   int nr_folios;
pgoff_t index;
pgoff_t end;/* Inclusive */
pgoff_t done_index;
int range_whole = 0;
xa_mark_t tag;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* prev offset */
end = -1;
@@ -2426,17 +2426,18 @@ int write_cache_pages(struct address_space *mapping,
while (!done && (index <= end)) {
int i;
 
-   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-   tag);
-   if (nr_pages == 0)
+   nr_folios = filemap_get_folios_tag(mapping, &index, end,
+   tag, &fbatch);
+
+   if (nr_folios == 0)
break;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   done_index = page->index;
+   done_index = folio->index;
 
-   lock_page(page);
+   folio_lock(folio);
 
/*
 * Page truncated or invalidated. We can freely skip it
@@ -2446,30 +2447,30 @@ int write_cache_pages(struct address_space *mapping,
 * even if there is now a new, dirty page at the same
 * pagecache address.
 */
-   if (unlikely(page->mapping != mapping)) {
+   if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-   unlock_page(page);
+   folio_unlock(folio);
continue;
}
 
-   if (!PageDirty(page)) {
+   if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
 
-   if (PageWriteback(page)) {
+   if (folio_test_writeback(folio)) {
if (wbc->sync_mode != WB_SYNC_NONE)
-   wait_on_page_writeback(page);
+   folio_wait_writeback(folio);
else
goto continue_unlock;
}
 
-   BUG_ON(PageWriteback(page));
-   if (!clear_page_dirty_for_io(page))
+   BUG_ON(folio_test_writeback(folio));
+   if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
 
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-   error = (*writepage)(page, wbc, data);
+   error = writepage(&folio->page, wbc, data);
if (unlikely(error)) {
/*
 * Handle errors according to the type of
@@ -2484,11 +2485,12 @@ int write_cache_pages(struct address_space *mapping,
 * the first error.
 */
if (error == AOP_WRITEPAGE_ACTIVATE) {
-   unlock_page(page);
+   folio_unlock(folio);
error = 0;
} else if (wbc->sync_mode != WB_SYNC_ALL) {
ret = error;
-   done_index = page->index + 1;
+   done_index = folio->index +
+   folio_nr_pages(folio);
done = 1;
break;
}
@@ -2508,7 +2510,7 @@ int write_cache_pages(struct address_s

[f2fs-dev] [PATCH v5 05/23] afs: Convert afs_writepages_region() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert to use folios throughout. This function is in preparation to
remove find_get_pages_range_tag().

Also modified this function to write the whole batch one at a time,
rather than calling for a new set every single write.

Signed-off-by: Vishal Moola (Oracle) 
Tested-by: David Howells 
---
 fs/afs/write.c | 116 +
 1 file changed, 59 insertions(+), 57 deletions(-)

diff --git a/fs/afs/write.c b/fs/afs/write.c
index 19df10d63323..2d3b08b7406c 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -704,85 +704,87 @@ static int afs_writepages_region(struct address_space 
*mapping,
 bool max_one_loop)
 {
struct folio *folio;
-   struct page *head_page;
+   struct folio_batch fbatch;
ssize_t ret;
+   unsigned int i;
int n, skips = 0;
 
_enter("%llx,%llx,", start, end);
+   folio_batch_init(&fbatch);
 
do {
pgoff_t index = start / PAGE_SIZE;
 
-   n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
-PAGECACHE_TAG_DIRTY, 1, 
&head_page);
+   n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
+   PAGECACHE_TAG_DIRTY, &fbatch);
+
if (!n)
break;
+   for (i = 0; i < n; i++) {
+   folio = fbatch.folios[i];
+   start = folio_pos(folio); /* May regress with THPs */
 
-   folio = page_folio(head_page);
-   start = folio_pos(folio); /* May regress with THPs */
-
-   _debug("wback %lx", folio_index(folio));
+   _debug("wback %lx", folio_index(folio));
 
-   /* At this point we hold neither the i_pages lock nor the
-* page lock: the page may be truncated or invalidated
-* (changing page->mapping to NULL), or even swizzled
-* back from swapper_space to tmpfs file mapping
-*/
-   if (wbc->sync_mode != WB_SYNC_NONE) {
-   ret = folio_lock_killable(folio);
-   if (ret < 0) {
-   folio_put(folio);
-   return ret;
-   }
-   } else {
-   if (!folio_trylock(folio)) {
-   folio_put(folio);
-   return 0;
+   /* At this point we hold neither the i_pages lock nor 
the
+* page lock: the page may be truncated or invalidated
+* (changing page->mapping to NULL), or even swizzled
+* back from swapper_space to tmpfs file mapping
+*/
+   if (wbc->sync_mode != WB_SYNC_NONE) {
+   ret = folio_lock_killable(folio);
+   if (ret < 0) {
+   folio_batch_release(&fbatch);
+   return ret;
+   }
+   } else {
+   if (!folio_trylock(folio))
+   continue;
}
-   }
 
-   if (folio_mapping(folio) != mapping ||
-   !folio_test_dirty(folio)) {
-   start += folio_size(folio);
-   folio_unlock(folio);
-   folio_put(folio);
-   continue;
-   }
+   if (folio->mapping != mapping ||
+   !folio_test_dirty(folio)) {
+   start += folio_size(folio);
+   folio_unlock(folio);
+   continue;
+   }
 
-   if (folio_test_writeback(folio) ||
-   folio_test_fscache(folio)) {
-   folio_unlock(folio);
-   if (wbc->sync_mode != WB_SYNC_NONE) {
-   folio_wait_writeback(folio);
+   if (folio_test_writeback(folio) ||
+   folio_test_fscache(folio)) {
+   folio_unlock(folio);
+   if (wbc->sync_mode != WB_SYNC_NONE) {
+   folio_wait_writeback(folio);
 #ifdef CONFIG_AFS_FSCACHE
-   folio_wait_fscache(folio);
+   folio_wait_fscache(folio);
 #endif
-   } else {
-   start += folio_size(folio);
+   } else {
+   start += folio_size(folio);
+   }
+   if (wbc->sync_mode

[f2fs-dev] [PATCH v5 06/23] btrfs: Convert btree_write_cache_pages() to use filemap_get_folio_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: David Sterba 
---
 fs/btrfs/extent_io.c | 19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 83dd3aa59663..64fbafc70822 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2845,14 +2845,14 @@ int btree_write_cache_pages(struct address_space 
*mapping,
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
-   struct pagevec pvec;
-   int nr_pages;
+   struct folio_batch fbatch;
+   unsigned int nr_folios;
pgoff_t index;
pgoff_t end;/* Inclusive */
int scanned = 0;
xa_mark_t tag;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -2875,14 +2875,15 @@ int btree_write_cache_pages(struct address_space 
*mapping,
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
while (!done && !nr_to_write_done && (index <= end) &&
-  (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-   tag))) {
+  (nr_folios = filemap_get_folios_tag(mapping, &index, end,
+   tag, &fbatch))) {
unsigned i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   ret = submit_eb_page(page, wbc, &bio_ctrl, &eb_context);
+   ret = submit_eb_page(&folio->page, wbc, &bio_ctrl,
+   &eb_context);
if (ret == 0)
continue;
if (ret < 0) {
@@ -2897,7 +2898,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 */
nr_to_write_done = wbc->nr_to_write <= 0;
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
if (!scanned && !done) {
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 03/23] filemap: Convert __filemap_fdatawait_range() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted function to use folios. This is in preparation for the removal
of find_get_pages_range_tag(). This change removes 2 calls to
compound_head().

Signed-off-by: Vishal Moola (Oracle) 
Reviewed-by: Matthew Wilcox (Oracle) 
---
 mm/filemap.c | 24 +---
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 291bb3e0957a..85adbcf2d9a7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -503,25 +503,27 @@ static void __filemap_fdatawait_range(struct 
address_space *mapping,
 {
pgoff_t index = start_byte >> PAGE_SHIFT;
pgoff_t end = end_byte >> PAGE_SHIFT;
-   struct pagevec pvec;
-   int nr_pages;
+   struct folio_batch fbatch;
+   unsigned nr_folios;
+
+   folio_batch_init(&fbatch);
 
-   pagevec_init(&pvec);
while (index <= end) {
unsigned i;
 
-   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
-   end, PAGECACHE_TAG_WRITEBACK);
-   if (!nr_pages)
+   nr_folios = filemap_get_folios_tag(mapping, &index, end,
+   PAGECACHE_TAG_WRITEBACK, &fbatch);
+
+   if (!nr_folios)
break;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   wait_on_page_writeback(page);
-   ClearPageError(page);
+   folio_wait_writeback(folio);
+   folio_clear_error(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
 }
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 00/23] Convert to filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
This patch series replaces find_get_pages_range_tag() with
filemap_get_folios_tag(). This also allows the removal of multiple
calls to compound_head() throughout.
It also makes a good chunk of the straightforward conversions to folios,
and takes the opportunity to introduce a function that grabs a folio
from the pagecache.

I've run xfstests on xfs, btrfs, ext4, f2fs, and nilfs2, but more testing may
be beneficial. The page-writeback and filemap changes implicitly work. Still
looking for review of cifs, gfs2, and ext4.

---
v5:
  Rebased onto upstream 6.2-rc2
  Filesystems modified to use folio_get() instead of folio_ref_inc()
  F2fs modified to maintain use of F2FS_ONSTACK_PAGES

v4:
  Fixed a bug with reference counting in cifs changes
  - Reported-by: kernel test robot  
  Improved commit messages to be more meaningful
  Got some Acked-bys and Reviewed-bys

v3:
  Rebased onto upstream 6.1
  Simplified the ceph patch to only necessary changes
  Changed commit messages throughout to be clearer
  Got an Acked-by for another nilfs patch
  Got Tested-by for afs

v2:
  Got Acked-By tags for nilfs and btrfs changes
  Fixed an error arising in f2fs
  - Reported-by: kernel test robot 

Vishal Moola (Oracle) (23):
  pagemap: Add filemap_grab_folio()
  filemap: Added filemap_get_folios_tag()
  filemap: Convert __filemap_fdatawait_range() to use
filemap_get_folios_tag()
  page-writeback: Convert write_cache_pages() to use
filemap_get_folios_tag()
  afs: Convert afs_writepages_region() to use filemap_get_folios_tag()
  btrfs: Convert btree_write_cache_pages() to use
filemap_get_folios_tag()
  btrfs: Convert extent_write_cache_pages() to use
filemap_get_folios_tag()
  ceph: Convert ceph_writepages_start() to use filemap_get_folios_tag()
  cifs: Convert wdata_alloc_and_fillpages() to use
filemap_get_folios_tag()
  ext4: Convert mpage_prepare_extent_to_map() to use
filemap_get_folios_tag()
  f2fs: Convert f2fs_fsync_node_pages() to use filemap_get_folios_tag()
  f2fs: Convert f2fs_flush_inline_data() to use filemap_get_folios_tag()
  f2fs: Convert f2fs_sync_node_pages() to use filemap_get_folios_tag()
  f2fs: Convert f2fs_write_cache_pages() to use filemap_get_folios_tag()
  f2fs: Convert last_fsync_dnode() to use filemap_get_folios_tag()
  f2fs: Convert f2fs_sync_meta_pages() to use filemap_get_folios_tag()
  gfs2: Convert gfs2_write_cache_jdata() to use filemap_get_folios_tag()
  nilfs2: Convert nilfs_lookup_dirty_data_buffers() to use
filemap_get_folios_tag()
  nilfs2: Convert nilfs_lookup_dirty_node_buffers() to use
filemap_get_folios_tag()
  nilfs2: Convert nilfs_btree_lookup_dirty_buffers() to use
filemap_get_folios_tag()
  nilfs2: Convert nilfs_copy_dirty_pages() to use
filemap_get_folios_tag()
  nilfs2: Convert nilfs_clear_dirty_pages() to use
filemap_get_folios_tag()
  filemap: Remove find_get_pages_range_tag()

 fs/afs/write.c  | 116 
 fs/btrfs/extent_io.c|  57 ++--
 fs/ceph/addr.c  |  58 ++--
 fs/cifs/file.c  |  32 +--
 fs/ext4/inode.c |  65 +++---
 fs/f2fs/checkpoint.c|  49 +
 fs/f2fs/data.c  |  84 -
 fs/f2fs/node.c  |  72 +
 fs/gfs2/aops.c  |  64 --
 fs/nilfs2/btree.c   |  14 ++---
 fs/nilfs2/page.c|  59 ++--
 fs/nilfs2/segment.c |  44 +++
 include/linux/pagemap.h |  32 +++
 include/linux/pagevec.h |   8 ---
 mm/filemap.c|  84 ++---
 mm/page-writeback.c |  44 +++
 mm/swap.c   |  10 
 17 files changed, 481 insertions(+), 411 deletions(-)

-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 08/23] ceph: Convert ceph_writepages_start() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use a folio_batch instead of pagevec. This is in
preparation for the removal of find_get_pages_range_tag().

Also some minor renaming for consistency.

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Jeff Layton 
---
 fs/ceph/addr.c | 58 ++
 1 file changed, 30 insertions(+), 28 deletions(-)

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 8c74871e37c9..905268bf9741 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -792,7 +792,7 @@ static int ceph_writepages_start(struct address_space 
*mapping,
struct ceph_vino vino = ceph_vino(inode);
pgoff_t index, start_index, end = -1;
struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
int rc = 0;
unsigned int wsize = i_blocksize(inode);
struct ceph_osd_request *req = NULL;
@@ -821,7 +821,7 @@ static int ceph_writepages_start(struct address_space 
*mapping,
if (fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
index = start_index;
@@ -869,7 +869,7 @@ static int ceph_writepages_start(struct address_space 
*mapping,
 
while (!done && index <= end) {
int num_ops = 0, op_idx;
-   unsigned i, pvec_pages, max_pages, locked_pages = 0;
+   unsigned i, nr_folios, max_pages, locked_pages = 0;
struct page **pages = NULL, **data_pages;
struct page *page;
pgoff_t strip_unit_end = 0;
@@ -879,13 +879,13 @@ static int ceph_writepages_start(struct address_space 
*mapping,
max_pages = wsize >> PAGE_SHIFT;
 
 get_more_pages:
-   pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
-   end, PAGECACHE_TAG_DIRTY);
-   dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
-   if (!pvec_pages && !locked_pages)
+   nr_folios = filemap_get_folios_tag(mapping, &index,
+   end, PAGECACHE_TAG_DIRTY, &fbatch);
+   dout("pagevec_lookup_range_tag got %d\n", nr_folios);
+   if (!nr_folios && !locked_pages)
break;
-   for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
-   page = pvec.pages[i];
+   for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
+   page = &fbatch.folios[i]->page;
dout("? %p idx %lu\n", page, page->index);
if (locked_pages == 0)
lock_page(page);  /* first page */
@@ -995,7 +995,7 @@ static int ceph_writepages_start(struct address_space 
*mapping,
len = 0;
}
 
-   /* note position of first page in pvec */
+   /* note position of first page in fbatch */
dout("%p will write page %p idx %lu\n",
 inode, page, page->index);
 
@@ -1005,30 +1005,30 @@ static int ceph_writepages_start(struct address_space 
*mapping,
fsc->write_congested = true;
 
pages[locked_pages++] = page;
-   pvec.pages[i] = NULL;
+   fbatch.folios[i] = NULL;
 
len += thp_size(page);
}
 
/* did we get anything? */
if (!locked_pages)
-   goto release_pvec_pages;
+   goto release_folios;
if (i) {
unsigned j, n = 0;
-   /* shift unused page to beginning of pvec */
-   for (j = 0; j < pvec_pages; j++) {
-   if (!pvec.pages[j])
+   /* shift unused page to beginning of fbatch */
+   for (j = 0; j < nr_folios; j++) {
+   if (!fbatch.folios[j])
continue;
if (n < j)
-   pvec.pages[n] = pvec.pages[j];
+   fbatch.folios[n] = fbatch.folios[j];
n++;
}
-   pvec.nr = n;
+   fbatch.nr = n;
 
-   if (pvec_pages && i == pvec_pages &&
+   if (nr_folios && i == nr_folios &&
locked_pages < max_pages) {
-   dout("reached end pvec, trying for more\n");
-   pagevec_release(&pvec);
+   dout("reached end 

[f2fs-dev] [PATCH v5 09/23] cifs: Convert wdata_alloc_and_fillpages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
This is in preparation for the removal of find_get_pages_range_tag(). Now also
supports the use of large folios.

Since tofind might be larger than the max number of folios in a
folio_batch (15), we loop through filling in wdata->pages pulling more
batches until we either reach tofind pages or run out of folios.

This function may not return all pages in the last found folio before
tofind pages are reached.

Signed-off-by: Vishal Moola (Oracle) 
---
 fs/cifs/file.c | 32 +---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 22dfc1f8b4f1..8cdd2f67af24 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2527,14 +2527,40 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct 
address_space *mapping,
  unsigned int *found_pages)
 {
struct cifs_writedata *wdata;
-
+   struct folio_batch fbatch;
+   unsigned int i, idx, p, nr;
wdata = cifs_writedata_alloc((unsigned int)tofind,
 cifs_writev_complete);
if (!wdata)
return NULL;
 
-   *found_pages = find_get_pages_range_tag(mapping, index, end,
-   PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
+   folio_batch_init(&fbatch);
+   *found_pages = 0;
+
+again:
+   nr = filemap_get_folios_tag(mapping, index, end,
+   PAGECACHE_TAG_DIRTY, &fbatch);
+   if (!nr)
+   goto out; /* No dirty pages left in the range */
+
+   for (i = 0; i < nr; i++) {
+   struct folio *folio = fbatch.folios[i];
+
+   idx = 0;
+   p = folio_nr_pages(folio);
+add_more:
+   wdata->pages[*found_pages] = folio_page(folio, idx);
+   folio_get(folio);
+   if (++*found_pages == tofind) {
+   folio_batch_release(&fbatch);
+   goto out;
+   }
+   if (++idx < p)
+   goto add_more;
+   }
+   folio_batch_release(&fbatch);
+   goto again;
+out:
return wdata;
 }
 
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 02/23] filemap: Added filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
This is the equivalent of find_get_pages_range_tag(), except for folios
instead of pages.

One notable difference is filemap_get_folios_tag() does not take in a
maximum pages argument. It instead tries to fill a folio batch and stops
either once full (15 folios) or reaching the end of the search range.

The new function supports large folios, the initial function did not
since all callers don't use large folios.

Signed-off-by: Vishal Moola (Oracle) 
Reviewed-by: Matthew Wilcox (Oracle) 
---
 include/linux/pagemap.h |  2 ++
 mm/filemap.c| 54 +
 2 files changed, 56 insertions(+)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 468183be67be..bb3c1d51b1cb 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -739,6 +739,8 @@ unsigned filemap_get_folios(struct address_space *mapping, 
pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch);
 unsigned filemap_get_folios_contig(struct address_space *mapping,
pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
+   pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t 
*index,
pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages);
diff --git a/mm/filemap.c b/mm/filemap.c
index c4d4ace9cc70..291bb3e0957a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2281,6 +2281,60 @@ unsigned filemap_get_folios_contig(struct address_space 
*mapping,
 }
 EXPORT_SYMBOL(filemap_get_folios_contig);
 
+/**
+ * filemap_get_folios_tag - Get a batch of folios matching @tag
+ * @mapping:The address_space to search
+ * @start:  The starting page index
+ * @end:The final page index (inclusive)
+ * @tag:The tag index
+ * @fbatch: The batch to fill
+ *
+ * Same as filemap_get_folios(), but only returning folios tagged with @tag.
+ *
+ * Return: The number of folios found.
+ * Also update @start to index the next folio for traversal.
+ */
+unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
+   pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
+{
+   XA_STATE(xas, &mapping->i_pages, *start);
+   struct folio *folio;
+
+   rcu_read_lock();
+   while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
+   /*
+* Shadow entries should never be tagged, but this iteration
+* is lockless so there is a window for page reclaim to evict
+* a page we saw tagged. Skip over it.
+*/
+   if (xa_is_value(folio))
+   continue;
+   if (!folio_batch_add(fbatch, folio)) {
+   unsigned long nr = folio_nr_pages(folio);
+
+   if (folio_test_hugetlb(folio))
+   nr = 1;
+   *start = folio->index + nr;
+   goto out;
+   }
+   }
+   /*
+* We come here when there is no page beyond @end. We take care to not
+* overflow the index @start as it confuses some of the callers. This
+* breaks the iteration when there is a page at index -1 but that is
+* already broke anyway.
+*/
+   if (end == (pgoff_t)-1)
+   *start = (pgoff_t)-1;
+   else
+   *start = end + 1;
+out:
+   rcu_read_unlock();
+
+   return folio_batch_count(fbatch);
+}
+EXPORT_SYMBOL(filemap_get_folios_tag);
+
 /**
  * find_get_pages_range_tag - Find and return head pages matching @tag.
  * @mapping:   the address_space to search
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 10/23] ext4: Convert mpage_prepare_extent_to_map() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted the function to use folios throughout. This is in preparation
for the removal of find_get_pages_range_tag(). Now supports large
folios. This change removes 11 calls to compound_head().

Signed-off-by: Vishal Moola (Oracle) 
---
 fs/ext4/inode.c | 65 -
 1 file changed, 32 insertions(+), 33 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9d9f414f99fe..fb6cd994e59a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2595,8 +2595,8 @@ static bool ext4_page_nomap_can_writeout(struct page 
*page)
 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 {
struct address_space *mapping = mpd->inode->i_mapping;
-   struct pagevec pvec;
-   unsigned int nr_pages;
+   struct folio_batch fbatch;
+   unsigned int nr_folios;
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
@@ -2610,18 +2610,17 @@ static int mpage_prepare_extent_to_map(struct 
mpage_da_data *mpd)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
-
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
-   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-   tag);
-   if (nr_pages == 0)
+   nr_folios = filemap_get_folios_tag(mapping, &index, end,
+   tag, &fbatch);
+   if (nr_folios == 0)
break;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
/*
 * Accumulated enough dirty pages? This doesn't apply
@@ -2635,10 +2634,10 @@ static int mpage_prepare_extent_to_map(struct 
mpage_da_data *mpd)
goto out;
 
/* If we can't merge this page, we are done. */
-   if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+   if (mpd->map.m_len > 0 && mpd->next_page != 
folio->index)
goto out;
 
-   lock_page(page);
+   folio_lock(folio);
/*
 * If the page is no longer dirty, or its mapping no
 * longer corresponds to inode we are writing (which
@@ -2646,16 +2645,16 @@ static int mpage_prepare_extent_to_map(struct 
mpage_da_data *mpd)
 * page is already under writeback and we are not doing
 * a data integrity writeback, skip the page
 */
-   if (!PageDirty(page) ||
-   (PageWriteback(page) &&
+   if (!folio_test_dirty(folio) ||
+   (folio_test_writeback(folio) &&
 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
-   unlikely(page->mapping != mapping)) {
-   unlock_page(page);
+   unlikely(folio->mapping != mapping)) {
+   folio_unlock(folio);
continue;
}
 
-   wait_on_page_writeback(page);
-   BUG_ON(PageWriteback(page));
+   folio_wait_writeback(folio);
+   BUG_ON(folio_test_writeback(folio));
 
/*
 * Should never happen but for buggy code in
@@ -2666,49 +2665,49 @@ static int mpage_prepare_extent_to_map(struct 
mpage_da_data *mpd)
 *
 * [1] 
https://lore.kernel.org/linux-mm/20180103100430.ge4...@quack2.suse.cz
 */
-   if (!page_has_buffers(page)) {
-   ext4_warning_inode(mpd->inode, "page %lu does 
not have buffers attached", page->index);
-   ClearPageDirty(page);
-   unlock_page(page);
+   if (!folio_buffers(folio)) {
+   ext4_warning_inode(mpd->inode, "page %lu does 
not have buffers attached", folio->index);
+   folio_clear_dirty(folio);
+   folio_unlock(folio);
continue;
}
 
if (mpd->map.m_len == 0)
-   mpd->first_page = page->index;
-   mpd->next_page = page->index + 1;
+   mpd->first_page = folio->index;
+   mpd->next_page =

[f2fs-dev] [PATCH v5 01/23] pagemap: Add filemap_grab_folio()

2023-01-04 Thread Vishal Moola (Oracle)
Add function filemap_grab_folio() to grab a folio from the page cache.
This function is meant to serve as a folio replacement for
grab_cache_page, and is used to facilitate the removal of
find_get_pages_range_tag().

Signed-off-by: Vishal Moola (Oracle) 
Reviewed-by: Matthew Wilcox (Oracle) 
---
 include/linux/pagemap.h | 20 
 1 file changed, 20 insertions(+)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 29e1f9e76eb6..468183be67be 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -546,6 +546,26 @@ static inline struct folio *filemap_lock_folio(struct 
address_space *mapping,
return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
 }
 
+/**
+ * filemap_grab_folio - grab a folio from the page cache
+ * @mapping: The address space to search
+ * @index: The page index
+ *
+ * Looks up the page cache entry at @mapping & @index. If no folio is found,
+ * a new folio is created. The folio is locked, marked as accessed, and
+ * returned.
+ *
+ * Return: A found or created folio. NULL if no folio is found and failed to
+ * create a folio.
+ */
+static inline struct folio *filemap_grab_folio(struct address_space *mapping,
+   pgoff_t index)
+{
+   return __filemap_get_folio(mapping, index,
+   FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+   mapping_gfp_mask(mapping));
+}
+
 /**
  * find_get_page - find and get a page reference
  * @mapping: the address_space to search
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 07/23] btrfs: Convert extent_write_cache_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). Now also supports large
folios.

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: David Sterba 
---
 fs/btrfs/extent_io.c | 38 +++---
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 64fbafc70822..a214b98c52fe 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2973,8 +2973,8 @@ static int extent_write_cache_pages(struct address_space 
*mapping,
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
-   struct pagevec pvec;
-   int nr_pages;
+   struct folio_batch fbatch;
+   unsigned int nr_folios;
pgoff_t index;
pgoff_t end;/* Inclusive */
pgoff_t done_index;
@@ -2994,7 +2994,7 @@ static int extent_write_cache_pages(struct address_space 
*mapping,
if (!igrab(inode))
return 0;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -3032,14 +3032,14 @@ static int extent_write_cache_pages(struct 
address_space *mapping,
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
-   (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
-   &index, end, tag))) {
+   (nr_folios = filemap_get_folios_tag(mapping, &index,
+   end, tag, &fbatch))) {
unsigned i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   done_index = page->index + 1;
+   done_index = folio->index + folio_nr_pages(folio);
/*
 * At this point we hold neither the i_pages lock nor
 * the page lock: the page may be truncated or
@@ -3047,29 +3047,29 @@ static int extent_write_cache_pages(struct 
address_space *mapping,
 * or even swizzled back from swapper_space to
 * tmpfs file mapping
 */
-   if (!trylock_page(page)) {
+   if (!folio_trylock(folio)) {
submit_write_bio(bio_ctrl, 0);
-   lock_page(page);
+   folio_lock(folio);
}
 
-   if (unlikely(page->mapping != mapping)) {
-   unlock_page(page);
+   if (unlikely(folio->mapping != mapping)) {
+   folio_unlock(folio);
continue;
}
 
if (wbc->sync_mode != WB_SYNC_NONE) {
-   if (PageWriteback(page))
+   if (folio_test_writeback(folio))
submit_write_bio(bio_ctrl, 0);
-   wait_on_page_writeback(page);
+   folio_wait_writeback(folio);
}
 
-   if (PageWriteback(page) ||
-   !clear_page_dirty_for_io(page)) {
-   unlock_page(page);
+   if (folio_test_writeback(folio) ||
+   !folio_clear_dirty_for_io(folio)) {
+   folio_unlock(folio);
continue;
}
 
-   ret = __extent_writepage(page, wbc, bio_ctrl);
+   ret = __extent_writepage(&folio->page, wbc, bio_ctrl);
if (ret < 0) {
done = 1;
break;
@@ -3082,7 +3082,7 @@ static int extent_write_cache_pages(struct address_space 
*mapping,
 */
nr_to_write_done = wbc->nr_to_write <= 0;
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
if (!scanned && !done) {
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 15/23] f2fs: Convert last_fsync_dnode() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert to use a folio_batch instead of pagevec. This is in preparation for
the removal of find_get_pages_range_tag().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Chao Yu 
---
 fs/f2fs/node.c | 19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 51e9f286f53a..cf997356d9f9 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1518,23 +1518,24 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, 
nid_t ino)
 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 {
pgoff_t index;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
struct page *last_page = NULL;
-   int nr_pages;
+   int nr_folios;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
index = 0;
 
-   while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-   PAGECACHE_TAG_DIRTY))) {
+   while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+   (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+   &fbatch))) {
int i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct page *page = &fbatch.folios[i]->page;
 
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_put_page(last_page, 0);
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
return ERR_PTR(-EIO);
}
 
@@ -1565,7 +1566,7 @@ static struct page *last_fsync_dnode(struct f2fs_sb_info 
*sbi, nid_t ino)
last_page = page;
unlock_page(page);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
return last_page;
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 11/23] f2fs: Convert f2fs_fsync_node_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use a folio_batch instead of pagevec. This is in
preparation for the removal of find_get_pages_range_tag().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Chao Yu 
---
 fs/f2fs/node.c | 19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index dde4c0458704..3e0362794e27 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1731,12 +1731,12 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, 
struct inode *inode,
unsigned int *seq_id)
 {
pgoff_t index;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
-   int nr_pages;
+   int nr_folios;
int nwritten = 0;
 
if (atomic) {
@@ -1745,20 +1745,21 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, 
struct inode *inode,
return PTR_ERR_OR_ZERO(last_page);
}
 retry:
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
index = 0;
 
-   while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-   PAGECACHE_TAG_DIRTY))) {
+   while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+   (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+   &fbatch))) {
int i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct page *page = &fbatch.folios[i]->page;
bool submitted = false;
 
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_put_page(last_page, 0);
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
ret = -EIO;
goto out;
}
@@ -1824,7 +1825,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, 
struct inode *inode,
break;
}
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
 
if (ret || marked)
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 14/23] f2fs: Convert f2fs_write_cache_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted the function to use a folio_batch instead of pagevec. This is in
preparation for the removal of find_get_pages_range_tag().

Also modified f2fs_all_cluster_page_ready to take in a folio_batch instead
of pagevec. This does NOT support large folios. The function currently
only utilizes folios of size 1 so this shouldn't cause any issues right
now.

This version of the patch limits the number of pages fetched to
F2FS_ONSTACK_PAGES. If that limit is reached, the start index must be
updated, since filemap_get_folios_tag() advances the index past the last
found folio, which is not necessarily the last used page.

Signed-off-by: Vishal Moola (Oracle) 
---
 fs/f2fs/data.c | 84 ++
 1 file changed, 58 insertions(+), 26 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6e43e19c7d1c..ee1256e4fd92 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2957,6 +2957,7 @@ static int f2fs_write_cache_pages(struct address_space 
*mapping,
int ret = 0;
int done = 0, retry = 0;
struct page *pages[F2FS_ONSTACK_PAGES];
+   struct folio_batch fbatch;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
sector_t last_block;
@@ -2977,6 +2978,7 @@ static int f2fs_write_cache_pages(struct address_space 
*mapping,
.private = NULL,
};
 #endif
+   int nr_folios, p, idx;
int nr_pages;
pgoff_t index;
pgoff_t end;/* Inclusive */
@@ -2987,6 +2989,8 @@ static int f2fs_write_cache_pages(struct address_space 
*mapping,
int submitted = 0;
int i;
 
+   folio_batch_init(&fbatch);
+
if (get_dirty_pages(mapping->host) <=
SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
set_inode_flag(mapping->host, FI_HOT_DATA);
@@ -3012,13 +3016,38 @@ static int f2fs_write_cache_pages(struct address_space 
*mapping,
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !retry && (index <= end)) {
-   nr_pages = find_get_pages_range_tag(mapping, &index, end,
-   tag, F2FS_ONSTACK_PAGES, pages);
-   if (nr_pages == 0)
+   nr_pages = 0;
+again:
+   nr_folios = filemap_get_folios_tag(mapping, &index, end,
+   tag, &fbatch);
+   if (nr_folios == 0) {
+   if (nr_pages)
+   goto write;
break;
+   }
 
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
+
+   idx = 0;
+   p = folio_nr_pages(folio);
+add_more:
+   pages[nr_pages] = folio_page(folio, idx);
+   folio_get(folio);
+   if (++nr_pages == F2FS_ONSTACK_PAGES) {
+   index = folio->index + idx + 1;
+   folio_batch_release(&fbatch);
+   goto write;
+   }
+   if (++idx < p)
+   goto add_more;
+   }
+   folio_batch_release(&fbatch);
+   goto again;
+write:
for (i = 0; i < nr_pages; i++) {
struct page *page = pages[i];
+   struct folio *folio = page_folio(page);
bool need_readd;
 readd:
need_readd = false;
@@ -3035,7 +3064,7 @@ static int f2fs_write_cache_pages(struct address_space 
*mapping,
}
 
if (!f2fs_cluster_can_merge_page(&cc,
-   page->index)) {
+   folio->index)) {
ret = f2fs_write_multi_pages(&cc,
&submitted, wbc, io_type);
if (!ret)
@@ -3044,27 +3073,28 @@ static int f2fs_write_cache_pages(struct address_space 
*mapping,
}
 
if (unlikely(f2fs_cp_error(sbi)))
-   goto lock_page;
+   goto lock_folio;
 
if (!f2fs_cluster_is_empty(&cc))
-   goto lock_page;
+   goto lock_folio;
 
if (f2fs_all_cluster_page_ready(&cc,
pages, i, nr_pages, true))
-   goto lock_page;
+   goto lock_folio;
 
ret2 = f2fs_prepare_compress_overwrite(
   

[f2fs-dev] [PATCH v5 16/23] f2fs: Convert f2fs_sync_meta_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use folios throughout. This is in preparation for the
removal of find_get_pages_range_tag(). This change removes 5 calls to
compound_head().

Initially the function was checking if the previous page index is truly the
previous page i.e. 1 index behind the current page. To convert to folios and
maintain this check we need to make the check
folio->index != prev + folio_nr_pages(previous folio) since we don't know
how many pages are in a folio.

At index i == 0 the check is guaranteed to succeed, so to work around indexing
bounds we can simply ignore the check for that specific index. This makes the
initial assignment of prev trivial, so I removed that as well.

Also modified a comment in commit_checkpoint for consistency.

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Chao Yu 
---
 fs/f2fs/checkpoint.c | 49 +++-
 1 file changed, 26 insertions(+), 23 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 56f7d0d6a8b2..5a5515d83a1b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -395,59 +395,62 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum 
page_type type,
 {
struct address_space *mapping = META_MAPPING(sbi);
pgoff_t index = 0, prev = ULONG_MAX;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
long nwritten = 0;
-   int nr_pages;
+   int nr_folios;
struct writeback_control wbc = {
.for_reclaim = 0,
};
struct blk_plug plug;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
blk_start_plug(&plug);
 
-   while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-   PAGECACHE_TAG_DIRTY))) {
+   while ((nr_folios = filemap_get_folios_tag(mapping, &index,
+   (pgoff_t)-1,
+   PAGECACHE_TAG_DIRTY, &fbatch))) {
int i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   if (prev == ULONG_MAX)
-   prev = page->index - 1;
-   if (nr_to_write != LONG_MAX && page->index != prev + 1) 
{
-   pagevec_release(&pvec);
+   if (nr_to_write != LONG_MAX && i != 0 &&
+   folio->index != prev +
+   folio_nr_pages(fbatch.folios[i-1])) {
+   folio_batch_release(&fbatch);
goto stop;
}
 
-   lock_page(page);
+   folio_lock(folio);
 
-   if (unlikely(page->mapping != mapping)) {
+   if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-   unlock_page(page);
+   folio_unlock(folio);
continue;
}
-   if (!PageDirty(page)) {
+   if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
 
-   f2fs_wait_on_page_writeback(page, META, true, true);
+   f2fs_wait_on_page_writeback(&folio->page, META,
+   true, true);
 
-   if (!clear_page_dirty_for_io(page))
+   if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
 
-   if (__f2fs_write_meta_page(page, &wbc, io_type)) {
-   unlock_page(page);
+   if (__f2fs_write_meta_page(&folio->page, &wbc,
+   io_type)) {
+   folio_unlock(folio);
break;
}
-   nwritten++;
-   prev = page->index;
+   nwritten += folio_nr_pages(folio);
+   prev = folio->index;
if (unlikely(nwritten >= nr_to_write))
break;
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
 stop:
@@ -1403,7 +1406,7 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
};
 
/*
-* pagevec_lookup_tag and lock_page again will take
+* filemap_get_folios_tag and lock_page again will take
 * some extra time. Therefore, f2fs_update_meta_pages and
 * f2fs_sync_meta_pages are combined in this function.

[f2fs-dev] [PATCH v5 12/23] f2fs: Convert f2fs_flush_inline_data() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use a folio_batch instead of pagevec. This is in
preparation for the removal of find_get_pages_tag().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Chao Yu 
---
 fs/f2fs/node.c | 17 +
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3e0362794e27..1c5dc7a3207e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1890,17 +1890,18 @@ static bool flush_dirty_inode(struct page *page)
 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
 {
pgoff_t index = 0;
-   struct pagevec pvec;
-   int nr_pages;
+   struct folio_batch fbatch;
+   int nr_folios;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
-   while ((nr_pages = pagevec_lookup_tag(&pvec,
-   NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+   while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
+   (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+   &fbatch))) {
int i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct page *page = &fbatch.folios[i]->page;
 
if (!IS_DNODE(page))
continue;
@@ -1927,7 +1928,7 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
}
unlock_page(page);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
 }
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 13/23] f2fs: Convert f2fs_sync_node_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use a folio_batch instead of pagevec. This is in
preparation for the removal of find_get_pages_range_tag().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Chao Yu 
---
 fs/f2fs/node.c | 17 +
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 1c5dc7a3207e..51e9f286f53a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1938,23 +1938,24 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
bool do_balance, enum iostat_type io_type)
 {
pgoff_t index;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
int step = 0;
int nwritten = 0;
int ret = 0;
-   int nr_pages, done = 0;
+   int nr_folios, done = 0;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
 next_step:
index = 0;
 
-   while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
-   NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+   while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
+   &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
+   &fbatch))) {
int i;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct page *page = &fbatch.folios[i]->page;
bool submitted = false;
 
/* give a priority to WB_SYNC threads */
@@ -2029,7 +2030,7 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
if (--wbc->nr_to_write == 0)
break;
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
 
if (wbc->nr_to_write == 0) {
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 17/23] gfs2: Convert gfs2_write_cache_jdata() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 8 calls
to compound_head().

Also had to modify and rename gfs2_write_jdata_pagevec() to take in
and utilize folio_batch rather than pagevec and use folios rather
than pages. gfs2_write_jdata_batch() now supports large folios.

Signed-off-by: Vishal Moola (Oracle) 
---
 fs/gfs2/aops.c | 64 +++---
 1 file changed, 35 insertions(+), 29 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index e782b4f1d104..0a47068f9acc 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -195,67 +195,71 @@ static int gfs2_writepages(struct address_space *mapping,
 }
 
 /**
- * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
+ * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
  * @mapping: The mapping
  * @wbc: The writeback control
- * @pvec: The vector of pages
- * @nr_pages: The number of pages to write
+ * @fbatch: The batch of folios
  * @done_index: Page index
  *
  * Returns: non-zero if loop should terminate, zero otherwise
  */
 
-static int gfs2_write_jdata_pagevec(struct address_space *mapping,
+static int gfs2_write_jdata_batch(struct address_space *mapping,
struct writeback_control *wbc,
-   struct pagevec *pvec,
-   int nr_pages,
+   struct folio_batch *fbatch,
pgoff_t *done_index)
 {
struct inode *inode = mapping->host;
struct gfs2_sbd *sdp = GFS2_SB(inode);
-   unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
+   unsigned nrblocks;
int i;
int ret;
+   int nr_pages = 0;
+   int nr_folios = folio_batch_count(fbatch);
+
+   for (i = 0; i < nr_folios; i++)
+   nr_pages += folio_nr_pages(fbatch->folios[i]);
+   nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
 
ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
if (ret < 0)
return ret;
 
-   for(i = 0; i < nr_pages; i++) {
-   struct page *page = pvec->pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch->folios[i];
 
-   *done_index = page->index;
+   *done_index = folio->index;
 
-   lock_page(page);
+   folio_lock(folio);
 
-   if (unlikely(page->mapping != mapping)) {
+   if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-   unlock_page(page);
+   folio_unlock(folio);
continue;
}
 
-   if (!PageDirty(page)) {
+   if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
 
-   if (PageWriteback(page)) {
+   if (folio_test_writeback(folio)) {
if (wbc->sync_mode != WB_SYNC_NONE)
-   wait_on_page_writeback(page);
+   folio_wait_writeback(folio);
else
goto continue_unlock;
}
 
-   BUG_ON(PageWriteback(page));
-   if (!clear_page_dirty_for_io(page))
+   BUG_ON(folio_test_writeback(folio));
+   if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
 
trace_wbc_writepage(wbc, inode_to_bdi(inode));
 
-   ret = __gfs2_jdata_writepage(page, wbc);
+   ret = __gfs2_jdata_writepage(&folio->page, wbc);
if (unlikely(ret)) {
if (ret == AOP_WRITEPAGE_ACTIVATE) {
-   unlock_page(page);
+   folio_unlock(folio);
ret = 0;
} else {
 
@@ -268,7 +272,8 @@ static int gfs2_write_jdata_pagevec(struct address_space 
*mapping,
 * not be suitable for data integrity
 * writeout).
 */
-   *done_index = page->index + 1;
+   *done_index = folio->index +
+   folio_nr_pages(folio);
ret = 1;
break;
}
@@ -305,8 +310,8 @@ static int gfs2_write_cache_jdata(struct address_space 
*mapping,
 {
int ret = 0;
int done = 0;
-   struct pagevec pvec;
-   int nr_pages;
+   struct folio_batch fbatch;
+   int nr_folios;
pgoff_t writeback_index;
pgoff_t index;
pgoff_t end;
@@ -315,7 +320,7 @@ static int gfs2_write_cache_

[f2fs-dev] [PATCH v5 20/23] nilfs2: Convert nilfs_btree_lookup_dirty_buffers() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 1 call to
compound_head().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Ryusuke Konishi 
---
 fs/nilfs2/btree.c | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index b9d15c3df3cc..da6a19eede9a 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2141,7 +2141,7 @@ static void nilfs_btree_lookup_dirty_buffers(struct 
nilfs_bmap *btree,
struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
struct address_space *btcache = btnc_inode->i_mapping;
struct list_head lists[NILFS_BTREE_LEVEL_MAX];
-   struct pagevec pvec;
+   struct folio_batch fbatch;
struct buffer_head *bh, *head;
pgoff_t index = 0;
int level, i;
@@ -2151,19 +2151,19 @@ static void nilfs_btree_lookup_dirty_buffers(struct 
nilfs_bmap *btree,
 level++)
INIT_LIST_HEAD(&lists[level]);
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
-   while (pagevec_lookup_tag(&pvec, btcache, &index,
-   PAGECACHE_TAG_DIRTY)) {
-   for (i = 0; i < pagevec_count(&pvec); i++) {
-   bh = head = page_buffers(pvec.pages[i]);
+   while (filemap_get_folios_tag(btcache, &index, (pgoff_t)-1,
+   PAGECACHE_TAG_DIRTY, &fbatch)) {
+   for (i = 0; i < folio_batch_count(&fbatch); i++) {
+   bh = head = folio_buffers(fbatch.folios[i]);
do {
if (buffer_dirty(bh))
nilfs_btree_add_dirty_buffer(btree,
 lists, bh);
} while ((bh = bh->b_this_page) != head);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
 
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 18/23] nilfs2: Convert nilfs_lookup_dirty_data_buffers() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 4 calls
to compound_head().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Ryusuke Konishi 
---
 fs/nilfs2/segment.c | 29 -
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 76c3bd88b858..8866af742a49 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -680,7 +680,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode 
*inode,
  loff_t start, loff_t end)
 {
struct address_space *mapping = inode->i_mapping;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
pgoff_t index = 0, last = ULONG_MAX;
size_t ndirties = 0;
int i;
@@ -694,23 +694,26 @@ static size_t nilfs_lookup_dirty_data_buffers(struct 
inode *inode,
index = start >> PAGE_SHIFT;
last = end >> PAGE_SHIFT;
}
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
  repeat:
if (unlikely(index > last) ||
-   !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
-   PAGECACHE_TAG_DIRTY))
+ !filemap_get_folios_tag(mapping, &index, last,
+ PAGECACHE_TAG_DIRTY, &fbatch))
return ndirties;
 
-   for (i = 0; i < pagevec_count(&pvec); i++) {
+   for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct buffer_head *bh, *head;
-   struct page *page = pvec.pages[i];
+   struct folio *folio = fbatch.folios[i];
 
-   lock_page(page);
-   if (!page_has_buffers(page))
-   create_empty_buffers(page, i_blocksize(inode), 0);
-   unlock_page(page);
+   folio_lock(folio);
+   head = folio_buffers(folio);
+   if (!head) {
+   create_empty_buffers(&folio->page, i_blocksize(inode), 
0);
+   head = folio_buffers(folio);
+   }
+   folio_unlock(folio);
 
-   bh = head = page_buffers(page);
+   bh = head;
do {
if (!buffer_dirty(bh) || buffer_async_write(bh))
continue;
@@ -718,13 +721,13 @@ static size_t nilfs_lookup_dirty_data_buffers(struct 
inode *inode,
list_add_tail(&bh->b_assoc_buffers, listp);
ndirties++;
if (unlikely(ndirties >= nlimit)) {
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
return ndirties;
}
} while (bh = bh->b_this_page, bh != head);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
goto repeat;
 }
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 19/23] nilfs2: Convert nilfs_lookup_dirty_node_buffers() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 1 call to
compound_head().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Ryusuke Konishi 
---
 fs/nilfs2/segment.c | 15 +++
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 8866af742a49..da56a9221277 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -737,20 +737,19 @@ static void nilfs_lookup_dirty_node_buffers(struct inode 
*inode,
 {
struct nilfs_inode_info *ii = NILFS_I(inode);
struct inode *btnc_inode = ii->i_assoc_inode;
-   struct pagevec pvec;
+   struct folio_batch fbatch;
struct buffer_head *bh, *head;
unsigned int i;
pgoff_t index = 0;
 
if (!btnc_inode)
return;
+   folio_batch_init(&fbatch);
 
-   pagevec_init(&pvec);
-
-   while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
-   PAGECACHE_TAG_DIRTY)) {
-   for (i = 0; i < pagevec_count(&pvec); i++) {
-   bh = head = page_buffers(pvec.pages[i]);
+   while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
+   (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
+   for (i = 0; i < folio_batch_count(&fbatch); i++) {
+   bh = head = folio_buffers(fbatch.folios[i]);
do {
if (buffer_dirty(bh) &&
!buffer_async_write(bh)) {
@@ -761,7 +760,7 @@ static void nilfs_lookup_dirty_node_buffers(struct inode 
*inode,
bh = bh->b_this_page;
} while (bh != head);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
 }
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 21/23] nilfs2: Convert nilfs_copy_dirty_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 8 calls
to compound_head().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Ryusuke Konishi 
---
 fs/nilfs2/page.c | 39 ---
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 39b7eea2642a..d921542a9593 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -240,42 +240,43 @@ static void nilfs_copy_page(struct page *dst, struct page 
*src, int copy_dirty)
 int nilfs_copy_dirty_pages(struct address_space *dmap,
   struct address_space *smap)
 {
-   struct pagevec pvec;
+   struct folio_batch fbatch;
unsigned int i;
pgoff_t index = 0;
int err = 0;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 repeat:
-   if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
+   if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
+   PAGECACHE_TAG_DIRTY, &fbatch))
return 0;
 
-   for (i = 0; i < pagevec_count(&pvec); i++) {
-   struct page *page = pvec.pages[i], *dpage;
+   for (i = 0; i < folio_batch_count(&fbatch); i++) {
+   struct folio *folio = fbatch.folios[i], *dfolio;
 
-   lock_page(page);
-   if (unlikely(!PageDirty(page)))
-   NILFS_PAGE_BUG(page, "inconsistent dirty state");
+   folio_lock(folio);
+   if (unlikely(!folio_test_dirty(folio)))
+   NILFS_PAGE_BUG(&folio->page, "inconsistent dirty 
state");
 
-   dpage = grab_cache_page(dmap, page->index);
-   if (unlikely(!dpage)) {
+   dfolio = filemap_grab_folio(dmap, folio->index);
+   if (unlikely(!dfolio)) {
/* No empty page is added to the page cache */
err = -ENOMEM;
-   unlock_page(page);
+   folio_unlock(folio);
break;
}
-   if (unlikely(!page_has_buffers(page)))
-   NILFS_PAGE_BUG(page,
+   if (unlikely(!folio_buffers(folio)))
+   NILFS_PAGE_BUG(&folio->page,
   "found empty page in dat page cache");
 
-   nilfs_copy_page(dpage, page, 1);
-   __set_page_dirty_nobuffers(dpage);
+   nilfs_copy_page(&dfolio->page, &folio->page, 1);
+   filemap_dirty_folio(folio_mapping(dfolio), dfolio);
 
-   unlock_page(dpage);
-   put_page(dpage);
-   unlock_page(page);
+   folio_unlock(dfolio);
+   folio_put(dfolio);
+   folio_unlock(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
 
if (likely(!err))
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 22/23] nilfs2: Convert nilfs_clear_dirty_pages() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Convert function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag(). This change removes 2 calls
to compound_head().

Signed-off-by: Vishal Moola (Oracle) 
Acked-by: Ryusuke Konishi 
---
 fs/nilfs2/page.c | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index d921542a9593..41ccd43cd979 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -358,22 +358,22 @@ void nilfs_copy_back_pages(struct address_space *dmap,
  */
 void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
 {
-   struct pagevec pvec;
+   struct folio_batch fbatch;
unsigned int i;
pgoff_t index = 0;
 
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
 
-   while (pagevec_lookup_tag(&pvec, mapping, &index,
-   PAGECACHE_TAG_DIRTY)) {
-   for (i = 0; i < pagevec_count(&pvec); i++) {
-   struct page *page = pvec.pages[i];
+   while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
+   PAGECACHE_TAG_DIRTY, &fbatch)) {
+   for (i = 0; i < folio_batch_count(&fbatch); i++) {
+   struct folio *folio = fbatch.folios[i];
 
-   lock_page(page);
-   nilfs_clear_dirty_page(page, silent);
-   unlock_page(page);
+   folio_lock(folio);
+   nilfs_clear_dirty_page(&folio->page, silent);
+   folio_unlock(folio);
}
-   pagevec_release(&pvec);
+   folio_batch_release(&fbatch);
cond_resched();
}
 }
-- 
2.38.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH v5 23/23] filemap: Remove find_get_pages_range_tag()

2023-01-04 Thread Vishal Moola (Oracle)
All callers to find_get_pages_range_tag(), find_get_pages_tag(),
pagevec_lookup_range_tag(), and pagevec_lookup_tag() have been removed.

Signed-off-by: Vishal Moola (Oracle) 
---
 include/linux/pagemap.h | 10 ---
 include/linux/pagevec.h |  8 --
 mm/filemap.c| 60 -
 mm/swap.c   | 10 ---
 4 files changed, 88 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index bb3c1d51b1cb..9f1081683771 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -741,16 +741,6 @@ unsigned filemap_get_folios_contig(struct address_space 
*mapping,
pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
-unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t 
*index,
-   pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
-   struct page **pages);
-static inline unsigned find_get_pages_tag(struct address_space *mapping,
-   pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
-   struct page **pages)
-{
-   return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
-   nr_pages, pages);
-}
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index);
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 215eb6c3bdc9..a520632297ac 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -26,14 +26,6 @@ struct pagevec {
 };
 
 void __pagevec_release(struct pagevec *pvec);
-unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
-   struct address_space *mapping, pgoff_t *index, pgoff_t end,
-   xa_mark_t tag);
-static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
-   struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
-{
-   return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
-}
 
 static inline void pagevec_init(struct pagevec *pvec)
 {
diff --git a/mm/filemap.c b/mm/filemap.c
index 85adbcf2d9a7..31bf18ec6d01 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2337,66 +2337,6 @@ unsigned filemap_get_folios_tag(struct address_space 
*mapping, pgoff_t *start,
 }
 EXPORT_SYMBOL(filemap_get_folios_tag);
 
-/**
- * find_get_pages_range_tag - Find and return head pages matching @tag.
- * @mapping:   the address_space to search
- * @index: the starting page index
- * @end:   The final page index (inclusive)
- * @tag:   the tag index
- * @nr_pages:  the maximum number of pages
- * @pages: where the resulting pages are placed
- *
- * Like find_get_pages_range(), except we only return head pages which are
- * tagged with @tag.  @index is updated to the index immediately after the
- * last page we return, ready for the next iteration.
- *
- * Return: the number of pages which were found.
- */
-unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t 
*index,
-   pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
-   struct page **pages)
-{
-   XA_STATE(xas, &mapping->i_pages, *index);
-   struct folio *folio;
-   unsigned ret = 0;
-
-   if (unlikely(!nr_pages))
-   return 0;
-
-   rcu_read_lock();
-   while ((folio = find_get_entry(&xas, end, tag))) {
-   /*
-* Shadow entries should never be tagged, but this iteration
-* is lockless so there is a window for page reclaim to evict
-* a page we saw tagged.  Skip over it.
-*/
-   if (xa_is_value(folio))
-   continue;
-
-   pages[ret] = &folio->page;
-   if (++ret == nr_pages) {
-   *index = folio->index + folio_nr_pages(folio);
-   goto out;
-   }
-   }
-
-   /*
-* We come here when we got to @end. We take care to not overflow the
-* index @index as it confuses some of the callers. This breaks the
-* iteration when there is a page at index -1 but that is already
-* broken anyway.
-*/
-   if (end == (pgoff_t)-1)
-   *index = (pgoff_t)-1;
-   else
-   *index = end + 1;
-out:
-   rcu_read_unlock();
-
-   return ret;
-}
-EXPORT_SYMBOL(find_get_pages_range_tag);
-
 /*
  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
  * a _large_ part of the i/o request. Imagine the worst scenario:
diff --git a/mm/swap.c b/mm/swap.c
index 70e2063ef43a..5f20ba07d46b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1119,16 +1119,6 @@ void folio_batch_remove_exceptionals(struct folio_batch 
*fbatch)
fbatch->nr = j;
 }
 
-unsigned pag

[f2fs-dev] [PATCH] f2fs: use iostat_lat_type directly as a parameter in the iostat_update_and_unbind_ctx()

2023-01-04 Thread Yangtao Li via Linux-f2fs-devel
Convert to use iostat_lat_type as parameter instead of raw number.
BTW, move NUM_PREALLOC_IOSTAT_CTXS to the header file, and rename
iotype to page_type to match the definition.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/data.c   |  5 +++--
 fs/f2fs/iostat.c | 34 +++---
 fs/f2fs/iostat.h | 19 ++-
 3 files changed, 24 insertions(+), 34 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c940da1c540f..4e8fd5697c42 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -292,7 +292,7 @@ static void f2fs_read_end_io(struct bio *bio)
struct bio_post_read_ctx *ctx;
bool intask = in_task();
 
-   iostat_update_and_unbind_ctx(bio, 0);
+   iostat_update_and_unbind_ctx(bio, READ_IO);
ctx = bio->bi_private;
 
if (time_to_inject(sbi, FAULT_READ_IO))
@@ -330,7 +330,8 @@ static void f2fs_write_end_io(struct bio *bio)
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
 
-   iostat_update_and_unbind_ctx(bio, 1);
+   iostat_update_and_unbind_ctx(bio, bio->bi_opf & REQ_SYNC ? 
WRITE_SYNC_IO :
+   
WRITE_ASYNC_IO);
sbi = bio->bi_private;
 
if (time_to_inject(sbi, FAULT_WRITE_IO))
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index 59c72f92191a..20944c4a683a 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -14,7 +14,6 @@
 #include "iostat.h"
 #include 
 
-#define NUM_PREALLOC_IOSTAT_CTXS   128
 static struct kmem_cache *bio_iostat_ctx_cache;
 static mempool_t *bio_iostat_ctx_pool;
 
@@ -210,49 +209,38 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct 
inode *inode,
 }
 
 static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
-   int rw, bool is_sync)
+   enum iostat_lat_type type)
 {
unsigned long ts_diff;
-   unsigned int iotype = iostat_ctx->type;
+   unsigned int page_type = iostat_ctx->type;
struct f2fs_sb_info *sbi = iostat_ctx->sbi;
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
-   int idx;
unsigned long flags;
 
if (!sbi->iostat_enable)
return;
 
ts_diff = jiffies - iostat_ctx->submit_ts;
-   if (iotype >= META_FLUSH)
-   iotype = META;
-
-   if (rw == 0) {
-   idx = READ_IO;
-   } else {
-   if (is_sync)
-   idx = WRITE_SYNC_IO;
-   else
-   idx = WRITE_ASYNC_IO;
-   }
+   if (page_type >= META_FLUSH)
+   page_type = META;
 
spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
-   io_lat->sum_lat[idx][iotype] += ts_diff;
-   io_lat->bio_cnt[idx][iotype]++;
-   if (ts_diff > io_lat->peak_lat[idx][iotype])
-   io_lat->peak_lat[idx][iotype] = ts_diff;
+   io_lat->sum_lat[type][page_type] += ts_diff;
+   io_lat->bio_cnt[type][page_type]++;
+   if (ts_diff > io_lat->peak_lat[type][page_type])
+   io_lat->peak_lat[type][page_type] = ts_diff;
spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
 }
 
-void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
+void iostat_update_and_unbind_ctx(struct bio *bio, enum iostat_lat_type type)
 {
struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
-   bool is_sync = bio->bi_opf & REQ_SYNC;
 
-   if (rw == 0)
+   if (type == READ_IO)
bio->bi_private = iostat_ctx->post_read_ctx;
else
bio->bi_private = iostat_ctx->sbi;
-   __update_iostat_latency(iostat_ctx, rw, is_sync);
+   __update_iostat_latency(iostat_ctx, type);
mempool_free(iostat_ctx, bio_iostat_ctx_pool);
 }
 
diff --git a/fs/f2fs/iostat.h b/fs/f2fs/iostat.h
index 2c048307b6e0..1f827a2fe6b2 100644
--- a/fs/f2fs/iostat.h
+++ b/fs/f2fs/iostat.h
@@ -8,20 +8,21 @@
 
 struct bio_post_read_ctx;
 
+enum iostat_lat_type {
+   READ_IO = 0,
+   WRITE_SYNC_IO,
+   WRITE_ASYNC_IO,
+   MAX_IO_TYPE,
+};
+
 #ifdef CONFIG_F2FS_IOSTAT
 
+#define NUM_PREALLOC_IOSTAT_CTXS   128
 #define DEFAULT_IOSTAT_PERIOD_MS   3000
 #define MIN_IOSTAT_PERIOD_MS   100
 /* maximum period of iostat tracing is 1 day */
 #define MAX_IOSTAT_PERIOD_MS   864
 
-enum {
-   READ_IO,
-   WRITE_SYNC_IO,
-   WRITE_ASYNC_IO,
-   MAX_IO_TYPE,
-};
-
 struct iostat_lat_info {
unsigned long sum_lat[MAX_IO_TYPE][NR_PAGE_TYPE];   /* sum of io 
latencies */
unsigned long peak_lat[MAX_IO_TYPE][NR_PAGE_TYPE];  /* peak io 
latency */
@@ -57,7 +58,7 @@ static inline struct bio_post_read_ctx 
*get_post_read_ctx(struct bio *bio)
return iostat_ctx->post_read_ctx;
 }
 
-extern void iostat_update_and_unbind_ctx(struct bio *bio, int rw);
+extern void iostat_update_and_unbind_ctx(struct bio *bio, enum iostat_lat_type 
type);
 extern void iostat_alloc_and_bind_