[f2fs-dev] [PATCH] f2fs: synchronize atomic write aborts

2023-01-12 Thread Daeho Jeong
From: Daeho Jeong 

To fix a race condition between atomic write aborts, I use the inode
lock and make the COW inode reusable throughout the whole lifetime of
the atomic file inode.

Reported-by: syzbot+823000d23b3400619...@syzkaller.appspotmail.com
Fixes: 3db1de0e582c ("f2fs: change the current atomic write way")
Signed-off-by: Daeho Jeong 
---
 fs/f2fs/file.c| 43 ---
 fs/f2fs/inode.c   | 11 +--
 fs/f2fs/segment.c |  3 ---
 fs/f2fs/super.c   |  2 --
 4 files changed, 37 insertions(+), 22 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ecbc8c135b49..ff072a9ed258 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1866,7 +1866,10 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
	atomic_read(&inode->i_writecount) != 1)
return 0;
 
+   inode_lock(inode);
f2fs_abort_atomic_write(inode, true);
+   inode_unlock(inode);
+
return 0;
 }
 
@@ -1880,8 +1883,11 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
 * until all the writers close its file. Since this should be done
 * before dropping file lock, it needs to do in ->flush.
 */
-   if (F2FS_I(inode)->atomic_write_task == current)
+   if (F2FS_I(inode)->atomic_write_task == current) {
+   inode_lock(inode);
f2fs_abort_atomic_write(inode, true);
+   inode_unlock(inode);
+   }
return 0;
 }
 
@@ -2087,19 +2093,28 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
goto out;
}
 
-   /* Create a COW inode for atomic write */
-   pinode = f2fs_iget(inode->i_sb, fi->i_pino);
-   if (IS_ERR(pinode)) {
-   f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
-   ret = PTR_ERR(pinode);
-   goto out;
-   }
+   /* Check if the inode already has a COW inode */
+   if (fi->cow_inode == NULL) {
+   /* Create a COW inode for atomic write */
+   pinode = f2fs_iget(inode->i_sb, fi->i_pino);
+   if (IS_ERR(pinode)) {
+   f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+   ret = PTR_ERR(pinode);
+   goto out;
+   }
 
-   ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
-   iput(pinode);
-   if (ret) {
-   f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
-   goto out;
+   ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
+   iput(pinode);
+   if (ret) {
+   f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+   goto out;
+   }
+
+   set_inode_flag(fi->cow_inode, FI_COW_FILE);
+   clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+   } else {
+   /* Reuse the already created COW inode */
+   f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
}
 
f2fs_write_inode(inode, NULL);
@@ -2107,8 +2122,6 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
stat_inc_atomic_inode(inode);
 
set_inode_flag(inode, FI_ATOMIC_FILE);
-   set_inode_flag(fi->cow_inode, FI_COW_FILE);
-   clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
 
isize = i_size_read(inode);
fi->original_i_size = isize;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index ff6cf66ed46b..4921f7209e28 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -766,11 +766,18 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 void f2fs_evict_inode(struct inode *inode)
 {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-   nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+   struct f2fs_inode_info *fi = F2FS_I(inode);
+   nid_t xnid = fi->i_xattr_nid;
int err = 0;
 
f2fs_abort_atomic_write(inode, true);
 
+   if (fi->cow_inode) {
+   clear_inode_flag(fi->cow_inode, FI_COW_FILE);
+   iput(fi->cow_inode);
+   fi->cow_inode = NULL;
+   }
+
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
 
@@ -857,7 +864,7 @@ void f2fs_evict_inode(struct inode *inode)
stat_dec_inline_inode(inode);
stat_dec_compr_inode(inode);
stat_sub_compr_blocks(inode,
-   atomic_read(&F2FS_I(inode)->i_compr_blocks));
+   atomic_read(&fi->i_compr_blocks));
 
if (likely(!f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index ae3c4e5474ef..536d7c674b04 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -192,9 +192,6 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
if (!f2fs_is_atomic_file(inode))
return;
 
-   clear_inode_flag(fi->cow_inode, FI_COW_FILE);
-   iput(fi->cow_inode);

Re: [f2fs-dev] [PATCH v2] f2fs: retry to update the inode page given EIO

2023-01-12 Thread Jaegeuk Kim
On 01/12, Chao Yu wrote:
> On 2023/1/12 2:50, Jaegeuk Kim wrote:
> > On 01/11, Chao Yu wrote:
> > > On 2023/1/11 9:20, Jaegeuk Kim wrote:
> > > > In f2fs_update_inode_page, f2fs_get_node_page handles EIO along with
> > > > f2fs_handle_page_eio that stops checkpoint, if the disk couldn't be 
> > > > recovered.
> > > > As a result, we don't need to stop checkpoint right away given single 
> > > > EIO.
> > > 
> > > f2fs_handle_page_eio() only covers the case that EIO occurs on the same
> > > page, should we cover the case EIO occurs on different pages?
> > 
> > Which case are you looking at?
> 
> - __get_node_page(PageA)                 - __get_node_page(PageB)
>  - f2fs_handle_page_eio
>   - sbi->page_eio_ofs[type] = PageA->index
>                                           - f2fs_handle_page_eio
>                                            - sbi->page_eio_ofs[type] = PageB->index
> 
> In such a race case, it may have a low probability to set CP_ERROR_FLAG as we expect?

Do you see that case in products?
I'm trying to avoid setting CP_ERROR_FLAG here.
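
For readers following along, the EIO tracking being discussed works roughly as
sketched below. This is a reconstruction from the field names quoted above
(page_eio_ofs, CP_ERROR_FLAG), not a verbatim copy of the f2fs sources, so the
exact retry limit and helper names may differ; it only illustrates why an EIO
on a different page index restarts the counting, which is the race Chao Yu
points out:

	/* Sketch: per-page-type EIO tracking (approximate, see note above). */
	static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
						pgoff_t ofs, enum page_type type)
	{
		if (unlikely(f2fs_cp_error(sbi)))
			return;

		if (ofs == sbi->page_eio_ofs[type]) {
			/* Repeated EIO on the same page: stop checkpoint only
			 * after enough consecutive failures. */
			if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
				set_ckpt_flags(sbi, CP_ERROR_FLAG);
		} else {
			/* EIO on a different page resets the tracking, so a
			 * racing lookup of another page can defer CP_ERROR_FLAG. */
			sbi->page_eio_ofs[type] = ofs;
			sbi->page_eio_cnt[type] = 0;
		}
	}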

> 
> Thanks,
> 
> > 
> > > 
> > > Thanks,
> > > 
> > > > 
> > > > Cc: sta...@vger.kernel.org
> > > > Signed-off-by: Randall Huang 
> > > > Signed-off-by: Jaegeuk Kim 
> > > > ---
> > > > 
> > > >Change log from v1:
> > > > - fix a bug
> > > > 
> > > >fs/f2fs/inode.c | 2 +-
> > > >1 file changed, 1 insertion(+), 1 deletion(-)
> > > > 
> > > > diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
> > > > index ff6cf66ed46b..2ed7a621fdf1 100644
> > > > --- a/fs/f2fs/inode.c
> > > > +++ b/fs/f2fs/inode.c
> > > > @@ -719,7 +719,7 @@ void f2fs_update_inode_page(struct inode *inode)
> > > > if (IS_ERR(node_page)) {
> > > > int err = PTR_ERR(node_page);
> > > > -   if (err == -ENOMEM) {
> > > > +   if (err == -ENOMEM || (err == -EIO && !f2fs_cp_error(sbi))) {
> > > > cond_resched();
> > > > goto retry;
> > > > } else if (err != -ENOENT) {




Re: [f2fs-dev] [PATCH 1/4] f2fs: reset iostat_count in f2fs_reset_iostat()

2023-01-12 Thread Jaegeuk Kim
On 01/12, Yangtao Li wrote:
> Dear Jaegeuk,
> 
> > Hi Yangtao,
> > 
> > These are all in dev-test branch, which means you don't need to stack up 
> > more
> > patches on top of it. I just integrated most of them into two original 
> > patches.
> 
> Ok, I'll merge the previous commits and resend next time.
> 
> > Could you please take a look at this?
> 
> LGTM.
> 
> > Applied with the fix.
> 
> Thanks!
> 
> BTW, I would like to ask whether it is possible to maintain discard in a separate file.
> 
> https://patchwork.kernel.org/project/f2fs/patch/20221217132318.37718-1-frank...@vivo.com/

Not worth.

> 
> MBR,
> Yangtao




[f2fs-dev] [PATCH] f2fs: add compression feature check for all compress mount opt

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
Opt_compress_chksum, Opt_compress_mode and Opt_compress_cache lack the
necessary check to see if the image supports compression; let's add
one.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/super.c | 55 +
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5fc83771042d..8ef1449272b3 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -89,7 +89,7 @@ static struct shrinker f2fs_shrinker_info = {
.seeks = DEFAULT_SEEKS,
 };
 
-enum {
+enum f2fs_mount_opt {
Opt_gc_background,
Opt_disable_roll_forward,
Opt_norecovery,
@@ -655,6 +655,30 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
 #endif
 #endif
 
+static bool f2fs_mount_opt_need_skip(struct f2fs_sb_info *sbi, enum f2fs_mount_opt opt)
+{
+   switch (opt) {
+   case Opt_compress_algorithm:
+   case Opt_compress_log_size:
+   case Opt_compress_extension:
+   case Opt_nocompress_extension:
+   case Opt_compress_chksum:
+   case Opt_compress_mode:
+   case Opt_compress_cache:
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+   if (f2fs_sb_has_compression(sbi))
+   return false;
+
+   f2fs_info(sbi, "Image doesn't support compression");
+#else
+   f2fs_info(sbi, "compression options not supported");
+#endif
+   return true;
+   default:
+   return false;
+   }
+}
+
 static int parse_options(struct super_block *sb, char *options, bool is_remount)
 {
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -685,6 +709,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
args[0].to = args[0].from = NULL;
token = match_token(p, f2fs_tokens, args);
 
+   if (f2fs_mount_opt_need_skip(sbi, token))
+   continue;
+
switch (token) {
case Opt_gc_background:
name = match_strdup(&args[0]);
@@ -1068,10 +1095,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
break;
 #ifdef CONFIG_F2FS_FS_COMPRESSION
case Opt_compress_algorithm:
-   if (!f2fs_sb_has_compression(sbi)) {
-   f2fs_info(sbi, "Image doesn't support compression");
-   break;
-   }
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
@@ -1122,10 +1145,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
kfree(name);
break;
case Opt_compress_log_size:
-   if (!f2fs_sb_has_compression(sbi)) {
-   f2fs_info(sbi, "Image doesn't support compression");
-   break;
-   }
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg < MIN_COMPRESS_LOG_SIZE ||
@@ -1137,10 +1156,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
F2FS_OPTION(sbi).compress_log_size = arg;
break;
case Opt_compress_extension:
-   if (!f2fs_sb_has_compression(sbi)) {
-   f2fs_info(sbi, "Image doesn't support compression");
-   break;
-   }
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
@@ -1161,10 +1176,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
kfree(name);
break;
case Opt_nocompress_extension:
-   if (!f2fs_sb_has_compression(sbi)) {
-   f2fs_info(sbi, "Image doesn't support compression");
-   break;
-   }
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
@@ -1204,16 +1215,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
case Opt_compress_cache:
set_opt(sbi, COMPRESS_CACHE);
break;
-#else
-   case Opt_compress_algorithm:
-   case Opt_compress_log_size:
-   case Opt_compress_extension:
-   case Opt_nocompress_extension:
-   case Opt_compress_chksum:
-   case Opt_compress_mode:
-   case Opt_compress_cache:
-   f2fs_info(sbi, "compression options not supported");
- 

Re: [f2fs-dev] [PATCH v5 09/23] cifs: Convert wdata_alloc_and_fillpages() to use filemap_get_folios_tag()

2023-01-12 Thread Paulo Alcantara via Linux-f2fs-devel
"Vishal Moola (Oracle)"  writes:

> This is in preparation for the removal of find_get_pages_range_tag(). Now also
> supports the use of large folios.
>
> Since tofind might be larger than the max number of folios in a
> folio_batch (15), we loop through filling in wdata->pages pulling more
> batches until we either reach tofind pages or run out of folios.
>
> This function may not return all pages in the last found folio before
> tofind pages are reached.
>
> Signed-off-by: Vishal Moola (Oracle) 
> ---
>  fs/cifs/file.c | 32 +---
>  1 file changed, 29 insertions(+), 3 deletions(-)

Looks good.

Acked-by: Paulo Alcantara (SUSE) 




[f2fs-dev] [PATCH] f2fs: return true if all cmd were issued or no cmd need to be issued for f2fs_issue_discard_timeout()

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
f2fs_issue_discard_timeout() returns whether discard cmds were dropped,
which does not match the meaning of the function. Let's change it to
return whether all discard cmds were issued.

After commit 4d67490498ac ("f2fs: Don't create discard thread when
device doesn't support realtime discard"), f2fs_issue_discard_timeout()
is also called by f2fs_remount(). Since the comment of
f2fs_issue_discard_timeout() doesn't make much sense, let's update it.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/segment.c | 13 ++---
 fs/f2fs/super.c   |  7 +++
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index bd1cd98fa6eb..9346209b7c94 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1650,7 +1650,14 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
}
 }
 
-/* This comes from f2fs_put_super */
+/**
+ * f2fs_issue_discard_timeout() - Issue all discard cmd within UMOUNT_DISCARD_TIMEOUT
+ * @sbi: the f2fs_sb_info data for discard cmd to issue
+ *
+ * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands will be dropped.
+ *
+ * Return true if all discard cmds were issued or none needed to be issued; otherwise return false.
+ */
 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
 {
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
@@ -1658,7 +1665,7 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
bool dropped;
 
if (!atomic_read(&dcc->discard_cmd_cnt))
-   return false;
+   return true;
 
__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
dcc->discard_granularity);
@@ -1669,7 +1676,7 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
__wait_all_discard_cmd(sbi, NULL);
 
f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
-   return dropped;
+   return !dropped;
 }
 
 static int issue_discard_thread(void *data)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5fc83771042d..8f13798058f4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1546,7 +1546,7 @@ static void f2fs_put_super(struct super_block *sb)
 {
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
-   bool dropped;
+   bool done;
 
/* unregister procfs/sysfs entries in advance to avoid race case */
f2fs_unregister_sysfs(sbi);
@@ -1576,9 +1576,8 @@ static void f2fs_put_super(struct super_block *sb)
}
 
/* be sure to wait for any on-going discard commands */
-   dropped = f2fs_issue_discard_timeout(sbi);
-
-   if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && !dropped) {
+   done = f2fs_issue_discard_timeout(sbi);
+   if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
-- 
2.25.1





Re: [f2fs-dev] [PATCH v5 09/23] cifs: Convert wdata_alloc_and_fillpages() to use filemap_get_folios_tag()

2023-01-12 Thread Vishal Moola
On Wed, Jan 4, 2023 at 1:15 PM Vishal Moola (Oracle)
 wrote:
>
> This is in preparation for the removal of find_get_pages_range_tag(). Now also
> supports the use of large folios.
>
> Since tofind might be larger than the max number of folios in a
> folio_batch (15), we loop through filling in wdata->pages pulling more
> batches until we either reach tofind pages or run out of folios.
>
> This function may not return all pages in the last found folio before
> tofind pages are reached.
>
> Signed-off-by: Vishal Moola (Oracle) 
> ---
>  fs/cifs/file.c | 32 +---
>  1 file changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/fs/cifs/file.c b/fs/cifs/file.c
> index 22dfc1f8b4f1..8cdd2f67af24 100644
> --- a/fs/cifs/file.c
> +++ b/fs/cifs/file.c
> @@ -2527,14 +2527,40 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
>   unsigned int *found_pages)
>  {
> struct cifs_writedata *wdata;
> -
> +   struct folio_batch fbatch;
> +   unsigned int i, idx, p, nr;
> wdata = cifs_writedata_alloc((unsigned int)tofind,
>  cifs_writev_complete);
> if (!wdata)
> return NULL;
>
> -   *found_pages = find_get_pages_range_tag(mapping, index, end,
> -   PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
> +   folio_batch_init(&fbatch);
> +   *found_pages = 0;
> +
> +again:
> +   nr = filemap_get_folios_tag(mapping, index, end,
> +   PAGECACHE_TAG_DIRTY, &fbatch);
> +   if (!nr)
> +   goto out; /* No dirty pages left in the range */
> +
> +   for (i = 0; i < nr; i++) {
> +   struct folio *folio = fbatch.folios[i];
> +
> +   idx = 0;
> +   p = folio_nr_pages(folio);
> +add_more:
> +   wdata->pages[*found_pages] = folio_page(folio, idx);
> +   folio_get(folio);
> +   if (++*found_pages == tofind) {
> +   folio_batch_release(&fbatch);
> +   goto out;
> +   }
> +   if (++idx < p)
> +   goto add_more;
> +   }
> +   folio_batch_release(&fbatch);
> +   goto again;
> +out:
> return wdata;
>  }
>
> --
> 2.38.1
>

Could someone review this cifs patch, please? This is one of the
2 remaining patches that need to be looked at in the series.




Re: [f2fs-dev] [PATCH v5 10/23] ext4: Convert mpage_prepare_extent_to_map() to use filemap_get_folios_tag()

2023-01-12 Thread Vishal Moola
On Wed, Jan 4, 2023 at 1:15 PM Vishal Moola (Oracle)
 wrote:
>
> Converted the function to use folios throughout. This is in preparation
> for the removal of find_get_pages_range_tag(). Now supports large
> folios. This change removes 11 calls to compound_head().
>
> Signed-off-by: Vishal Moola (Oracle) 
> ---
>  fs/ext4/inode.c | 65 -
>  1 file changed, 32 insertions(+), 33 deletions(-)
>
> diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> index 9d9f414f99fe..fb6cd994e59a 100644
> --- a/fs/ext4/inode.c
> +++ b/fs/ext4/inode.c
> @@ -2595,8 +2595,8 @@ static bool ext4_page_nomap_can_writeout(struct page *page)
>  static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
>  {
> struct address_space *mapping = mpd->inode->i_mapping;
> -   struct pagevec pvec;
> -   unsigned int nr_pages;
> +   struct folio_batch fbatch;
> +   unsigned int nr_folios;
> long left = mpd->wbc->nr_to_write;
> pgoff_t index = mpd->first_page;
> pgoff_t end = mpd->last_page;
> @@ -2610,18 +2610,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> tag = PAGECACHE_TAG_TOWRITE;
> else
> tag = PAGECACHE_TAG_DIRTY;
> -
> -   pagevec_init(&pvec);
> +   folio_batch_init(&fbatch);
> mpd->map.m_len = 0;
> mpd->next_page = index;
> while (index <= end) {
> -   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
> -   tag);
> -   if (nr_pages == 0)
> +   nr_folios = filemap_get_folios_tag(mapping, &index, end,
> +   tag, &fbatch);
> +   if (nr_folios == 0)
> break;
>
> -   for (i = 0; i < nr_pages; i++) {
> -   struct page *page = pvec.pages[i];
> +   for (i = 0; i < nr_folios; i++) {
> +   struct folio *folio = fbatch.folios[i];
>
> /*
>  * Accumulated enough dirty pages? This doesn't apply
> @@ -2635,10 +2634,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> goto out;
>
> /* If we can't merge this page, we are done. */
> -   if (mpd->map.m_len > 0 && mpd->next_page != 
> page->index)
> +   if (mpd->map.m_len > 0 && mpd->next_page != 
> folio->index)
> goto out;
>
> -   lock_page(page);
> +   folio_lock(folio);
> /*
>  * If the page is no longer dirty, or its mapping no
>  * longer corresponds to inode we are writing (which
> @@ -2646,16 +2645,16 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
>  * page is already under writeback and we are not 
> doing
>  * a data integrity writeback, skip the page
>  */
> -   if (!PageDirty(page) ||
> -   (PageWriteback(page) &&
> +   if (!folio_test_dirty(folio) ||
> +   (folio_test_writeback(folio) &&
>  (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
> -   unlikely(page->mapping != mapping)) {
> -   unlock_page(page);
> +   unlikely(folio->mapping != mapping)) {
> +   folio_unlock(folio);
> continue;
> }
>
> -   wait_on_page_writeback(page);
> -   BUG_ON(PageWriteback(page));
> +   folio_wait_writeback(folio);
> +   BUG_ON(folio_test_writeback(folio));
>
> /*
>  * Should never happen but for buggy code in
> @@ -2666,49 +2665,49 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
>  *
>  * [1] 
> https://lore.kernel.org/linux-mm/20180103100430.ge4...@quack2.suse.cz
>  */
> -   if (!page_has_buffers(page)) {
> -   ext4_warning_inode(mpd->inode, "page %lu does 
> not have buffers attached", page->index);
> -   ClearPageDirty(page);
> -   unlock_page(page);
> +   if (!folio_buffers(folio)) {
> +   ext4_warning_inode(mpd->inode, "page %lu does 
> not have buffers attached", folio->index);
> +   folio_clear_dirty(folio);
> +   folio_unlock(folio);
> continue;
> }
>
> if 

Re: [f2fs-dev] [PATCH 1/4] f2fs: reset iostat_count in f2fs_reset_iostat()

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
Dear Jaegeuk,

> Hi Yangtao,
> 
> These are all in dev-test branch, which means you don't need to stack up more
> patches on top of it. I just integrated most of them into two original 
> patches.

Ok, I'll merge the previous commits and resend next time.

> Could you please take a look at this?

LGTM.

> Applied with the fix.

Thanks!

BTW, I would like to ask whether it is possible to maintain discard in a separate file.

https://patchwork.kernel.org/project/f2fs/patch/20221217132318.37718-1-frank...@vivo.com/

MBR,
Yangtao




[f2fs-dev] [PATCH v4] f2fs: introduce discard_io_aware_gran sysfs node

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
The current discard_io_aware_gran is a fixed value; change it to be
configurable through a sysfs node.

Signed-off-by: Yangtao Li 
---
v4: update description
 Documentation/ABI/testing/sysfs-fs-f2fs | 10 ++
 fs/f2fs/f2fs.h  |  1 +
 fs/f2fs/segment.c   |  3 ++-
 fs/f2fs/sysfs.c | 13 +
 4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index aaa379bb8a8f..06e6795db6f5 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -708,3 +708,13 @@ Description:   Support configuring fault injection type, should be
 FAULT_LOCK_OP    0x2
 FAULT_BLKADDR    0x4
===  ===
+
+What:  /sys/fs/f2fs/<disk>/discard_io_aware_gran
+Date:  January 2023
+Contact:   "Yangtao Li" 
+Description:   Controls the background discard granularity of the inner discard
+   thread when the device is not idle. The inner thread will not issue
+   discards smaller than this granularity. The unit size is one block (4KB),
+   and only values in the range [0, 512] are supported.
+   By default, the value is 512, so all discard IOs can be interrupted by
+   other inflight IO. It can be set to 0, in which case the IO-aware
+   functionality is disabled.
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 331c330ea31d..f3c5f7740c1a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -409,6 +409,7 @@ struct discard_cmd_control {
unsigned int min_discard_issue_time;/* min. interval between 
discard issue */
unsigned int mid_discard_issue_time;/* mid. interval between 
discard issue */
unsigned int max_discard_issue_time;/* max. interval between 
discard issue */
+   unsigned int discard_io_aware_gran; /* minimum discard granularity not be aware of I/O */
unsigned int discard_urgent_util;   /* utilization which issue 
discard proactively */
unsigned int discard_granularity;   /* discard granularity */
unsigned int max_ordered_discard;   /* maximum discard granularity 
issued by lba order */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 976316218bd3..bd1cd98fa6eb 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1059,7 +1059,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
dpolicy->granularity = granularity;
 
dpolicy->max_requests = dcc->max_discard_request;
-   dpolicy->io_aware_gran = MAX_PLIST_NUM;
+   dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
dpolicy->timeout = false;
 
if (discard_type == DPOLICY_BG) {
@@ -2063,6 +2063,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
if (!dcc)
return -ENOMEM;
 
+   dcc->discard_io_aware_gran = MAX_PLIST_NUM;
dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 805b632a3af0..e396851a6dd1 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -473,6 +473,17 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
return count;
}
 
+   if (!strcmp(a->attr.name, "discard_io_aware_gran")) {
+   if (t > MAX_PLIST_NUM)
+   return -EINVAL;
+   if (!f2fs_block_unit_discard(sbi))
+   return -EINVAL;
+   if (t == *ui)
+   return count;
+   *ui = t;
+   return count;
+   }
+
if (!strcmp(a->attr.name, "discard_granularity")) {
if (t == 0 || t > MAX_PLIST_NUM)
return -EINVAL;
@@ -825,6 +836,7 @@ F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, 
max_discard_request, max_discard_req
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, min_discard_issue_time, 
min_discard_issue_time);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, mid_discard_issue_time, 
mid_discard_issue_time);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_discard_issue_time, 
max_discard_issue_time);
+F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_io_aware_gran, discard_io_aware_gran);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_urgent_util, 
discard_urgent_util);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, 
discard_granularity);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_ordered_discard, 
max_ordered_discard);
@@ -960,6 +972,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_discard_issue_time),
ATTR_LIST(mid_discard_issue_time),
ATTR_LIST(max_discard_issue_time),
+   ATTR_LIST(discard_io_aware_gran),
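
For context, the policy field this node tunes is consumed in the discard issue
path roughly as sketched below. This is an approximation of
__issue_discard_cmd() based on the hunks above, not a verbatim copy of
fs/f2fs/segment.c; the pending lists are indexed by discard length, so entries
below io_aware_gran defer to other inflight I/O while larger ones are issued
regardless:

	static int issue_pending_discards_sketch(struct f2fs_sb_info *sbi,
						 struct discard_cmd_control *dcc,
						 struct discard_policy *dpolicy)
	{
		struct discard_cmd *dc, *tmp;
		bool io_interrupted = false;
		int i, issued = 0;

		for (i = MAX_PLIST_NUM - 1; i >= 0 && !io_interrupted; i--) {
			struct list_head *pend_list = &dcc->pend_list[i];

			list_for_each_entry_safe(dc, tmp, pend_list, list) {
				/*
				 * Small discards (list index below io_aware_gran)
				 * yield to other inflight I/O. With the default of
				 * MAX_PLIST_NUM (512) every discard can be
				 * interrupted; with 0 the idle check never applies.
				 */
				if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
							!is_idle(sbi, DISCARD_TIME)) {
					io_interrupted = true;
					break;
				}
				__submit_discard_cmd(sbi, dpolicy, dc, &issued);
			}
		}
		return issued;
	}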
  

Re: [f2fs-dev] [PATCH] f2fs: drop useless initializer and unneeded local variable

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
> exceed 80 column?

I use the checkpatch.pl script to check that there are no errors.
Earlier, the default line length was 80 columns.
Commit bdc48fa11e46 ("checkpatch/coding-style: deprecate 80-column warning") increased the limit to 100 columns.

$ ./scripts/checkpatch.pl 0001-f2fs-drop-useless-initializer-and-unneeded-local-var.patch
total: 0 errors, 0 warnings, 24 lines checked

0001-f2fs-drop-useless-initializer-and-unneeded-local-var.patch has no obvious 
style problems and is ready for submission.

Thx,
Yangtao



[f2fs-dev] [PATCH] f2fs: convert is_extension_exist() to return bool type

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
is_extension_exist() only returns two values, 0 or 1,
so there is no need to use the int type.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/namei.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 6032589099ce..516968cbc6d6 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -22,7 +22,7 @@
 #include "acl.h"
 #include 
 
-static inline int is_extension_exist(const unsigned char *s, const char *sub,
+static inline bool is_extension_exist(const unsigned char *s, const char *sub,
bool tmp_ext)
 {
size_t slen = strlen(s);
@@ -30,19 +30,19 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub,
int i;
 
if (sublen == 1 && *sub == '*')
-   return 1;
+   return true;
 
/*
 * filename format of multimedia file should be defined as:
 * "filename + '.' + extension + (optional: '.' + temp extension)".
 */
if (slen < sublen + 2)
-   return 0;
+   return false;
 
if (!tmp_ext) {
/* file has no temp extension */
if (s[slen - sublen - 1] != '.')
-   return 0;
+   return false;
return !strncasecmp(s + slen - sublen, sub, sublen);
}
 
@@ -50,10 +50,10 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub,
if (s[i] != '.')
continue;
if (!strncasecmp(s + i + 1, sub, sublen))
-   return 1;
+   return true;
}
 
-   return 0;
+   return false;
 }
 
 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
-- 
2.25.1





[f2fs-dev] [PATCH] f2fs: introduce sanity_check_blocks()

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
There is very similar code in release_compress_blocks() and
reserve_compress_blocks() which is used for data block checks.

This patch introduces a new helper, sanity_check_blocks(), to hold
the common code, and uses it instead for cleanup.

Signed-off-by: Yangtao Li 
---
 fs/f2fs/file.c | 36 
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f5c1b7814954..0d539155379c 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -3369,11 +3369,9 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
return put_user(blocks, (u64 __user *)arg);
 }
 
-static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+static int sanity_check_blocks(struct dnode_of_data *dn, pgoff_t count)
 {
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-   unsigned int released_blocks = 0;
-   int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
block_t blkaddr;
int i;
 
@@ -3390,6 +3388,21 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
}
}
 
+   return 0;
+}
+
+static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+   struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+   unsigned int released_blocks = 0;
+   int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+   block_t blkaddr;
+   int i, rc;
+
+   rc = sanity_check_blocks(dn, count);
+   if (rc)
+   return rc;
+
while (count) {
int compr_blocks = 0;
 
@@ -3539,20 +3552,11 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
unsigned int reserved_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
block_t blkaddr;
-   int i;
+   int i, rc;
 
-   for (i = 0; i < count; i++) {
-   blkaddr = data_blkaddr(dn->inode, dn->node_page,
-   dn->ofs_in_node + i);
-
-   if (!__is_valid_data_blkaddr(blkaddr))
-   continue;
-   if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
-   DATA_GENERIC_ENHANCE))) {
-   f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
-   return -EFSCORRUPTED;
-   }
-   }
+   rc = sanity_check_blocks(dn, count);
+   if (rc)
+   return rc;
 
while (count) {
int compr_blocks = 0;
-- 
2.25.1





[f2fs-dev] [PATCH 3/3] f2fs: add F2FS_IOC_GET_COMPRESS_OPTION_V2 ioctl

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
Added a new F2FS_IOC_GET_COMPRESS_OPTION_V2 ioctl to get the
compression options of a file.

struct f2fs_comp_option_v2 {
union {
struct {
__u8 algorithm;
__u8 log_cluster_size;
__u16 compress_flag;
};
struct f2fs_comp_option option;
};
};

struct f2fs_comp_option_v2 option;

ioctl(fd, F2FS_IOC_GET_COMPRESS_OPTION_V2, &option);

printf("compression algorithm:%u\n", option.algorithm);
printf("compression cluster log size:%u\n", option.log_cluster_size);
printf("compress level:%u\n", GET_COMPRESS_LEVEL(option.compress_flag));
printf("compress chksum:%s\n",
(BIT(COMPRESS_CHKSUM) & option.compress_flag) ? "true" : "false");

Signed-off-by: Yangtao Li 
---
 fs/f2fs/file.c| 20 +++-
 include/uapi/linux/f2fs.h |  4 +++-
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 719706ef0d46..e011fb50ccc3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -3885,10 +3885,12 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
return ret;
 }
 
-static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
+static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg,
+   unsigned int cmd)
 {
struct inode *inode = file_inode(filp);
-   struct f2fs_comp_option option;
+   struct f2fs_comp_option_v2 option;
+   int len;
 
if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
return -EOPNOTSUPP;
@@ -3902,11 +3904,17 @@ static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
 
option.algorithm = F2FS_I(inode)->i_compress_algorithm;
option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
+   if (cmd == F2FS_IOC_GET_COMPRESS_OPTION_V2)
+   option.compress_flag = F2FS_I(inode)->i_compress_flag;
 
inode_unlock_shared(inode);
 
-   if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
-   sizeof(option)))
+   if (cmd == F2FS_IOC_GET_COMPRESS_OPTION_V2)
+   len = sizeof(struct f2fs_comp_option_v2);
+   else
+   len = sizeof(struct f2fs_comp_option);
+
+   if (copy_to_user((void __user *)arg, &option, len))
return -EFAULT;
 
return 0;
@@ -4244,7 +4252,9 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_SEC_TRIM_FILE:
return f2fs_sec_trim_file(filp, arg);
case F2FS_IOC_GET_COMPRESS_OPTION:
-   return f2fs_ioc_get_compress_option(filp, arg);
+   return f2fs_ioc_get_compress_option(filp, arg, F2FS_IOC_GET_COMPRESS_OPTION);
+   case F2FS_IOC_GET_COMPRESS_OPTION_V2:
+   return f2fs_ioc_get_compress_option(filp, arg, F2FS_IOC_GET_COMPRESS_OPTION_V2);
case F2FS_IOC_SET_COMPRESS_OPTION:
return f2fs_ioc_set_compress_option(filp, arg, 
F2FS_IOC_SET_COMPRESS_OPTION);
case F2FS_IOC_SET_COMPRESS_OPTION_V2:
diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h
index aaf7f55273fb..b42f6b322b8b 100644
--- a/include/uapi/linux/f2fs.h
+++ b/include/uapi/linux/f2fs.h
@@ -45,6 +45,8 @@
 #define F2FS_IOC_START_ATOMIC_REPLACE  _IO(F2FS_IOCTL_MAGIC, 25)
 #define F2FS_IOC_SET_COMPRESS_OPTION_V2	_IOW(F2FS_IOCTL_MAGIC, 26,	\
 					struct f2fs_comp_option_v2)
+#define F2FS_IOC_GET_COMPRESS_OPTION_V2	_IOW(F2FS_IOCTL_MAGIC, 27,	\
+					struct f2fs_comp_option_v2)
 
 /*
  * should be same as XFS_IOC_GOINGDOWN.
@@ -65,7 +67,7 @@
#define F2FS_TRIM_FILE_MASK  0x3
 
 /*
- * Flags used by F2FS_IOC_SET_COMPRESS_OPTION_V2
+ * Flags used by F2FS_IOC_SET_COMPRESS_OPTION_V2 and 
F2FS_IOC_GET_COMPRESS_OPTION
  */
 #define COMPRESS_CHKSUM  0x0  /* enable chksum for compress file */
 #define COMPRESS_LEVEL_OFFSET  8
-- 
2.25.1





[f2fs-dev] [PATCH 1/3] f2fs: clarify compress level bit offset

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
commit 3fde13f817e2 ("f2fs: compress: support compress level") introduced
compress level support, whose macro COMPRESS_LEVEL_OFFSET is 8, but used
a wrong comment about the compress level bits.

Let's fix it.

Signed-off-by: Yangtao Li 
---
 include/linux/f2fs_fs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index ee0d75d9a302..1701f25117ea 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -315,7 +315,7 @@ struct f2fs_inode {
__u8 i_log_cluster_size;/* log of cluster size */
__le16 i_compress_flag; /* compress flag */
/* 0 bit: chksum flag
-* [10,15] bits: compress level
+* [8,15] bits: compress level
 */
__le32 i_extra_end[0];  /* for attribute size calculation */
} __packed;
-- 
2.25.1





[f2fs-dev] [PATCH 2/3] f2fs: add F2FS_IOC_SET_COMPRESS_OPTION_V2 ioctl

2023-01-12 Thread Yangtao Li via Linux-f2fs-devel
Added a new F2FS_IOC_SET_COMPRESS_OPTION_V2 ioctl to change the
compression options of a file.

struct f2fs_comp_option_v2 {
union {
struct {
__u8 algorithm;
__u8 log_cluster_size;
__u16 compress_flag;
};
struct f2fs_comp_option option;
};
};

struct f2fs_comp_option_v2 option;

option.algorithm = 2;
option.log_cluster_size = 2;
option.compress_flag = (5 << COMPRESS_LEVEL_OFFSET) | BIT(COMPRESS_CHKSUM);

ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION_V2, &option);

Signed-off-by: Yangtao Li 
---
 fs/f2fs/f2fs.h|  8 +---
 fs/f2fs/file.c| 41 ---
 include/uapi/linux/f2fs.h | 21 
 3 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index f3c5f7740c1a..c2267f44bcf8 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -25,6 +25,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -1501,11 +1502,6 @@ enum compress_algorithm_type {
COMPRESS_MAX,
 };
 
-enum compress_flag {
-   COMPRESS_CHKSUM,
-   COMPRESS_MAX_FLAG,
-};
-
 #define COMPRESS_WATERMARK  20
 #define COMPRESS_PERCENT  20
 
@@ -1521,8 +1517,6 @@ struct compress_data {
 
 #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
 
-#define COMPRESS_LEVEL_OFFSET   8
-
 /* compress context */
 struct compress_ctx {
struct inode *inode;/* inode the context belong to */
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f5c1b7814954..719706ef0d46 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -25,6 +25,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include "f2fs.h"
 #include "node.h"
@@ -3910,12 +3912,13 @@ static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
return 0;
 }
 
-static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg,
+   unsigned int cmd)
 {
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-   struct f2fs_comp_option option;
-   int ret = 0;
+   struct f2fs_comp_option_v2 option;
+   int ret = 0, len;
 
if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
@@ -3923,8 +3926,12 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
if (!(filp->f_mode & FMODE_WRITE))
return -EBADF;
 
-   if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
-   sizeof(option)))
+   if (cmd == F2FS_IOC_SET_COMPRESS_OPTION_V2)
+   len = sizeof(struct f2fs_comp_option_v2);
+   else
+   len = sizeof(struct f2fs_comp_option);
+
+   if (copy_from_user(&option, (void __user *)arg, len))
return -EFAULT;
 
if (!f2fs_compressed_file(inode) ||
@@ -3933,6 +3940,21 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
option.algorithm >= COMPRESS_MAX)
return -EINVAL;
 
+   if (cmd == F2FS_IOC_SET_COMPRESS_OPTION_V2) {
+   unsigned int level = GET_COMPRESS_LEVEL(option.compress_flag);
+
+   switch (option.algorithm) {
+   case COMPRESS_LZ4:
+   if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL)
+   return -EINVAL;
+   break;
+   case COMPRESS_ZSTD:
+   if (!level || level > zstd_max_clevel())
+   return -EINVAL;
+   break;
+   }
+   }
+
file_start_write(filp);
inode_lock(inode);
 
@@ -3948,7 +3970,10 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
 
F2FS_I(inode)->i_compress_algorithm = option.algorithm;
F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
-   F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
+   F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+
+   if (cmd == F2FS_IOC_SET_COMPRESS_OPTION_V2)
+   F2FS_I(inode)->i_compress_flag = option.compress_flag & COMPRESS_OPTION_MASK;
f2fs_mark_inode_dirty_sync(inode, true);
 
if (!f2fs_is_compress_backend_ready(inode))
@@ -4221,7 +4246,9 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GET_COMPRESS_OPTION:
return f2fs_ioc_get_compress_option(filp, arg);
case F2FS_IOC_SET_COMPRESS_OPTION:
-   return f2fs_ioc_set_compress_option(filp, arg);
+   return f2fs_ioc_set_compress_option(filp, arg, 

Re: [f2fs-dev] [PATCH v5 14/23] f2fs: Convert f2fs_write_cache_pages() to use filemap_get_folios_tag()

2023-01-12 Thread Chao Yu

On 2023/1/5 5:14, Vishal Moola (Oracle) wrote:

Converted the function to use a folio_batch instead of pagevec. This is in
preparation for the removal of find_get_pages_range_tag().

Also modified f2fs_all_cluster_page_ready to take in a folio_batch instead
of pagevec. This does NOT support large folios. The function currently
only utilizes folios of size 1 so this shouldn't cause any issues right
now.

This version of the patch limits the number of pages fetched to
F2FS_ONSTACK_PAGES. If that ever happens, update the start index here
since filemap_get_folios_tag() updates the index to be after the last
found folio, not necessarily the last used page.

Signed-off-by: Vishal Moola (Oracle) 


Acked-by: Chao Yu 

Thanks,
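
For readers without the patch at hand, the lookup pattern being acked here
looks roughly like the sketch below. It is a simplified illustration based on
the commit message and on the cifs/ext4 conversions earlier in this digest,
not the actual f2fs_write_cache_pages() hunk: the folio_batch is drained page
by page, capped at F2FS_ONSTACK_PAGES, and the start index is rewound when the
cap is hit mid-batch because filemap_get_folios_tag() has already advanced it
past the last found folio rather than the last used page.

	static unsigned int fill_pages_from_dirty_folios(struct address_space *mapping,
							 pgoff_t *index, pgoff_t end,
							 struct page **pages)
	{
		struct folio_batch fbatch;
		unsigned int nr_pages = 0, nr_folios, i;

		folio_batch_init(&fbatch);
		nr_folios = filemap_get_folios_tag(mapping, index, end,
						   PAGECACHE_TAG_DIRTY, &fbatch);

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];
			unsigned int idx;

			for (idx = 0; idx < folio_nr_pages(folio); idx++) {
				pages[nr_pages] = folio_page(folio, idx);
				folio_get(folio);	/* each slot holds a reference */
				if (++nr_pages == F2FS_ONSTACK_PAGES) {
					/* resume right after the last page consumed */
					*index = folio->index + idx + 1;
					goto out;
				}
			}
		}
	out:
		folio_batch_release(&fbatch);
		return nr_pages;
	}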




Re: [f2fs-dev] [PATCH v2] f2fs: retry to update the inode page given EIO

2023-01-12 Thread Chao Yu

On 2023/1/12 2:50, Jaegeuk Kim wrote:

On 01/11, Chao Yu wrote:

On 2023/1/11 9:20, Jaegeuk Kim wrote:

In f2fs_update_inode_page, f2fs_get_node_page handles EIO along with
f2fs_handle_page_eio that stops checkpoint, if the disk couldn't be recovered.
As a result, we don't need to stop checkpoint right away given single EIO.


f2fs_handle_page_eio() only covers the case that EIO occurs on the same
page, should we cover the case EIO occurs on different pages?


Which case are you looking at?


- __get_node_page(PageA)                 - __get_node_page(PageB)
 - f2fs_handle_page_eio
  - sbi->page_eio_ofs[type] = PageA->index
                                          - f2fs_handle_page_eio
                                           - sbi->page_eio_ofs[type] = PageB->index

In such a race case, it may have a low probability to set CP_ERROR_FLAG as we expect?

Thanks,





Thanks,



Cc: sta...@vger.kernel.org
Signed-off-by: Randall Huang 
Signed-off-by: Jaegeuk Kim 
---

   Change log from v1:
- fix a bug

   fs/f2fs/inode.c | 2 +-
   1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index ff6cf66ed46b..2ed7a621fdf1 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -719,7 +719,7 @@ void f2fs_update_inode_page(struct inode *inode)
if (IS_ERR(node_page)) {
int err = PTR_ERR(node_page);
-   if (err == -ENOMEM) {
+   if (err == -ENOMEM || (err == -EIO && !f2fs_cp_error(sbi))) {
cond_resched();
goto retry;
} else if (err != -ENOENT) {





[f2fs-dev] Silikonmischungen

2023-01-12 Thread Zbynek Spacek via Linux-f2fs-devel
Good morning,

do you need intermediates for processing, plastics (e.g. rubber) or silicone 
mixtures?

We provide a wide range of silicone rubbers with various properties, silicone 
mixtures from renowned manufacturers such as Wacker, Elastosil LR and dyes, 
stabilizers, primers and anti-adhesive additives.

We also produce technical silicone compounds with increased resistance to oils, 
resistant to high temperatures and water vapor, conductive and many more.

We provide fast order fulfillment, timely deliveries and cost optimization.

Can I introduce what we can offer you?


Best regards
Zbynek Spacek

