Re: [f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-07-24 Thread Chao Yu

On 2022/7/18 23:47, Daeho Jeong wrote:

On Sat, Jul 16, 2022 at 8:22 PM Chao Yu  wrote:


On 2022/6/22 0:07, Jaegeuk Kim wrote:

Can we do like this which has no functional change but refactored
some functions?

---
   fs/f2fs/compress.c | 208 -
   fs/f2fs/data.c |  52 
   fs/f2fs/f2fs.h |  17 ++--
   3 files changed, 172 insertions(+), 105 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index fa237e5c7173..494ce3634b62 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -729,28 +729,18 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
   return ret;
   }

-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool end_io)
   {
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
- struct f2fs_inode_info *fi = F2FS_I(dic->inode);
   const struct f2fs_compress_ops *cops =
- f2fs_cops[fi->i_compress_algorithm];
- int ret;
+ f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
   int i;

- trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
- dic->cluster_size, fi->i_compress_algorithm);
-
- if (dic->failed) {
- ret = -EIO;
- goto out_end_io;
- }
+ if (!(end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode


How about using allow_decompress_in_softirq() to wrap !f2fs_low_mem_mode()
to improve readability?


+ return 0;

   dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
- if (!dic->tpages) {
- ret = -ENOMEM;
- goto out_end_io;
- }
+ if (!dic->tpages)
+ return 1;


return -ENOMEM instead of magic number.


The callers already change the error number to ENOMEM when it gets 1.
This is the same flow in the previous code. Do you want to change
this?


Yes, please. :)







   for (i = 0; i < dic->cluster_size; i++) {
   if (dic->rpages[i]) {
@@ -759,28 +749,100 @@ void f2fs_decompress_cluster(struct decompress_io_ctx 
*dic)
   }

   dic->tpages[i] = f2fs_compress_alloc_page();
- if (!dic->tpages[i]) {
- ret = -ENOMEM;
- goto out_end_io;
- }
+ if (!dic->tpages[i])
+ return 1;


Ditto,


   }

+ dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
+ if (!dic->rbuf)
+ return 1;


Ditto,


+
+ dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+ if (!dic->cbuf)
+ return 1;


Ditto,


+
+ cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];


No need to assign cops again?


Ack




   if (cops->init_decompress_ctx) {
- ret = cops->init_decompress_ctx(dic);
+ int ret = cops->init_decompress_ctx(dic);
+
   if (ret)
- goto out_end_io;
+ return 1;


How about returning ret here instead of magic number?


   }

- dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
- if (!dic->rbuf) {
- ret = -ENOMEM;
- goto out_destroy_decompress_ctx;
+ return 0;
+}
+
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback, bool end_io)
+{
+ const struct f2fs_compress_ops *cops =
+ f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+
+ if (end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode)))
+ return;
+
+ if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+ cops->destroy_decompress_ctx(dic);
+
+ if (dic->cbuf)
+ vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+
+ if (dic->rbuf)
+ vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback)
+{
+ int i;
+
+ f2fs_release_decomp_mem(dic, bypass_destroy_callback, false);
+
+ if (dic->tpages) {
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (dic->rpages[i])
+ continue;
+ if (!dic->tpages[i])
+ continue;
+ f2fs_compress_free_page(dic->tpages[i]);
+ }
+ page_array_free(dic->inode, dic->tpages, dic->cluster_size);
   }

- dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
- if (!dic->cbuf) {
+ if (dic->cpages) {
+ for (i = 0; i < dic->nr_cpages; i++) {
+ if (!dic->cpages[i])
+ continue;
+ f2fs_compress_free_page(dic->cpages[i]);
+ }
+ page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+ }
+
+ page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+ kmem_cache_free(dic_entry_slab, 

Re: [f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-07-18 Thread Daeho Jeong via Linux-f2fs-devel
On Sat, Jul 16, 2022 at 8:22 PM Chao Yu  wrote:
>
> On 2022/6/22 0:07, Jaegeuk Kim wrote:
> > Can we do like this which has no functional change but refactored
> > some functions?
> >
> > ---
> >   fs/f2fs/compress.c | 208 -
> >   fs/f2fs/data.c |  52 
> >   fs/f2fs/f2fs.h |  17 ++--
> >   3 files changed, 172 insertions(+), 105 deletions(-)
> >
> > diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> > index fa237e5c7173..494ce3634b62 100644
> > --- a/fs/f2fs/compress.c
> > +++ b/fs/f2fs/compress.c
> > @@ -729,28 +729,18 @@ static int f2fs_compress_pages(struct compress_ctx 
> > *cc)
> >   return ret;
> >   }
> >
> > -void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
> > +static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool 
> > end_io)
> >   {
> > - struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
> > - struct f2fs_inode_info *fi = F2FS_I(dic->inode);
> >   const struct f2fs_compress_ops *cops =
> > - f2fs_cops[fi->i_compress_algorithm];
> > - int ret;
> > + f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
> >   int i;
> >
> > - trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
> > - dic->cluster_size, fi->i_compress_algorithm);
> > -
> > - if (dic->failed) {
> > - ret = -EIO;
> > - goto out_end_io;
> > - }
> > + if (!(end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode
>
> How about using allow_decompress_in_softirq() to wrap !f2fs_low_mem_mode()
> to improve readability?
>
> > + return 0;
> >
> >   dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
> > - if (!dic->tpages) {
> > - ret = -ENOMEM;
> > - goto out_end_io;
> > - }
> > + if (!dic->tpages)
> > + return 1;
>
> return -ENOMEM instead of magic number.

The callers already change the error number to ENOMEM when it gets 1.
This is the same flow in the previous code. Do you want to change
this?

>
> >
> >   for (i = 0; i < dic->cluster_size; i++) {
> >   if (dic->rpages[i]) {
> > @@ -759,28 +749,100 @@ void f2fs_decompress_cluster(struct 
> > decompress_io_ctx *dic)
> >   }
> >
> >   dic->tpages[i] = f2fs_compress_alloc_page();
> > - if (!dic->tpages[i]) {
> > - ret = -ENOMEM;
> > - goto out_end_io;
> > - }
> > + if (!dic->tpages[i])
> > + return 1;
>
> Ditto,
>
> >   }
> >
> > + dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
> > + if (!dic->rbuf)
> > + return 1;
>
> Ditto,
>
> > +
> > + dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
> > + if (!dic->cbuf)
> > + return 1;
>
> Ditto,
>
> > +
> > + cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
>
> No need to assign cops again?

Ack

>
> >   if (cops->init_decompress_ctx) {
> > - ret = cops->init_decompress_ctx(dic);
> > + int ret = cops->init_decompress_ctx(dic);
> > +
> >   if (ret)
> > - goto out_end_io;
> > + return 1;
>
> How about returning ret here instead of magic number?
>
> >   }
> >
> > - dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
> > - if (!dic->rbuf) {
> > - ret = -ENOMEM;
> > - goto out_destroy_decompress_ctx;
> > + return 0;
> > +}
> > +
> > +static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
> > + bool bypass_destroy_callback, bool end_io)
> > +{
> > + const struct f2fs_compress_ops *cops =
> > + f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
> > +
> > + if (end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode)))
> > + return;
> > +
> > + if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
> > + cops->destroy_decompress_ctx(dic);
> > +
> > + if (dic->cbuf)
> > + vm_unmap_ram(dic->cbuf, dic->nr_cpages);
> > +
> > + if (dic->rbuf)
> > + vm_unmap_ram(dic->rbuf, dic->cluster_size);
> > +}
> > +
> > +static void f2fs_free_dic(struct decompress_io_ctx *dic,
> > + bool bypass_destroy_callback)
> > +{
> > + int i;
> > +
> > + f2fs_release_decomp_mem(dic, bypass_destroy_callback, false);
> > +
> > + if (dic->tpages) {
> > + for (i = 0; i < dic->cluster_size; i++) {
> > + if (dic->rpages[i])
> > + continue;
> > + if (!dic->tpages[i])
> > + continue;
> > + f2fs_compress_free_page(dic->tpages[i]);
> > + }
> > + page_array_free(dic->inode, dic->tpages, dic->cluster_size);
> >   }
> >
> > - dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
> > - if (!dic-

Re: [f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-07-16 Thread Chao Yu

On 2022/6/22 0:07, Jaegeuk Kim wrote:

Can we do like this which has no functional change but refactored
some functions?

---
  fs/f2fs/compress.c | 208 -
  fs/f2fs/data.c |  52 
  fs/f2fs/f2fs.h |  17 ++--
  3 files changed, 172 insertions(+), 105 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index fa237e5c7173..494ce3634b62 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -729,28 +729,18 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
return ret;
  }
  
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)

+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool end_io)
  {
-   struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
-   struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
-   f2fs_cops[fi->i_compress_algorithm];
-   int ret;
+   f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
int i;
  
-	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,

-   dic->cluster_size, fi->i_compress_algorithm);
-
-   if (dic->failed) {
-   ret = -EIO;
-   goto out_end_io;
-   }
+   if (!(end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode


How about using allow_decompress_in_softirq() to wrap !f2fs_low_mem_mode()
to improve readability?


+   return 0;
  
  	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);

-   if (!dic->tpages) {
-   ret = -ENOMEM;
-   goto out_end_io;
-   }
+   if (!dic->tpages)
+   return 1;


return -ENOMEM instead of magic number.

  
  	for (i = 0; i < dic->cluster_size; i++) {

if (dic->rpages[i]) {
@@ -759,28 +749,100 @@ void f2fs_decompress_cluster(struct decompress_io_ctx 
*dic)
}
  
  		dic->tpages[i] = f2fs_compress_alloc_page();

-   if (!dic->tpages[i]) {
-   ret = -ENOMEM;
-   goto out_end_io;
-   }
+   if (!dic->tpages[i])
+   return 1;


Ditto,


}
  
+	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);

+   if (!dic->rbuf)
+   return 1;


Ditto,


+
+   dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+   if (!dic->cbuf)
+   return 1;


Ditto,


+
+   cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];


No need to assign cops again?


if (cops->init_decompress_ctx) {
-   ret = cops->init_decompress_ctx(dic);
+   int ret = cops->init_decompress_ctx(dic);
+
if (ret)
-   goto out_end_io;
+   return 1;


How about returning ret here instead of magic number?


}
  
-	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);

-   if (!dic->rbuf) {
-   ret = -ENOMEM;
-   goto out_destroy_decompress_ctx;
+   return 0;
+}
+
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+   bool bypass_destroy_callback, bool end_io)
+{
+   const struct f2fs_compress_ops *cops =
+   f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+
+   if (end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode)))
+   return;
+
+   if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+   cops->destroy_decompress_ctx(dic);
+
+   if (dic->cbuf)
+   vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+
+   if (dic->rbuf)
+   vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+   bool bypass_destroy_callback)
+{
+   int i;
+
+   f2fs_release_decomp_mem(dic, bypass_destroy_callback, false);
+
+   if (dic->tpages) {
+   for (i = 0; i < dic->cluster_size; i++) {
+   if (dic->rpages[i])
+   continue;
+   if (!dic->tpages[i])
+   continue;
+   f2fs_compress_free_page(dic->tpages[i]);
+   }
+   page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
  
-	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);

-   if (!dic->cbuf) {
+   if (dic->cpages) {
+   for (i = 0; i < dic->nr_cpages; i++) {
+   if (!dic->cpages[i])
+   continue;
+   f2fs_compress_free_page(dic->cpages[i]);
+   }
+   page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+   }
+
+   page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+   kmem_cache_free(dic_entry_slab, dic);
+}
+
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+{
+   struct f2fs_sb_info *sbi = F2FS

Re: [f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-06-21 Thread Daeho Jeong
On Mon, Jun 20, 2022 at 5:58 PM Gao Xiang  wrote:
>
> On Mon, Jun 20, 2022 at 10:38:43AM -0700, Daeho Jeong wrote:
> > From: Daeho Jeong 
> >
> > Now decompression is being handled in workqueue and it makes read I/O
> > latency non-deterministic, because of the non-deterministic scheduling
> > nature of workqueues. So, I made it handled in softirq context only if
> > possible, not in low memory devices, since this modification will
maintain decompression-related memory a little longer.
> >
>
> Again, I don't think this method is scalable.  Since you already handle
> all decompression algorithms in this way.  Later on, I wonder if you'd
> like to handle all:
>  - decompression algorithms;
>  - verity algorithms;
>  - decrypt algorithms;
>
> in this way, regardless of slow decompression algorithms, that would be a
> long latency and CPU overhead of softirq context.  This is my last words
> on this, I will not talk anymore.
>
> Thanks,
> Gao Xiang

I understand what you're worried about. However, verity cannot be
inlined because it triggers I/Os by nature. Apart from that, the other
two mostly incur overhead comparable to memcpy. Besides, the decrypt
part is mostly done in hardware these days anyway.

Thanks,


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-06-21 Thread Jaegeuk Kim
Can we do like this which has no functional change but refactored
some functions?

---
 fs/f2fs/compress.c | 208 -
 fs/f2fs/data.c |  52 
 fs/f2fs/f2fs.h |  17 ++--
 3 files changed, 172 insertions(+), 105 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index fa237e5c7173..494ce3634b62 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -729,28 +729,18 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
return ret;
 }
 
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool end_io)
 {
-   struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
-   struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
-   f2fs_cops[fi->i_compress_algorithm];
-   int ret;
+   f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
int i;
 
-   trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
-   dic->cluster_size, fi->i_compress_algorithm);
-
-   if (dic->failed) {
-   ret = -EIO;
-   goto out_end_io;
-   }
+   if (!(end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode
+   return 0;
 
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
-   if (!dic->tpages) {
-   ret = -ENOMEM;
-   goto out_end_io;
-   }
+   if (!dic->tpages)
+   return 1;
 
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i]) {
@@ -759,28 +749,100 @@ void f2fs_decompress_cluster(struct decompress_io_ctx 
*dic)
}
 
dic->tpages[i] = f2fs_compress_alloc_page();
-   if (!dic->tpages[i]) {
-   ret = -ENOMEM;
-   goto out_end_io;
-   }
+   if (!dic->tpages[i])
+   return 1;
}
 
+   dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
+   if (!dic->rbuf)
+   return 1;
+
+   dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+   if (!dic->cbuf)
+   return 1;
+
+   cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
if (cops->init_decompress_ctx) {
-   ret = cops->init_decompress_ctx(dic);
+   int ret = cops->init_decompress_ctx(dic);
+
if (ret)
-   goto out_end_io;
+   return 1;
}
 
-   dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
-   if (!dic->rbuf) {
-   ret = -ENOMEM;
-   goto out_destroy_decompress_ctx;
+   return 0;
+}
+
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+   bool bypass_destroy_callback, bool end_io)
+{
+   const struct f2fs_compress_ops *cops =
+   f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+
+   if (end_io ^ f2fs_low_mem_mode(F2FS_I_SB(dic->inode)))
+   return;
+
+   if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+   cops->destroy_decompress_ctx(dic);
+
+   if (dic->cbuf)
+   vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+
+   if (dic->rbuf)
+   vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+   bool bypass_destroy_callback)
+{
+   int i;
+
+   f2fs_release_decomp_mem(dic, bypass_destroy_callback, false);
+
+   if (dic->tpages) {
+   for (i = 0; i < dic->cluster_size; i++) {
+   if (dic->rpages[i])
+   continue;
+   if (!dic->tpages[i])
+   continue;
+   f2fs_compress_free_page(dic->tpages[i]);
+   }
+   page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
 
-   dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
-   if (!dic->cbuf) {
+   if (dic->cpages) {
+   for (i = 0; i < dic->nr_cpages; i++) {
+   if (!dic->cpages[i])
+   continue;
+   f2fs_compress_free_page(dic->cpages[i]);
+   }
+   page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+   }
+
+   page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+   kmem_cache_free(dic_entry_slab, dic);
+}
+
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+{
+   struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+   struct f2fs_inode_info *fi = F2FS_I(dic->inode);
+   const struct f2fs_compress_ops *cops =
+   f2fs_cops[fi->i_compress_algorithm];
+   bool bypass_callback = false;
+   int ret;
+
+   trace_f2fs_decompress_pages_start(dic-

Re: [f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-06-20 Thread Gao Xiang
On Mon, Jun 20, 2022 at 10:38:43AM -0700, Daeho Jeong wrote:
> From: Daeho Jeong 
> 
> Now decompression is being handled in workqueue and it makes read I/O
> latency non-deterministic, because of the non-deterministic scheduling
> nature of workqueues. So, I made it handled in softirq context only if
> possible, not in low memory devices, since this modification will
> maintain decompression-related memory a little longer.
> 

Again, I don't think this method is scalable.  Since you already handle
all decompression algorithms in this way.  Later on, I wonder if you'd
like to handle all:
 - decompression algorithms;
 - verity algorithms;
 - decrypt algorithms;

in this way, regardless of slow decompression algorithms, that would be a
long latency and CPU overhead of softirq context.  This is my last words
on this, I will not talk anymore.

Thanks,
Gao Xiang


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 2/2] f2fs: handle decompress only post processing in softirq

2022-06-20 Thread Daeho Jeong
From: Daeho Jeong 

Now decompression is being handled in workqueue and it makes read I/O
latency non-deterministic, because of the non-deterministic scheduling
nature of workqueues. So, I made it handled in softirq context only if
possible, not in low memory devices, since this modification will
maintain decompression-related memory a little longer.

Signed-off-by: Daeho Jeong 
---
v1: fixed build errors reported by kernel test robot 
---
 fs/f2fs/compress.c | 180 +
 fs/f2fs/data.c |  52 -
 fs/f2fs/f2fs.h |  17 +++--
 3 files changed, 162 insertions(+), 87 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 24824cd96f36..ddd47ad464d9 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -729,14 +729,18 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
return ret;
 }
 
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic);
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+   bool bypass_destroy_callback);
+
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
 {
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
+   bool bypass_callback = false;
int ret;
-   int i;
 
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm);
@@ -746,49 +750,20 @@ void f2fs_decompress_cluster(struct decompress_io_ctx 
*dic)
goto out_end_io;
}
 
-   dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
-   if (!dic->tpages) {
-   ret = -ENOMEM;
-   goto out_end_io;
-   }
-
-   for (i = 0; i < dic->cluster_size; i++) {
-   if (dic->rpages[i]) {
-   dic->tpages[i] = dic->rpages[i];
-   continue;
-   }
-
-   dic->tpages[i] = f2fs_compress_alloc_page();
-   if (!dic->tpages[i]) {
+   if (f2fs_low_mem_mode(sbi)) {
+   if (f2fs_prepare_decomp_mem(dic)) {
+   bypass_callback = true;
ret = -ENOMEM;
-   goto out_end_io;
+   goto out_release;
}
}
 
-   if (cops->init_decompress_ctx) {
-   ret = cops->init_decompress_ctx(dic);
-   if (ret)
-   goto out_end_io;
-   }
-
-   dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
-   if (!dic->rbuf) {
-   ret = -ENOMEM;
-   goto out_destroy_decompress_ctx;
-   }
-
-   dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
-   if (!dic->cbuf) {
-   ret = -ENOMEM;
-   goto out_vunmap_rbuf;
-   }
-
dic->clen = le32_to_cpu(dic->cbuf->clen);
dic->rlen = PAGE_SIZE << dic->log_cluster_size;
 
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
ret = -EFSCORRUPTED;
-   goto out_vunmap_cbuf;
+   goto out_release;
}
 
ret = cops->decompress_pages(dic);
@@ -809,17 +784,14 @@ void f2fs_decompress_cluster(struct decompress_io_ctx 
*dic)
}
}
 
-out_vunmap_cbuf:
-   vm_unmap_ram(dic->cbuf, dic->nr_cpages);
-out_vunmap_rbuf:
-   vm_unmap_ram(dic->rbuf, dic->cluster_size);
-out_destroy_decompress_ctx:
-   if (cops->destroy_decompress_ctx)
-   cops->destroy_decompress_ctx(dic);
+out_release:
+   if (f2fs_low_mem_mode(sbi))
+   f2fs_release_decomp_mem(dic, bypass_callback);
+
 out_end_io:
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
dic->clen, ret);
-   f2fs_decompress_end_io(dic, ret);
+   f2fs_decompress_end_io(dic, ret, in_task);
 }
 
 /*
@@ -829,7 +801,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
  * (or in the case of a failure, cleans up without actually decompressing).
  */
 void f2fs_end_read_compressed_page(struct page *page, bool failed,
-   block_t blkaddr)
+   block_t blkaddr, bool in_task)
 {
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
@@ -839,12 +811,12 @@ void f2fs_end_read_compressed_page(struct page *page, 
bool failed,
 
if (failed)
WRITE_ONCE(dic->failed, true);
-   else if (blkaddr)
+   else if (blkaddr && in_task)
f2fs_cache_compressed_page(sbi, page,
dic->inode->i_ino, blkaddr);
 
if (atomic_dec_and_test(