Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-10-01 Thread Jaegeuk Kim
On 10/01, Sahitya Tummala wrote:
> On Wed, Sep 26, 2018 at 12:20:38PM +0800, Chao Yu wrote:
> 
> Hi Chao, Jaegeuk,
> 
> Is there any further conclusion on this thread?
> 
> I think we still need this patch in addition to another patch from Chao -
> "Revert: "f2fs: check last page index in cached bio to decide submission""
> to make sure it covers the encrypted data block path as well.

Queued and started to test. :P

> 
> Thanks,
> Sahitya.
> 
> > On 2018/9/26 11:32, Jaegeuk Kim wrote:
> > > On 09/26, Chao Yu wrote:
> > >> On 2018/9/26 9:42, Jaegeuk Kim wrote:
> > >>> On 09/26, Chao Yu wrote:
> >  On 2018/9/26 8:20, Jaegeuk Kim wrote:
> > > On 09/21, Chao Yu wrote:
> > >> On 2018/9/18 10:14, Chao Yu wrote:
> > >>> On 2018/9/18 10:02, Jaegeuk Kim wrote:
> >  On 09/18, Chao Yu wrote:
> > > On 2018/9/18 9:37, Jaegeuk Kim wrote:
> > >> On 09/18, Chao Yu wrote:
> > >>> On 2018/9/18 9:04, Jaegeuk Kim wrote:
> >  On 09/13, Chao Yu wrote:
> > > From: Chao Yu 
> > >
> > > When migrating encrypted block from background GC thread, we 
> > > only add
> > > them into f2fs inner bio cache, but forget to submit the 
> > > cached bio, it
> > > may cause potential deadlock when we are waiting page 
> > > writebacked, fix
> > > it.
> > >
> > > Signed-off-by: Chao Yu 
> > > ---
> > > v3:
> > > clean up codes suggested by Jaegeuk.
> > >  fs/f2fs/f2fs.h |  2 +-
> > >  fs/f2fs/gc.c   | 71 
> > > +++---
> > >  fs/f2fs/node.c | 13 ++---
> > >  3 files changed, 61 insertions(+), 25 deletions(-)
> > >
> > > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > > index b676b82312e0..917b2ca76aac 100644
> > > --- a/fs/f2fs/f2fs.h
> > > +++ b/fs/f2fs/f2fs.h
> > > @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> > > dnode_of_data *dn, unsigned int ofs);
> > >  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
> > >  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, 
> > > pgoff_t nid);
> > >  struct page *f2fs_get_node_page_ra(struct page *parent, int 
> > > start);
> > > -void f2fs_move_node_page(struct page *node_page, int 
> > > gc_type);
> > > +int f2fs_move_node_page(struct page *node_page, int gc_type);
> > >  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct 
> > > inode *inode,
> > >   struct writeback_control *wbc, bool 
> > > atomic,
> > >   unsigned int *seq_id);
> > > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > > index a4c1a419611d..f57622cfe058 100644
> > > --- a/fs/f2fs/gc.c
> > > +++ b/fs/f2fs/gc.c
> > > @@ -461,7 +461,7 @@ static int check_valid_map(struct 
> > > f2fs_sb_info *sbi,
> > >   * On validity, copy that node with cold status, otherwise 
> > > (invalid node)
> > >   * ignore that.
> > >   */
> > > -static void gc_node_segment(struct f2fs_sb_info *sbi,
> > > +static int gc_node_segment(struct f2fs_sb_info *sbi,
> > >   struct f2fs_summary *sum, unsigned int segno, 
> > > int gc_type)
> > >  {
> > >   struct f2fs_summary *entry;
> > > @@ -469,6 +469,7 @@ static void gc_node_segment(struct 
> > > f2fs_sb_info *sbi,
> > >   int off;
> > >   int phase = 0;
> > >   bool fggc = (gc_type == FG_GC);
> > > + int submitted = 0;
> > >  
> > >   start_addr = START_BLOCK(sbi, segno);
> > >  
> > > @@ -482,10 +483,11 @@ static void gc_node_segment(struct 
> > > f2fs_sb_info *sbi,
> > >   nid_t nid = le32_to_cpu(entry->nid);
> > >   struct page *node_page;
> > >   struct node_info ni;
> > > + int err;
> > >  
> > >   /* stop BG_GC if there is not enough free 
> > > sections. */
> > >   if (gc_type == BG_GC && 
> > > has_not_enough_free_secs(sbi, 0, 0))
> > > - return;
> > > + return submitted;
> > >  
> > >   if (check_valid_map(sbi, segno, off) == 0)
> > >   continue;
> > > @@ -522,7 +524,9 @@ static void 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-10-01 Thread Sahitya Tummala
On Wed, Sep 26, 2018 at 12:20:38PM +0800, Chao Yu wrote:

Hi Chao, Jaegeuk,

Is there any further conclusion on this thread?

I think we still need this patch in addition to another patch from Chao -
"Revert: "f2fs: check last page index in cached bio to decide submission""
to make sure it covers the encrypted data block path as well.

Thanks,
Sahitya.

> On 2018/9/26 11:32, Jaegeuk Kim wrote:
> > On 09/26, Chao Yu wrote:
> >> On 2018/9/26 9:42, Jaegeuk Kim wrote:
> >>> On 09/26, Chao Yu wrote:
>  On 2018/9/26 8:20, Jaegeuk Kim wrote:
> > On 09/21, Chao Yu wrote:
> >> On 2018/9/18 10:14, Chao Yu wrote:
> >>> On 2018/9/18 10:02, Jaegeuk Kim wrote:
>  On 09/18, Chao Yu wrote:
> > On 2018/9/18 9:37, Jaegeuk Kim wrote:
> >> On 09/18, Chao Yu wrote:
> >>> On 2018/9/18 9:04, Jaegeuk Kim wrote:
>  On 09/13, Chao Yu wrote:
> > From: Chao Yu 
> >
> > When migrating encrypted block from background GC thread, we 
> > only add
> > them into f2fs inner bio cache, but forget to submit the cached 
> > bio, it
> > may cause potential deadlock when we are waiting page 
> > writebacked, fix
> > it.
> >
> > Signed-off-by: Chao Yu 
> > ---
> > v3:
> > clean up codes suggested by Jaegeuk.
> >  fs/f2fs/f2fs.h |  2 +-
> >  fs/f2fs/gc.c   | 71 
> > +++---
> >  fs/f2fs/node.c | 13 ++---
> >  3 files changed, 61 insertions(+), 25 deletions(-)
> >
> > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > index b676b82312e0..917b2ca76aac 100644
> > --- a/fs/f2fs/f2fs.h
> > +++ b/fs/f2fs/f2fs.h
> > @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> > dnode_of_data *dn, unsigned int ofs);
> >  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
> >  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, 
> > pgoff_t nid);
> >  struct page *f2fs_get_node_page_ra(struct page *parent, int 
> > start);
> > -void f2fs_move_node_page(struct page *node_page, int gc_type);
> > +int f2fs_move_node_page(struct page *node_page, int gc_type);
> >  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct 
> > inode *inode,
> > struct writeback_control *wbc, bool 
> > atomic,
> > unsigned int *seq_id);
> > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > index a4c1a419611d..f57622cfe058 100644
> > --- a/fs/f2fs/gc.c
> > +++ b/fs/f2fs/gc.c
> > @@ -461,7 +461,7 @@ static int check_valid_map(struct 
> > f2fs_sb_info *sbi,
> >   * On validity, copy that node with cold status, otherwise 
> > (invalid node)
> >   * ignore that.
> >   */
> > -static void gc_node_segment(struct f2fs_sb_info *sbi,
> > +static int gc_node_segment(struct f2fs_sb_info *sbi,
> > struct f2fs_summary *sum, unsigned int segno, 
> > int gc_type)
> >  {
> > struct f2fs_summary *entry;
> > @@ -469,6 +469,7 @@ static void gc_node_segment(struct 
> > f2fs_sb_info *sbi,
> > int off;
> > int phase = 0;
> > bool fggc = (gc_type == FG_GC);
> > +   int submitted = 0;
> >  
> > start_addr = START_BLOCK(sbi, segno);
> >  
> > @@ -482,10 +483,11 @@ static void gc_node_segment(struct 
> > f2fs_sb_info *sbi,
> > nid_t nid = le32_to_cpu(entry->nid);
> > struct page *node_page;
> > struct node_info ni;
> > +   int err;
> >  
> > /* stop BG_GC if there is not enough free 
> > sections. */
> > if (gc_type == BG_GC && 
> > has_not_enough_free_secs(sbi, 0, 0))
> > -   return;
> > +   return submitted;
> >  
> > if (check_valid_map(sbi, segno, off) == 0)
> > continue;
> > @@ -522,7 +524,9 @@ static void gc_node_segment(struct 
> > f2fs_sb_info *sbi,
> > continue;
> > }
> >  
> > -   f2fs_move_node_page(node_page, gc_type);
> > +  

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-25 Thread Chao Yu
On 2018/9/26 11:32, Jaegeuk Kim wrote:
> On 09/26, Chao Yu wrote:
>> On 2018/9/26 9:42, Jaegeuk Kim wrote:
>>> On 09/26, Chao Yu wrote:
 On 2018/9/26 8:20, Jaegeuk Kim wrote:
> On 09/21, Chao Yu wrote:
>> On 2018/9/18 10:14, Chao Yu wrote:
>>> On 2018/9/18 10:02, Jaegeuk Kim wrote:
 On 09/18, Chao Yu wrote:
> On 2018/9/18 9:37, Jaegeuk Kim wrote:
>> On 09/18, Chao Yu wrote:
>>> On 2018/9/18 9:04, Jaegeuk Kim wrote:
 On 09/13, Chao Yu wrote:
> From: Chao Yu 
>
> When migrating encrypted block from background GC thread, we only 
> add
> them into f2fs inner bio cache, but forget to submit the cached 
> bio, it
> may cause potential deadlock when we are waiting page 
> writebacked, fix
> it.
>
> Signed-off-by: Chao Yu 
> ---
> v3:
> clean up codes suggested by Jaegeuk.
>  fs/f2fs/f2fs.h |  2 +-
>  fs/f2fs/gc.c   | 71 
> +++---
>  fs/f2fs/node.c | 13 ++---
>  3 files changed, 61 insertions(+), 25 deletions(-)
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index b676b82312e0..917b2ca76aac 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> dnode_of_data *dn, unsigned int ofs);
>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, 
> pgoff_t nid);
>  struct page *f2fs_get_node_page_ra(struct page *parent, int 
> start);
> -void f2fs_move_node_page(struct page *node_page, int gc_type);
> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
> *inode,
>   struct writeback_control *wbc, bool atomic,
>   unsigned int *seq_id);
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index a4c1a419611d..f57622cfe058 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -461,7 +461,7 @@ static int check_valid_map(struct 
> f2fs_sb_info *sbi,
>   * On validity, copy that node with cold status, otherwise 
> (invalid node)
>   * ignore that.
>   */
> -static void gc_node_segment(struct f2fs_sb_info *sbi,
> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>   struct f2fs_summary *sum, unsigned int segno, int 
> gc_type)
>  {
>   struct f2fs_summary *entry;
> @@ -469,6 +469,7 @@ static void gc_node_segment(struct 
> f2fs_sb_info *sbi,
>   int off;
>   int phase = 0;
>   bool fggc = (gc_type == FG_GC);
> + int submitted = 0;
>  
>   start_addr = START_BLOCK(sbi, segno);
>  
> @@ -482,10 +483,11 @@ static void gc_node_segment(struct 
> f2fs_sb_info *sbi,
>   nid_t nid = le32_to_cpu(entry->nid);
>   struct page *node_page;
>   struct node_info ni;
> + int err;
>  
>   /* stop BG_GC if there is not enough free sections. */
>   if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
> 0, 0))
> - return;
> + return submitted;
>  
>   if (check_valid_map(sbi, segno, off) == 0)
>   continue;
> @@ -522,7 +524,9 @@ static void gc_node_segment(struct 
> f2fs_sb_info *sbi,
>   continue;
>   }
>  
> - f2fs_move_node_page(node_page, gc_type);
> + err = f2fs_move_node_page(node_page, gc_type);
> + if (!err && gc_type == FG_GC)
> + submitted++;
>   stat_inc_node_blk_count(sbi, 1, gc_type);
>   }
>  
> @@ -531,6 +535,7 @@ static void gc_node_segment(struct 
> f2fs_sb_info *sbi,
>  
>   if (fggc)
> >   atomic_dec(&sbi->wb_sync_req[NODE]);
> + return submitted;
>  }
>  
>  /*
> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
> pgoff_t index)
>   * Move data block via META_MAPPING while keeping locked data 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-25 Thread Jaegeuk Kim
On 09/26, Chao Yu wrote:
> On 2018/9/26 9:42, Jaegeuk Kim wrote:
> > On 09/26, Chao Yu wrote:
> >> On 2018/9/26 8:20, Jaegeuk Kim wrote:
> >>> On 09/21, Chao Yu wrote:
>  On 2018/9/18 10:14, Chao Yu wrote:
> > On 2018/9/18 10:02, Jaegeuk Kim wrote:
> >> On 09/18, Chao Yu wrote:
> >>> On 2018/9/18 9:37, Jaegeuk Kim wrote:
>  On 09/18, Chao Yu wrote:
> > On 2018/9/18 9:04, Jaegeuk Kim wrote:
> >> On 09/13, Chao Yu wrote:
> >>> From: Chao Yu 
> >>>
> >>> When migrating encrypted block from background GC thread, we only 
> >>> add
> >>> them into f2fs inner bio cache, but forget to submit the cached 
> >>> bio, it
> >>> may cause potential deadlock when we are waiting page 
> >>> writebacked, fix
> >>> it.
> >>>
> >>> Signed-off-by: Chao Yu 
> >>> ---
> >>> v3:
> >>> clean up codes suggested by Jaegeuk.
> >>>  fs/f2fs/f2fs.h |  2 +-
> >>>  fs/f2fs/gc.c   | 71 
> >>> +++---
> >>>  fs/f2fs/node.c | 13 ++---
> >>>  3 files changed, 61 insertions(+), 25 deletions(-)
> >>>
> >>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> >>> index b676b82312e0..917b2ca76aac 100644
> >>> --- a/fs/f2fs/f2fs.h
> >>> +++ b/fs/f2fs/f2fs.h
> >>> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> >>> dnode_of_data *dn, unsigned int ofs);
> >>>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
> >>>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, 
> >>> pgoff_t nid);
> >>>  struct page *f2fs_get_node_page_ra(struct page *parent, int 
> >>> start);
> >>> -void f2fs_move_node_page(struct page *node_page, int gc_type);
> >>> +int f2fs_move_node_page(struct page *node_page, int gc_type);
> >>>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
> >>> *inode,
> >>>   struct writeback_control *wbc, bool atomic,
> >>>   unsigned int *seq_id);
> >>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> >>> index a4c1a419611d..f57622cfe058 100644
> >>> --- a/fs/f2fs/gc.c
> >>> +++ b/fs/f2fs/gc.c
> >>> @@ -461,7 +461,7 @@ static int check_valid_map(struct 
> >>> f2fs_sb_info *sbi,
> >>>   * On validity, copy that node with cold status, otherwise 
> >>> (invalid node)
> >>>   * ignore that.
> >>>   */
> >>> -static void gc_node_segment(struct f2fs_sb_info *sbi,
> >>> +static int gc_node_segment(struct f2fs_sb_info *sbi,
> >>>   struct f2fs_summary *sum, unsigned int segno, int 
> >>> gc_type)
> >>>  {
> >>>   struct f2fs_summary *entry;
> >>> @@ -469,6 +469,7 @@ static void gc_node_segment(struct 
> >>> f2fs_sb_info *sbi,
> >>>   int off;
> >>>   int phase = 0;
> >>>   bool fggc = (gc_type == FG_GC);
> >>> + int submitted = 0;
> >>>  
> >>>   start_addr = START_BLOCK(sbi, segno);
> >>>  
> >>> @@ -482,10 +483,11 @@ static void gc_node_segment(struct 
> >>> f2fs_sb_info *sbi,
> >>>   nid_t nid = le32_to_cpu(entry->nid);
> >>>   struct page *node_page;
> >>>   struct node_info ni;
> >>> + int err;
> >>>  
> >>>   /* stop BG_GC if there is not enough free sections. */
> >>>   if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
> >>> 0, 0))
> >>> - return;
> >>> + return submitted;
> >>>  
> >>>   if (check_valid_map(sbi, segno, off) == 0)
> >>>   continue;
> >>> @@ -522,7 +524,9 @@ static void gc_node_segment(struct 
> >>> f2fs_sb_info *sbi,
> >>>   continue;
> >>>   }
> >>>  
> >>> - f2fs_move_node_page(node_page, gc_type);
> >>> + err = f2fs_move_node_page(node_page, gc_type);
> >>> + if (!err && gc_type == FG_GC)
> >>> + submitted++;
> >>>   stat_inc_node_blk_count(sbi, 1, gc_type);
> >>>   }
> >>>  
> >>> @@ -531,6 +535,7 @@ static void gc_node_segment(struct 
> >>> f2fs_sb_info *sbi,
> >>>  
> >>>   if (fggc)
> >>>   atomic_dec(&sbi->wb_sync_req[NODE]);
> >>> + return submitted;
> >>>  }
> >>>  
> >>>  /*
> >>> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
> >>> pgoff_t index)
> >>>   * Move data block via META_MAPPING while keeping locked data 
> >>> page.
> >>>   * This 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-25 Thread Chao Yu
On 2018/9/26 9:42, Jaegeuk Kim wrote:
> On 09/26, Chao Yu wrote:
>> On 2018/9/26 8:20, Jaegeuk Kim wrote:
>>> On 09/21, Chao Yu wrote:
 On 2018/9/18 10:14, Chao Yu wrote:
> On 2018/9/18 10:02, Jaegeuk Kim wrote:
>> On 09/18, Chao Yu wrote:
>>> On 2018/9/18 9:37, Jaegeuk Kim wrote:
 On 09/18, Chao Yu wrote:
> On 2018/9/18 9:04, Jaegeuk Kim wrote:
>> On 09/13, Chao Yu wrote:
>>> From: Chao Yu 
>>>
>>> When migrating encrypted block from background GC thread, we only 
>>> add
>>> them into f2fs inner bio cache, but forget to submit the cached 
>>> bio, it
>>> may cause potential deadlock when we are waiting page writebacked, 
>>> fix
>>> it.
>>>
>>> Signed-off-by: Chao Yu 
>>> ---
>>> v3:
>>> clean up codes suggested by Jaegeuk.
>>>  fs/f2fs/f2fs.h |  2 +-
>>>  fs/f2fs/gc.c   | 71 
>>> +++---
>>>  fs/f2fs/node.c | 13 ++---
>>>  3 files changed, 61 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>> index b676b82312e0..917b2ca76aac 100644
>>> --- a/fs/f2fs/f2fs.h
>>> +++ b/fs/f2fs/f2fs.h
>>> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
>>> dnode_of_data *dn, unsigned int ofs);
>>>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>>>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t 
>>> nid);
>>>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
>>> -void f2fs_move_node_page(struct page *node_page, int gc_type);
>>> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>>>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
>>> *inode,
>>> struct writeback_control *wbc, bool atomic,
>>> unsigned int *seq_id);
>>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>>> index a4c1a419611d..f57622cfe058 100644
>>> --- a/fs/f2fs/gc.c
>>> +++ b/fs/f2fs/gc.c
>>> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info 
>>> *sbi,
>>>   * On validity, copy that node with cold status, otherwise 
>>> (invalid node)
>>>   * ignore that.
>>>   */
>>> -static void gc_node_segment(struct f2fs_sb_info *sbi,
>>> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>>> struct f2fs_summary *sum, unsigned int segno, int 
>>> gc_type)
>>>  {
>>> struct f2fs_summary *entry;
>>> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>> int off;
>>> int phase = 0;
>>> bool fggc = (gc_type == FG_GC);
>>> +   int submitted = 0;
>>>  
>>> start_addr = START_BLOCK(sbi, segno);
>>>  
>>> @@ -482,10 +483,11 @@ static void gc_node_segment(struct 
>>> f2fs_sb_info *sbi,
>>> nid_t nid = le32_to_cpu(entry->nid);
>>> struct page *node_page;
>>> struct node_info ni;
>>> +   int err;
>>>  
>>> /* stop BG_GC if there is not enough free sections. */
>>> if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
>>> 0, 0))
>>> -   return;
>>> +   return submitted;
>>>  
>>> if (check_valid_map(sbi, segno, off) == 0)
>>> continue;
>>> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>> continue;
>>> }
>>>  
>>> -   f2fs_move_node_page(node_page, gc_type);
>>> +   err = f2fs_move_node_page(node_page, gc_type);
>>> +   if (!err && gc_type == FG_GC)
>>> +   submitted++;
>>> stat_inc_node_blk_count(sbi, 1, gc_type);
>>> }
>>>  
>>> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>>  
>>> if (fggc)
>>> atomic_dec(&sbi->wb_sync_req[NODE]);
>>> +   return submitted;
>>>  }
>>>  
>>>  /*
>>> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
>>> pgoff_t index)
>>>   * Move data block via META_MAPPING while keeping locked data page.
>>>   * This can be used to move blocks, aka LBAs, directly on disk.
>>>   */
>>> -static void move_data_block(struct inode *inode, block_t bidx,
>>> +static int move_data_block(struct inode *inode, block_t 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-25 Thread Jaegeuk Kim
On 09/26, Chao Yu wrote:
> On 2018/9/26 8:20, Jaegeuk Kim wrote:
> > On 09/21, Chao Yu wrote:
> >> On 2018/9/18 10:14, Chao Yu wrote:
> >>> On 2018/9/18 10:02, Jaegeuk Kim wrote:
>  On 09/18, Chao Yu wrote:
> > On 2018/9/18 9:37, Jaegeuk Kim wrote:
> >> On 09/18, Chao Yu wrote:
> >>> On 2018/9/18 9:04, Jaegeuk Kim wrote:
>  On 09/13, Chao Yu wrote:
> > From: Chao Yu 
> >
> > When migrating encrypted block from background GC thread, we only 
> > add
> > them into f2fs inner bio cache, but forget to submit the cached 
> > bio, it
> > may cause potential deadlock when we are waiting page writebacked, 
> > fix
> > it.
> >
> > Signed-off-by: Chao Yu 
> > ---
> > v3:
> > clean up codes suggested by Jaegeuk.
> >  fs/f2fs/f2fs.h |  2 +-
> >  fs/f2fs/gc.c   | 71 
> > +++---
> >  fs/f2fs/node.c | 13 ++---
> >  3 files changed, 61 insertions(+), 25 deletions(-)
> >
> > diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> > index b676b82312e0..917b2ca76aac 100644
> > --- a/fs/f2fs/f2fs.h
> > +++ b/fs/f2fs/f2fs.h
> > @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> > dnode_of_data *dn, unsigned int ofs);
> >  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
> >  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t 
> > nid);
> >  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
> > -void f2fs_move_node_page(struct page *node_page, int gc_type);
> > +int f2fs_move_node_page(struct page *node_page, int gc_type);
> >  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
> > *inode,
> > struct writeback_control *wbc, bool atomic,
> > unsigned int *seq_id);
> > diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> > index a4c1a419611d..f57622cfe058 100644
> > --- a/fs/f2fs/gc.c
> > +++ b/fs/f2fs/gc.c
> > @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info 
> > *sbi,
> >   * On validity, copy that node with cold status, otherwise 
> > (invalid node)
> >   * ignore that.
> >   */
> > -static void gc_node_segment(struct f2fs_sb_info *sbi,
> > +static int gc_node_segment(struct f2fs_sb_info *sbi,
> > struct f2fs_summary *sum, unsigned int segno, int 
> > gc_type)
> >  {
> > struct f2fs_summary *entry;
> > @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info 
> > *sbi,
> > int off;
> > int phase = 0;
> > bool fggc = (gc_type == FG_GC);
> > +   int submitted = 0;
> >  
> > start_addr = START_BLOCK(sbi, segno);
> >  
> > @@ -482,10 +483,11 @@ static void gc_node_segment(struct 
> > f2fs_sb_info *sbi,
> > nid_t nid = le32_to_cpu(entry->nid);
> > struct page *node_page;
> > struct node_info ni;
> > +   int err;
> >  
> > /* stop BG_GC if there is not enough free sections. */
> > if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
> > 0, 0))
> > -   return;
> > +   return submitted;
> >  
> > if (check_valid_map(sbi, segno, off) == 0)
> > continue;
> > @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info 
> > *sbi,
> > continue;
> > }
> >  
> > -   f2fs_move_node_page(node_page, gc_type);
> > +   err = f2fs_move_node_page(node_page, gc_type);
> > +   if (!err && gc_type == FG_GC)
> > +   submitted++;
> > stat_inc_node_blk_count(sbi, 1, gc_type);
> > }
> >  
> > @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info 
> > *sbi,
> >  
> > if (fggc)
> > atomic_dec(&sbi->wb_sync_req[NODE]);
> > +   return submitted;
> >  }
> >  
> >  /*
> > @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
> > pgoff_t index)
> >   * Move data block via META_MAPPING while keeping locked data page.
> >   * This can be used to move blocks, aka LBAs, directly on disk.
> >   */
> > -static void move_data_block(struct inode *inode, block_t bidx,
> > +static int move_data_block(struct inode *inode, block_t bidx,
> > 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-25 Thread Chao Yu
On 2018/9/26 8:20, Jaegeuk Kim wrote:
> On 09/21, Chao Yu wrote:
>> On 2018/9/18 10:14, Chao Yu wrote:
>>> On 2018/9/18 10:02, Jaegeuk Kim wrote:
 On 09/18, Chao Yu wrote:
> On 2018/9/18 9:37, Jaegeuk Kim wrote:
>> On 09/18, Chao Yu wrote:
>>> On 2018/9/18 9:04, Jaegeuk Kim wrote:
 On 09/13, Chao Yu wrote:
> From: Chao Yu 
>
> When migrating encrypted block from background GC thread, we only add
> them into f2fs inner bio cache, but forget to submit the cached bio, 
> it
> may cause potential deadlock when we are waiting page writebacked, fix
> it.
>
> Signed-off-by: Chao Yu 
> ---
> v3:
> clean up codes suggested by Jaegeuk.
>  fs/f2fs/f2fs.h |  2 +-
>  fs/f2fs/gc.c   | 71 
> +++---
>  fs/f2fs/node.c | 13 ++---
>  3 files changed, 61 insertions(+), 25 deletions(-)
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index b676b82312e0..917b2ca76aac 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> dnode_of_data *dn, unsigned int ofs);
>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t 
> nid);
>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
> -void f2fs_move_node_page(struct page *node_page, int gc_type);
> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
> *inode,
>   struct writeback_control *wbc, bool atomic,
>   unsigned int *seq_id);
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index a4c1a419611d..f57622cfe058 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info 
> *sbi,
>   * On validity, copy that node with cold status, otherwise (invalid 
> node)
>   * ignore that.
>   */
> -static void gc_node_segment(struct f2fs_sb_info *sbi,
> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>   struct f2fs_summary *sum, unsigned int segno, int 
> gc_type)
>  {
>   struct f2fs_summary *entry;
> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info 
> *sbi,
>   int off;
>   int phase = 0;
>   bool fggc = (gc_type == FG_GC);
> + int submitted = 0;
>  
>   start_addr = START_BLOCK(sbi, segno);
>  
> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info 
> *sbi,
>   nid_t nid = le32_to_cpu(entry->nid);
>   struct page *node_page;
>   struct node_info ni;
> + int err;
>  
>   /* stop BG_GC if there is not enough free sections. */
>   if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
> 0, 0))
> - return;
> + return submitted;
>  
>   if (check_valid_map(sbi, segno, off) == 0)
>   continue;
> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info 
> *sbi,
>   continue;
>   }
>  
> - f2fs_move_node_page(node_page, gc_type);
> + err = f2fs_move_node_page(node_page, gc_type);
> + if (!err && gc_type == FG_GC)
> + submitted++;
>   stat_inc_node_blk_count(sbi, 1, gc_type);
>   }
>  
> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info 
> *sbi,
>  
>   if (fggc)
>   atomic_dec(&sbi->wb_sync_req[NODE]);
> + return submitted;
>  }
>  
>  /*
> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
> pgoff_t index)
>   * Move data block via META_MAPPING while keeping locked data page.
>   * This can be used to move blocks, aka LBAs, directly on disk.
>   */
> -static void move_data_block(struct inode *inode, block_t bidx,
> +static int move_data_block(struct inode *inode, block_t bidx,
>   int gc_type, unsigned int segno, int 
> off)

 We don't need to submit IOs in this case.
>>>
>>> Actually, previously, we missed to submit IOs for encrypted block only 
>>> 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-25 Thread Jaegeuk Kim
On 09/21, Chao Yu wrote:
> On 2018/9/18 10:14, Chao Yu wrote:
> > On 2018/9/18 10:02, Jaegeuk Kim wrote:
> >> On 09/18, Chao Yu wrote:
> >>> On 2018/9/18 9:37, Jaegeuk Kim wrote:
>  On 09/18, Chao Yu wrote:
> > On 2018/9/18 9:04, Jaegeuk Kim wrote:
> >> On 09/13, Chao Yu wrote:
> >>> From: Chao Yu 
> >>>
> >>> When migrating encrypted block from background GC thread, we only add
> >>> them into f2fs inner bio cache, but forget to submit the cached bio, 
> >>> it
> >>> may cause potential deadlock when we are waiting page writebacked, fix
> >>> it.
> >>>
> >>> Signed-off-by: Chao Yu 
> >>> ---
> >>> v3:
> >>> clean up codes suggested by Jaegeuk.
> >>>  fs/f2fs/f2fs.h |  2 +-
> >>>  fs/f2fs/gc.c   | 71 
> >>> +++---
> >>>  fs/f2fs/node.c | 13 ++---
> >>>  3 files changed, 61 insertions(+), 25 deletions(-)
> >>>
> >>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> >>> index b676b82312e0..917b2ca76aac 100644
> >>> --- a/fs/f2fs/f2fs.h
> >>> +++ b/fs/f2fs/f2fs.h
> >>> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
> >>> dnode_of_data *dn, unsigned int ofs);
> >>>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
> >>>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t 
> >>> nid);
> >>>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
> >>> -void f2fs_move_node_page(struct page *node_page, int gc_type);
> >>> +int f2fs_move_node_page(struct page *node_page, int gc_type);
> >>>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
> >>> *inode,
> >>>   struct writeback_control *wbc, bool atomic,
> >>>   unsigned int *seq_id);
> >>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> >>> index a4c1a419611d..f57622cfe058 100644
> >>> --- a/fs/f2fs/gc.c
> >>> +++ b/fs/f2fs/gc.c
> >>> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info 
> >>> *sbi,
> >>>   * On validity, copy that node with cold status, otherwise (invalid 
> >>> node)
> >>>   * ignore that.
> >>>   */
> >>> -static void gc_node_segment(struct f2fs_sb_info *sbi,
> >>> +static int gc_node_segment(struct f2fs_sb_info *sbi,
> >>>   struct f2fs_summary *sum, unsigned int segno, int 
> >>> gc_type)
> >>>  {
> >>>   struct f2fs_summary *entry;
> >>> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info 
> >>> *sbi,
> >>>   int off;
> >>>   int phase = 0;
> >>>   bool fggc = (gc_type == FG_GC);
> >>> + int submitted = 0;
> >>>  
> >>>   start_addr = START_BLOCK(sbi, segno);
> >>>  
> >>> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info 
> >>> *sbi,
> >>>   nid_t nid = le32_to_cpu(entry->nid);
> >>>   struct page *node_page;
> >>>   struct node_info ni;
> >>> + int err;
> >>>  
> >>>   /* stop BG_GC if there is not enough free sections. */
> >>>   if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
> >>> 0, 0))
> >>> - return;
> >>> + return submitted;
> >>>  
> >>>   if (check_valid_map(sbi, segno, off) == 0)
> >>>   continue;
> >>> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info 
> >>> *sbi,
> >>>   continue;
> >>>   }
> >>>  
> >>> - f2fs_move_node_page(node_page, gc_type);
> >>> + err = f2fs_move_node_page(node_page, gc_type);
> >>> + if (!err && gc_type == FG_GC)
> >>> + submitted++;
> >>>   stat_inc_node_blk_count(sbi, 1, gc_type);
> >>>   }
> >>>  
> >>> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info 
> >>> *sbi,
> >>>  
> >>>   if (fggc)
> >>>   atomic_dec(&sbi->wb_sync_req[NODE]);
> >>> + return submitted;
> >>>  }
> >>>  
> >>>  /*
> >>> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
> >>> pgoff_t index)
> >>>   * Move data block via META_MAPPING while keeping locked data page.
> >>>   * This can be used to move blocks, aka LBAs, directly on disk.
> >>>   */
> >>> -static void move_data_block(struct inode *inode, block_t bidx,
> >>> +static int move_data_block(struct inode *inode, block_t bidx,
> >>>   int gc_type, unsigned int segno, int 
> >>> off)
> >>
> >> We don't need to submit IOs in this case.
> >
> > Actually, previously, we missed to submit IOs for encrypted block only 
> > in
> > BGGC, so we fix to submit for 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-21 Thread Chao Yu
On 2018/9/18 10:14, Chao Yu wrote:
> On 2018/9/18 10:02, Jaegeuk Kim wrote:
>> On 09/18, Chao Yu wrote:
>>> On 2018/9/18 9:37, Jaegeuk Kim wrote:
 On 09/18, Chao Yu wrote:
> On 2018/9/18 9:04, Jaegeuk Kim wrote:
>> On 09/13, Chao Yu wrote:
>>> From: Chao Yu 
>>>
>>> When migrating encrypted block from background GC thread, we only add
>>> them into f2fs inner bio cache, but forget to submit the cached bio, it
>>> may cause potential deadlock when we are waiting page writebacked, fix
>>> it.
>>>
>>> Signed-off-by: Chao Yu 
>>> ---
>>> v3:
>>> clean up codes suggested by Jaegeuk.
>>>  fs/f2fs/f2fs.h |  2 +-
>>>  fs/f2fs/gc.c   | 71 +++---
>>>  fs/f2fs/node.c | 13 ++---
>>>  3 files changed, 61 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>>> index b676b82312e0..917b2ca76aac 100644
>>> --- a/fs/f2fs/f2fs.h
>>> +++ b/fs/f2fs/f2fs.h
>>> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
>>> dnode_of_data *dn, unsigned int ofs);
>>>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>>>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
>>>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
>>> -void f2fs_move_node_page(struct page *node_page, int gc_type);
>>> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>>>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode 
>>> *inode,
>>> struct writeback_control *wbc, bool atomic,
>>> unsigned int *seq_id);
>>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>>> index a4c1a419611d..f57622cfe058 100644
>>> --- a/fs/f2fs/gc.c
>>> +++ b/fs/f2fs/gc.c
>>> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
>>>   * On validity, copy that node with cold status, otherwise (invalid 
>>> node)
>>>   * ignore that.
>>>   */
>>> -static void gc_node_segment(struct f2fs_sb_info *sbi,
>>> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>>> struct f2fs_summary *sum, unsigned int segno, int 
>>> gc_type)
>>>  {
>>> struct f2fs_summary *entry;
>>> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>> int off;
>>> int phase = 0;
>>> bool fggc = (gc_type == FG_GC);
>>> +   int submitted = 0;
>>>  
>>> start_addr = START_BLOCK(sbi, segno);
>>>  
>>> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>> nid_t nid = le32_to_cpu(entry->nid);
>>> struct page *node_page;
>>> struct node_info ni;
>>> +   int err;
>>>  
>>> /* stop BG_GC if there is not enough free sections. */
>>> if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
>>> 0, 0))
>>> -   return;
>>> +   return submitted;
>>>  
>>> if (check_valid_map(sbi, segno, off) == 0)
>>> continue;
>>> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>> continue;
>>> }
>>>  
>>> -   f2fs_move_node_page(node_page, gc_type);
>>> +   err = f2fs_move_node_page(node_page, gc_type);
>>> +   if (!err && gc_type == FG_GC)
>>> +   submitted++;
>>> stat_inc_node_blk_count(sbi, 1, gc_type);
>>> }
>>>  
>>> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info 
>>> *sbi,
>>>  
>>> if (fggc)
>>> atomic_dec(>wb_sync_req[NODE]);
>>> +   return submitted;
>>>  }
>>>  
>>>  /*
>>> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
>>> pgoff_t index)
>>>   * Move data block via META_MAPPING while keeping locked data page.
>>>   * This can be used to move blocks, aka LBAs, directly on disk.
>>>   */
>>> -static void move_data_block(struct inode *inode, block_t bidx,
>>> +static int move_data_block(struct inode *inode, block_t bidx,
>>> int gc_type, unsigned int segno, int 
>>> off)
>>
>> We don't need to submit IOs in this case.
>
> Actually, previously, we missed to submit IOs for encrypted block only in
> BGGC, so we fix to submit for this case, all other codes are cleanups. 
> Right?

 The move_data_block migrates encrypted blocks all the time with meta page 
 IOs.
 I don't know what you're saying about BGGC.
>>>
>>> In move_data_block(), we use 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-17 Thread Chao Yu
On 2018/9/18 10:02, Jaegeuk Kim wrote:
> On 09/18, Chao Yu wrote:
>> On 2018/9/18 9:37, Jaegeuk Kim wrote:
>>> On 09/18, Chao Yu wrote:
 On 2018/9/18 9:04, Jaegeuk Kim wrote:
> On 09/13, Chao Yu wrote:
>> From: Chao Yu 
>>
>> When migrating encrypted block from background GC thread, we only add
>> them into f2fs inner bio cache, but forget to submit the cached bio, it
>> may cause potential deadlock when we are waiting page writebacked, fix
>> it.
>>
>> Signed-off-by: Chao Yu 
>> ---
>> v3:
>> clean up codes suggested by Jaegeuk.
>>  fs/f2fs/f2fs.h |  2 +-
>>  fs/f2fs/gc.c   | 71 +++---
>>  fs/f2fs/node.c | 13 ++---
>>  3 files changed, 61 insertions(+), 25 deletions(-)
>>
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index b676b82312e0..917b2ca76aac 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
>> dnode_of_data *dn, unsigned int ofs);
>>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
>>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
>> -void f2fs_move_node_page(struct page *node_page, int gc_type);
>> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
>>  struct writeback_control *wbc, bool atomic,
>>  unsigned int *seq_id);
>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>> index a4c1a419611d..f57622cfe058 100644
>> --- a/fs/f2fs/gc.c
>> +++ b/fs/f2fs/gc.c
>> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
>>   * On validity, copy that node with cold status, otherwise (invalid 
>> node)
>>   * ignore that.
>>   */
>> -static void gc_node_segment(struct f2fs_sb_info *sbi,
>> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>>  struct f2fs_summary *sum, unsigned int segno, int 
>> gc_type)
>>  {
>>  struct f2fs_summary *entry;
>> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  int off;
>>  int phase = 0;
>>  bool fggc = (gc_type == FG_GC);
>> +int submitted = 0;
>>  
>>  start_addr = START_BLOCK(sbi, segno);
>>  
>> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info 
>> *sbi,
>>  nid_t nid = le32_to_cpu(entry->nid);
>>  struct page *node_page;
>>  struct node_info ni;
>> +int err;
>>  
>>  /* stop BG_GC if there is not enough free sections. */
>>  if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
>> 0, 0))
>> -return;
>> +return submitted;
>>  
>>  if (check_valid_map(sbi, segno, off) == 0)
>>  continue;
>> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  continue;
>>  }
>>  
>> -f2fs_move_node_page(node_page, gc_type);
>> +err = f2fs_move_node_page(node_page, gc_type);
>> +if (!err && gc_type == FG_GC)
>> +submitted++;
>>  stat_inc_node_blk_count(sbi, 1, gc_type);
>>  }
>>  
>> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  
>>  if (fggc)
>>  atomic_dec(>wb_sync_req[NODE]);
>> +return submitted;
>>  }
>>  
>>  /*
>> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
>> pgoff_t index)
>>   * Move data block via META_MAPPING while keeping locked data page.
>>   * This can be used to move blocks, aka LBAs, directly on disk.
>>   */
>> -static void move_data_block(struct inode *inode, block_t bidx,
>> +static int move_data_block(struct inode *inode, block_t bidx,
>>  int gc_type, unsigned int segno, int 
>> off)
>
> We don't need to submit IOs in this case.

 Actually, previously, we missed to submit IOs for encrypted block only in
 BGGC, so we fix to submit for this case, all other codes are cleanups. 
 Right?
>>>
>>> The move_data_block migrates encrypted blocks all the time with meta page 
>>> IOs.
>>> I don't know what you're saying about BGGC.
>>
>> In move_data_block(), we use f2fs_submit_page_write() to add encrypted page
>> in to sbi->write_io[META].bio cache, so before exit GC, we need to submit
>> this cache by 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-17 Thread Jaegeuk Kim
On 09/18, Chao Yu wrote:
> On 2018/9/18 9:37, Jaegeuk Kim wrote:
> > On 09/18, Chao Yu wrote:
> >> On 2018/9/18 9:04, Jaegeuk Kim wrote:
> >>> On 09/13, Chao Yu wrote:
>  From: Chao Yu 
> 
>  When migrating encrypted block from background GC thread, we only add
>  them into f2fs inner bio cache, but forget to submit the cached bio, it
>  may cause potential deadlock when we are waiting page writebacked, fix
>  it.
> 
>  Signed-off-by: Chao Yu 
>  ---
>  v3:
>  clean up codes suggested by Jaegeuk.
>   fs/f2fs/f2fs.h |  2 +-
>   fs/f2fs/gc.c   | 71 +++---
>   fs/f2fs/node.c | 13 ++---
>   3 files changed, 61 insertions(+), 25 deletions(-)
> 
>  diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>  index b676b82312e0..917b2ca76aac 100644
>  --- a/fs/f2fs/f2fs.h
>  +++ b/fs/f2fs/f2fs.h
>  @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct 
>  dnode_of_data *dn, unsigned int ofs);
>   void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>   struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
>   struct page *f2fs_get_node_page_ra(struct page *parent, int start);
>  -void f2fs_move_node_page(struct page *node_page, int gc_type);
>  +int f2fs_move_node_page(struct page *node_page, int gc_type);
>   int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
>   struct writeback_control *wbc, bool atomic,
>   unsigned int *seq_id);
>  diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>  index a4c1a419611d..f57622cfe058 100644
>  --- a/fs/f2fs/gc.c
>  +++ b/fs/f2fs/gc.c
>  @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
>    * On validity, copy that node with cold status, otherwise (invalid 
>  node)
>    * ignore that.
>    */
>  -static void gc_node_segment(struct f2fs_sb_info *sbi,
>  +static int gc_node_segment(struct f2fs_sb_info *sbi,
>   struct f2fs_summary *sum, unsigned int segno, int 
>  gc_type)
>   {
>   struct f2fs_summary *entry;
>  @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>   int off;
>   int phase = 0;
>   bool fggc = (gc_type == FG_GC);
>  +int submitted = 0;
>   
>   start_addr = START_BLOCK(sbi, segno);
>   
>  @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info 
>  *sbi,
>   nid_t nid = le32_to_cpu(entry->nid);
>   struct page *node_page;
>   struct node_info ni;
>  +int err;
>   
>   /* stop BG_GC if there is not enough free sections. */
>   if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 
>  0, 0))
>  -return;
>  +return submitted;
>   
>   if (check_valid_map(sbi, segno, off) == 0)
>   continue;
>  @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>   continue;
>   }
>   
>  -f2fs_move_node_page(node_page, gc_type);
>  +err = f2fs_move_node_page(node_page, gc_type);
>  +if (!err && gc_type == FG_GC)
>  +submitted++;
>   stat_inc_node_blk_count(sbi, 1, gc_type);
>   }
>   
>  @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>   
>   if (fggc)
>   atomic_dec(>wb_sync_req[NODE]);
>  +return submitted;
>   }
>   
>   /*
>  @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, 
>  pgoff_t index)
>    * Move data block via META_MAPPING while keeping locked data page.
>    * This can be used to move blocks, aka LBAs, directly on disk.
>    */
>  -static void move_data_block(struct inode *inode, block_t bidx,
>  +static int move_data_block(struct inode *inode, block_t bidx,
>   int gc_type, unsigned int segno, int 
>  off)
> >>>
> >>> We don't need to submit IOs in this case.
> >>
> >> Actually, previously, we missed to submit IOs for encrypted block only in
> >> BGGC, so we fix to submit for this case, all other codes are cleanups. 
> >> Right?
> > 
> > The move_data_block migrates encrypted blocks all the time with meta page 
> > IOs.
> > I don't know what you're saying about BGGC.
> 
> In move_data_block(), we use f2fs_submit_page_write() to add encrypted page
> in to sbi->write_io[META].bio cache, so before exit GC, we need to submit
> this cache by f2fs_submit_merged_write(), otherwise bio with encrypted page

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-17 Thread Chao Yu
On 2018/9/18 9:37, Jaegeuk Kim wrote:
> On 09/18, Chao Yu wrote:
>> On 2018/9/18 9:04, Jaegeuk Kim wrote:
>>> On 09/13, Chao Yu wrote:
 From: Chao Yu 

 When migrating encrypted block from background GC thread, we only add
 them into f2fs inner bio cache, but forget to submit the cached bio, it
 may cause potential deadlock when we are waiting page writebacked, fix
 it.

 Signed-off-by: Chao Yu 
 ---
 v3:
 clean up codes suggested by Jaegeuk.
  fs/f2fs/f2fs.h |  2 +-
  fs/f2fs/gc.c   | 71 +++---
  fs/f2fs/node.c | 13 ++---
  3 files changed, 61 insertions(+), 25 deletions(-)

 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
 index b676b82312e0..917b2ca76aac 100644
 --- a/fs/f2fs/f2fs.h
 +++ b/fs/f2fs/f2fs.h
 @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data 
 *dn, unsigned int ofs);
  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
 -void f2fs_move_node_page(struct page *node_page, int gc_type);
 +int f2fs_move_node_page(struct page *node_page, int gc_type);
  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id);
 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
 index a4c1a419611d..f57622cfe058 100644
 --- a/fs/f2fs/gc.c
 +++ b/fs/f2fs/gc.c
 @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
   * On validity, copy that node with cold status, otherwise (invalid node)
   * ignore that.
   */
 -static void gc_node_segment(struct f2fs_sb_info *sbi,
 +static int gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
  {
struct f2fs_summary *entry;
 @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
int off;
int phase = 0;
bool fggc = (gc_type == FG_GC);
 +  int submitted = 0;
  
start_addr = START_BLOCK(sbi, segno);
  
 @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
struct node_info ni;
 +  int err;
  
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
 -  return;
 +  return submitted;
  
if (check_valid_map(sbi, segno, off) == 0)
continue;
 @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
continue;
}
  
 -  f2fs_move_node_page(node_page, gc_type);
 +  err = f2fs_move_node_page(node_page, gc_type);
 +  if (!err && gc_type == FG_GC)
 +  submitted++;
stat_inc_node_blk_count(sbi, 1, gc_type);
}
  
 @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
  
if (fggc)
atomic_dec(>wb_sync_req[NODE]);
 +  return submitted;
  }
  
  /*
 @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, pgoff_t 
 index)
   * Move data block via META_MAPPING while keeping locked data page.
   * This can be used to move blocks, aka LBAs, directly on disk.
   */
 -static void move_data_block(struct inode *inode, block_t bidx,
 +static int move_data_block(struct inode *inode, block_t bidx,
int gc_type, unsigned int segno, int off)
>>>
>>> We don't need to submit IOs in this case.
>>
>> Actually, previously, we missed to submit IOs for encrypted block only in
>> BGGC, so we fix to submit for this case, all other codes are cleanups. Right?
> 
> The move_data_block migrates encrypted blocks all the time with meta page IOs.
> I don't know what you're saying about BGGC.

In move_data_block(), we use f2fs_submit_page_write() to add encrypted page
in to sbi->write_io[META].bio cache, so before exit GC, we need to submit
this cache by f2fs_submit_merged_write(), otherwise bio with encrypted page
will be cached in sbi->write_io[META].bio for long time, since we only
submit this bio cache in foreground GC.

if (gc_type == FG_GC)
f2fs_submit_merged_write(sbi,
(type == SUM_TYPE_NODE) ? NODE : DATA);

> 
>>
>> Thanks,
>>
>>>
  {
struct f2fs_io_info fio = {
 @@ -685,25 +690,29 @@ static void move_data_block(struct inode *inode, 
 block_t bidx,
struct node_info ni;
struct page *page, *mpage;

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-17 Thread Jaegeuk Kim
On 09/18, Chao Yu wrote:
> On 2018/9/18 9:04, Jaegeuk Kim wrote:
> > On 09/13, Chao Yu wrote:
> >> From: Chao Yu 
> >>
> >> When migrating encrypted block from background GC thread, we only add
> >> them into f2fs inner bio cache, but forget to submit the cached bio, it
> >> may cause potential deadlock when we are waiting page writebacked, fix
> >> it.
> >>
> >> Signed-off-by: Chao Yu 
> >> ---
> >> v3:
> >> clean up codes suggested by Jaegeuk.
> >>  fs/f2fs/f2fs.h |  2 +-
> >>  fs/f2fs/gc.c   | 71 +++---
> >>  fs/f2fs/node.c | 13 ++---
> >>  3 files changed, 61 insertions(+), 25 deletions(-)
> >>
> >> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> >> index b676b82312e0..917b2ca76aac 100644
> >> --- a/fs/f2fs/f2fs.h
> >> +++ b/fs/f2fs/f2fs.h
> >> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data 
> >> *dn, unsigned int ofs);
> >>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
> >>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
> >>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
> >> -void f2fs_move_node_page(struct page *node_page, int gc_type);
> >> +int f2fs_move_node_page(struct page *node_page, int gc_type);
> >>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
> >>struct writeback_control *wbc, bool atomic,
> >>unsigned int *seq_id);
> >> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> >> index a4c1a419611d..f57622cfe058 100644
> >> --- a/fs/f2fs/gc.c
> >> +++ b/fs/f2fs/gc.c
> >> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
> >>   * On validity, copy that node with cold status, otherwise (invalid node)
> >>   * ignore that.
> >>   */
> >> -static void gc_node_segment(struct f2fs_sb_info *sbi,
> >> +static int gc_node_segment(struct f2fs_sb_info *sbi,
> >>struct f2fs_summary *sum, unsigned int segno, int gc_type)
> >>  {
> >>struct f2fs_summary *entry;
> >> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
> >>int off;
> >>int phase = 0;
> >>bool fggc = (gc_type == FG_GC);
> >> +  int submitted = 0;
> >>  
> >>start_addr = START_BLOCK(sbi, segno);
> >>  
> >> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
> >>nid_t nid = le32_to_cpu(entry->nid);
> >>struct page *node_page;
> >>struct node_info ni;
> >> +  int err;
> >>  
> >>/* stop BG_GC if there is not enough free sections. */
> >>if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
> >> -  return;
> >> +  return submitted;
> >>  
> >>if (check_valid_map(sbi, segno, off) == 0)
> >>continue;
> >> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
> >>continue;
> >>}
> >>  
> >> -  f2fs_move_node_page(node_page, gc_type);
> >> +  err = f2fs_move_node_page(node_page, gc_type);
> >> +  if (!err && gc_type == FG_GC)
> >> +  submitted++;
> >>stat_inc_node_blk_count(sbi, 1, gc_type);
> >>}
> >>  
> >> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
> >>  
> >>if (fggc)
> >>atomic_dec(>wb_sync_req[NODE]);
> >> +  return submitted;
> >>  }
> >>  
> >>  /*
> >> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, pgoff_t 
> >> index)
> >>   * Move data block via META_MAPPING while keeping locked data page.
> >>   * This can be used to move blocks, aka LBAs, directly on disk.
> >>   */
> >> -static void move_data_block(struct inode *inode, block_t bidx,
> >> +static int move_data_block(struct inode *inode, block_t bidx,
> >>int gc_type, unsigned int segno, int off)
> > 
> > We don't need to submit IOs in this case.
> 
> Actually, previously, we missed to submit IOs for encrypted block only in
> BGGC, so we fix to submit for this case, all other codes are cleanups. Right?

The move_data_block migrates encrypted blocks all the time with meta page IOs.
I don't know what you're saying about BGGC.

> 
> Thanks,
> 
> > 
> >>  {
> >>struct f2fs_io_info fio = {
> >> @@ -685,25 +690,29 @@ static void move_data_block(struct inode *inode, 
> >> block_t bidx,
> >>struct node_info ni;
> >>struct page *page, *mpage;
> >>block_t newaddr;
> >> -  int err;
> >> +  int err = 0;
> >>bool lfs_mode = test_opt(fio.sbi, LFS);
> >>  
> >>/* do not read out */
> >>page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
> >>if (!page)
> >> -  return;
> >> +  return -ENOMEM;
> >>  
> >> -  if (!check_valid_map(F2FS_I_SB(inode), segno, off))
> >> +  if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
> >> +  err = -ENOENT;
> >>goto out;
> >> +  }
> >>  
> >>if 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-17 Thread Chao Yu
On 2018/9/18 9:04, Jaegeuk Kim wrote:
> On 09/13, Chao Yu wrote:
>> From: Chao Yu 
>>
>> When migrating encrypted block from background GC thread, we only add
>> them into f2fs inner bio cache, but forget to submit the cached bio, it
>> may cause potential deadlock when we are waiting page writebacked, fix
>> it.
>>
>> Signed-off-by: Chao Yu 
>> ---
>> v3:
>> clean up codes suggested by Jaegeuk.
>>  fs/f2fs/f2fs.h |  2 +-
>>  fs/f2fs/gc.c   | 71 +++---
>>  fs/f2fs/node.c | 13 ++---
>>  3 files changed, 61 insertions(+), 25 deletions(-)
>>
>> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
>> index b676b82312e0..917b2ca76aac 100644
>> --- a/fs/f2fs/f2fs.h
>> +++ b/fs/f2fs/f2fs.h
>> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data 
>> *dn, unsigned int ofs);
>>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
>>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
>> -void f2fs_move_node_page(struct page *node_page, int gc_type);
>> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
>>  struct writeback_control *wbc, bool atomic,
>>  unsigned int *seq_id);
>> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
>> index a4c1a419611d..f57622cfe058 100644
>> --- a/fs/f2fs/gc.c
>> +++ b/fs/f2fs/gc.c
>> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
>>   * On validity, copy that node with cold status, otherwise (invalid node)
>>   * ignore that.
>>   */
>> -static void gc_node_segment(struct f2fs_sb_info *sbi,
>> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>>  struct f2fs_summary *sum, unsigned int segno, int gc_type)
>>  {
>>  struct f2fs_summary *entry;
>> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  int off;
>>  int phase = 0;
>>  bool fggc = (gc_type == FG_GC);
>> +int submitted = 0;
>>  
>>  start_addr = START_BLOCK(sbi, segno);
>>  
>> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  nid_t nid = le32_to_cpu(entry->nid);
>>  struct page *node_page;
>>  struct node_info ni;
>> +int err;
>>  
>>  /* stop BG_GC if there is not enough free sections. */
>>  if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
>> -return;
>> +return submitted;
>>  
>>  if (check_valid_map(sbi, segno, off) == 0)
>>  continue;
>> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  continue;
>>  }
>>  
>> -f2fs_move_node_page(node_page, gc_type);
>> +err = f2fs_move_node_page(node_page, gc_type);
>> +if (!err && gc_type == FG_GC)
>> +submitted++;
>>  stat_inc_node_blk_count(sbi, 1, gc_type);
>>  }
>>  
>> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>>  
>>  if (fggc)
>>  atomic_dec(>wb_sync_req[NODE]);
>> +return submitted;
>>  }
>>  
>>  /*
>> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, pgoff_t 
>> index)
>>   * Move data block via META_MAPPING while keeping locked data page.
>>   * This can be used to move blocks, aka LBAs, directly on disk.
>>   */
>> -static void move_data_block(struct inode *inode, block_t bidx,
>> +static int move_data_block(struct inode *inode, block_t bidx,
>>  int gc_type, unsigned int segno, int off)
> 
> We don't need to submit IOs in this case.

Actually, previously, we missed to submit IOs for encrypted block only in
BGGC, so we fix to submit for this case, all other codes are cleanups. Right?

Thanks,

> 
>>  {
>>  struct f2fs_io_info fio = {
>> @@ -685,25 +690,29 @@ static void move_data_block(struct inode *inode, 
>> block_t bidx,
>>  struct node_info ni;
>>  struct page *page, *mpage;
>>  block_t newaddr;
>> -int err;
>> +int err = 0;
>>  bool lfs_mode = test_opt(fio.sbi, LFS);
>>  
>>  /* do not read out */
>>  page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
>>  if (!page)
>> -return;
>> +return -ENOMEM;
>>  
>> -if (!check_valid_map(F2FS_I_SB(inode), segno, off))
>> +if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
>> +err = -ENOENT;
>>  goto out;
>> +}
>>  
>>  if (f2fs_is_atomic_file(inode)) {
>>  F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
>>  F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
>> +err = -EAGAIN;
>>  goto out;
>>  }
>>  
>>  if (f2fs_is_pinned_file(inode)) {
>>  f2fs_pin_file_control(inode, 

Re: [f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-17 Thread Jaegeuk Kim
On 09/13, Chao Yu wrote:
> From: Chao Yu 
> 
> When migrating encrypted block from background GC thread, we only add
> them into f2fs inner bio cache, but forget to submit the cached bio, it
> may cause potential deadlock when we are waiting page writebacked, fix
> it.
> 
> Signed-off-by: Chao Yu 
> ---
> v3:
> clean up codes suggested by Jaegeuk.
>  fs/f2fs/f2fs.h |  2 +-
>  fs/f2fs/gc.c   | 71 +++---
>  fs/f2fs/node.c | 13 ++---
>  3 files changed, 61 insertions(+), 25 deletions(-)
> 
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index b676b82312e0..917b2ca76aac 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data 
> *dn, unsigned int ofs);
>  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
>  struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
>  struct page *f2fs_get_node_page_ra(struct page *parent, int start);
> -void f2fs_move_node_page(struct page *node_page, int gc_type);
> +int f2fs_move_node_page(struct page *node_page, int gc_type);
>  int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
>   struct writeback_control *wbc, bool atomic,
>   unsigned int *seq_id);
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index a4c1a419611d..f57622cfe058 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
>   * On validity, copy that node with cold status, otherwise (invalid node)
>   * ignore that.
>   */
> -static void gc_node_segment(struct f2fs_sb_info *sbi,
> +static int gc_node_segment(struct f2fs_sb_info *sbi,
>   struct f2fs_summary *sum, unsigned int segno, int gc_type)
>  {
>   struct f2fs_summary *entry;
> @@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>   int off;
>   int phase = 0;
>   bool fggc = (gc_type == FG_GC);
> + int submitted = 0;
>  
>   start_addr = START_BLOCK(sbi, segno);
>  
> @@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>   nid_t nid = le32_to_cpu(entry->nid);
>   struct page *node_page;
>   struct node_info ni;
> + int err;
>  
>   /* stop BG_GC if there is not enough free sections. */
>   if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
> - return;
> + return submitted;
>  
>   if (check_valid_map(sbi, segno, off) == 0)
>   continue;
> @@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>   continue;
>   }
>  
> - f2fs_move_node_page(node_page, gc_type);
> + err = f2fs_move_node_page(node_page, gc_type);
> + if (!err && gc_type == FG_GC)
> + submitted++;
>   stat_inc_node_blk_count(sbi, 1, gc_type);
>   }
>  
> @@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
>  
>   if (fggc)
> 	atomic_dec(&sbi->wb_sync_req[NODE]);
> + return submitted;
>  }
>  
>  /*
> @@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, pgoff_t 
> index)
>   * Move data block via META_MAPPING while keeping locked data page.
>   * This can be used to move blocks, aka LBAs, directly on disk.
>   */
> -static void move_data_block(struct inode *inode, block_t bidx,
> +static int move_data_block(struct inode *inode, block_t bidx,
>   int gc_type, unsigned int segno, int off)

We don't need to submit IOs in this case.

>  {
>   struct f2fs_io_info fio = {
> @@ -685,25 +690,29 @@ static void move_data_block(struct inode *inode, 
> block_t bidx,
>   struct node_info ni;
>   struct page *page, *mpage;
>   block_t newaddr;
> - int err;
> + int err = 0;
>   bool lfs_mode = test_opt(fio.sbi, LFS);
>  
>   /* do not read out */
>   page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
>   if (!page)
> - return;
> + return -ENOMEM;
>  
> - if (!check_valid_map(F2FS_I_SB(inode), segno, off))
> + if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
> + err = -ENOENT;
>   goto out;
> + }
>  
>   if (f2fs_is_atomic_file(inode)) {
>   F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
>   F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
> + err = -EAGAIN;
>   goto out;
>   }
>  
>   if (f2fs_is_pinned_file(inode)) {
>   f2fs_pin_file_control(inode, true);
> + err = -EAGAIN;
>   goto out;
>   }
>  
> @@ -714,6 +723,7 @@ static void move_data_block(struct inode *inode, block_t 
> bidx,
>  
>   if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
>   ClearPageUptodate(page);
> + 

[f2fs-dev] [PATCH v3] f2fs: submit cached bio to avoid endless PageWriteback

2018-09-12 Thread Chao Yu
From: Chao Yu 

When migrating encrypted block from background GC thread, we only add
them into f2fs inner bio cache, but forget to submit the cached bio, it
may cause potential deadlock when we are waiting page writebacked, fix
it.

Signed-off-by: Chao Yu 
---
v3:
clean up codes suggested by Jaegeuk.
 fs/f2fs/f2fs.h |  2 +-
 fs/f2fs/gc.c   | 71 +++---
 fs/f2fs/node.c | 13 ++---
 3 files changed, 61 insertions(+), 25 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b676b82312e0..917b2ca76aac 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2869,7 +2869,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, 
unsigned int ofs);
 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
-void f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_move_node_page(struct page *node_page, int gc_type);
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a4c1a419611d..f57622cfe058 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -461,7 +461,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static void gc_node_segment(struct f2fs_sb_info *sbi,
+static int gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
struct f2fs_summary *entry;
@@ -469,6 +469,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
int off;
int phase = 0;
bool fggc = (gc_type == FG_GC);
+   int submitted = 0;
 
start_addr = START_BLOCK(sbi, segno);
 
@@ -482,10 +483,11 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
struct node_info ni;
+   int err;
 
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
-   return;
+   return submitted;
 
if (check_valid_map(sbi, segno, off) == 0)
continue;
@@ -522,7 +524,9 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
continue;
}
 
-   f2fs_move_node_page(node_page, gc_type);
+   err = f2fs_move_node_page(node_page, gc_type);
+   if (!err && gc_type == FG_GC)
+   submitted++;
stat_inc_node_blk_count(sbi, 1, gc_type);
}
 
@@ -531,6 +535,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
 
if (fggc)
	atomic_dec(&sbi->wb_sync_req[NODE]);
+   return submitted;
 }
 
 /*
@@ -666,7 +671,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
  * Move data block via META_MAPPING while keeping locked data page.
  * This can be used to move blocks, aka LBAs, directly on disk.
  */
-static void move_data_block(struct inode *inode, block_t bidx,
+static int move_data_block(struct inode *inode, block_t bidx,
int gc_type, unsigned int segno, int off)
 {
struct f2fs_io_info fio = {
@@ -685,25 +690,29 @@ static void move_data_block(struct inode *inode, block_t 
bidx,
struct node_info ni;
struct page *page, *mpage;
block_t newaddr;
-   int err;
+   int err = 0;
bool lfs_mode = test_opt(fio.sbi, LFS);
 
/* do not read out */
page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
if (!page)
-   return;
+   return -ENOMEM;
 
-   if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+   if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
+   err = -ENOENT;
goto out;
+   }
 
if (f2fs_is_atomic_file(inode)) {
F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
+   err = -EAGAIN;
goto out;
}
 
if (f2fs_is_pinned_file(inode)) {
f2fs_pin_file_control(inode, true);
+   err = -EAGAIN;
goto out;
}
 
@@ -714,6 +723,7 @@ static void move_data_block(struct inode *inode, block_t 
bidx,
 
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
ClearPageUptodate(page);
+   err = -ENOENT;
goto put_out;
}
 
@@ -796,6 +806,7 @@ static void move_data_block(struct inode *inode, block_t 
bidx,
fio.new_blkaddr = newaddr;