RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly

2014-03-09 Thread Chao Yu
Hi,

> -Original Message-
> From: Jaegeuk Kim [mailto:jaegeuk@samsung.com]
> Sent: Monday, March 10, 2014 1:38 PM
> To: Chao Yu
> Cc: 'Gu Zheng'; 'linux-kernel'; 'f2fs'
> Subject: RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary 
> slightly
> 
> Hi,
> 
> 2014-03-10 (월), 13:13 +0800, Chao Yu:
> > Hi Gu, Kim:
> >
> > One more comment.
> >
> > > -Original Message-
> > > From: Jaegeuk Kim [mailto:jaegeuk@samsung.com]
> > > Sent: Monday, March 10, 2014 12:46 PM
> > > To: Gu Zheng
> > > Cc: linux-kernel; f2fs
> > > Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary 
> > > slightly
> > >
> > > Hi Gu,
> > >
> > > 2014-03-07 (금), 18:43 +0800, Gu Zheng:
> > > > Previously, ra_sum_pages() pre-read as many contiguous pages as
> > > > possible, and if allocating further pages failed, an ENOMEM error
> > > > was reported upstream even though some pages had already been
> > > > allocated. In fact, we can use the pages that are available to do
> > > > part of the job and handle the rest in the following round,
> > > > reporting ENOMEM upstream only if we really cannot allocate a
> > > > single page.
> > > >
> > > > Another fix is to skip processing the remaining pages if an EIO
> > > > occurs while reading a page from page_list.
> > > >
> > > > Signed-off-by: Gu Zheng 
> >
> > Reviewed-by: Chao Yu 
> >
> > > > ---
> > > >  fs/f2fs/node.c|   44 
> > > >  fs/f2fs/segment.c |7 +--
> > > >  2 files changed, 25 insertions(+), 26 deletions(-)
> > > >
> > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > > > index 8787469..4b7861d 100644
> > > > --- a/fs/f2fs/node.c
> > > > +++ b/fs/f2fs/node.c
> > > > @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > > > for (; page_idx < start + nrpages; page_idx++) {
> > > > /* alloc temporal page for read node summary info*/
> > > > page = alloc_page(GFP_F2FS_ZERO);
> > > > -   if (!page) {
> > > > -   struct page *tmp;
> > > > -   list_for_each_entry_safe(page, tmp, pages, lru) 
> > > > {
> > > > -   list_del(&page->lru);
> > > > -   unlock_page(page);
> > > > -   __free_pages(page, 0);
> > > > -   }
> > > > -   return -ENOMEM;
> > > > -   }
> > > > +   if (!page)
> > > > +   break;
> > > >
> > > > lock_page(page);
> > > > page->index = page_idx;
> > > > @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > > > f2fs_submit_page_mbio(sbi, page, page->index, &fio);
> > > >
> > > > f2fs_submit_merged_bio(sbi, META, READ);
> > > > -   return 0;
> > > > +
> > > > +   return page_idx - start;
> > > >  }
> > > >
> > > >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > > > @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
> > > > nrpages = min(last_offset - i, bio_blocks);
> > > >
> > > > /* read ahead node pages */
> > > > -   err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > > -   if (err)
> > > > -   return err;
> > > > +   nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > > +   if (!nrpages)
> > > > +   return -ENOMEM;
> > > >
> > > > list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > > > -
> > >
> > > Here we can just add:
> > >   if (err)
> > >   goto skip;
> > >   lock_page();
> > >   ...
> > 

RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly

2014-03-09 Thread Jaegeuk Kim
Hi,

2014-03-10 (월), 13:13 +0800, Chao Yu:
> Hi Gu, Kim:
> 
> One more comment.
> 
> > -Original Message-
> > From: Jaegeuk Kim [mailto:jaegeuk@samsung.com]
> > Sent: Monday, March 10, 2014 12:46 PM
> > To: Gu Zheng
> > Cc: linux-kernel; f2fs
> > Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary 
> > slightly
> > 
> > Hi Gu,
> > 
> > 2014-03-07 (금), 18:43 +0800, Gu Zheng:
> > > Previously, ra_sum_pages() pre-read as many contiguous pages as
> > > possible, and if allocating further pages failed, an ENOMEM error
> > > was reported upstream even though some pages had already been
> > > allocated. In fact, we can use the pages that are available to do
> > > part of the job and handle the rest in the following round,
> > > reporting ENOMEM upstream only if we really cannot allocate a
> > > single page.
> > >
> > > Another fix is to skip processing the remaining pages if an EIO
> > > occurs while reading a page from page_list.
> > >
> > > Signed-off-by: Gu Zheng 
> 
> Reviewed-by: Chao Yu 
> 
> > > ---
> > >  fs/f2fs/node.c|   44 
> > >  fs/f2fs/segment.c |7 +--
> > >  2 files changed, 25 insertions(+), 26 deletions(-)
> > >
> > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > > index 8787469..4b7861d 100644
> > > --- a/fs/f2fs/node.c
> > > +++ b/fs/f2fs/node.c
> > > @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > >   for (; page_idx < start + nrpages; page_idx++) {
> > >   /* alloc temporal page for read node summary info*/
> > >   page = alloc_page(GFP_F2FS_ZERO);
> > > - if (!page) {
> > > - struct page *tmp;
> > > - list_for_each_entry_safe(page, tmp, pages, lru) {
> > > - list_del(&page->lru);
> > > - unlock_page(page);
> > > - __free_pages(page, 0);
> > > - }
> > > - return -ENOMEM;
> > > - }
> > > + if (!page)
> > > + break;
> > >
> > >   lock_page(page);
> > >   page->index = page_idx;
> > > @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > >   f2fs_submit_page_mbio(sbi, page, page->index, &fio);
> > >
> > >   f2fs_submit_merged_bio(sbi, META, READ);
> > > - return 0;
> > > +
> > > + return page_idx - start;
> > >  }
> > >
> > >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > > @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
> > >   nrpages = min(last_offset - i, bio_blocks);
> > >
> > >   /* read ahead node pages */
> > > - err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > - if (err)
> > > - return err;
> > > + nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > + if (!nrpages)
> > > + return -ENOMEM;
> > >
> > >   list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > > -
> > 
> > Here we can just add:
> > 	if (err)
> > 		goto skip;
> > 	lock_page();
> > 	...
> > 	unlock_page();
> > skip:
> > 	list_del();
> > 	__free_pages();
> > 
> > IMO, it's neater, so if you have any objection, let me know.
> > Otherwise, I'll handle this by myself. :)
> > Thanks,
> > 
> > > - lock_page(page);
> > > - if (unlikely(!PageUptodate(page))) {
> > > - err = -EIO;
> > > - } else {
> > > - rn = F2FS_NODE(page);
> > > - sum_entry->nid = rn->footer.nid;
> > > - sum_entry->version = 0;
> > > - sum_entry->ofs_in_node = 0;
> > > - sum_entry++;
> > > + if (!err) {
> 
> If we skip here, next round we will

RE: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary slightly

2014-03-09 Thread Chao Yu
Hi Gu, Kim:

One more comment.

> -Original Message-
> From: Jaegeuk Kim [mailto:jaegeuk@samsung.com]
> Sent: Monday, March 10, 2014 12:46 PM
> To: Gu Zheng
> Cc: linux-kernel; f2fs
> Subject: Re: [f2fs-dev] [PATCH 4/5] f2fs: optimize restore_node_summary 
> slightly
> 
> Hi Gu,
> 
> 2014-03-07 (금), 18:43 +0800, Gu Zheng:
> > Previously, ra_sum_pages() pre-read as many contiguous pages as
> > possible, and if allocating further pages failed, an ENOMEM error
> > was reported upstream even though some pages had already been
> > allocated. In fact, we can use the pages that are available to do
> > part of the job and handle the rest in the following round,
> > reporting ENOMEM upstream only if we really cannot allocate a
> > single page.
> >
> > Another fix is to skip processing the remaining pages if an EIO
> > occurs while reading a page from page_list.
> >
> > Signed-off-by: Gu Zheng 

Reviewed-by: Chao Yu 
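
For anyone skimming the diff below: the point of the change is that
ra_sum_pages() now returns how many pages it actually allocated and
submitted (zero only when not a single page could be allocated), and the
enclosing loop advances by that count, so a partial allocation just
shortens the current round instead of failing it. Roughly, with the loop
header reconstructed from the surrounding code (it is not part of the
hunk itself):

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead as many node pages as we managed to allocate */
		nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		/* ... walk page_list and fill nrpages summary entries ... */
	}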

> > ---
> >  fs/f2fs/node.c|   44 
> >  fs/f2fs/segment.c |7 +--
> >  2 files changed, 25 insertions(+), 26 deletions(-)
> >
> > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > index 8787469..4b7861d 100644
> > --- a/fs/f2fs/node.c
> > +++ b/fs/f2fs/node.c
> > @@ -1588,15 +1588,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > for (; page_idx < start + nrpages; page_idx++) {
> > /* alloc temporal page for read node summary info*/
> > page = alloc_page(GFP_F2FS_ZERO);
> > -   if (!page) {
> > -   struct page *tmp;
> > -   list_for_each_entry_safe(page, tmp, pages, lru) {
> > -   list_del(&page->lru);
> > -   unlock_page(page);
> > -   __free_pages(page, 0);
> > -   }
> > -   return -ENOMEM;
> > -   }
> > +   if (!page)
> > +   break;
> >
> > lock_page(page);
> > page->index = page_idx;
> > @@ -1607,7 +1600,8 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > f2fs_submit_page_mbio(sbi, page, page->index, &fio);
> >
> > f2fs_submit_merged_bio(sbi, META, READ);
> > -   return 0;
> > +
> > +   return page_idx - start;
> >  }
> >
> >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > @@ -1630,28 +1624,30 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
> > nrpages = min(last_offset - i, bio_blocks);
> >
> > /* read ahead node pages */
> > -   err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > -   if (err)
> > -   return err;
> > +   nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > +   if (!nrpages)
> > +   return -ENOMEM;
> >
> > list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > -
> 
> Here we can just add:
> 	if (err)
> 		goto skip;
> 	lock_page();
> 	...
> 	unlock_page();
> skip:
> 	list_del();
> 	__free_pages();
> 
> IMO, it's neater, so if you have any objection, let me know.
> Otherwise, I'll handle this by myself. :)
> Thanks,
> 
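
Spelled out with the identifiers from the patch, that suggestion comes out
roughly as follows (just a sketch of the structure, not a tested change):

	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		if (err)
			goto skip;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
		} else {
			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
		}
		unlock_page(page);
skip:
		list_del(&page->lru);
		__free_pages(page, 0);
	}
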
> > -   lock_page(page);
> > -   if (unlikely(!PageUptodate(page))) {
> > -   err = -EIO;
> > -   } else {
> > -   rn = F2FS_NODE(page);
> > -   sum_entry->nid = rn->footer.nid;
> > -   sum_entry->version = 0;
> > -   sum_entry->ofs_in_node = 0;
> > -   sum_entry++;
> > +   if (!err) {

If we skip here, then in the next round we will fill these summary entries
with the wrong info, because the 'sum_entry++;' is skipped as well.
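
Keeping the entries aligned would mean advancing the cursor on the skip path
as well, e.g. something along these lines (an illustrative sketch only, not a
tested fix):

	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		if (!err) {
			lock_page(page);
			if (unlikely(!PageUptodate(page))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(page);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
			}
			unlock_page(page);
		}
		/* advance unconditionally so later fills land in the right slots */
		sum_entry++;
		list_del(&page->lru);
		__free_pages(page, 0);
	}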

> > +   lock_page(page);
> > +   if (unlikely(!PageUptodate(page))) {
> > +   err = -EIO;
> > +   } else {
> > +   rn = F2FS_NODE(page);
> > +   sum_entry->nid = rn->footer.nid;
> > +