Re: [f2fs-dev] [PATCH v3] dump.f2fs: add -I nid to dump inode by scan full disk

2022-06-14 Thread Yufen Yu via Linux-f2fs-devel

gentle ping...

On 2022/6/7 11:40, Yufen Yu wrote:

Usage:
dump.f2fs -I [inode nid] /dev/sda

This feature can be useful for debugging bugs caused by a system crash.
We need to dump not only the current valid node page, but also the
historical data on disk, which can give clues about status changes of the inode.

Signed-off-by: Yufen Yu 
---
  fsck/dump.c | 33 +
  fsck/fsck.h |  1 +
  fsck/main.c | 14 +-
  man/dump.f2fs.8 |  7 +++
  4 files changed, 54 insertions(+), 1 deletion(-)

diff --git a/fsck/dump.c b/fsck/dump.c
index fce86c9..b8f6144 100644
--- a/fsck/dump.c
+++ b/fsck/dump.c
@@ -539,6 +539,39 @@ static bool is_sit_bitmap_set(struct f2fs_sb_info *sbi, u32 blk_addr)
(const char *)se->cur_valid_map) != 0;
  }
  
+void dump_node_scan_disk(struct f2fs_sb_info *sbi, nid_t nid)
+{
+   struct f2fs_node *node_blk;
+   pgoff_t blkaddr;
+   int ret;
+   pgoff_t start_blkaddr = SM_I(sbi)->main_blkaddr;
+   pgoff_t end_blkaddr = start_blkaddr +
+   (SM_I(sbi)->main_segments << sbi->log_blocks_per_seg);
+
+   node_blk = calloc(BLOCK_SZ, 1);
+   ASSERT(node_blk);
+   MSG(0, "Info: scan all nid: %u from block_addr [%lu: %lu]\n",
+   nid, start_blkaddr, end_blkaddr);
+
+   for (blkaddr = start_blkaddr; blkaddr < end_blkaddr; blkaddr++) {
+   struct seg_entry *se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
+   if (se->type < CURSEG_HOT_NODE)
+   continue;
+
+   ret = dev_read_block(node_blk, blkaddr);
+   ASSERT(ret >= 0);
+   if (le32_to_cpu(node_blk->footer.ino) != nid ||
+   le32_to_cpu(node_blk->footer.nid) != nid)
+   continue;
+   MSG(0, "Info: nid: %u, blkaddr: %lu\n", nid, blkaddr);
+   MSG(0, "node_blk.footer.flag [0x%x]\n", le32_to_cpu(node_blk->footer.flag));
+   MSG(0, "node_blk.footer.cp_ver [%x]\n", (u32)(cpver_of_node(node_blk)));
+   print_inode_info(sbi, node_blk, 0);
+   }
+
+   free(node_blk);
+}
+
  int dump_node(struct f2fs_sb_info *sbi, nid_t nid, int force)
  {
struct node_info ni;
diff --git a/fsck/fsck.h b/fsck/fsck.h
index ce5fffe..0c819f0 100644
--- a/fsck/fsck.h
+++ b/fsck/fsck.h
@@ -262,6 +262,7 @@ struct dump_option {
int start_ssa;
int end_ssa;
int32_t blk_addr;
+   nid_t scan_nid;
  };
  
  extern void nat_dump(struct f2fs_sb_info *, nid_t, nid_t);

diff --git a/fsck/main.c b/fsck/main.c
index e4cfdf4..c7ad5ad 100644
--- a/fsck/main.c
+++ b/fsck/main.c
@@ -93,6 +93,7 @@ void dump_usage()
MSG(0, "[options]:\n");
MSG(0, "  -d debug level [default:0]\n");
MSG(0, "  -i inode no (hex)\n");
+   MSG(0, "  -I inode no (hex) scan full disk\n");
MSG(0, "  -n [NAT dump nid from #1~#2 (decimal), for all 0~-1]\n");
MSG(0, "  -M show a block map\n");
MSG(0, "  -s [SIT dump segno from #1~#2 (decimal), for all 0~-1]\n");
@@ -382,7 +383,7 @@ void f2fs_parse_options(int argc, char *argv[])
}
} else if (!strcmp("dump.f2fs", prog)) {
  #ifdef WITH_DUMP
-   const char *option_string = "d:i:n:Ms:Sa:b:V";
+   const char *option_string = "d:i:I:n:Ms:Sa:b:V";
static struct dump_option dump_opt = {
.nid = 0,   /* default root ino */
.start_nat = -1,
@@ -392,6 +393,7 @@ void f2fs_parse_options(int argc, char *argv[])
.start_ssa = -1,
.end_ssa = -1,
.blk_addr = -1,
+   .scan_nid = 0,
};
  
  		c.func = DUMP;

@@ -424,6 +426,14 @@ void f2fs_parse_options(int argc, char *argv[])
ret = sscanf(optarg, "%x",
&dump_opt.nid);
break;
+   case 'I':
+   if (strncmp(optarg, "0x", 2))
+   ret = sscanf(optarg, "%d",
+   &dump_opt.scan_nid);
+   else
+   ret = sscanf(optarg, "%x",
+   &dump_opt.scan_nid);
+   break;
case 'n':
ret = sscanf(optarg, "%d~%d",
&dump_opt.start_nat,
@@ -916,6 +926,8 @@ static void do_dump(struct f2fs_sb_info *sbi)
dump_info_from_blkaddr(sbi, opt->blk_addr);
if (opt->nid)
dump_node(sbi, opt->nid, 0);
+   if (opt->scan_nid)
+   dump_node_scan_disk(sbi, opt->scan_nid);
  
  	print_cp_state(flag);
  
diff --git 

[f2fs-dev] [PATCH 1/3] Fix the struct f2fs_dentry_block size check

2022-06-14 Thread Bart Van Assche
Fix the f2fs-tools build on systems for which PAGE_SIZE != 4096.

Cc: Peter Collingbourne 
Reported-by: Peter Collingbourne 
Signed-off-by: Bart Van Assche 
---
 include/f2fs_fs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/f2fs_fs.h b/include/f2fs_fs.h
index 21a7e70d952d..3b1bd6eb7dc7 100644
--- a/include/f2fs_fs.h
+++ b/include/f2fs_fs.h
@@ -1341,7 +1341,7 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
 };
 
-static_assert(sizeof(struct f2fs_dentry_block) == 4096, "");
+static_assert(sizeof(struct f2fs_dentry_block) == PAGE_SIZE, "");
 
 /* for inline stuff */
 #define DEF_INLINE_RESERVED_SIZE   1


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 0/3] Three f2fs patches

2022-06-14 Thread Bart Van Assche
Hi Jaegeuk,

This patch series fixes one issue reported by Peter Collingbourne and two
issues I discovered by reading the zoned block device source code. Please
consider these patches for inclusion in the official f2fs-tools repository.

Thanks,

Bart.

Bart Van Assche (3):
  Fix the struct f2fs_dentry_block size check
  Fix f2fs_report_zone()
  Improve compile-time type checking for f2fs_report_zone()

 include/f2fs_fs.h   |  6 --
 lib/libf2fs_zoned.c | 21 +
 2 files changed, 17 insertions(+), 10 deletions(-)



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 2/3] Fix f2fs_report_zone()

2022-06-14 Thread Bart Van Assche
The definition of struct blk_zone_report is as follows:

struct blk_zone_report {
	__u64	sector;
	__u32	nr_zones;
	__u32	flags;
	struct blk_zone zones[0];
};

Since f2fs_report_zone() allocates the above data structure with
malloc() and since f2fs_report_zone() only initializes the sector and
nr_zones members, the flags member is not initialized. Modify
f2fs_report_zone() such that 0 is passed as flags to the
BLKREPORTZONE ioctl instead of a random value. This has been
discovered by reading the source code.
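
For reference, a minimal sketch of the allocation pattern the patch below
switches to (illustrative only, not part of the patch):

#include <stdlib.h>
#include <linux/blkzoned.h>

/* Zero-initialize the whole report so that rep->flags (and any padding)
 * reaches the BLKREPORTZONE ioctl as 0 rather than as heap garbage. */
static struct blk_zone_report *alloc_one_zone_report(__u64 sector)
{
	struct blk_zone_report *rep;

	rep = calloc(1, sizeof(*rep) + sizeof(struct blk_zone));
	if (!rep)
		return NULL;
	rep->sector = sector;	/* first sector to report from */
	rep->nr_zones = 1;	/* room for exactly one zone entry */
	return rep;
}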

Cc: Shin'ichiro Kawasaki 
Fixes: 6d7c7b785feb ("libf2fs_zoned: Introduce f2fs_report_zone() helper function")
Signed-off-by: Bart Van Assche 
---
 lib/libf2fs_zoned.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/libf2fs_zoned.c b/lib/libf2fs_zoned.c
index f383ce275342..d8de66b82029 100644
--- a/lib/libf2fs_zoned.c
+++ b/lib/libf2fs_zoned.c
@@ -206,7 +206,8 @@ int f2fs_report_zone(int i, uint64_t sector, void *blkzone)
struct blk_zone_report *rep;
int ret = -1;
 
-   rep = malloc(sizeof(struct blk_zone_report) + sizeof(struct blk_zone));
+   rep = calloc(1, sizeof(struct blk_zone_report) +
+sizeof(struct blk_zone));
if (!rep) {
ERR_MSG("No memory for report zones\n");
return -ENOMEM;


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 3/3] Improve compile-time type checking for f2fs_report_zone()

2022-06-14 Thread Bart Van Assche
Change the type of the third argument of f2fs_report_zone() from void *
to struct blk_zone * to enable compile-time type checking.
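
For illustration (hypothetical caller, not part of the patch), the typed
prototype lets the compiler catch a mismatched argument that the old
void * version silently accepted:

#include <stdint.h>
#include <linux/blkzoned.h>
#include "f2fs_fs.h"	/* f2fs_report_zone() prototype */

static void example(void)
{
	struct blk_zone_report rep;
	struct blk_zone zone;

	/* With 'void *' both calls compiled; with 'struct blk_zone *' the
	 * first one now triggers -Wincompatible-pointer-types. */
	f2fs_report_zone(0, 4096, &rep);	/* wrong object, now diagnosed */
	f2fs_report_zone(0, 4096, &zone);	/* correct */
}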

Signed-off-by: Bart Van Assche 
---
 include/f2fs_fs.h   |  4 +++-
 lib/libf2fs_zoned.c | 22 +-
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/include/f2fs_fs.h b/include/f2fs_fs.h
index 3b1bd6eb7dc7..2dbb56967676 100644
--- a/include/f2fs_fs.h
+++ b/include/f2fs_fs.h
@@ -1560,9 +1560,11 @@ blk_zone_cond_str(struct blk_zone *blkz)
 
 #endif
 
+struct blk_zone;
+
 extern int f2fs_get_zoned_model(int);
 extern int f2fs_get_zone_blocks(int);
-extern int f2fs_report_zone(int, uint64_t, void *);
+extern int f2fs_report_zone(int, uint64_t, struct blk_zone *);
 typedef int (report_zones_cb_t)(int i, void *, void *);
 extern int f2fs_report_zones(int, report_zones_cb_t *, void *);
 extern int f2fs_check_zones(int);
diff --git a/lib/libf2fs_zoned.c b/lib/libf2fs_zoned.c
index d8de66b82029..f0df15220d56 100644
--- a/lib/libf2fs_zoned.c
+++ b/lib/libf2fs_zoned.c
@@ -200,21 +200,24 @@ int f2fs_get_zone_blocks(int i)
return 0;
 }
 
-int f2fs_report_zone(int i, uint64_t sector, void *blkzone)
+int f2fs_report_zone(int i, uint64_t sector, struct blk_zone *blkzone)
 {
-   struct blk_zone *blkz = (struct blk_zone *)blkzone;
-   struct blk_zone_report *rep;
+   struct one_zone_report {
+   struct blk_zone_report  rep;
+   struct blk_zone zone;
+   } *rep;
int ret = -1;
 
-   rep = calloc(1, sizeof(struct blk_zone_report) +
-sizeof(struct blk_zone));
+   rep = calloc(1, sizeof(*rep));
if (!rep) {
ERR_MSG("No memory for report zones\n");
return -ENOMEM;
}
 
-   rep->sector = sector;
-   rep->nr_zones = 1;
+   rep->rep = (struct blk_zone_report){
+   .sector = sector,
+   .nr_zones = 1,
+   };
ret = ioctl(c.devices[i].fd, BLKREPORTZONE, rep);
if (ret != 0) {
ret = -errno;
@@ -222,7 +225,7 @@ int f2fs_report_zone(int i, uint64_t sector, void *blkzone)
goto out;
}
 
-   *blkz = *(struct blk_zone *)(rep + 1);
+   *blkzone = rep->zone;
 out:
free(rep);
return ret;
@@ -531,7 +534,8 @@ uint32_t f2fs_get_usable_segments(struct f2fs_super_block *sb)
 
 #else
 
-int f2fs_report_zone(int i, uint64_t UNUSED(sector), void *UNUSED(blkzone))
+int f2fs_report_zone(int i, uint64_t UNUSED(sector),
+struct blk_zone *UNUSED(blkzone))
 {
ERR_MSG("%d: Unsupported zoned block device\n", i);
return -1;


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [linux-next:master] BUILD REGRESSION 35d872b9ea5b3ad784d7479ea728dcb688df2db7

2022-06-14 Thread kernel test robot
tree/branch: 
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
branch HEAD: 35d872b9ea5b3ad784d7479ea728dcb688df2db7  Add linux-next specific 
files for 20220614

Error/Warning: (recently discovered and may have been fixed)

drivers/gpu/drm/amd/amdgpu/../display/include/ddc_service_types.h:130:17: 
warning: 'DP_SINK_BRANCH_DEV_NAME_7580' defined but not used 
[-Wunused-const-variable=]

Unverified Error/Warning (likely false positive, please contact us if 
interested):

drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:9139:27: warning: 
variable 'abo' set but not used [-Wunused-but-set-variable]
drivers/staging/vme_user/vme.c:662:20: warning: dereference of NULL 'bridge' 
[CWE-476] [-Wanalyzer-null-dereference]
fs/f2fs/data.c:136:33: error: too many arguments to function 
'f2fs_end_read_compressed_page'
fs/f2fs/data.c:138:25: error: too many arguments to function 'f2fs_put_page_dic'

Error/Warning ids grouped by kconfigs:

gcc_recent_errors
|-- alpha-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- arc-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- arc-randconfig-r015-20220613
|   `-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|-- arm-allmodconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- arm-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- arm-randconfig-c002-20220612
|   `-- drivers-staging-vme_user-vme.c:warning:dereference-of-NULL-bridge-CWE
|-- arm64-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- i386-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- i386-debian-10.3
|   |-- 
fs-f2fs-data.c:error:too-many-arguments-to-function-f2fs_end_read_compressed_page
|   `-- fs-f2fs-data.c:error:too-many-arguments-to-function-f2fs_put_page_dic
|-- i386-debian-10.3-kselftests
|   |-- 
fs-f2fs-data.c:error:too-many-arguments-to-function-f2fs_end_read_compressed_page
|   `-- fs-f2fs-data.c:error:too-many-arguments-to-function-f2fs_put_page_dic
|-- ia64-allmodconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- ia64-allyesconfig
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- m68k-allmodconfig
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- m68k-allyesconfig
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- m68k-randconfig-r005-20220613
|   |-- 
fs-f2fs-data.c:error:too-many-arguments-to-function-f2fs_end_read_compressed_page
|   `-- fs-f2fs-data.c:error:too-many-arguments-to-function-f2fs_put_page_dic
|-- mips-allmodconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- mips-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- nios2-allyesconfig
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- parisc-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- powerpc-allmodconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set-but-not-used
|-- powerpc-allyesconfig
|   |-- 
drivers-gpu-drm-amd-amdgpu-..-display-amdgpu_dm-amdgpu_dm.c:warning:variable-abo-set-but-not-used
|   `-- 
drivers-staging-rtl8723bs-hal-hal_btcoex.c:warning:variable-pHalData-set

Re: [f2fs-dev] [PATCH] f2fs: handle decompress only post processing in softirq

2022-06-14 Thread Gao Xiang
On Tue, Jun 14, 2022 at 10:49:37AM -0700, Daeho Jeong wrote:
> > Yeah, I heard that you folks are really suffered from the scheduling
> > issues. But for my own previous experience, extra memory footprints are
> > really critical in Android low memory scenarios (no matter low-ended
> > devices or artificial workloads), it tossed me a lot. So I finally
> > ntroduced many inplace I/O to handle/minimize that, including inplace
> > I/O for compressed pages and temporary pages.
> >
> > But I'm not quite sure what's currently happening now, since we once
> > didn't have such non-deterministic workqueues, and I don't hear from
> > other landed vendors.  I think it'd be better to analyse what's going
> > on for these kworkers from scheduling POV and why they don't schedule
> > in time.
> >
> > I also have an idea is much like what I'm doing now for sync
> > decompression, is that just before lock page and ->read_folio, we can
> > trigger some decompression in addition to kworker decompression, but it
> > needs some MM modification, as below:
> >
> >!PageUptodate(page)
> >
> >some callback to decompress in addition to kworker
> >
> >lock_page()
> >->read_folio()
> >
> > If mm folks don't like it, I think RT thread is also fine after we
> > analysed the root cause of the kworker delay I think.
> >
> > Thanks,
> > Gao Xiang
> >
> > >
> > > Thanks,
> 
> I don't think this is not a problem with most devices, since the
> allocated memory is not too big and it'll be kept just as long as I/O
> processing is on. However, I still understand what you're worried
> about, so I think I can make a new mount option like "memory=low",
> which can be used to give a hint to F2FS to have a priority on as
> little memory as possible. In this mode, we will try to keep minimal
> memory and we can use the previous implementation for decompression.

Okay, one of our previous tests was how many applications are still
alive after many other applications boot. That matters since most users
want to keep as many apps alive as possible. I know we now have a
swap-like mechanism, but at one point this was done without swap. If you
reserve too much memory (with a page mempool or even for inflight I/O),
it will reduce the number of alive apps compared to the uncompressed
case on all devices (even high-end devices).

BTW, most crypto algorithms have hardware instructions to speed them
up; we actually have some in-house NEON lz4 assembly as well, but it is
still somewhat slower than crypto algorithms, not to mention algorithms
like zstd or lzma. Anyway, I personally prefer the RT thread way since
it's more flexible; also, for dm-verity, at least try WQ_HIGHPRI, and
I've seen:
https://android-review.googlesource.com/c/kernel/common/+/204421

But I'm not sure why it wasn't upstreamed.

Thanks,
Gao Xiang

> 
> Thanks,


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH] f2fs: handle decompress only post processing in softirq

2022-06-14 Thread Daeho Jeong
> Yeah, I heard that you folks are really suffered from the scheduling
> issues. But for my own previous experience, extra memory footprints are
> really critical in Android low memory scenarios (no matter low-ended
> devices or artificial workloads), it tossed me a lot. So I finally
> ntroduced many inplace I/O to handle/minimize that, including inplace
> I/O for compressed pages and temporary pages.
>
> But I'm not quite sure what's currently happening now, since we once
> didn't have such non-deterministic workqueues, and I don't hear from
> other landed vendors.  I think it'd be better to analyse what's going
> on for these kworkers from scheduling POV and why they don't schedule
> in time.
>
> I also have an idea is much like what I'm doing now for sync
> decompression, is that just before lock page and ->read_folio, we can
> trigger some decompression in addition to kworker decompression, but it
> needs some MM modification, as below:
>
>!PageUptodate(page)
>
>some callback to decompress in addition to kworker
>
>lock_page()
>->read_folio()
>
> If mm folks don't like it, I think RT thread is also fine after we
> analysed the root cause of the kworker delay I think.
>
> Thanks,
> Gao Xiang
>
> >
> > Thanks,

I don't think this is a problem on most devices, since the allocated
memory is not too big and it is only held as long as I/O processing is
in flight. However, I still understand what you're worried about, so I
think I can add a new mount option like "memory=low", which can be used
to hint to F2FS that it should prioritize using as little memory as
possible. In this mode, we will try to keep memory usage minimal and
can fall back to the previous implementation for decompression.
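
Just to make the idea concrete, a rough sketch (purely illustrative; the
option name and helper are assumptions, not an actual patch) of how such
a mode could be parsed at mount time:

/* Hypothetical "memory=" mount option; identifiers are illustrative. */
enum f2fs_memory_mode {
	MEMORY_MODE_NORMAL,	/* current behaviour */
	MEMORY_MODE_LOW,	/* prefer minimal memory footprint */
};

static int parse_memory_mode(const char *arg, enum f2fs_memory_mode *mode)
{
	if (!strcmp(arg, "normal"))
		*mode = MEMORY_MODE_NORMAL;
	else if (!strcmp(arg, "low"))
		*mode = MEMORY_MODE_LOW;
	else
		return -EINVAL;
	return 0;
}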

Thanks,


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH] f2fs: fix stubs when F2FS_FS_COMPRESSION is not enabled

2022-06-14 Thread Randy Dunlap
Fix build errors when F2FS_FS_COMPRESSION is not set:

../fs/f2fs/data.c: In function ‘f2fs_finish_read_bio’:
../fs/f2fs/data.c:136:5: error: too many arguments to function 
‘f2fs_end_read_compressed_page’
 f2fs_end_read_compressed_page(page, true, 0,
 ^
In file included from ../fs/f2fs/data.c:25:0:
../fs/f2fs/f2fs.h:4228:20: note: declared here
 static inline void f2fs_end_read_compressed_page(struct page *page,
^
../fs/f2fs/data.c:138:4: error: too many arguments to function 
‘f2fs_put_page_dic’
f2fs_put_page_dic(page, in_softirq);
^
In file included from ../fs/f2fs/data.c:25:0:
../fs/f2fs/f2fs.h:4233:20: note: declared here
 static inline void f2fs_put_page_dic(struct page *page)
^
../fs/f2fs/data.c: In function ‘f2fs_handle_step_decompress’:
../fs/f2fs/data.c:241:4: error: too many arguments to function 
‘f2fs_end_read_compressed_page’
f2fs_end_read_compressed_page(page, PageError(page),
^
In file included from ../fs/f2fs/data.c:25:0:
../fs/f2fs/f2fs.h:4228:20: note: declared here
 static inline void f2fs_end_read_compressed_page(struct page *page,
^

../fs/f2fs/data.c: In function ‘f2fs_finish_read_bio’:
../fs/f2fs/data.c:138:4: error: too many arguments to function 
‘f2fs_put_page_dic’
f2fs_put_page_dic(page, in_softirq);
^
In file included from ../fs/f2fs/data.c:25:0:
../fs/f2fs/f2fs.h:4234:20: note: declared here
 static inline void f2fs_put_page_dic(struct page *page)
^

Fixes: 1b565702dffe ("f2fs: handle decompress only post processing in softirq")
Signed-off-by: Randy Dunlap 
Cc: Daeho Jeong 
Cc: Jaegeuk Kim 
Cc: Chao Yu 
Cc: linux-f2fs-devel@lists.sourceforge.net
---
 fs/f2fs/f2fs.h |5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4226,11 +4226,12 @@ static inline int f2fs_init_compress_mem
 static inline void f2fs_destroy_compress_mempool(void) { }
 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
 static inline void f2fs_end_read_compressed_page(struct page *page,
-   bool failed, block_t blkaddr)
+   bool failed, block_t blkaddr,
+   bool in_softirq)
 {
WARN_ON_ONCE(1);
 }
-static inline void f2fs_put_page_dic(struct page *page)
+static inline void f2fs_put_page_dic(struct page *page, bool in_softirq)
 {
WARN_ON_ONCE(1);
 }


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH] f2fs: handle decompress only post processing in softirq

2022-06-14 Thread Gao Xiang
Hi Daeho,

On Tue, Jun 14, 2022 at 09:46:50AM -0700, Daeho Jeong wrote:
> >
> > Some my own previous thoughts about this strategy:
> >
> >  - If we allocate all memory and map these before I/Os, all inflight I/Os
> >will keep such temporary pages all the time until decompression is
> >finished. In contrast, if we allocate or reuse such pages just before
> >decompression, it would minimize the memory footprints.
> >
> >I think it will impact the memory numbers at least on the very
> >low-ended devices with bslow storage. (I've seen f2fs has some big
> >mempool already)
> >
> >  - Many compression algorithms are not suitable in the softirq contexts,
> >also I vaguely remembered if softirq context lasts for > 2ms, it will
> >push into ksoftirqd instead so it's actually another process context.
> >And it may delay other important interrupt handling.
> >
> >  - Go back to the non-deterministic scheduling of workqueues. I guess it
> >may be just due to scheduling punishment due to a lot of CPU consuming
> >due to decompression before so the priority becomes low, but that is
> >just a pure guess. May be we need to use RT scheduling policy instead.
> >
> >At least with WQ_HIGHPRI for dm-verity at least, but I don't find
> >WQ_HIGHPRI mark for dm-verity.
> >
> > Thanks,
> > Gao Xiang
> 
> I totally understand what you are worried about. However, in the real
> world, non-determinism from workqueues is more harsh than we expected.
> As you know, reading I/Os in the system are critical paths most of the
> time and now I/O variations with workqueue are too bad.
> 
> I also think it's better that we have RT scheduling like things here.
> We could think about it more.

Yeah, I heard that you folks really suffer from the scheduling issues.
But in my own previous experience, extra memory footprint is really
critical in Android low-memory scenarios (no matter whether on low-end
devices or with artificial workloads); it tripped me up a lot. So I
finally introduced a lot of in-place I/O to handle/minimize that,
including in-place I/O for compressed pages and temporary pages.

But I'm not quite sure what's happening now, since we once didn't have
such non-deterministic workqueues, and I don't hear this from other
vendors who have shipped it. I think it'd be better to analyse what's
going on with these kworkers from a scheduling POV and why they don't
get scheduled in time.

I also have an idea, much like what I'm doing now for sync
decompression: just before lock_page() and ->read_folio(), we can
trigger some decompression in addition to the kworker decompression,
but it needs some MM modification, as below:

   !PageUptodate(page)

   some callback to decompress in addition to kworker

   lock_page()
   ->read_folio()

If the mm folks don't like it, I think an RT thread is also fine once
we have analysed the root cause of the kworker delay.

Thanks,
Gao Xiang

> 
> Thanks,


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH] f2fs: handle decompress only post processing in softirq

2022-06-14 Thread Daeho Jeong
>
> Some my own previous thoughts about this strategy:
>
>  - If we allocate all memory and map these before I/Os, all inflight I/Os
>will keep such temporary pages all the time until decompression is
>finished. In contrast, if we allocate or reuse such pages just before
>decompression, it would minimize the memory footprints.
>
>I think it will impact the memory numbers at least on the very
>low-ended devices with bslow storage. (I've seen f2fs has some big
>mempool already)
>
>  - Many compression algorithms are not suitable in the softirq contexts,
>also I vaguely remembered if softirq context lasts for > 2ms, it will
>push into ksoftirqd instead so it's actually another process context.
>And it may delay other important interrupt handling.
>
>  - Go back to the non-deterministic scheduling of workqueues. I guess it
>may be just due to scheduling punishment due to a lot of CPU consuming
>due to decompression before so the priority becomes low, but that is
>just a pure guess. May be we need to use RT scheduling policy instead.
>
>At least with WQ_HIGHPRI for dm-verity at least, but I don't find
>WQ_HIGHPRI mark for dm-verity.
>
> Thanks,
> Gao Xiang

I totally understand what you are worried about. However, in the real
world, non-determinism from workqueues is harsher than we expected. As
you know, read I/Os in the system are on the critical path most of the
time, and right now the I/O latency variation with workqueues is too
bad.

I also think it would be better to have something like RT scheduling
here. We could think about it more.

Thanks,


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH] f2fs: handle decompress only post processing in softirq

2022-06-14 Thread Daeho Jeong
> One question: is this (the bio endio callback) actually guaranteed to be
> executed from a softirq?  If you look at dm-crypt's support for workqueue-less
> decryption, for example, it explicitly checks 'in_hardirq() || 
> irqs_disabled()'
> and schedules a tasklet if either of those is the case.
>
> - Eric

Oh, you're right. Even though it's safe to defer the whole release
process to a work item from the end_io function, it's better to check
the condition and handle the release right away when possible, as in
the rough sketch below.
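
Something along these lines, as a rough sketch only (the helper, field,
and workqueue names are made up for illustration, not from the actual patch):

static void f2fs_release_endio(struct decompress_io_ctx *ctx)
{
	/* Mirror the dm-crypt pattern mentioned above: only defer the
	 * release work when the current context cannot run it directly. */
	if (in_hardirq() || irqs_disabled())
		queue_work(release_wq, &ctx->release_work);	/* defer */
	else
		do_release(ctx);				/* run inline */
}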

Thanks,


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 1/2] resize.f2fs: add option to manually specify new overprovision

2022-06-14 Thread qixiaoyu1
From: liuchao12 

mkfs.f2fs supports manually specifying the overprovision ratio, and we
expect resize.f2fs to support it as well.

This change adds a new '-o' option to manually specify the overprovision ratio.
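
For illustration (hypothetical numbers): with '-o 5' and one segment per
section, the existing formula in resize.c yields
(2 * (100 / 5 + 1) + 6) * 1 = 48 reserved segments.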

Signed-off-by: liuchao12 
---
 fsck/main.c   |  8 ++--
 fsck/resize.c | 12 ++--
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/fsck/main.c b/fsck/main.c
index aef797e..3b4da0f 100644
--- a/fsck/main.c
+++ b/fsck/main.c
@@ -121,7 +121,8 @@ void resize_usage()
MSG(0, "[options]:\n");
MSG(0, "  -d debug level [default:0]\n");
MSG(0, "  -i extended node bitmap, node ratio is 20%% by default\n");
-   MSG(0, "  -s safe resize (Does not resize metadata)");
+   MSG(0, "  -o overprovision percentage [default:auto]\n");
+   MSG(0, "  -s safe resize (Does not resize metadata)\n");
MSG(0, "  -t target sectors [default: device size]\n");
MSG(0, "  -V print the version number and exit\n");
exit(1);
@@ -527,7 +528,7 @@ void f2fs_parse_options(int argc, char *argv[])
 #endif
} else if (!strcmp("resize.f2fs", prog)) {
 #ifdef WITH_RESIZE
-   const char *option_string = "d:fst:iV";
+   const char *option_string = "d:fst:io:V";
 
c.func = RESIZE;
while ((option = getopt(argc, argv, option_string)) != EOF) {
@@ -561,6 +562,9 @@ void f2fs_parse_options(int argc, char *argv[])
case 'i':
c.large_nat_bitmap = 1;
break;
+   case 'o':
+   c.new_overprovision = atof(optarg);
+   break;
case 'V':
show_version(prog);
exit(0);
diff --git a/fsck/resize.c b/fsck/resize.c
index f1b7701..d19c6fa 100644
--- a/fsck/resize.c
+++ b/fsck/resize.c
@@ -146,12 +146,15 @@ safe_resize:
get_sb(segs_per_sec));
 
/* Let's determine the best reserved and overprovisioned space */
-   c.new_overprovision = get_best_overprovision(sb);
+   if (c.new_overprovision == 0)
+   c.new_overprovision = get_best_overprovision(sb);
+
c.new_reserved_segments =
(2 * (100 / c.new_overprovision + 1) + 6) *
get_sb(segs_per_sec);
 
-   if ((get_sb(segment_count_main) - 2) < c.new_reserved_segments ||
+   if (c.new_overprovision == 0 ||
+   (get_sb(segment_count_main) - 2) < c.new_reserved_segments ||
get_sb(segment_count_main) * blks_per_seg >
get_sb(block_count)) {
MSG(0, "\tError: Device size is not sufficient for F2FS volume, "
@@ -476,6 +479,11 @@ static void rebuild_checkpoint(struct f2fs_sb_info *sbi,
set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
get_cp(rsvd_segment_count));
 
+   DBG(0, "Info: Overprovision ratio = %.3lf%%\n", c.new_overprovision);
+   DBG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
+   get_cp(overprov_segment_count),
+   c.new_reserved_segments);
+
free_segment_count = get_free_segments(sbi);
new_segment_count = get_newsb(segment_count_main) -
get_sb(segment_count_main);
-- 
2.36.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


[f2fs-dev] [PATCH 2/2] f2fs-tools: fix to check free space before grow

2022-06-14 Thread qixiaoyu1
Otherwise, after growing, the kernel may report the error messages
below when we mount the image if the -o parameter was specified during
resize, because the new overprovision can leave fewer user blocks than
the checkpoint already counts as valid:

F2FS-fs (loop0): invalid crc_offset: 0
F2FS-fs (loop0): Wrong valid_user_blocks: 16404, user_block_count: 13312
F2FS-fs (loop0): Failed to get valid F2FS checkpoint
mount(2) system call failed: Structure needs cleaning.

Signed-off-by: qixiaoyu1 
---
 fsck/resize.c | 36 
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/fsck/resize.c b/fsck/resize.c
index d19c6fa..e135b66 100644
--- a/fsck/resize.c
+++ b/fsck/resize.c
@@ -599,6 +599,26 @@ static void rebuild_checkpoint(struct f2fs_sb_info *sbi,
DBG(0, "Info: Done to rebuild checkpoint blocks\n");
 }
 
+static int f2fs_resize_check(struct f2fs_sb_info *sbi, struct f2fs_super_block *new_sb)
+{
+   struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
+   block_t user_block_count;
+   unsigned int overprov_segment_count;
+
+   overprov_segment_count = (get_newsb(segment_count_main) -
+   c.new_reserved_segments) *
+   c.new_overprovision / 100;
+   overprov_segment_count += c.new_reserved_segments;
+
+   user_block_count = (get_newsb(segment_count_main) -
+   overprov_segment_count) * c.blks_per_seg;
+
+   if (get_cp(valid_block_count) > user_block_count)
+   return -1;
+
+   return 0;
+}
+
 static int f2fs_resize_grow(struct f2fs_sb_info *sbi)
 {
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
@@ -616,6 +636,9 @@ static int f2fs_resize_grow(struct f2fs_sb_info *sbi)
if (get_new_sb(new_sb))
return -1;
 
+   if (f2fs_resize_check(sbi, new_sb) < 0)
+   return -1;
+
/* check nat availability */
if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
err = shrink_nats(sbi, new_sb);
@@ -659,11 +682,8 @@ static int f2fs_resize_shrink(struct f2fs_sb_info *sbi)
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
struct f2fs_super_block new_sb_raw;
struct f2fs_super_block *new_sb = &new_sb_raw;
-   struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
block_t old_end_blkaddr, old_main_blkaddr;
block_t new_end_blkaddr, new_main_blkaddr, tmp_end_blkaddr;
-   block_t user_block_count;
-   unsigned int overprov_segment_count;
unsigned int offset;
int err = -1;
 
@@ -674,15 +694,7 @@ static int f2fs_resize_shrink(struct f2fs_sb_info *sbi)
if (get_new_sb(new_sb))
return -1;
 
-   overprov_segment_count = (get_newsb(segment_count_main) -
-   c.new_reserved_segments) *
-   c.new_overprovision / 100;
-   overprov_segment_count += c.new_reserved_segments;
-
-   user_block_count = (get_newsb(segment_count_main) -
-   overprov_segment_count) * c.blks_per_seg;
-
-   if (get_cp(valid_block_count) > user_block_count)
+   if (f2fs_resize_check(sbi, new_sb) < 0)
return -1;
 
/* check nat availability */
-- 
2.36.1



___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel


Re: [f2fs-dev] [PATCH] f2fs: handle decompress only post processing in softirq

2022-06-14 Thread Gao Xiang
Hi all,

On Mon, Jun 13, 2022 at 10:38:25PM -0700, Eric Biggers wrote:
> [+Cc Nathan Huckleberry who is looking into a similar problem in dm-verity]
> 
> On Mon, Jun 13, 2022 at 08:56:12AM -0700, Daeho Jeong wrote:
> > From: Daeho Jeong 
> > 
> > Now decompression is being handled in workqueue and it makes read I/O
> > latency non-deterministic, because of the non-deterministic scheduling
> > nature of workqueues. So, I made it handled in softirq context only if
> > possible.
> > 
> > Signed-off-by: Daeho Jeong 

...

> 
> One question: is this (the bio endio callback) actually guaranteed to be
> executed from a softirq?  If you look at dm-crypt's support for workqueue-less
> decryption, for example, it explicitly checks 'in_hardirq() || 
> irqs_disabled()'
> and schedules a tasklet if either of those is the case.
> 
> - Eric
> 

Some of my own previous thoughts about this strategy:

 - If we allocate and map all the memory before the I/Os, every inflight
   I/O will hold such temporary pages the whole time until decompression
   is finished. In contrast, if we allocate or reuse such pages just
   before decompression, it minimizes the memory footprint.

   I think it will impact the memory numbers at least on very low-end
   devices with slow storage. (I've seen f2fs already has some big
   mempool.)

 - Many compression algorithms are not suitable for softirq context;
   also, I vaguely remember that if a softirq lasts for > 2ms it gets
   pushed into ksoftirqd, so it is actually another process context
   anyway. And it may delay other important interrupt handling.

 - Going back to the non-deterministic scheduling of workqueues: I guess
   it may just be scheduling punishment because decompression consumed a
   lot of CPU earlier, so the priority became low, but that is purely a
   guess. Maybe we need to use an RT scheduling policy instead.

   At least try WQ_HIGHPRI for dm-verity, but I don't see the WQ_HIGHPRI
   flag used in dm-verity (a rough sketch of such a workqueue follows
   below).
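
As a very rough sketch of that direction (purely illustrative; the
workqueue name and sizing are assumptions, not an actual patch):

#include <linux/workqueue.h>

static struct workqueue_struct *decompress_wq;

static int decompress_wq_init(void)
{
	/* High-priority, unbound workqueue so the decompression work is
	 * less exposed to the scheduling punishment guessed at above. */
	decompress_wq = alloc_workqueue("f2fs_decompress",
					WQ_HIGHPRI | WQ_UNBOUND, 0);
	return decompress_wq ? 0 : -ENOMEM;
}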

Thanks,
Gao Xiang


___
Linux-f2fs-devel mailing list
Linux-f2fs-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel