All callers now have a folio so pass it in.
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
---
fs/f2fs/extent_cache.c | 2 +-
fs/f2fs/file.c | 2 +-
fs/f2fs/gc.c | 2 +-
fs/f2fs/node.c | 4 ++--
fs/f2fs/node.h | 6 +++---
fs/f2fs/recovery.c | 14 +++++++-------
6 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index a5d5bc8c0da5..941e85fe091e 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -934,7 +934,7 @@ static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type typ
if (!__may_extent_tree(dn->inode, type))
return;
- ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(&dn->node_folio->page), dn->inode) +
+ ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio), dn->inode) +
dn->ofs_in_node;
ei.len = 1;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ded2f48df351..b37a1aae2708 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -708,7 +708,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
* once we invalidate valid blkaddr in range [ofs, ofs + count],
* we will invalidate all blkaddr in the whole range.
*/
- fofs = f2fs_start_bidx_of_node(ofs_of_node(&dn->node_folio->page),
+ fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_folio),
dn->inode) + ofs;
f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
f2fs_update_age_extent_cache_range(dn, fofs, len);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index b2645a18a596..b681d0f23c57 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1177,7 +1177,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return false;
}
- *nofs = ofs_of_node(&node_folio->page);
+ *nofs = ofs_of_node(node_folio);
source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
f2fs_folio_put(node_folio, true);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index a2258883ca77..db81c54ef7f6 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1479,12 +1479,12 @@ static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
if (unlikely(nid != nid_of_node(folio) ||
(ntype == NODE_TYPE_INODE && !IS_INODE(page)) ||
(ntype == NODE_TYPE_XATTR &&
- !f2fs_has_xattr_block(ofs_of_node(page))) ||
+ !f2fs_has_xattr_block(ofs_of_node(folio))) ||
time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))) {
f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
"node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
ntype, nid, nid_of_node(folio), ino_of_node(folio),
- ofs_of_node(page), cpver_of_node(folio),
+ ofs_of_node(folio), cpver_of_node(folio),
next_blkaddr_of_node(folio));
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index d366e2e25498..fecbd0c4aea5 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -255,9 +255,9 @@ static inline nid_t nid_of_node(const struct folio *node_folio)
return le32_to_cpu(rn->footer.nid);
}
-static inline unsigned int ofs_of_node(const struct page *node_page)
+static inline unsigned int ofs_of_node(const struct folio *node_folio)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
+ struct f2fs_node *rn = F2FS_NODE(&node_folio->page);
unsigned flag = le32_to_cpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
@@ -352,7 +352,7 @@ static inline bool is_recoverable_dnode(const struct folio *folio)
*/
static inline bool IS_DNODE(const struct folio *node_folio)
{
- unsigned int ofs = ofs_of_node(&node_folio->page);
+ unsigned int ofs = ofs_of_node(node_folio);
if (f2fs_has_xattr_block(ofs))
return true;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 3bfcf5c297a5..7f5d7499bd68 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -552,7 +552,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
if (IS_ERR(node_folio))
return PTR_ERR(node_folio);
- offset = ofs_of_node(&node_folio->page);
+ offset = ofs_of_node(node_folio);
ino = ino_of_node(node_folio);
f2fs_folio_put(node_folio, true);
@@ -632,7 +632,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
err = f2fs_recover_inline_xattr(inode, folio);
if (err)
goto out;
- } else if (f2fs_has_xattr_block(ofs_of_node(&folio->page))) {
+ } else if (f2fs_has_xattr_block(ofs_of_node(folio))) {
err = f2fs_recover_xattr_data(inode, folio);
if (!err)
recovered++;
@@ -648,7 +648,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
/* step 3: recover data indices */
- start = f2fs_start_bidx_of_node(ofs_of_node(&folio->page), inode);
+ start = f2fs_start_bidx_of_node(ofs_of_node(folio), inode);
end = start + ADDRS_PER_PAGE(&folio->page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -670,10 +670,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
f2fs_bug_on(sbi, ni.ino != ino_of_node(folio));
- if (ofs_of_node(&dn.node_folio->page) != ofs_of_node(&folio->page)) {
+ if (ofs_of_node(dn.node_folio) != ofs_of_node(folio)) {
f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
- inode->i_ino, ofs_of_node(&dn.node_folio->page),
- ofs_of_node(&folio->page));
+ inode->i_ino, ofs_of_node(dn.node_folio),
+ ofs_of_node(folio));
err = -EFSCORRUPTED;
f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
goto err;
@@ -760,7 +760,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
copy_node_footer(dn.node_folio, folio);
fill_node_footer(dn.node_folio, dn.nid, ni.ino,
- ofs_of_node(&folio->page), false);
+ ofs_of_node(folio), false);
folio_mark_dirty(dn.node_folio);
err:
f2fs_put_dnode(&dn);
--
2.47.2
_______________________________________________
Linux-f2fs-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel