Add support for generating / verifying protection information in the file system. This is largely done by simply setting the IOMAP_F_INTEGRITY flag and letting iomap do all of the work. XFS just has to ensure that the data read completions for integrity data are run from user context.
For zoned writeback, XFS also has to generate the integrity data itself as the zoned writeback path is not using the generic writeback_submit implementation. Signed-off-by: Christoph Hellwig <[email protected]> Reviewed-by: "Darrick J. Wong" <[email protected]> Tested-by: Anuj Gupta <[email protected]> --- fs/xfs/xfs_aops.c | 47 ++++++++++++++++++++++++++++++++++++++++++---- fs/xfs/xfs_iomap.c | 9 ++++++--- 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 76678814f46f..f279055fcea0 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -22,6 +22,7 @@ #include "xfs_icache.h" #include "xfs_zone_alloc.h" #include "xfs_rtgroup.h" +#include <linux/bio-integrity.h> struct xfs_writepage_ctx { struct iomap_writepage_ctx ctx; @@ -661,6 +662,8 @@ xfs_zoned_writeback_submit( bio_endio(&ioend->io_bio); return error; } + if (wpc->iomap.flags & IOMAP_F_INTEGRITY) + fs_bio_integrity_generate(&ioend->io_bio); xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone); return 0; } @@ -741,12 +744,45 @@ xfs_vm_bmap( return iomap_bmap(mapping, block, &xfs_read_iomap_ops); } +static void +xfs_bio_submit_read( + const struct iomap_iter *iter, + struct iomap_read_folio_ctx *ctx) +{ + struct bio *bio = ctx->read_ctx; + + /* defer read completions to the ioend workqueue */ + iomap_init_ioend(iter->inode, bio, ctx->read_ctx_file_offset, 0); + bio->bi_end_io = xfs_end_bio; + submit_bio(bio); +} + +static const struct iomap_read_ops xfs_iomap_read_ops = { + .read_folio_range = iomap_bio_read_folio_range, + .submit_read = xfs_bio_submit_read, + .bio_set = &iomap_ioend_bioset, +}; + +static inline const struct iomap_read_ops * +xfs_get_iomap_read_ops( + const struct address_space *mapping) +{ + struct xfs_inode *ip = XFS_I(mapping->host); + + if (bdev_has_integrity_csum(xfs_inode_buftarg(ip)->bt_bdev)) + return &xfs_iomap_read_ops; + return &iomap_bio_read_ops; +} + STATIC int xfs_vm_read_folio( - struct file *unused, - struct folio 
*folio) + struct file *file, + struct folio *folio) { - iomap_bio_read_folio(folio, &xfs_read_iomap_ops); + struct iomap_read_folio_ctx ctx = { .cur_folio = folio }; + + ctx.ops = xfs_get_iomap_read_ops(folio->mapping); + iomap_read_folio(&xfs_read_iomap_ops, &ctx, NULL); return 0; } @@ -754,7 +790,10 @@ STATIC void xfs_vm_readahead( struct readahead_control *rac) { - iomap_bio_readahead(rac, &xfs_read_iomap_ops); + struct iomap_read_folio_ctx ctx = { .rac = rac }; + + ctx.ops = xfs_get_iomap_read_ops(rac->mapping); + iomap_readahead(&xfs_read_iomap_ops, &ctx, NULL); } static int diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index be86d43044df..9c2f12d5fec9 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -143,11 +143,14 @@ xfs_bmbt_to_iomap( } iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); - if (mapping_flags & IOMAP_DAX) + iomap->flags = iomap_flags; + if (mapping_flags & IOMAP_DAX) { iomap->dax_dev = target->bt_daxdev; - else + } else { iomap->bdev = target->bt_bdev; - iomap->flags = iomap_flags; + if (bdev_has_integrity_csum(iomap->bdev)) + iomap->flags |= IOMAP_F_INTEGRITY; + } /* * If the inode is dirty for datasync purposes, let iomap know so it -- 2.47.3
