Also NAKing this patch since you refused to test it. I said I'd test it since you hadn't, but you couldn't wait.
- Joe On Wed, Mar 22, 2023 at 6:19 PM Mike Snitzer <snit...@kernel.org> wrote: > The block core (bio_split_discard) will already split discards based > on the 'discard_granularity' and 'max_discard_sectors' queue_limits. > But the DM thin target also needs to ensure that it doesn't receive a > discard that spans a 'max_discard_sectors' boundary. > > Introduce a dm_target 'max_discard_granularity' flag that if set will > cause DM core to split discard bios relative to 'max_discard_sectors'. > This treats 'discard_granularity' as a "min_discard_granularity" and > 'max_discard_sectors' as a "max_discard_granularity". > > Requested-by: Joe Thornber <e...@redhat.com> > Signed-off-by: Mike Snitzer <snit...@kernel.org> > --- > drivers/md/dm.c | 54 +++++++++++++++++++---------------- > include/linux/device-mapper.h | 6 ++++ > include/uapi/linux/dm-ioctl.h | 4 +-- > 3 files changed, 37 insertions(+), 27 deletions(-) > > diff --git a/drivers/md/dm.c b/drivers/md/dm.c > index b6ace995b9ca..eeb58f89369e 100644 > --- a/drivers/md/dm.c > +++ b/drivers/md/dm.c > @@ -1162,7 +1162,8 @@ static inline sector_t > max_io_len_target_boundary(struct dm_target *ti, > return ti->len - target_offset; > } > > -static sector_t max_io_len(struct dm_target *ti, sector_t sector) > +static sector_t __max_io_len(struct dm_target *ti, sector_t sector, > + unsigned int max_granularity) > { > sector_t target_offset = dm_target_offset(ti, sector); > sector_t len = max_io_len_target_boundary(ti, target_offset); > @@ -1173,11 +1174,16 @@ static sector_t max_io_len(struct dm_target *ti, > sector_t sector) > * explains why stacked chunk_sectors based splitting via > * bio_split_to_limits() isn't possible here. 
> */ > - if (!ti->max_io_len) > + if (!max_granularity) > return len; > return min_t(sector_t, len, > min(queue_max_sectors(ti->table->md->queue), > - blk_chunk_sectors_left(target_offset, > ti->max_io_len))); > + blk_chunk_sectors_left(target_offset, > max_granularity))); > +} > + > +static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) > +{ > + return __max_io_len(ti, sector, ti->max_io_len); > } > > int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) > @@ -1561,26 +1567,6 @@ static void __send_empty_flush(struct clone_info > *ci) > bio_uninit(ci->bio); > } > > -static void __send_changing_extent_only(struct clone_info *ci, struct > dm_target *ti, > - unsigned int num_bios) > -{ > - unsigned int len, bios; > - > - len = min_t(sector_t, ci->sector_count, > - max_io_len_target_boundary(ti, dm_target_offset(ti, > ci->sector))); > - > - atomic_add(num_bios, &ci->io->io_count); > - bios = __send_duplicate_bios(ci, ti, num_bios, &len); > - /* > - * alloc_io() takes one extra reference for submission, so the > - * reference won't reach 0 without the following (+1) subtraction > - */ > - atomic_sub(num_bios - bios + 1, &ci->io->io_count); > - > - ci->sector += len; > - ci->sector_count -= len; > -} > - > static bool is_abnormal_io(struct bio *bio) > { > enum req_op op = bio_op(bio); > @@ -1602,11 +1588,16 @@ static bool is_abnormal_io(struct bio *bio) > static blk_status_t __process_abnormal_io(struct clone_info *ci, > struct dm_target *ti) > { > - unsigned int num_bios = 0; > + unsigned int bios, num_bios = 0; > + unsigned int len, max_granularity = 0; > > switch (bio_op(ci->bio)) { > case REQ_OP_DISCARD: > num_bios = ti->num_discard_bios; > + if (ti->max_discard_granularity) { > + struct queue_limits *limits = > dm_get_queue_limits(ti->table->md); > + max_granularity = limits->max_discard_sectors; > + } > break; > case REQ_OP_SECURE_ERASE: > num_bios = ti->num_secure_erase_bios; > @@ -1627,7 +1618,20 @@ static blk_status_t 
__process_abnormal_io(struct > clone_info *ci, > if (unlikely(!num_bios)) > return BLK_STS_NOTSUPP; > > - __send_changing_extent_only(ci, ti, num_bios); > + len = min_t(sector_t, ci->sector_count, > + __max_io_len(ti, ci->sector, max_granularity)); > + > + atomic_add(num_bios, &ci->io->io_count); > + bios = __send_duplicate_bios(ci, ti, num_bios, &len); > + /* > + * alloc_io() takes one extra reference for submission, so the > + * reference won't reach 0 without the following (+1) subtraction > + */ > + atomic_sub(num_bios - bios + 1, &ci->io->io_count); > + > + ci->sector += len; > + ci->sector_count -= len; > + > return BLK_STS_OK; > } > > diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h > index 7975483816e4..8aa6b3ea91fa 100644 > --- a/include/linux/device-mapper.h > +++ b/include/linux/device-mapper.h > @@ -358,6 +358,12 @@ struct dm_target { > */ > bool discards_supported:1; > > + /* > + * Set if this target requires that discards be split on both > + * 'discard_granularity' and 'max_discard_sectors' boundaries. > + */ > + bool max_discard_granularity:1; > + > /* > * Set if we need to limit the number of in-flight bios when > swapping. > */ > diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h > index 7edf335778ba..1990b5700f69 100644 > --- a/include/uapi/linux/dm-ioctl.h > +++ b/include/uapi/linux/dm-ioctl.h > @@ -286,9 +286,9 @@ enum { > #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, > struct dm_ioctl) > > #define DM_VERSION_MAJOR 4 > -#define DM_VERSION_MINOR 47 > +#define DM_VERSION_MINOR 48 > #define DM_VERSION_PATCHLEVEL 0 > -#define DM_VERSION_EXTRA "-ioctl (2022-07-28)" > +#define DM_VERSION_EXTRA "-ioctl (2023-03-01)" > > /* Status bits */ > #define DM_READONLY_FLAG (1 << 0) /* In/Out */ > -- > 2.40.0 > >
-- dm-devel mailing list dm-devel@redhat.com https://listman.redhat.com/mailman/listinfo/dm-devel