Re: [dm-devel] [PATCH RFC] dm thin: Add support for online trim to dm-thinpool

2023-10-09 Thread Joe Thornber
On Sat, Oct 7, 2023 at 2:33 AM Sarthak Kukreti 
wrote:

> Currently, dm-thinpool only supports offline trim: there is
> a userspace tool called `thin_trim` (part of `thin-provisioning-tools`),
> that will look up all the unmapped regions within the thinpool
> and issue discards to these regions. However, this can take up a lot of
> time, especially on larger storage devices.
>

I think this will work.  Give me a couple of days to do some testing, then
I'll get back to you.
--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel


[dm-devel] [PATCH RFC] dm thin: Add support for online trim to dm-thinpool

2023-10-06 Thread Sarthak Kukreti
Currently, dm-thinpool only supports offline trim: there is
a userspace tool called `thin_trim` (part of `thin-provisioning-tools`),
that will look up all the unmapped regions within the thinpool
and issue discards to these regions. However, this can take up a lot of
time, especially on larger storage devices.

This patch augments dm-thinpool's no passdown mode with a message
mechanism that allows userspace to issue 'trim' messages to regions
of the thinpool. In turn, dm-thinpool re-uses the existing discard
mechanism to issue a discard bio to only send discards to regions in
the data device that are currently not mapped on other dm-thin devices.

This patch is intended as an RFC with the following open questions
(assuming that the approach is even viable):

- Most of the patch is modifying current discard functions to work
  without an attached dm-thin context (tc). Would it be preferable to
  keep this path independent of the thin volume discard path?

- Current RFC patch doesn't protect against userspace attempting to
  discard large ranges of extents in the thinpool (which puts the
  thinpool in read-only mode). What would the optimal way to prevent
  trim from locking up the space in the thinpool (chunking requests?)?

- The patch currently only sets up the 'trim' capability if no passdown
  is enabled: the reasoning behind that is if passdown is enabled,
  thin_trim doesn't necessarily need the trim capability iff all the
  dm-thin devices can passdown requests from the filesystem already.
  Would it be preferable to have this capability across all discard
  modes?

Signed-off-by: Sarthak Kukreti 
---
 drivers/md/dm-thin.c | 156 +--
 1 file changed, 121 insertions(+), 35 deletions(-)

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 07c7f9795b10..9a8eeebd9f49 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -280,6 +280,7 @@ struct pool {
 
process_mapping_fn process_prepared_mapping;
process_mapping_fn process_prepared_discard;
+   process_mapping_fn process_prepared_trim;
process_mapping_fn process_prepared_discard_pt2;
 
struct dm_bio_prison_cell **cell_sort_array;
@@ -379,17 +380,17 @@ static sector_t block_to_sectors(struct pool *pool, 
dm_block_t b)
 /**/
 
 struct discard_op {
-   struct thin_c *tc;
+   struct pool *pool;
struct blk_plug plug;
struct bio *parent_bio;
struct bio *bio;
 };
 
-static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio 
*parent)
+static void begin_discard(struct discard_op *op, struct pool *pool, struct bio 
*parent)
 {
BUG_ON(!parent);
 
-   op->tc = tc;
+   op->pool = pool;
	blk_start_plug(&op->plug);
op->parent_bio = parent;
op->bio = NULL;
@@ -397,11 +398,11 @@ static void begin_discard(struct discard_op *op, struct 
thin_c *tc, struct bio *
 
 static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t 
data_e)
 {
-   struct thin_c *tc = op->tc;
-   sector_t s = block_to_sectors(tc->pool, data_b);
-   sector_t len = block_to_sectors(tc->pool, data_e - data_b);
+   struct pool *pool = op->pool;
+   sector_t s = block_to_sectors(pool, data_b);
+   sector_t len = block_to_sectors(pool, data_e - data_b);
 
-   return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, 
&op->bio);
+   return __blkdev_issue_discard(pool->data_dev, s, len, GFP_NOIO, 
&op->bio);
 }
 
 static void end_discard(struct discard_op *op, int r)
@@ -813,6 +814,7 @@ struct dm_thin_new_mapping {
 
blk_status_t status;
struct thin_c *tc;
+   struct pool *pool;
dm_block_t virt_begin, virt_end;
dm_block_t data_block;
struct dm_bio_prison_cell *cell;
@@ -829,7 +831,7 @@ struct dm_thin_new_mapping {
 
 static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
 {
-   struct pool *pool = m->tc->pool;
+   struct pool *pool = m->pool;
 
	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
@@ -840,7 +842,7 @@ static void __complete_mapping_preparation(struct 
dm_thin_new_mapping *m)
 static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
 {
unsigned long flags;
-   struct pool *pool = m->tc->pool;
+   struct pool *pool = m->pool;
 
	spin_lock_irqsave(&pool->lock, flags);
__complete_mapping_preparation(m);
@@ -955,9 +957,9 @@ static void inc_remap_and_issue_cell(struct thin_c *tc,
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-   cell_error(m->tc->pool, m->cell);
+   cell_error(m->pool, m->cell);
	list_del(&m->list);
-   mempool_free(m, &m->tc->pool->mapping_pool);
+   mempool_free(m, &m->pool->mapping_pool);
 }
 
 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
@@