On Tue, 07/23 18:29, Benoît Canet wrote:
> This metrics show how many percent of the time I/Os are put on hold by the
> throttling algorithm.
> This metric could be used by system administrators or cloud stack developpers
> to decide when the throttling settings must be changed.
>
> Signed-off-by: Benoit Canet <ben...@irqsave.net>
> ---
>  block.c                   | 27 ++++++++++++++++++++++++++-
>  block/qapi.c              |  4 ++++
>  hmp.c                     |  6 ++++--
>  include/block/block_int.h |  2 ++
>  qapi-schema.json          |  4 +++-
>  5 files changed, 39 insertions(+), 4 deletions(-)
>
> diff --git a/block.c b/block.c
> index bb4f8e4..6bb8570 100644
> --- a/block.c
> +++ b/block.c
> @@ -118,12 +118,21 @@ int is_windows_drive(const char *filename)
>  #endif
>
>  /* throttling disk I/O limits */
> +static void bdrv_reset_throttling_metrics(BlockDriverState *bs)
> +{
> +    /* iddle -> reset values */
> +    bs->throttling_percentage = 0;
> +    bs->full_since = 0;
> +}
> +
>  void bdrv_io_limits_disable(BlockDriverState *bs)
>  {
>      bs->io_limits_enabled = false;
>
>      while (qemu_co_enter_next(&bs->throttled_reqs)) {
>      }
> +
> +    bdrv_reset_throttling_metrics(bs);
>  }
>
>  static void bdrv_make_bps_buckets_leak(BlockDriverState *bs, int64_t delta)
> @@ -213,7 +222,8 @@ static void bdrv_make_iops_buckets_leak(BlockDriverState *bs, int64_t delta)
>  static void bdrv_leak_if_needed(BlockDriverState *bs)
>  {
>      int64_t now;
> -    int64_t delta;
> +    int64_t delta;      /* the delta that would be ideally the timer period */
> +    int64_t delta_full; /* the delta where the bucket is full */
>
>      if (!bs->must_leak) {
>          return;
> @@ -223,6 +233,14 @@ static void bdrv_leak_if_needed(BlockDriverState *bs)
>
>      now = qemu_get_clock_ns(rt_clock);
>      delta = now - bs->previous_leak;
> +    /* compute throttle percentage reflecting how long IO are hold on average */
> +    if (bs->full_since) {
> +        delta_full = now - bs->full_since;
> +        bs->throttling_percentage = (delta_full * 100) / delta;
> +        bs->full_since = 0;
If I understand it correctly, the percentage is recalculated on every leak
check, so it only reflects the instantaneous I/O flow rather than historical
statistics? For example, a device whose requests were blocked for 60 ms of a
100 ms leak period reports 60, but the value drops back to 0 after a single
period without throttling. For system administration purposes I think it is
good to know the I/O activity over a longer range. Or do you think the
management tool should sample it?
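To illustrate the sampling idea: the management tool could poll the
instantaneous value periodically and keep a moving average itself. A rough
sketch of such a sampler (the helper name is made up and the QMP plumbing
is stubbed out, this is not part of the patch):

    /* Poll the instantaneous throttling_percentage once per second and
     * fold it into an exponential moving average, so that the longer
     * range behaviour stays visible even though QEMU only reports the
     * last leak period. */
    #include <stdio.h>
    #include <unistd.h>

    /* Stub: a real tool would do a QMP "query-block" round trip here
     * and read throttling_percentage from the inserted image. */
    static int query_throttling_percentage(const char *device)
    {
        (void)device;
        return 0;
    }

    int main(void)
    {
        double avg = 0.0;
        const double alpha = 0.1; /* weight of newest sample, ~10 s window */

        for (;;) {
            int sample = query_throttling_percentage("virtio0");
            avg = alpha * sample + (1.0 - alpha) * avg;
            printf("instant=%d%% smoothed=%.1f%%\n", sample, avg);
            sleep(1);
        }
    }

But maybe it would be simpler for everyone if QEMU accumulated such an
average itself instead of exposing only the instantaneous value.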
> +    } else {
> +        bs->throttling_percentage = 0;
> +    }
>      bs->previous_leak = now;
>
>      bdrv_make_bps_buckets_leak(bs, delta);
> @@ -260,6 +278,7 @@ static void bdrv_block_timer(void *opaque)
>      /* disable throttling time on iddle for economy purpose */
>      if (bdrv_throttling_is_iddle(bs)) {
>          bdrv_block_timer_disable(bs);
> +        bdrv_reset_throttling_metrics(bs);
>          return;
>      }
>
> @@ -280,6 +299,7 @@ static void bdrv_block_timer_enable(BlockDriverState *bs)
>
>      bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
>      bs->previous_leak = qemu_get_clock_ns(rt_clock);
> +    bdrv_reset_throttling_metrics(bs);
>      qemu_mod_timer(bs->block_timer,
>                     qemu_get_clock_ns(vm_clock) +
>                     BLOCK_IO_THROTTLE_PERIOD);
> @@ -432,6 +452,11 @@ static void bdrv_io_limits_intercept(BlockDriverState *bs,
>       * not full
>       */
>      while (bdrv_is_any_threshold_exceeded(bs, nb_sectors, is_write)) {
> +        /* remember since when the code decided to block the first I/O */
> +        if (qemu_co_queue_empty(&bs->throttled_reqs)) {
> +            bs->full_since = qemu_get_clock_ns(rt_clock);
> +        }
> +
>          bdrv_leak_if_needed(bs);
>          qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
>          bdrv_leak_if_needed(bs);
> diff --git a/block/qapi.c b/block/qapi.c
> index f81081c..bd1c6af 100644
> --- a/block/qapi.c
> +++ b/block/qapi.c
> @@ -263,6 +263,10 @@ void bdrv_query_info(BlockDriverState *bs,
>                  bs->io_limits.iops_sector_count;
>              info->inserted->iops_sector_count =
>                  bs->io_limits.iops_sector_count;
> +            info->inserted->has_throttling_percentage =
> +                bs->throttling_percentage;
> +            info->inserted->throttling_percentage =
> +                bs->throttling_percentage;
>          }
>
>          bs0 = bs;
> diff --git a/hmp.c b/hmp.c
> index 3912305..9dc4862 100644
> --- a/hmp.c
> +++ b/hmp.c
> @@ -348,7 +348,8 @@ void hmp_info_block(Monitor *mon, const QDict *qdict)
>                             " iops_threshold=%" PRId64
>                             " iops_rd_threshold=%" PRId64
>                             " iops_wr_threshold=%" PRId64
> -                           " iops_sector_count=%" PRId64 "\n",
> +                           " iops_sector_count=%" PRId64
> +                           " throttling_percentage=%" PRId64 "\n",
>                             info->value->inserted->bps,
>                             info->value->inserted->bps_rd,
>                             info->value->inserted->bps_wr,
> @@ -361,7 +362,8 @@ void hmp_info_block(Monitor *mon, const QDict *qdict)
>                             info->value->inserted->iops_threshold,
>                             info->value->inserted->iops_rd_threshold,
>                             info->value->inserted->iops_wr_threshold,
> -                           info->value->inserted->iops_sector_count);
> +                           info->value->inserted->iops_sector_count,
> +                           info->value->inserted->throttling_percentage);
>      } else {
>          monitor_printf(mon, " [not inserted]");
>      }
> diff --git a/include/block/block_int.h b/include/block/block_int.h
> index 74d7503..4487cd9 100644
> --- a/include/block/block_int.h
> +++ b/include/block/block_int.h
> @@ -271,6 +271,8 @@ struct BlockDriverState {
>      BlockIOLimit io_limits;
>      BlockIOBaseValue leaky_buckets;
>      int64_t previous_leak;
> +    int64_t full_since;
> +    int throttling_percentage;
>      bool must_leak;
>      CoQueue throttled_reqs;
>      QEMUTimer *block_timer;
> diff --git a/qapi-schema.json b/qapi-schema.json
> index d579fda..14a02e7 100644
> --- a/qapi-schema.json
> +++ b/qapi-schema.json
> @@ -783,6 +783,8 @@
>  #
>  # @iops_sector_count: #optional an I/O size in sector (Since 1.6)
>  #
> +# @throttling_percentage: #optional reflect throttling activity (Since 1.6)
> +#
>  # Since: 0.14.0
>  #
>  # Notes: This interface is only found in @BlockInfo.
> @@ -797,7 +799,7 @@
>              '*bps_threshold': 'int', '*bps_rd_threshold': 'int',
>              '*bps_wr_threshold': 'int', '*iops_threshold': 'int',
>              '*iops_rd_threshold': 'int', '*iops_wr_threshold': 'int',
> -            '*iops_sector_count': 'int' } }
> +            '*iops_sector_count': 'int', '*throttling_percentage': 'int' } }
>
>  ##
>  # @BlockDeviceIoStatus:
> --
> 1.7.10.4
>
>

--
Fam