[PATCH v9 3/5] block: add accounting for zone append operation

2023-03-27 Thread Sam Li
To account for the new zone append write operation on zoned devices, the
BLOCK_ACCT_ZONE_APPEND enum value is introduced as another I/O request type,
alongside the existing ones (read, write, flush).

Signed-off-by: Sam Li 
---
 block/qapi-sysemu.c| 11 ++
 block/qapi.c   | 18 ++
 hw/block/virtio-blk.c  |  4 +++
 include/block/accounting.h |  1 +
 qapi/block-core.json   | 68 --
 qapi/block.json|  4 +++
 6 files changed, 95 insertions(+), 11 deletions(-)

diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c
index 7bd7554150..cec3c1afb4 100644
--- a/block/qapi-sysemu.c
+++ b/block/qapi-sysemu.c
@@ -517,6 +517,7 @@ void qmp_block_latency_histogram_set(
 bool has_boundaries, uint64List *boundaries,
 bool has_boundaries_read, uint64List *boundaries_read,
 bool has_boundaries_write, uint64List *boundaries_write,
+bool has_boundaries_append, uint64List *boundaries_append,
 bool has_boundaries_flush, uint64List *boundaries_flush,
 Error **errp)
 {
@@ -557,6 +558,16 @@ void qmp_block_latency_histogram_set(
 }
 }
 
+if (has_boundaries || has_boundaries_append) {
+ret = block_latency_histogram_set(
+stats, BLOCK_ACCT_ZONE_APPEND,
+has_boundaries_append ? boundaries_append : boundaries);
+if (ret) {
+error_setg(errp, "Device '%s' set append write boundaries fail", 
id);
+return;
+}
+}
+
 if (has_boundaries || has_boundaries_flush) {
 ret = block_latency_histogram_set(
 stats, BLOCK_ACCT_FLUSH,
diff --git a/block/qapi.c b/block/qapi.c
index c84147849d..2684484e9d 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -533,27 +533,36 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 
 ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
 ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
+ds->zone_append_bytes = stats->nr_bytes[BLOCK_ACCT_ZONE_APPEND];
 ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
 ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
 ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
+ds->zone_append_operations = stats->nr_ops[BLOCK_ACCT_ZONE_APPEND];
 ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];
 
 ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
 ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
+ds->failed_zone_append_operations =
+stats->failed_ops[BLOCK_ACCT_ZONE_APPEND];
 ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
 ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];
 
 ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
 ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
+ds->invalid_zone_append_operations =
+stats->invalid_ops[BLOCK_ACCT_ZONE_APPEND];
 ds->invalid_flush_operations =
 stats->invalid_ops[BLOCK_ACCT_FLUSH];
 ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];
 
 ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
 ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
+ds->zone_append_merged = stats->merged[BLOCK_ACCT_ZONE_APPEND];
 ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
 ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
 ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
+ds->zone_append_total_time_ns =
+stats->total_time_ns[BLOCK_ACCT_ZONE_APPEND];
 ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
 ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
 ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];
@@ -571,6 +580,7 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 
 TimedAverage *rd = >latency[BLOCK_ACCT_READ];
 TimedAverage *wr = >latency[BLOCK_ACCT_WRITE];
+TimedAverage *zap = >latency[BLOCK_ACCT_ZONE_APPEND];
 TimedAverage *fl = >latency[BLOCK_ACCT_FLUSH];
 
 dev_stats->interval_length = ts->interval_length;
@@ -583,6 +593,10 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 dev_stats->max_wr_latency_ns = timed_average_max(wr);
 dev_stats->avg_wr_latency_ns = timed_average_avg(wr);
 
+dev_stats->min_zone_append_latency_ns = timed_average_min(zap);
+dev_stats->max_zone_append_latency_ns = timed_average_max(zap);
+dev_stats->avg_zone_append_latency_ns = timed_average_avg(zap);
+
 dev_stats->min_flush_latency_ns = timed_average_min(fl);
 dev_stats->max_flush_latency_ns = timed_average_max(fl);
 dev_stats->avg_flush_latency_ns = timed_average_avg(fl);
@@ -591,6 +605,8 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 block_acct_queue_depth(ts, BLOCK_ACCT_READ);
 dev_stats->avg_wr_queue_depth =
 

[PATCH v9 3/5] block: add accounting for zone append operation

2023-03-24 Thread Sam Li
To account for the new zone append write operation on zoned devices, the
BLOCK_ACCT_ZONE_APPEND enum value is introduced as another I/O request type,
alongside the existing ones (read, write, flush).

Signed-off-by: Sam Li 
---
 block/qapi-sysemu.c| 11 ++
 block/qapi.c   | 18 ++
 hw/block/virtio-blk.c  |  4 +++
 include/block/accounting.h |  1 +
 qapi/block-core.json   | 68 --
 qapi/block.json|  4 +++
 6 files changed, 95 insertions(+), 11 deletions(-)

diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c
index 7bd7554150..cec3c1afb4 100644
--- a/block/qapi-sysemu.c
+++ b/block/qapi-sysemu.c
@@ -517,6 +517,7 @@ void qmp_block_latency_histogram_set(
 bool has_boundaries, uint64List *boundaries,
 bool has_boundaries_read, uint64List *boundaries_read,
 bool has_boundaries_write, uint64List *boundaries_write,
+bool has_boundaries_append, uint64List *boundaries_append,
 bool has_boundaries_flush, uint64List *boundaries_flush,
 Error **errp)
 {
@@ -557,6 +558,16 @@ void qmp_block_latency_histogram_set(
 }
 }
 
+if (has_boundaries || has_boundaries_append) {
+ret = block_latency_histogram_set(
+stats, BLOCK_ACCT_ZONE_APPEND,
+has_boundaries_append ? boundaries_append : boundaries);
+if (ret) {
+error_setg(errp, "Device '%s' set append write boundaries fail", 
id);
+return;
+}
+}
+
 if (has_boundaries || has_boundaries_flush) {
 ret = block_latency_histogram_set(
 stats, BLOCK_ACCT_FLUSH,
diff --git a/block/qapi.c b/block/qapi.c
index c84147849d..2684484e9d 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -533,27 +533,36 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 
 ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
 ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
+ds->zone_append_bytes = stats->nr_bytes[BLOCK_ACCT_ZONE_APPEND];
 ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP];
 ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
 ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];
+ds->zone_append_operations = stats->nr_ops[BLOCK_ACCT_ZONE_APPEND];
 ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP];
 
 ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
 ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
+ds->failed_zone_append_operations =
+stats->failed_ops[BLOCK_ACCT_ZONE_APPEND];
 ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];
 ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP];
 
 ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
 ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
+ds->invalid_zone_append_operations =
+stats->invalid_ops[BLOCK_ACCT_ZONE_APPEND];
 ds->invalid_flush_operations =
 stats->invalid_ops[BLOCK_ACCT_FLUSH];
 ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP];
 
 ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
 ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
+ds->zone_append_merged = stats->merged[BLOCK_ACCT_ZONE_APPEND];
 ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP];
 ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
 ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
+ds->zone_append_total_time_ns =
+stats->total_time_ns[BLOCK_ACCT_ZONE_APPEND];
 ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
 ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];
 ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP];
@@ -571,6 +580,7 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 
 TimedAverage *rd = >latency[BLOCK_ACCT_READ];
 TimedAverage *wr = >latency[BLOCK_ACCT_WRITE];
+TimedAverage *zap = >latency[BLOCK_ACCT_ZONE_APPEND];
 TimedAverage *fl = >latency[BLOCK_ACCT_FLUSH];
 
 dev_stats->interval_length = ts->interval_length;
@@ -583,6 +593,10 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 dev_stats->max_wr_latency_ns = timed_average_max(wr);
 dev_stats->avg_wr_latency_ns = timed_average_avg(wr);
 
+dev_stats->min_zone_append_latency_ns = timed_average_min(zap);
+dev_stats->max_zone_append_latency_ns = timed_average_max(zap);
+dev_stats->avg_zone_append_latency_ns = timed_average_avg(zap);
+
 dev_stats->min_flush_latency_ns = timed_average_min(fl);
 dev_stats->max_flush_latency_ns = timed_average_max(fl);
 dev_stats->avg_flush_latency_ns = timed_average_avg(fl);
@@ -591,6 +605,8 @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, 
BlockBackend *blk)
 block_acct_queue_depth(ts, BLOCK_ACCT_READ);
 dev_stats->avg_wr_queue_depth =