Since we verify that the "ready_delay" parameter has to be smaller than CRWMT, we know that the namespace will always become ready. Therefore, the "Namespace Not Ready" status code will never have the DNR bit set.
Add a new parameter "never_ready" that can be used to emulate a namespace that never gets ready, such that the DNR bit gets set after CRWMT amount of time. Signed-off-by: Niklas Cassel <niklas.cas...@wdc.com> --- hw/nvme/ctrl.c | 28 +++++++++++++++++++++++++++- hw/nvme/ns.c | 1 + hw/nvme/nvme.h | 2 ++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 66d96714c3..5ec22ff13d 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -134,6 +134,12 @@ * before being marked ready. Only applicable if CC.CRIME is set by the user. * The value is in units of 500 milliseconds (to be consistent with `crwmt`). * + * - `never_ready` + * This parameter specifies that a namespace should never be marked as ready. + * When `crwmt` amount of time has passed after enabling the controller, + * status code "Namespace Not Ready" will have the DNR bit set. If specified + * together with `ready_delay`, `never_ready` will take precedence. + * * Setting `zoned` to true selects Zoned Command Set at the namespace. 
* In this case, the following namespace properties are available to configure * zoned operation: @@ -4118,6 +4124,14 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) return status; } +static bool nvme_ready_has_passed_timeout(NvmeCtrl *n) +{ + int64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + int64_t elapsed_time = current_time - n->cc_enable_timestamp; + + return elapsed_time > n->params.crwmt * 500; +} + static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns; @@ -4164,7 +4178,11 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) } if (!(ns->id_indep_ns.nstat & NVME_NSTAT_NRDY)) { - return NVME_NS_NOT_READY; + uint16_t ret = NVME_NS_NOT_READY; + if (ns->params.never_ready && nvme_ready_has_passed_timeout(n)) { + ret |= NVME_DNR; + } + return ret; } if (ns->status) { @@ -5537,6 +5555,10 @@ static void nvme_set_ready_or_start_timer(NvmeCtrl *n, NvmeNamespace *ns) { int64_t expire_time; + if (ns->params.never_ready) { + return; + } + if (!NVME_CC_CRIME(ldl_le_p(&n->bar.cc)) || ns->params.ready_delay == 0) { ns->id_indep_ns.nstat |= NVME_NSTAT_NRDY; return; @@ -5979,6 +6001,7 @@ static void nvme_ctrl_reset(NvmeCtrl *n) n->aer_queued = 0; n->outstanding_aers = 0; n->qs_created = false; + n->cc_enable_timestamp = 0; } static void nvme_ctrl_shutdown(NvmeCtrl *n) @@ -6000,6 +6023,8 @@ static void nvme_ctrl_shutdown(NvmeCtrl *n) nvme_ns_shutdown(ns); } + + n->cc_enable_timestamp = 0; } static void nvme_ctrl_per_ns_action_on_start(NvmeCtrl *n) @@ -6109,6 +6134,7 @@ static int nvme_start_ctrl(NvmeCtrl *n) NVME_CAP_SET_TO(cap, new_cap_timeout); stq_le_p(&n->bar.cap, cap); + n->cc_enable_timestamp = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); n->page_bits = page_bits; n->page_size = page_size; n->max_prp_ents = n->page_size / sizeof(uint64_t); diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index c4e9f0e5c8..89c31658de 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -658,6 +658,7 @@ static Property nvme_ns_props[] = { 
DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default, false), DEFINE_PROP_UINT16("ready_delay", NvmeNamespace, params.ready_delay, 0), + DEFINE_PROP_BOOL("never_ready", NvmeNamespace, params.never_ready, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index c9934d0097..6ff9725f21 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -122,6 +122,7 @@ typedef struct NvmeNamespaceParams { uint64_t zrwafg; uint16_t ready_delay; + bool never_ready; } NvmeNamespaceParams; typedef struct NvmeNamespace { @@ -436,6 +437,7 @@ typedef struct NvmeCtrl { int cq_pending; uint64_t host_timestamp; /* Timestamp sent by the host */ uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */ + uint64_t cc_enable_timestamp; /* QEMU clock time */ uint64_t starttime_ms; uint16_t temperature; uint8_t smart_critical_warning; -- 2.36.1