Re: [RFC PATCH v2] hw/nvme: Adding Support for namespace management

2021-10-28 Thread Lukasz Maniak
On Thu, Aug 19, 2021 at 06:39:57PM +0530, Naveen Nagar wrote:
> From: Naveen 
> 
> This patch supports namespace management: create and delete operations.
> 
> [...]

Re: [RFC PATCH v2] hw/nvme: Adding Support for namespace management

2021-09-23 Thread Klaus Jensen
On Aug 19 18:39, Naveen Nagar wrote:
> From: Naveen 
> 
> This patch supports namespace management: create and delete operations.
> 
> [...]

So, I think this is a fine approach.

However, I think we should let it simmer until we know if my -object
refactoring will be accepted as a way forward. In that case, I'd like to
only add it there and likely as a new namespace "type" (i.e.
x-nvme-ns-unallocated) that will be replaced with a dynamically created
object depending on CSI.
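
For illustration, an unallocated slot might then be declared up front on
the command line along these lines (syntax purely hypothetical at this
point):

-object x-nvme-ns-unallocated,id=ns5,subsys=subsys0

with the placeholder swapped for a concrete namespace object of the
appropriate type once Namespace Management creates it with a given CSI.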




Re: [RFC PATCH v2] hw/nvme: Adding Support for namespace management

2021-08-19 Thread Klaus Jensen
On Aug 19 18:39, Naveen Nagar wrote:
> From: Naveen 
> 
> This patch supports namespace management: create and delete operations.
> 
> [...]

Woops.

Looks like you sent it to the wrong mailing list - I'd be happy to
comment on this on qemu-{block,devel} instead :)




[RFC PATCH v2] hw/nvme: Adding Support for namespace management

2021-08-19 Thread Naveen Nagar
From: Naveen 

This patch supports namespace management: create and delete operations.

Since v1:
- Modified and moved nvme_ns_identify_common in ns.c file 
- Added check for CSI field in NS management
- Indentation fix in namespace create

This patch has been tested with the command line below; the image files
backing unallocated namespaces are created with a size of 0 GB. ns_create
searches the list of unallocated namespaces, initializes the first one
found, and returns its NSID. A new mandatory property called tnvmcap has
been added to nvme-subsys, and we ensure that the total capacity of the
namespaces created does not exceed tnvmcap.

-device nvme-subsys,id=subsys0,tnvmcap=8
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme,serial=bar,id=nvme1,subsys=subsys0
-drive id=ns1,file=ns1.img,if=none
-device nvme-ns,drive=ns1,bus=nvme0,nsid=1,zoned=false,shared=true
-drive id=ns2,file=ns2.img,if=none
-device nvme-ns,drive=ns2,bus=nvme0,nsid=2,zoned=false,shared=true
-drive id=ns3,file=ns3.img,if=none
-device nvme-ns,drive=ns3,bus=nvme0,nsid=3,zoned=false,shared=true
-drive id=ns4,file=ns4.img,if=none
-device nvme-ns,drive=ns4,bus=nvme0,nsid=4,zoned=false,shared=true
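
For reference, a sketch of how a guest could then exercise the new command
with nvme-cli (the NSID, controller id, and sizes below are illustrative
only; create-ns reports the NSID actually assigned):

# create a 1 GiB namespace (2097152 blocks of 512 B, LBA format 0);
# NSZE must equal NCAP, since thin provisioning is rejected
nvme create-ns /dev/nvme0 --nsze=2097152 --ncap=2097152 --flbas=0
# attach the new namespace to a controller so it becomes active
nvme attach-ns /dev/nvme0 --namespace-id=5 --controllers=0
# and delete it again when done
nvme delete-ns /dev/nvme0 --namespace-id=5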

Please review and suggest if any changes are required.

Signed-off-by: Naveen Nagar 
Reviewed-by: Klaus Jensen 
  
---
 hw/nvme/ctrl.c   | 237 +--
 hw/nvme/ns.c |  61 ++-
 hw/nvme/nvme.h   |   7 +-
 hw/nvme/subsys.c |   3 +
 include/block/nvme.h |  18 +++-
 5 files changed, 285 insertions(+), 41 deletions(-)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 6baf9e0420..992aaa7d02 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -219,6 +219,7 @@ static const uint32_t nvme_cse_acs[256] = {
 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
+[NVME_ADM_CMD_NS_MANAGEMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_NS_ATTACHMENT]= NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
 [NVME_ADM_CMD_FORMAT_NVM]   = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
 };
@@ -4450,11 +4451,19 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
 NvmeNamespace *ns;
 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
 uint32_t nsid = le32_to_cpu(c->nsid);
+NvmeIdNs *id_ns = NULL;
+uint16_t ret;
 
 trace_pci_nvme_identify_ns(nsid);
 
-if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
+if (!nvme_nsid_valid(n, nsid)) {
 return NVME_INVALID_NSID | NVME_DNR;
+} else if (nsid == NVME_NSID_BROADCAST) {
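+/* Broadcast NSID: report the capabilities common to all namespaces. */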
+id_ns = g_new0(NvmeIdNs, 1);
+nvme_ns_identify_common(id_ns);
+ret = nvme_c2h(n, (uint8_t *)id_ns, sizeof(NvmeIdNs), req);
+g_free(id_ns);
+return ret;
 }
 
 ns = nvme_ns(n, nsid);
@@ -5184,6 +5193,200 @@ static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
 }
 }
 
+static int nvme_blk_truncate(BlockBackend *blk, size_t len, Error **errp)
+{
+int ret;
+uint64_t perm, shared_perm;
+
+blk_get_perm(blk, &perm, &shared_perm);
+
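+/* Temporarily widen our permissions with BLK_PERM_RESIZE so the truncate below is allowed. */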
+ret = blk_set_perm(blk, perm | BLK_PERM_RESIZE, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_truncate(blk, len, false, PREALLOC_MODE_OFF, 0, errp);
+if (ret < 0) {
+return ret;
+}
+
+ret = blk_set_perm(blk, perm, shared_perm, errp);
+if (ret < 0) {
+return ret;
+}
+
+return 0;
+}
+
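+/* Returns the lowest NSID not in use by this controller or any subsystem namespace, or 0 if none is free. */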
+static uint32_t nvme_allocate_nsid(NvmeCtrl *n)
+{
+for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+if (nvme_ns(n, i) || nvme_subsys_ns(n->subsys, i)) {
+continue;
+}
+
+return i;
+}
+
+return 0;
+}
+
+static uint16_t nvme_namespace_create(NvmeCtrl *n, NvmeRequest *req)
+{
+uint32_t ret;
+NvmeIdNs id_ns_host;
+NvmeSubsystem *subsys = n->subsys;
+Error *err = NULL;
+uint8_t flbas_host;
+uint64_t ns_size;
+int lba_index;
+NvmeNamespace *ns;
+NvmeCtrl *ctrl;
+NvmeIdNs *id_ns;
+
+ret = nvme_h2c(n, (uint8_t *)&id_ns_host, sizeof(id_ns_host), req);
+if (ret) {
+return ret;
+}
+
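+/* NCAP < NSZE would request thin provisioning, which is not supported. */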
+if (id_ns_host.ncap < id_ns_host.nsze) {
+return NVME_THIN_PROVISION_NO_SUPP | NVME_DNR;
+} else if (id_ns_host.ncap > id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (!id_ns_host.nsze) {
+return NVME_INVALID_FIELD | NVME_DNR;
+}
+
+if (QSLIST_EMPTY(&subsys->unallocated_namespaces)) {
+return NVME_NS_ID_UNAVAILABLE;
+}
+
+ns = QSLIST_FIRST(&subsys->unallocated_namespaces);
+id_ns = &ns->id_ns;
+flbas_host = id_ns_host.flbas & 0xf;
+
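+/* The requested LBA format (FLBAS bits 3:0) must be one the namespace supports. */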
+if (flbas_host > id_ns->nlbaf) {
+return NVME_INVALID_FORMAT | NVME_DNR;
+}
+
+ret = nvme_ns_setup(ns, &err);
+if (ret) {
+return ret;
+}
+
+id_ns->flbas = id_ns_host.flbas;
+id_ns->dps