[PATCH v3 01/18] hw/block/nvme: bump spec data structures to v1.3

2020-07-05 Thread Klaus Jensen
From: Klaus Jensen 

Add missing fields in the Identify Controller and Identify Namespace
data structures to bring them in line with NVMe v1.3.

This also adds data structures and defines for SGL support which
requires a couple of trivial changes to the nvme block driver as well.

Signed-off-by: Klaus Jensen 
Acked-by: Fam Zheng 
Reviewed-by: Maxim Levitsky 
---
 block/nvme.c |  18 ++---
 hw/block/nvme.c  |  12 ++--
 include/block/nvme.h | 156 ++-
 3 files changed, 154 insertions(+), 32 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index 374e26891573..c1c4c07ac6cc 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -518,7 +518,7 @@ static void nvme_identify(BlockDriverState *bs, int 
namespace, Error **errp)
 error_setg(errp, "Cannot map buffer for DMA");
 goto out;
 }
-cmd.prp1 = cpu_to_le64(iova);
+cmd.dptr.prp1 = cpu_to_le64(iova);
 
 if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
 error_setg(errp, "Failed to identify controller");
@@ -629,7 +629,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error 
**errp)
 }
 cmd = (NvmeCmd) {
 .opcode = NVME_ADM_CMD_CREATE_CQ,
-.prp1 = cpu_to_le64(q->cq.iova),
+.dptr.prp1 = cpu_to_le64(q->cq.iova),
 .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
 .cdw11 = cpu_to_le32(0x3),
 };
@@ -640,7 +640,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error 
**errp)
 }
 cmd = (NvmeCmd) {
 .opcode = NVME_ADM_CMD_CREATE_SQ,
-.prp1 = cpu_to_le64(q->sq.iova),
+.dptr.prp1 = cpu_to_le64(q->sq.iova),
 .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
 .cdw11 = cpu_to_le32(0x1 | (n << 16)),
 };
@@ -988,16 +988,16 @@ try_map:
 case 0:
 abort();
 case 1:
-cmd->prp1 = pagelist[0];
-cmd->prp2 = 0;
+cmd->dptr.prp1 = pagelist[0];
+cmd->dptr.prp2 = 0;
 break;
 case 2:
-cmd->prp1 = pagelist[0];
-cmd->prp2 = pagelist[1];
+cmd->dptr.prp1 = pagelist[0];
+cmd->dptr.prp2 = pagelist[1];
 break;
 default:
-cmd->prp1 = pagelist[0];
-cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
+cmd->dptr.prp1 = pagelist[0];
+cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
 break;
 }
 trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 1aee042d4cb2..71b388aa0e20 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -397,8 +397,8 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, 
NvmeCmd *cmd,
 NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
 uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
 uint64_t slba = le64_to_cpu(rw->slba);
-uint64_t prp1 = le64_to_cpu(rw->prp1);
-uint64_t prp2 = le64_to_cpu(rw->prp2);
+uint64_t prp1 = le64_to_cpu(rw->dptr.prp1);
+uint64_t prp2 = le64_to_cpu(rw->dptr.prp2);
 
 uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
 uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
@@ -795,8 +795,8 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
 
 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
 {
-uint64_t prp1 = le64_to_cpu(cmd->prp1);
-uint64_t prp2 = le64_to_cpu(cmd->prp2);
+uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
 
 uint64_t timestamp = nvme_get_timestamp(n);
 
@@ -834,8 +834,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, 
NvmeCmd *cmd)
 {
 uint16_t ret;
 uint64_t timestamp;
-uint64_t prp1 = le64_to_cpu(cmd->prp1);
-uint64_t prp2 = le64_to_cpu(cmd->prp2);
+uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
+uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
 
 ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
 sizeof(timestamp), prp1, prp2);
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 1720ee1d5158..2a80d2a7ed89 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -377,15 +377,53 @@ enum NvmePmrmscMask {
 #define NVME_PMRMSC_SET_CBA(pmrmsc, val)   \
 (pmrmsc |= (uint64_t)(val & PMRMSC_CBA_MASK) << PMRMSC_CBA_SHIFT)
 
+enum NvmeSglDescriptorType {
+NVME_SGL_DESCR_TYPE_DATA_BLOCK  = 0x0,
+NVME_SGL_DESCR_TYPE_BIT_BUCKET  = 0x1,
+NVME_SGL_DESCR_TYPE_SEGMENT = 0x2,
+NVME_SGL_DESCR_TYPE_LAST_SEGMENT= 0x3,
+NVME_SGL_DESCR_TYPE_KEYED_DATA_BLOCK= 0x4,
+
+NVME_SGL_DESCR_TYPE_VENDOR_SPECIFIC = 0xf,
+};
+
+enum NvmeSglDescriptorSubtype {
+NVME_SGL_DESCR_SUBTYPE_ADDRESS = 0x0,
+};
+
+typedef struct NvmeSglDescriptor {
+uint64_t addr;
+uint32_t len;
+uint8_t  rsvd[3];
+uint8_t  type;
+} NvmeSglDescriptor;
+
+#define NVME_SGL_TYPE(type) ((type >> 4) & 0xf)
+#define NVME_SGL_SUBTYPE(type)  (type & 0xf)
+
+typ

Re: [PATCH v3 01/18] hw/block/nvme: bump spec data structures to v1.3

2020-07-08 Thread Dmitry Fomichev
Looks good with a small nit (see below),

Reviewed-by: Dmitry Fomichev 

> 
On Mon, 2020-07-06 at 08:12 +0200, Klaus Jensen wrote:
> From: Klaus Jensen 
> 
> Add missing fields in the Identify Controller and Identify Namespace
> data structures to bring them in line with NVMe v1.3.
> 
> This also adds data structures and defines for SGL support which
> requires a couple of trivial changes to the nvme block driver as well.
> 
> Signed-off-by: Klaus Jensen 
> Acked-by: Fam Zheng 
> Reviewed-by: Maxim Levitsky 
> ---
>  block/nvme.c |  18 ++---
>  hw/block/nvme.c  |  12 ++--
>  include/block/nvme.h | 156 ++-
>  3 files changed, 154 insertions(+), 32 deletions(-)
> 
> diff --git a/block/nvme.c b/block/nvme.c
> index 374e26891573..c1c4c07ac6cc 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c
> @@ -518,7 +518,7 @@ static void nvme_identify(BlockDriverState *bs, int 
> namespace, Error **errp)
>  error_setg(errp, "Cannot map buffer for DMA");
>  goto out;
>  }
> -cmd.prp1 = cpu_to_le64(iova);
> +cmd.dptr.prp1 = cpu_to_le64(iova);
>  
>  if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
>  error_setg(errp, "Failed to identify controller");
> @@ -629,7 +629,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error 
> **errp)
>  }
>  cmd = (NvmeCmd) {
>  .opcode = NVME_ADM_CMD_CREATE_CQ,
> -.prp1 = cpu_to_le64(q->cq.iova),
> +.dptr.prp1 = cpu_to_le64(q->cq.iova),
>  .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
>  .cdw11 = cpu_to_le32(0x3),
>  };
> @@ -640,7 +640,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error 
> **errp)
>  }
>  cmd = (NvmeCmd) {
>  .opcode = NVME_ADM_CMD_CREATE_SQ,
> -.prp1 = cpu_to_le64(q->sq.iova),
> +.dptr.prp1 = cpu_to_le64(q->sq.iova),
>  .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
>  .cdw11 = cpu_to_le32(0x1 | (n << 16)),
>  };
> @@ -988,16 +988,16 @@ try_map:
>  case 0:
>  abort();
>  case 1:
> -cmd->prp1 = pagelist[0];
> -cmd->prp2 = 0;
> +cmd->dptr.prp1 = pagelist[0];
> +cmd->dptr.prp2 = 0;
>  break;
>  case 2:
> -cmd->prp1 = pagelist[0];
> -cmd->prp2 = pagelist[1];
> +cmd->dptr.prp1 = pagelist[0];
> +cmd->dptr.prp2 = pagelist[1];
>  break;
>  default:
> -cmd->prp1 = pagelist[0];
> -cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
> +cmd->dptr.prp1 = pagelist[0];
> +cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
>  break;
>  }
>  trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 1aee042d4cb2..71b388aa0e20 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -397,8 +397,8 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, 
> NvmeCmd *cmd,
>  NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
>  uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
>  uint64_t slba = le64_to_cpu(rw->slba);
> -uint64_t prp1 = le64_to_cpu(rw->prp1);
> -uint64_t prp2 = le64_to_cpu(rw->prp2);
> +uint64_t prp1 = le64_to_cpu(rw->dptr.prp1);
> +uint64_t prp2 = le64_to_cpu(rw->dptr.prp2);
>  
>  uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
>  uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
> @@ -795,8 +795,8 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl 
> *n)
>  
>  static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
>  {
> -uint64_t prp1 = le64_to_cpu(cmd->prp1);
> -uint64_t prp2 = le64_to_cpu(cmd->prp2);
> +uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
> +uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>  
>  uint64_t timestamp = nvme_get_timestamp(n);
>  
> @@ -834,8 +834,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, 
> NvmeCmd *cmd)
>  {
>  uint16_t ret;
>  uint64_t timestamp;
> -uint64_t prp1 = le64_to_cpu(cmd->prp1);
> -uint64_t prp2 = le64_to_cpu(cmd->prp2);
> +uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
> +uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>  
>  ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
>  sizeof(timestamp), prp1, prp2);
> diff --git a/include/block/nvme.h b/include/block/nvme.h
> index 1720ee1d5158..2a80d2a7ed89 100644
> --- a/include/block/nvme.h
> +++ b/include/block/nvme.h
> @@ -377,15 +377,53 @@ enum NvmePmrmscMask {
>  #define NVME_PMRMSC_SET_CBA(pmrmsc, val)   \
>  (pmrmsc |= (uint64_t)(val & PMRMSC_CBA_MASK) << PMRMSC_CBA_SHIFT)
>  
> +enum NvmeSglDescriptorType {
> +NVME_SGL_DESCR_TYPE_DATA_BLOCK  = 0x0,
> +NVME_SGL_DESCR_TYPE_BIT_BUCKET  = 0x1,
> +NVME_SGL_DESCR_TYPE_SEGMENT = 0x2,
> +NVME_SGL_DESCR_TYPE_LAST_SEGMENT= 0x3,
> +NVME_SGL_DESCR_TYPE_KEYED_DATA_BLOCK= 0x4,
>

Re: [PATCH v3 01/18] hw/block/nvme: bump spec data structures to v1.3

2020-07-08 Thread Klaus Jensen
On Jul  8 19:19, Dmitry Fomichev wrote:
> Looks good with a small nit (see below),
> 
> Reviewed-by: Dmitry Fomichev 
> 
> > 
> On Mon, 2020-07-06 at 08:12 +0200, Klaus Jensen wrote:
> > +#define NVME_TEMP_TMPTH(temp) ((temp >>  0) & 0xffff)
> 
> There is an extra space after temp >>
> 

Good catch! I won't repost for this ;) - but I'll fix it and add it in
the qemu-nvme tree.



RE: [PATCH v3 01/18] hw/block/nvme: bump spec data structures to v1.3

2020-07-08 Thread Dmitry Fomichev

> -Original Message-
> From: Klaus Jensen 
> Sent: Wednesday, July 8, 2020 5:24 PM
> To: Dmitry Fomichev 
> Cc: qemu-block@nongnu.org; qemu-de...@nongnu.org; f...@euphon.net;
> javier.g...@samsung.com; kw...@redhat.com; mre...@redhat.com;
> mlevi...@redhat.com; phi...@redhat.com; kbu...@kernel.org;
> k.jen...@samsung.com
> Subject: Re: [PATCH v3 01/18] hw/block/nvme: bump spec data structures to
> v1.3
> 
> On Jul  8 19:19, Dmitry Fomichev wrote:
> > Looks good with a small nit (see below),
> >
> > Reviewed-by: Dmitry Fomichev 
> >
> > >
> > On Mon, 2020-07-06 at 08:12 +0200, Klaus Jensen wrote:
> > > +#define NVME_TEMP_TMPTH(temp) ((temp >>  0) & 0xffff)
> >
> > There is an extra space after temp >>
> >
> 
> Good catch! I won't repost for this ;) - but I'll fix it and add it in
> the qemu-nvme tree.

Yes, no need to repost :) Thanks for reviewing our ZNS series! I am working
on addressing your comments and I am also starting to review your
"AIO and address mapping refactoring" patchset.

Cheers,
Dmitry


Re: [PATCH v3 01/18] hw/block/nvme: bump spec data structures to v1.3

2020-07-08 Thread Klaus Jensen
On Jul  8 21:47, Dmitry Fomichev wrote:
> 
> > -Original Message-
> > From: Klaus Jensen 
> > Sent: Wednesday, July 8, 2020 5:24 PM
> > To: Dmitry Fomichev 
> > Cc: qemu-block@nongnu.org; qemu-de...@nongnu.org; f...@euphon.net;
> > javier.g...@samsung.com; kw...@redhat.com; mre...@redhat.com;
> > mlevi...@redhat.com; phi...@redhat.com; kbu...@kernel.org;
> > k.jen...@samsung.com
> > Subject: Re: [PATCH v3 01/18] hw/block/nvme: bump spec data structures to
> > v1.3
> > 
> > On Jul  8 19:19, Dmitry Fomichev wrote:
> > > Looks good with a small nit (see below),
> > >
> > > Reviewed-by: Dmitry Fomichev 
> > >
> > > >
> > > On Mon, 2020-07-06 at 08:12 +0200, Klaus Jensen wrote:
> > > > +#define NVME_TEMP_TMPTH(temp) ((temp >>  0) & 0xffff)
> > >
> > > There is an extra space after temp >>
> > >
> > 
> > Good catch! I won't repost for this ;) - but I'll fix it and add it in
> > the qemu-nvme tree.
> 
> Yes, no need to repost :) Thanks for reviewing our ZNS series! I am working
> on addressing your comments and I am also starting to review your
> "AIO and address mapping refactoring" patchset.
> 

Since I think this patchset gets merged on nvme-next today, there is a
v2 on the way for that set.