From: Klaus Jensen <k.jen...@samsung.com>

The nvme_dma function doesn't just do DMA (QEMUSGList-based) memory transfers;
it also handles QEMUIOVector copies.

Introduce the NvmeTxDirection enum and rename nvme_dma to nvme_tx. Remove
the mapping of PRPs/SGLs from nvme_tx and instead assert that they have
already been mapped; this allows more fine-grained use. Also expose
nvme_tx_{qsg,iov} variants in preparation for end-to-end data protection
support.
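
A controller-to-host transfer then becomes two explicit steps, e.g.

    status = nvme_map_dptr(n, len, req);
    if (status) {
        return status;
    }

    return nvme_tx(n, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE, req);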

Add new, better-named helpers, nvme_{c2h,h2c}, that do both PRP/SGL
mapping and transfer.
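
nvme_c2h is exactly the two-step sequence sketched above, and for existing
callers the conversion is mechanical; the set-timestamp handler, for
example, goes from

    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                   DMA_DIRECTION_TO_DEVICE, req);

to

    ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);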

Signed-off-by: Klaus Jensen <k.jen...@samsung.com>
---
 hw/block/nvme.c | 133 +++++++++++++++++++++++++++++-------------------
 1 file changed, 80 insertions(+), 53 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e4922c37c94d..8d580c121bcc 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -682,48 +682,86 @@ static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
     }
 }
 
-static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                         DMADirection dir, NvmeRequest *req)
+typedef enum NvmeTxDirection {
+    NVME_TX_DIRECTION_TO_DEVICE   = 0,
+    NVME_TX_DIRECTION_FROM_DEVICE = 1,
+} NvmeTxDirection;
+
+static uint16_t nvme_tx_qsg(uint8_t *ptr, uint32_t len, QEMUSGList *qsg,
+                            NvmeTxDirection dir)
 {
-    uint16_t status = NVME_SUCCESS;
+    uint64_t residual;
+
+    if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+        residual = dma_buf_write(ptr, len, qsg);
+    } else {
+        residual = dma_buf_read(ptr, len, qsg);
+    }
+
+    if (unlikely(residual)) {
+        trace_pci_nvme_err_invalid_dma();
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    return NVME_SUCCESS;
+}
+
+static uint16_t nvme_tx_iov(uint8_t *ptr, uint32_t len, QEMUIOVector *iov,
+                            NvmeTxDirection dir)
+{
+    size_t bytes;
+
+    if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+        bytes = qemu_iovec_to_buf(iov, 0, ptr, len);
+    } else {
+        bytes = qemu_iovec_from_buf(iov, 0, ptr, len);
+    }
+
+    if (unlikely(bytes != len)) {
+        trace_pci_nvme_err_invalid_dma();
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    return NVME_SUCCESS;
+}
+
+static uint16_t nvme_tx(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                        NvmeTxDirection dir, NvmeRequest *req)
+{
+    /* assert that exactly one of qsg and iov carries data */
+    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));
+
+    if (req->qsg.nsg > 0) {
+        return nvme_tx_qsg(ptr, len, &req->qsg, dir);
+    }
+
+    return nvme_tx_iov(ptr, len, &req->iov, dir);
+}
+
+static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                                NvmeRequest *req)
+{
+    uint16_t status;
 
     status = nvme_map_dptr(n, len, req);
     if (status) {
         return status;
     }
 
-    /* assert that only one of qsg and iov carries data */
-    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));
+    return nvme_tx(n, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE, req);
+}
 
-    if (req->qsg.nsg > 0) {
-        uint64_t residual;
+static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                                NvmeRequest *req)
+{
+    uint16_t status;
 
-        if (dir == DMA_DIRECTION_TO_DEVICE) {
-            residual = dma_buf_write(ptr, len, &req->qsg);
-        } else {
-            residual = dma_buf_read(ptr, len, &req->qsg);
-        }
-
-        if (unlikely(residual)) {
-            trace_pci_nvme_err_invalid_dma();
-            status = NVME_INVALID_FIELD | NVME_DNR;
-        }
-    } else {
-        size_t bytes;
-
-        if (dir == DMA_DIRECTION_TO_DEVICE) {
-            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
-        } else {
-            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
-        }
-
-        if (unlikely(bytes != len)) {
-            trace_pci_nvme_err_invalid_dma();
-            status = NVME_INVALID_FIELD | NVME_DNR;
-        }
+    status = nvme_map_dptr(n, len, req);
+    if (status) {
+        return status;
     }
 
-    return status;
+    return nvme_tx(n, ptr, len, NVME_TX_DIRECTION_TO_DEVICE, req);
 }
 
 static void nvme_post_cqes(void *opaque)
@@ -1025,8 +1063,7 @@ static void nvme_compare_cb(void *opaque, int ret)
 
     buf = g_malloc(ctx->len);
 
-    status = nvme_dma(nvme_ctrl(req), buf, ctx->len, DMA_DIRECTION_TO_DEVICE,
-                      req);
+    status = nvme_h2c(nvme_ctrl(req), buf, ctx->len, req);
     if (status) {
         req->status = status;
         goto out;
@@ -1062,8 +1099,7 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
         NvmeDsmRange range[nr];
         uintptr_t *discards = (uintptr_t *)&req->opaque;
 
-        status = nvme_dma(n, (uint8_t *)range, sizeof(range),
-                          DMA_DIRECTION_TO_DEVICE, req);
+        status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
         if (status) {
             return status;
         }
@@ -1498,8 +1534,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
         nvme_clear_events(n, NVME_AER_TYPE_SMART);
     }
 
-    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
 }
 
 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
@@ -1517,8 +1552,7 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
     strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
     trans_len = MIN(sizeof(fw_log) - off, buf_len);
 
-    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
 }
 
 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
@@ -1538,8 +1572,7 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
     memset(&errlog, 0x0, sizeof(errlog));
     trans_len = MIN(sizeof(errlog) - off, buf_len);
 
-    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
 }
 
 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
@@ -1702,8 +1735,7 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
 {
     trace_pci_nvme_identify_ctrl();
 
-    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
 }
 
 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
@@ -1726,8 +1758,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
         id_ns = &ns->id_ns;
     }
 
-    return nvme_dma(n, (uint8_t *)id_ns, sizeof(NvmeIdNs),
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)id_ns, sizeof(NvmeIdNs), req);
 }
 
 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
@@ -1761,8 +1792,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
             break;
         }
     }
-    ret = nvme_dma(n, (uint8_t *)list, data_len, DMA_DIRECTION_FROM_DEVICE,
-                   req);
+    ret = nvme_c2h(n, (uint8_t *)list, data_len, req);
     g_free(list);
     return ret;
 }
@@ -1804,8 +1834,7 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
     ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
     stl_be_p(&ns_descrs->uuid.v, nsid);
 
-    return nvme_dma(n, list, NVME_IDENTIFY_DATA_SIZE,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, list, NVME_IDENTIFY_DATA_SIZE, req);
 }
 
 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
@@ -1878,8 +1907,7 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
 {
     uint64_t timestamp = nvme_get_timestamp(n);
 
-    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
 }
 
 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
@@ -2026,8 +2054,7 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
     uint16_t ret;
     uint64_t timestamp;
 
-    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
-                   DMA_DIRECTION_TO_DEVICE, req);
+    ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
     if (ret != NVME_SUCCESS) {
         return ret;
     }
-- 
2.29.2

