Re: [PATCH v3 2/5] qla2xxx_nvmet: Add files for FC-NVMe Target support

2018-10-26 Thread Madhani, Himanshu
Hi James, 

> On Oct 25, 2018, at 11:23 AM, James Smart  wrote:
> 
> External Email
> 
> On 9/28/2018 3:46 PM, Himanshu Madhani wrote:
>> + .target_features= NVMET_FCTGTFEAT_READDATA_RSP |
>> + NVMET_FCTGTFEAT_CMD_IN_ISR |
>> + NVMET_FCTGTFEAT_OPDONE_IN_ISR,
>> 
> 
> Himanshu,
> 
> I'm looking at these but had a quick question.   Did you really want the
> IN_ISR flags set ?  they schedule processing vs calling the nvmet
> routines inline. The intent was the queueing was only needed if in the
> hard isr routine. Last contact I had with your group said you were in
> soft isr routines and inline calling would be used.  I'm asking because
> I had intended to remove these flags/features.
> 
> -- james
> 

Looks like we missed removing these flags when we rebased the code on
4.20/scsi-queue. After the original submission, where this flag was present,
we removed the flag in our internal testing, but the code sent out after the
rebase missed that update. I'll send v4 with the flag removed.

Please let me know if you have any other comments that I can incorporate in v4.

Thanks,
- Himanshu



Re: [PATCH v3 2/5] qla2xxx_nvmet: Add files for FC-NVMe Target support

2018-10-25 Thread James Smart




On 9/28/2018 3:46 PM, Himanshu Madhani wrote:

+   .target_features= NVMET_FCTGTFEAT_READDATA_RSP |
+   NVMET_FCTGTFEAT_CMD_IN_ISR |
+   NVMET_FCTGTFEAT_OPDONE_IN_ISR,



Himanshu,

I'm looking at these but had a quick question.   Did you really want the 
IN_ISR flags set ?  they schedule processing vs calling the nvmet 
routines inline. The intent was the queueing was only needed if in the 
hard isr routine. Last contact I had with your group said you were in 
soft isr routines and inline calling would be used.  I'm asking because 
I had intended to remove these flags/features.


-- james



[PATCH v3 2/5] qla2xxx_nvmet: Add files for FC-NVMe Target support

2018-09-28 Thread Himanshu Madhani
From: Anil Gurumurthy 

This patch adds files to enable NVMe Target Support

Signed-off-by: Anil Gurumurthy 
Signed-off-by: Giridhar Malavali 
Signed-off-by: Darren Trapp 
Signed-off-by: Himanshu Madhani 
---
 drivers/scsi/qla2xxx/qla_nvmet.c | 797 +++
 drivers/scsi/qla2xxx/qla_nvmet.h | 129 +++
 2 files changed, 926 insertions(+)
 create mode 100644 drivers/scsi/qla2xxx/qla_nvmet.c
 create mode 100644 drivers/scsi/qla2xxx/qla_nvmet.h

diff --git a/drivers/scsi/qla2xxx/qla_nvmet.c b/drivers/scsi/qla2xxx/qla_nvmet.c
new file mode 100644
index ..9d9a0ed68501
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvmet.c
@@ -0,0 +1,797 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include 
+#include 
+#include 
+#include 
+
+#include "qla_nvme.h"
+#include "qla_nvmet.h"
+
+static void qla_nvmet_send_resp_ctio(struct qla_qpair *qpair,
+   struct qla_nvmet_cmd *cmd, struct nvmefc_tgt_fcp_req *rsp);
+static void qla_nvmet_send_abts_ctio(struct scsi_qla_host *vha,
+   struct abts_recv_from_24xx *abts, bool flag);
+
+/*
+ * qla_nvmet_targetport_delete -
+ * Invoked by the nvmet transport to indicate that the target port has
+ * been deleted. Registered in the nvmet_fc_target_template.
+ */
+static void
+qla_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+   struct qla_nvmet_tgtport *tport = targetport->private;
+
+   /* Compile-time no-op when FC NVMe target support is not built in */
+   if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+   return;
+
+   /*
+    * Wake whoever is waiting on tport->tport_del — presumably the
+    * unregister path blocks on this completion; confirm against the
+    * tport teardown code.
+    */
+   complete(&tport->tport_del);
+}
+
+/*
+ * qlt_nvmet_ls_done -
+ * Invoked by the firmware interface to indicate the completion
+ * of an LS cmd.
+ * Notifies the nvmet layer via rsp->done(), then frees all resources
+ * associated with the LS cmd (command buffer, command struct, SRB).
+ */
+static void qlt_nvmet_ls_done(void *ptr, int res)
+{
+   struct srb *sp = ptr;
+   struct srb_iocb   *nvme = &sp->u.iocb_cmd;
+   struct nvmefc_tgt_ls_req *rsp = nvme->u.nvme.desc;
+   struct qla_nvmet_cmd *tgt_cmd = nvme->u.nvme.cmd;
+
+   /* Compile-time no-op when FC NVMe target support is not built in */
+   if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+   return;
+
+   ql_dbg(ql_dbg_nvme, sp->vha, 0x11001,
+   "%s: sp %p vha %p, rsp %p, cmd %p\n", __func__,
+   sp, sp->vha, nvme->u.nvme.desc, nvme->u.nvme.cmd);
+
+   /*
+    * Complete the LS request back to the nvmet layer BEFORE freeing
+    * tgt_cmd: rsp is embedded in tgt_cmd (see qla_nvmet_ls_rsp), so the
+    * callback must not run after the memory is released.
+    */
+   rsp->done(rsp);
+
+   /* Free tgt_cmd (and its payload buffer), then return the SRB */
+   kfree(tgt_cmd->buf);
+   kfree(tgt_cmd);
+   qla2x00_rel_sp(sp);
+}
+
+/*
+ * qla_nvmet_ls_rsp -
+ * Invoked by the nvmet transport to complete an LS request.
+ * Prepares and sends a response CTIO to the firmware; completion is
+ * delivered through qlt_nvmet_ls_done(), which frees tgt_cmd and the SRB.
+ *
+ * Returns 0 on success, a negative errno on failure (per the
+ * nvmet_fc_target_template xmt_ls_rsp contract).
+ */
+static int
+qla_nvmet_ls_rsp(struct nvmet_fc_target_port *tgtport,
+   struct nvmefc_tgt_ls_req *rsp)
+{
+   struct qla_nvmet_cmd *tgt_cmd =
+   container_of(rsp, struct qla_nvmet_cmd, cmd.ls_req);
+   struct scsi_qla_host *vha = tgt_cmd->vha;
+   struct srb_iocb   *nvme;
+   int rval = QLA_FUNCTION_FAILED;
+   srb_t *sp;
+
+   ql_dbg(ql_dbg_nvme + ql_dbg_buffer, vha, 0x11002,
+   "Dumping the NVMET-LS response buffer\n");
+   ql_dump_buffer(ql_dbg_nvme + ql_dbg_buffer, vha, 0x2075,
+   (uint8_t *)rsp->rspbuf, rsp->rsplen);
+
+   /* Alloc SRB structure */
+   sp = qla2x00_get_sp(vha, NULL, GFP_ATOMIC);
+   if (!sp) {
+   ql_log(ql_log_info, vha, 0x11003, "Failed to allocate SRB\n");
+   return -ENOMEM;
+   }
+
+   sp->type = SRB_NVMET_LS;
+   sp->done = qlt_nvmet_ls_done;
+   sp->vha = vha;
+   sp->fcport = tgt_cmd->fcport;
+
+   /* Stash everything the completion path needs in the iocb */
+   nvme = &sp->u.iocb_cmd;
+   nvme->u.nvme.rsp_dma = rsp->rspdma;
+   nvme->u.nvme.rsp_len = rsp->rsplen;
+   nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
+   nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
+   nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
+
+   nvme->u.nvme.cmd = tgt_cmd; /* To be freed */
+   nvme->u.nvme.desc = rsp; /* Call back to nvmet */
+
+   rval = qla2x00_start_sp(sp);
+   if (rval != QLA_SUCCESS) {
+   ql_log(ql_log_warn, vha, 0x11004,
+   "qla2x00_start_sp failed = %d\n", rval);
+   /*
+    * The IOCB never reached the firmware, so sp->done will not
+    * be invoked; release the SRB here or it leaks. Return a
+    * negative errno, matching the -ENOMEM path above.
+    */
+   qla2x00_rel_sp(sp);
+   return -EIO;
+   }
+
+   return 0;
+}
+
+/*
+ * qla_nvmet_fcp_op -
+ * Invoked by the nvmet transport to move data / complete the IO.
+ * Prepares and sends a response CTIO (82h) to the firmware.
+ *
+ * Always returns 0; errors from the CTIO path are reported
+ * asynchronously through the fcp_req completion.
+ */
+static int
+qla_nvmet_fcp_op(struct nvmet_fc_target_port *tgtport,
+   struct nvmefc_tgt_fcp_req *rsp)
+{
+   /* Recover our per-command context from the embedded fcp_req */
+   struct qla_nvmet_cmd *tgt_cmd =
+   container_of(rsp, struct qla_nvmet_cmd, cmd.fcp_req);
+   struct scsi_qla_host *vha = tgt_cmd->vha;
+
+   /* Compile-time no-op when FC NVMe target support is not built in */
+   if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+   return 0;
+
+   /* Prepare and send CTIO 82h */
+   qla_nvmet_send_resp_ctio(vha->qpair, tgt_cmd, rsp);
+
+   return 0;
+}
+
+/*
+ * qla_nvmet_fcp_abort_done
+ * free up the used resources
+ */
+static void qla_nvmet_fcp_abort_done(void *ptr, int res)
+{
+   srb_t *sp = ptr;
+
+