Re: [RFC 6/6] qedi: Add support for data path.

2016-10-20 Thread Rangankar, Manish


On 19/10/16 3:54 PM, "Hannes Reinecke"  wrote:

>On 10/19/2016 07:01 AM, manish.rangan...@cavium.com wrote:
>> From: Manish Rangankar 
>> 
>> This patch adds support for data path and TMF handling.
>> 
>> Signed-off-by: Nilesh Javali 
>> Signed-off-by: Adheer Chandravanshi 
>> Signed-off-by: Chad Dupuis 
>> Signed-off-by: Saurav Kashyap 
>> Signed-off-by: Arun Easi 
>> Signed-off-by: Manish Rangankar 
>> ---
>>  drivers/scsi/qedi/qedi_fw.c| 1282
>>
>>  drivers/scsi/qedi/qedi_gbl.h   |6 +
>>  drivers/scsi/qedi/qedi_iscsi.c |6 +
>>  drivers/scsi/qedi/qedi_main.c  |4 +
>>  4 files changed, 1298 insertions(+)
>> 
>> diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
>> index a820785..af1e14d 100644
>> --- a/drivers/scsi/qedi/qedi_fw.c
>> +++ b/drivers/scsi/qedi/qedi_fw.c
>> @@ -147,6 +147,114 @@ static void qedi_process_text_resp(struct
>>qedi_ctx *qedi,
>>  spin_unlock(&session->back_lock);
>>  }

--snipped--
>> +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
>> +   u16 tid, int8_t direction)
>> +{
>> +struct qedi_io_log *io_log;
>> +struct iscsi_conn *conn = task->conn;
>> +struct qedi_conn *qedi_conn = conn->dd_data;
>> +struct scsi_cmnd *sc_cmd = task->sc;
>> +unsigned long flags;
>> +u8 op;
>> +
>> +spin_lock_irqsave(&qedi->io_trace_lock, flags);
>> +
>> +io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
>> +io_log->direction = direction;
>> +io_log->task_id = tid;
>> +io_log->cid = qedi_conn->iscsi_conn_id;
>> +io_log->lun = sc_cmd->device->lun;
>> +io_log->op = sc_cmd->cmnd[0];
>> +op = sc_cmd->cmnd[0];
>> +
>> +if (op == READ_10 || op == WRITE_10) {
>> +io_log->lba[0] = sc_cmd->cmnd[2];
>> +io_log->lba[1] = sc_cmd->cmnd[3];
>> +io_log->lba[2] = sc_cmd->cmnd[4];
>> +io_log->lba[3] = sc_cmd->cmnd[5];
>> +} else {
>> +io_log->lba[0] = 0;
>> +io_log->lba[1] = 0;
>> +io_log->lba[2] = 0;
>> +io_log->lba[3] = 0;
>> +}
>Only for READ_10 and WRITE_10? What about the other read or write
>commands?

We will add support for other scsi commands in the next revision.

>
>> +io_log->bufflen = scsi_bufflen(sc_cmd);
>> +io_log->sg_count = scsi_sg_count(sc_cmd);
>> +io_log->fast_sgs = qedi->fast_sgls;
>> +io_log->cached_sgs = qedi->cached_sgls;
>> +io_log->slow_sgs = qedi->slow_sgls;
>> +io_log->cached_sge = qedi->use_cached_sge;
>> +io_log->slow_sge = qedi->use_slow_sge;
>> +io_log->fast_sge = qedi->use_fast_sge;
>> +io_log->result = sc_cmd->result;
>> +io_log->jiffies = jiffies;
>> +io_log->blk_req_cpu = smp_processor_id();
>> +
>> +if (direction == QEDI_IO_TRACE_REQ) {
>> +/* For requests we only care about the submission CPU */
>> +io_log->req_cpu = smp_processor_id() % qedi->num_queues;
>> +io_log->intr_cpu = 0;
>> +io_log->blk_rsp_cpu = 0;
>> +} else if (direction == QEDI_IO_TRACE_RSP) {
>> +io_log->req_cpu = smp_processor_id() % qedi->num_queues;
>> +io_log->intr_cpu = qedi->intr_cpu;
>> +io_log->blk_rsp_cpu = smp_processor_id();
>> +}
>> +
>> +qedi->io_trace_idx++;
>> +if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
>> +qedi->io_trace_idx = 0;
>> +
>> +qedi->use_cached_sge = false;
>> +qedi->use_slow_sge = false;
>> +qedi->use_fast_sge = false;
>> +
>> +spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
>> +}
>> +
>> +int qedi_iscsi_send_ioreq(struct iscsi_task *task)
>> +{
>> +struct iscsi_conn *conn = task->conn;
>> +struct iscsi_session *session = conn->session;
>> +struct Scsi_Host *shost =
>>iscsi_session_to_shost(session->cls_session);
>> +struct qedi_ctx *qedi = iscsi_host_priv(shost);
>> +struct qedi_conn *qedi_conn = conn->dd_data;
>> +struct qedi_cmd *cmd = task->dd_data;
>> +struct scsi_cmnd *sc = task->sc;
>> +struct iscsi_task_context *fw_task_ctx;
>> +struct iscsi_cached_sge_ctx *cached_sge;
>> +struct iscsi_phys_sgl_ctx *phys_sgl;
>> +struct iscsi_virt_sgl_ctx *virt_sgl;
>> +struct ystorm_iscsi_task_st_ctx *yst_cxt;
>> +struct mstorm_iscsi_task_st_ctx *mst_cxt;
>> +struct iscsi_sgl *sgl_struct;
>> +struct iscsi_sge *single_sge;
>> +struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
>> +struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
>> +enum iscsi_task_type task_type;
>> +struct iscsi_cmd_hdr *fw_cmd;
>> +u32 scsi_lun[2];
>> +u16 cq_idx = smp_processor_id() % qedi->num_queues;
>> +s16 ptu_invalidate = 0;
>> +s16 tid = 0;
>> +u8 num_fast_sgs;
>> +
>> +tid = 

Re: [RFC 6/6] qedi: Add support for data path.

2016-10-19 Thread Hannes Reinecke
On 10/19/2016 07:01 AM, manish.rangan...@cavium.com wrote:
> From: Manish Rangankar 
> 
> This patch adds support for data path and TMF handling.
> 
> Signed-off-by: Nilesh Javali 
> Signed-off-by: Adheer Chandravanshi 
> Signed-off-by: Chad Dupuis 
> Signed-off-by: Saurav Kashyap 
> Signed-off-by: Arun Easi 
> Signed-off-by: Manish Rangankar 
> ---
>  drivers/scsi/qedi/qedi_fw.c| 1282 
> 
>  drivers/scsi/qedi/qedi_gbl.h   |6 +
>  drivers/scsi/qedi/qedi_iscsi.c |6 +
>  drivers/scsi/qedi/qedi_main.c  |4 +
>  4 files changed, 1298 insertions(+)
> 
> diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
> index a820785..af1e14d 100644
> --- a/drivers/scsi/qedi/qedi_fw.c
> +++ b/drivers/scsi/qedi/qedi_fw.c
> @@ -147,6 +147,114 @@ static void qedi_process_text_resp(struct qedi_ctx 
> *qedi,
>   spin_unlock(&session->back_lock);
>  }
>  
> +static void qedi_tmf_resp_work(struct work_struct *work)
> +{
> + struct qedi_cmd *qedi_cmd =
> + container_of(work, struct qedi_cmd, tmf_work);
> + struct qedi_conn *qedi_conn = qedi_cmd->conn;
> + struct qedi_ctx *qedi = qedi_conn->qedi;
> + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
> + struct iscsi_session *session = conn->session;
> + struct iscsi_tm_rsp *resp_hdr_ptr;
> + struct iscsi_cls_session *cls_sess;
> + int rval = 0;
> +
> + set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
> + resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
> + cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
> +
> + iscsi_block_session(session->cls_session);
> + rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
> + if (rval) {
> + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
> + qedi_clear_task_idx(qedi, qedi_cmd->task_id);
> + iscsi_unblock_session(session->cls_session);
> + return;
> + }
> +
> + iscsi_unblock_session(session->cls_session);
> + qedi_clear_task_idx(qedi, qedi_cmd->task_id);
> +
> + spin_lock(&session->back_lock);
> + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
> + spin_unlock(&session->back_lock);
> + kfree(resp_hdr_ptr);
> + clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
> +}
> +
> +static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
> +   union iscsi_cqe *cqe,
> +   struct iscsi_task *task,
> +   struct qedi_conn *qedi_conn)
> +
> +{
> + struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
> + struct iscsi_session *session = conn->session;
> + struct iscsi_tmf_response_hdr *cqe_tmp_response;
> + struct iscsi_tm_rsp *resp_hdr_ptr;
> + struct iscsi_tm *tmf_hdr;
> + struct qedi_cmd *qedi_cmd = NULL;
> + u32 *tmp;
> +
> + cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
> +
> + qedi_cmd = task->dd_data;
> + qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
> + if (!qedi_cmd->tmf_resp_buf) {
> + QEDI_ERR(&qedi->dbg_ctx,
> +  "Failed to allocate resp buf, cid=0x%x\n",
> +   qedi_conn->iscsi_conn_id);
> + return;
> + }
> +
> + spin_lock(>back_lock);
> + resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
> + memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
> +
> + /* Fill up the header */
> + resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
> + resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
> + resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
> + resp_hdr_ptr->hlength = 0;
> +
> + hton24(resp_hdr_ptr->dlength,
> +(cqe_tmp_response->hdr_second_dword &
> + ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
> + tmp = (u32 *)resp_hdr_ptr->dlength;
> + resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
> +   conn->session->age);
> + resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
> + resp_hdr_ptr->exp_cmdsn  = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
> + resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
> +
> + tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
> +
> + if (likely(qedi_cmd->io_cmd_in_list)) {
> + qedi_cmd->io_cmd_in_list = false;
> + list_del_init(&qedi_cmd->io_cmd);
> + qedi_conn->active_cmd_count--;
> + }
> +
> + if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
> +   ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
> + ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
> +   ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
> + ((tmf_hdr->flags & 

[RFC 6/6] qedi: Add support for data path.

2016-10-18 Thread manish.rangankar
From: Manish Rangankar 

This patch adds support for data path and TMF handling.

Signed-off-by: Nilesh Javali 
Signed-off-by: Adheer Chandravanshi 
Signed-off-by: Chad Dupuis 
Signed-off-by: Saurav Kashyap 
Signed-off-by: Arun Easi 
Signed-off-by: Manish Rangankar 
---
 drivers/scsi/qedi/qedi_fw.c| 1282 
 drivers/scsi/qedi/qedi_gbl.h   |6 +
 drivers/scsi/qedi/qedi_iscsi.c |6 +
 drivers/scsi/qedi/qedi_main.c  |4 +
 4 files changed, 1298 insertions(+)

diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index a820785..af1e14d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -147,6 +147,114 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
spin_unlock(&session->back_lock);
 }
 
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+   struct qedi_cmd *qedi_cmd =
+   container_of(work, struct qedi_cmd, tmf_work);
+   struct qedi_conn *qedi_conn = qedi_cmd->conn;
+   struct qedi_ctx *qedi = qedi_conn->qedi;
+   struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+   struct iscsi_session *session = conn->session;
+   struct iscsi_tm_rsp *resp_hdr_ptr;
+   struct iscsi_cls_session *cls_sess;
+   int rval = 0;
+
+   set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+   resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+   cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+
+   iscsi_block_session(session->cls_session);
+   rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+   if (rval) {
+   clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+   qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+   iscsi_unblock_session(session->cls_session);
+   return;
+   }
+
+   iscsi_unblock_session(session->cls_session);
+   qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+   spin_lock(&session->back_lock);
+   __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+   spin_unlock(&session->back_lock);
+   kfree(resp_hdr_ptr);
+   clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ union iscsi_cqe *cqe,
+ struct iscsi_task *task,
+ struct qedi_conn *qedi_conn)
+
+{
+   struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+   struct iscsi_session *session = conn->session;
+   struct iscsi_tmf_response_hdr *cqe_tmp_response;
+   struct iscsi_tm_rsp *resp_hdr_ptr;
+   struct iscsi_tm *tmf_hdr;
+   struct qedi_cmd *qedi_cmd = NULL;
+   u32 *tmp;
+
+   cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+   qedi_cmd = task->dd_data;
+   qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+   if (!qedi_cmd->tmf_resp_buf) {
+   QEDI_ERR(&qedi->dbg_ctx,
+"Failed to allocate resp buf, cid=0x%x\n",
+ qedi_conn->iscsi_conn_id);
+   return;
+   }
+
+   spin_lock(>back_lock);
+   resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+   memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+   /* Fill up the header */
+   resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+   resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+   resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+   resp_hdr_ptr->hlength = 0;
+
+   hton24(resp_hdr_ptr->dlength,
+  (cqe_tmp_response->hdr_second_dword &
+   ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+   tmp = (u32 *)resp_hdr_ptr->dlength;
+   resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+ conn->session->age);
+   resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+   resp_hdr_ptr->exp_cmdsn  = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+   resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+   tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+   if (likely(qedi_cmd->io_cmd_in_list)) {
+   qedi_cmd->io_cmd_in_list = false;
+   list_del_init(&qedi_cmd->io_cmd);
+   qedi_conn->active_cmd_count--;
+   }
+
+   if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+   ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+   ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+   INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+