On 19/10/16 3:54 PM, "Hannes Reinecke" <h...@suse.de> wrote:

>On 10/19/2016 07:01 AM, manish.rangan...@cavium.com wrote:
>> From: Manish Rangankar <manish.rangan...@cavium.com>
>> 
>> This patch adds support for data path and TMF handling.
>> 
>> Signed-off-by: Nilesh Javali <nilesh.jav...@cavium.com>
>> Signed-off-by: Adheer Chandravanshi <adheer.chandravan...@qlogic.com>
>> Signed-off-by: Chad Dupuis <chad.dup...@cavium.com>
>> Signed-off-by: Saurav Kashyap <saurav.kash...@cavium.com>
>> Signed-off-by: Arun Easi <arun.e...@cavium.com>
>> Signed-off-by: Manish Rangankar <manish.rangan...@cavium.com>
>> ---
>>  drivers/scsi/qedi/qedi_fw.c    | 1282 ++++++++++++++++++++++++++++++++++++++++
>>  drivers/scsi/qedi/qedi_gbl.h   |    6 +
>>  drivers/scsi/qedi/qedi_iscsi.c |    6 +
>>  drivers/scsi/qedi/qedi_main.c  |    4 +
>>  4 files changed, 1298 insertions(+)
>> 
>> diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
>> index a820785..af1e14d 100644
>> --- a/drivers/scsi/qedi/qedi_fw.c
>> +++ b/drivers/scsi/qedi/qedi_fw.c
>> @@ -147,6 +147,114 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
>>      spin_unlock(&session->back_lock);
>>  }

--snipped--
>> +void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
>> +               u16 tid, int8_t direction)
>> +{
>> +    struct qedi_io_log *io_log;
>> +    struct iscsi_conn *conn = task->conn;
>> +    struct qedi_conn *qedi_conn = conn->dd_data;
>> +    struct scsi_cmnd *sc_cmd = task->sc;
>> +    unsigned long flags;
>> +    u8 op;
>> +
>> +    spin_lock_irqsave(&qedi->io_trace_lock, flags);
>> +
>> +    io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
>> +    io_log->direction = direction;
>> +    io_log->task_id = tid;
>> +    io_log->cid = qedi_conn->iscsi_conn_id;
>> +    io_log->lun = sc_cmd->device->lun;
>> +    io_log->op = sc_cmd->cmnd[0];
>> +    op = sc_cmd->cmnd[0];
>> +
>> +    if (op == READ_10 || op == WRITE_10) {
>> +            io_log->lba[0] = sc_cmd->cmnd[2];
>> +            io_log->lba[1] = sc_cmd->cmnd[3];
>> +            io_log->lba[2] = sc_cmd->cmnd[4];
>> +            io_log->lba[3] = sc_cmd->cmnd[5];
>> +    } else {
>> +            io_log->lba[0] = 0;
>> +            io_log->lba[1] = 0;
>> +            io_log->lba[2] = 0;
>> +            io_log->lba[3] = 0;
>> +    }
>Only for READ_10 and WRITE_10? What about the other read or write
>commands?

We will add support for the other SCSI read/write commands in the next
revision.
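
For reference, something along these lines is what we have in mind
(untested sketch; it assumes io_log->lba stays a four-byte array, so
READ_16/WRITE_16 would only capture the low 32 bits of the LBA):

	switch (op) {
	case READ_6:
	case WRITE_6:
		/* 21-bit LBA in CDB bytes 1-3; top bits of byte 1 reserved */
		io_log->lba[0] = 0;
		io_log->lba[1] = sc_cmd->cmnd[1] & 0x1f;
		io_log->lba[2] = sc_cmd->cmnd[2];
		io_log->lba[3] = sc_cmd->cmnd[3];
		break;
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
		/* 32-bit LBA in CDB bytes 2-5 */
		io_log->lba[0] = sc_cmd->cmnd[2];
		io_log->lba[1] = sc_cmd->cmnd[3];
		io_log->lba[2] = sc_cmd->cmnd[4];
		io_log->lba[3] = sc_cmd->cmnd[5];
		break;
	case READ_16:
	case WRITE_16:
		/* 64-bit LBA in CDB bytes 2-9; log only the low 32 bits */
		io_log->lba[0] = sc_cmd->cmnd[6];
		io_log->lba[1] = sc_cmd->cmnd[7];
		io_log->lba[2] = sc_cmd->cmnd[8];
		io_log->lba[3] = sc_cmd->cmnd[9];
		break;
	default:
		memset(io_log->lba, 0, sizeof(io_log->lba));
		break;
	}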

>
>> +    io_log->bufflen = scsi_bufflen(sc_cmd);
>> +    io_log->sg_count = scsi_sg_count(sc_cmd);
>> +    io_log->fast_sgs = qedi->fast_sgls;
>> +    io_log->cached_sgs = qedi->cached_sgls;
>> +    io_log->slow_sgs = qedi->slow_sgls;
>> +    io_log->cached_sge = qedi->use_cached_sge;
>> +    io_log->slow_sge = qedi->use_slow_sge;
>> +    io_log->fast_sge = qedi->use_fast_sge;
>> +    io_log->result = sc_cmd->result;
>> +    io_log->jiffies = jiffies;
>> +    io_log->blk_req_cpu = smp_processor_id();
>> +
>> +    if (direction == QEDI_IO_TRACE_REQ) {
>> +            /* For requests we only care about the submission CPU */
>> +            io_log->req_cpu = smp_processor_id() % qedi->num_queues;
>> +            io_log->intr_cpu = 0;
>> +            io_log->blk_rsp_cpu = 0;
>> +    } else if (direction == QEDI_IO_TRACE_RSP) {
>> +            io_log->req_cpu = smp_processor_id() % qedi->num_queues;
>> +            io_log->intr_cpu = qedi->intr_cpu;
>> +            io_log->blk_rsp_cpu = smp_processor_id();
>> +    }
>> +
>> +    qedi->io_trace_idx++;
>> +    if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
>> +            qedi->io_trace_idx = 0;
>> +
>> +    qedi->use_cached_sge = false;
>> +    qedi->use_slow_sge = false;
>> +    qedi->use_fast_sge = false;
>> +
>> +    spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
>> +}
>> +
>> +int qedi_iscsi_send_ioreq(struct iscsi_task *task)
>> +{
>> +    struct iscsi_conn *conn = task->conn;
>> +    struct iscsi_session *session = conn->session;
>> +    struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
>> +    struct qedi_ctx *qedi = iscsi_host_priv(shost);
>> +    struct qedi_conn *qedi_conn = conn->dd_data;
>> +    struct qedi_cmd *cmd = task->dd_data;
>> +    struct scsi_cmnd *sc = task->sc;
>> +    struct iscsi_task_context *fw_task_ctx;
>> +    struct iscsi_cached_sge_ctx *cached_sge;
>> +    struct iscsi_phys_sgl_ctx *phys_sgl;
>> +    struct iscsi_virt_sgl_ctx *virt_sgl;
>> +    struct ystorm_iscsi_task_st_ctx *yst_cxt;
>> +    struct mstorm_iscsi_task_st_ctx *mst_cxt;
>> +    struct iscsi_sgl *sgl_struct;
>> +    struct iscsi_sge *single_sge;
>> +    struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
>> +    struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
>> +    enum iscsi_task_type task_type;
>> +    struct iscsi_cmd_hdr *fw_cmd;
>> +    u32 scsi_lun[2];
>> +    u16 cq_idx = smp_processor_id() % qedi->num_queues;
>> +    s16 ptu_invalidate = 0;
>> +    s16 tid = 0;
>> +    u8 num_fast_sgs;
>> +
>> +    tid = qedi_get_task_idx(qedi);
>> +    if (tid == -1)
>> +            return -ENOMEM;
>> +
>> +    qedi_iscsi_map_sg_list(cmd);
>> +
>> +    int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
>> +    fw_task_ctx =
>> +          (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
>> +
>> +    memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
>> +    cmd->task_id = tid;
>> +
>> +    /* Ystrom context */
>Ystrom or Ystorm?

Noted, it should be 'Ystorm'. Will fix the typo in the next revision.

>
>> +    fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
>> +    SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
>> +
>> +    if (sc->sc_data_direction == DMA_TO_DEVICE) {
>> +            if (conn->session->initial_r2t_en) {
>> +                    fw_task_ctx->ustorm_ag_context.exp_data_acked =
>> +                            min((conn->session->imm_data_en *
>> +                                conn->max_xmit_dlength),
>> +                                conn->session->first_burst);
>> +                    fw_task_ctx->ustorm_ag_context.exp_data_acked =
>> +                          min(fw_task_ctx->ustorm_ag_context.exp_data_acked,
>> +                              scsi_bufflen(sc));
>> +            } else {
>> +                    fw_task_ctx->ustorm_ag_context.exp_data_acked =
>> +                          min(conn->session->first_burst, scsi_bufflen(sc));
>> +            }
>> +
>> +            SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
>> +            task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
>> +    } else {
>> +            if (scsi_bufflen(sc))
>> +                    SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
>> +            task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
>> +    }
>> +
>> +    fw_cmd->lun.lo = be32_to_cpu(scsi_lun[0]);
>> +    fw_cmd->lun.hi = be32_to_cpu(scsi_lun[1]);
>> +
>> +    qedi_update_itt_map(qedi, tid, task->itt);
>> +    fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
>> +    fw_cmd->expected_transfer_length = scsi_bufflen(sc);
>> +    fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
>> +    fw_cmd->opcode = hdr->opcode;
>> +    qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
>> +
>> +    /* Mstorm context */
>> +    fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
>> +    fw_task_ctx->mstorm_st_context.sense_db.hi =
>> +                                    (u32)((u64)cmd->sense_buffer_dma >> 32);
>> +    fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
>> +    fw_task_ctx->mstorm_st_context.task_type = task_type;
>> +
>> +    if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
>> +            ptu_invalidate = 1;
>> +            qedi->tid_reuse_count[tid] = 0;
>> +    }
>> +    fw_task_ctx->ystorm_st_context.state.reuse_count =
>> +                                                 qedi->tid_reuse_count[tid];
>> +    fw_task_ctx->mstorm_st_context.reuse_count =
>> +                                               qedi->tid_reuse_count[tid]++;
>> +
>> +    /* Ustrorm context */
>Ustrorm?

Noted, will fix this one as well; it should be 'Ustorm'.

Thanks,
Manish R.
