Re: [PATCH 6/9] srp: use the new CQ API

2015-11-18 Thread Christoph Hellwig
On Tue, Nov 17, 2015 at 11:56:39AM -0800, Bart Van Assche wrote:
> On 11/13/2015 05:46 AM, Christoph Hellwig wrote:
>> +static void srp_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
>> +{
>> +	srp_handle_qp_err(cq, wc, "INV RKEY");
>> +}
>>
>> [ ... ]
>>
>> +static void srp_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
>> +{
>> +	srp_handle_qp_err(cq, wc, "FAST REG");
>> +}
>
> How about using names like srp_inv_rkey_err() and srp_reg_mr_err() to make
> clear that these completion functions are only called if an error occurred?

I can do that if you like.
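
For readers skimming the archive: the reason these handlers only run on
errors is that the work requests they belong to are posted unsignaled, so
with the new CQ API the only completions ever reported for them are failed
ones. A minimal sketch of that pattern follows; it assumes struct
srp_request carries a struct ib_cqe named reg_cqe, and the function and
parameter names are illustrative rather than quoted from this thread:

static void srp_inv_rkey_err(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

/*
 * Post an unsignaled local-invalidate WR whose ib_cqe names the handler
 * above; a successful invalidate therefore generates no completion at all.
 */
static int srp_inv_rkey(struct srp_rdma_ch *ch, struct srp_request *req,
			u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	req->reg_cqe.done = srp_inv_rkey_err;	/* reg_cqe: assumed field */
	wr.wr_cqe = &req->reg_cqe;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}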


[PATCH 6/9] srp: use the new CQ API

2015-11-13 Thread Christoph Hellwig
This also moves recv completion handling from hardirq context into
softirq context.
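
The receive side illustrates the shape of the change: the CQ is allocated
with IB_POLL_SOFTIRQ, every receive WR points at a struct ib_cqe, and its
->done callback runs from softirq context instead of the old wr_id dispatch
done in the hardirq completion handler. A minimal sketch, assuming the
srp_iu layout used elsewhere in this driver (the cqe, dma and size fields
and target->lkey are assumptions for illustration only):

static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* cq_context is the 'private' pointer handed to ib_alloc_cq() */
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}
	/* ... process the SRP response carried by @iu ... */
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list = {
		.addr	= iu->dma,
		.length	= iu->size,
		.lkey	= ch->target->lkey,
	};

	iu->cqe.done = srp_recv_done;

	wr.next	   = NULL;
	wr.wr_cqe  = &iu->cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}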

Signed-off-by: Christoph Hellwig 
---
 drivers/infiniband/ulp/srp/ib_srp.c | 198 ++--
 drivers/infiniband/ulp/srp/ib_srp.h |   7 +-
 2 files changed, 76 insertions(+), 129 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 3027824..57237e1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -132,8 +132,9 @@ MODULE_PARM_DESC(ch_count,
 
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device, void *client_data);
-static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
-static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
+static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+   const char *opname);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
@@ -445,41 +446,6 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
  dev->max_pages_per_mr);
 }
 
-/**
- * srp_destroy_qp() - destroy an RDMA queue pair
- * @ch: SRP RDMA channel.
- *
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
- * being destroyed.
- */
-static void srp_destroy_qp(struct srp_rdma_ch *ch)
-{
-   static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
-   static struct ib_recv_wr wr = { 0 };
-   struct ib_recv_wr *bad_wr;
-   int ret;
-
-   wr.wr_id = SRP_LAST_WR_ID;
-   /* Destroying a QP and reusing ch->done is only safe if not connected */
-   WARN_ON_ONCE(ch->connected);
-
-   ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
-   WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
-   if (ret)
-   goto out;
-
-   init_completion(&ch->done);
-   ret = ib_post_recv(ch->qp, &wr, &bad_wr);
-   WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
-   if (ret == 0)
-   wait_for_completion(&ch->done);
-
-out:
-   ib_destroy_qp(ch->qp);
-}
-
 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 {
struct srp_target_port *target = ch->target;
@@ -490,34 +456,27 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg;
-   struct ib_cq_init_attr cq_attr = {};
int ret;
 
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
if (!init_attr)
return -ENOMEM;
 
-   /* + 1 for SRP_LAST_WR_ID */
-   cq_attr.cqe = target->queue_size + 1;
-   cq_attr.comp_vector = ch->comp_vector;
-   recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
-  &cq_attr);
+   /* queue_size + 1 for ib_drain_qp */
+   recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, ch->comp_vector,
+   IB_POLL_SOFTIRQ);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}
 
-   cq_attr.cqe = m * target->queue_size;
-   cq_attr.comp_vector = ch->comp_vector;
-   send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
-  &cq_attr);
+   send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, ch->comp_vector,
+   IB_POLL_DIRECT);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
}
 
-   ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
-
init_attr->event_handler   = srp_qp_event;
init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size + 1;
@@ -557,11 +516,11 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
}
 
if (ch->qp)
-   srp_destroy_qp(ch);
+   ib_destroy_qp(ch->qp);
if (ch->recv_cq)
-   ib_destroy_cq(ch->recv_cq);
+   ib_free_cq(ch->recv_cq);
if (ch->send_cq)
-   ib_destroy_cq(ch->send_cq);
+   ib_free_cq(ch->send_cq);
 
ch->qp = qp;
ch->recv_cq = recv_cq;
@@ -584,10 +543,10 @@ err_qp:
ib_destroy_qp(qp);
 
 err_send_cq:
-   ib_destroy_cq(send_cq);
+   ib_free_cq(send_cq);
 
 err_recv_cq:
-   ib_destroy_cq(recv_cq);
+   ib_free_cq(recv_cq);
 
 err:
kfree(init_attr);
@@ -623,9 +582,11 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (ch->fmr_pool)