tree 499d78432420c3fc4339d784940348a9f06010e5
parent 2c153b934dca08d58e0aafde18a182e0891aa201
author Hal Rosenstock <[EMAIL PROTECTED]> Thu, 28 Jul 2005 01:45:32 -0700
committer Linus Torvalds <[EMAIL PROTECTED]> Thu, 28 Jul 2005 06:26:11 -0700

[PATCH] IB: Add ib_modify_mad API to MAD

Add a new MAD layer call, ib_modify_mad(), to modify the timeout of a sent
MAD, and simplify the cancel code by implementing ib_cancel_mad() on top of it.
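
For illustration (a hypothetical caller, not part of this patch), a client
holding the wr_id of an outstanding send could extend its timeout with the
new call, or cancel it by passing a timeout of 0:

    /* Hypothetical caller sketch; agent and wr_id come from the client. */
    static void update_sent_mad(struct ib_mad_agent *agent, u64 wr_id,
                                int still_waiting)
    {
            if (still_waiting) {
                    /* Give the request another 5 seconds to complete. */
                    if (ib_modify_mad(agent, wr_id, 5000))
                            return; /* -EINVAL: not found or already completed */
            } else {
                    /* Equivalent to ib_modify_mad(agent, wr_id, 0). */
                    ib_cancel_mad(agent, wr_id);
            }
    }

A zero timeout flushes the send and reports it to the client's send handler
with IB_WC_WR_FLUSH_ERR, which is what ib_cancel_mad() now does internally.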

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
Signed-off-by: Hal Rosenstock <[EMAIL PROTECTED]>
Cc: Roland Dreier <[EMAIL PROTECTED]>
Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>

 drivers/infiniband/core/mad.c       |   83 ++++++++++++------------------------
 drivers/infiniband/core/mad_priv.h  |    2 --
 drivers/infiniband/include/ib_mad.h |   14 +++++-
 3 files changed, 40 insertions(+), 59 deletions(-)

diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,7 +65,6 @@ static void cancel_mads(struct ib_mad_ag
 static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
                                    struct ib_mad_send_wc *mad_send_wc);
 static void timeout_sends(void *data);
-static void cancel_sends(void *data);
 static void local_completions(void *data);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
@@ -346,8 +345,6 @@ struct ib_mad_agent *ib_register_mad_age
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions,
                   mad_agent_priv);
-       INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
-       INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_waitqueue_head(&mad_agent_priv->wait);
 
@@ -1775,6 +1772,13 @@ static void wait_for_response(struct ib_
        }
 }
 
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+                         int timeout_ms)
+{
+       mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
+       wait_for_response(mad_send_wr);
+}
+
 /*
  * Process a send work completion
  */
@@ -2034,41 +2038,7 @@ find_send_by_wr_id(struct ib_mad_agent_p
        return NULL;
 }
 
-void cancel_sends(void *data)
-{
-       struct ib_mad_agent_private *mad_agent_priv;
-       struct ib_mad_send_wr_private *mad_send_wr;
-       struct ib_mad_send_wc mad_send_wc;
-       unsigned long flags;
-
-       mad_agent_priv = data;
-
-       mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
-       mad_send_wc.vendor_err = 0;
-
-       spin_lock_irqsave(&mad_agent_priv->lock, flags);
-       while (!list_empty(&mad_agent_priv->canceled_list)) {
-               mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
-                                        struct ib_mad_send_wr_private,
-                                        agent_list);
-
-               list_del(&mad_send_wr->agent_list);
-               spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-
-               mad_send_wc.wr_id = mad_send_wr->wr_id;
-               mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
-                                                  &mad_send_wc);
-
-               kfree(mad_send_wr);
-               if (atomic_dec_and_test(&mad_agent_priv->refcount))
-                       wake_up(&mad_agent_priv->wait);
-               spin_lock_irqsave(&mad_agent_priv->lock, flags);
-       }
-       spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-}
-
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-                 u64 wr_id)
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
@@ -2078,29 +2048,30 @@ void ib_cancel_mad(struct ib_mad_agent *
                                      agent);
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
-       if (!mad_send_wr) {
+       if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-               goto out;
+               return -EINVAL;
        }
 
-       if (mad_send_wr->status == IB_WC_SUCCESS)
-               mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
-
-       if (mad_send_wr->refcount != 0) {
+       if (!timeout_ms) {
                mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
-               spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-               goto out;
+               mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
        }
 
-       list_del(&mad_send_wr->agent_list);
-       list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
-       adjust_timeout(mad_agent_priv);
+       mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
+       if (!mad_send_wr->timeout || mad_send_wr->refcount > 1)
+               mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
+       else
+               ib_reset_mad_timeout(mad_send_wr, timeout_ms);
+
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL(ib_modify_mad);
 
-       queue_work(mad_agent_priv->qp_info->port_priv->wq,
-                  &mad_agent_priv->canceled_work);
-out:
-       return;
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
+{
+       ib_modify_mad(mad_agent, wr_id, 0);
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
@@ -2207,8 +2178,6 @@ static void timeout_sends(void *data)
        unsigned long flags, delay;
 
        mad_agent_priv = (struct ib_mad_agent_private *)data;
-
-       mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
        mad_send_wc.vendor_err = 0;
 
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2233,6 +2202,10 @@ static void timeout_sends(void *data)
 
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
+               if (mad_send_wr->status == IB_WC_SUCCESS)
+                       mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
+               else
+                       mad_send_wc.status = mad_send_wr->status;
                mad_send_wc.wr_id = mad_send_wr->wr_id;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -97,8 +97,6 @@ struct ib_mad_agent_private {
        unsigned long timeout;
        struct list_head local_list;
        struct work_struct local_work;
-       struct list_head canceled_list;
-       struct work_struct canceled_work;
 
        atomic_t refcount;
        wait_queue_head_t wait;
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -385,8 +385,18 @@ void ib_free_recv_mad(struct ib_mad_recv
  * MADs will be returned to the user through the corresponding
  * ib_mad_send_handler.
  */
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-                  u64 wr_id);
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
+
+/**
+ * ib_modify_mad - Modifies an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to modify.
+ * @timeout_ms: New timeout value for sent MAD.
+ *
+ * This call will reset the timeout value for a sent MAD to the specified
+ * value.
+ */
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
 
 /**
  * ib_redirect_mad_qp - Registers a QP for MAD services.