Roland,

Attached are 6 git patches pulled from SVN to queue for 2.6.19.  They
correspond to the following SVN revisions:

4578 - include atomic as default QP attribute
8628 - fix reject message if GID is invalid
8434 - add dual-sided RMPP
8826 - remove unnecessary include
8827 - remove unnecessary include
9088 - randomize starting local comm id

Let me know if you'd prefer these in another format (such as inline).

- Sean

From d697059a6f69e19c18a50c87df20894d253d3d8f Mon Sep 17 00:00:00 2001
From: Sean Hefty <[EMAIL PROTECTED]>
Date: Mon, 28 Aug 2006 15:15:18 -0700
Subject: [PATCH] Randomize the starting local comm ID

Randomize the starting local comm ID to avoid getting a rejected connection
due to a stale connection after a system reboot or reloading of the ib_cm.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/cm.c |   24 ++++++++++++++++--------
 1 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1aad33e..47fd03c 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -41,6 +41,7 @@ #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/random.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -73,6 +74,7 @@ static struct ib_cm {
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
+       __be32 random_id_operand;
        struct workqueue_struct *wq;
 } cm;
 
@@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
        unsigned long flags;
-       int ret;
+       int ret, id;
        static int next_id;
 
        do {
                spin_lock_irqsave(&cm.lock, flags);
-               ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
-                                       (__force int *) &cm_id_priv->id.local_id);
+               ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
+                                       next_id++, &id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+       
+       cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
        return ret;
 }
 
@@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id)
        unsigned long flags;
 
        spin_lock_irqsave(&cm.lock, flags);
-       idr_remove(&cm.local_id_table, (__force int) local_id);
+       idr_remove(&cm.local_id_table,
+                  (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irqrestore(&cm.lock, flags);
 }
 
@@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(
 {
        struct cm_id_private *cm_id_priv;
 
-       cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
+       cm_id_priv = idr_find(&cm.local_id_table,
+                             (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
@@ -2082,8 +2088,9 @@ static struct cm_id_private * cm_acquire
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return NULL;
                }
-               cm_id_priv = idr_find(&cm.local_id_table,
-                                     (__force int) timewait_info->work.local_id);
+               cm_id_priv = idr_find(&cm.local_id_table, (__force int)
+                                     (timewait_info->work.local_id ^
+                                      cm.random_id_operand));
                if (cm_id_priv) {
                        if (cm_id_priv->id.remote_id == remote_id)
                                atomic_inc(&cm_id_priv->refcount);
@@ -3360,6 +3367,7 @@ static int __init ib_cm_init(void)
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
        idr_init(&cm.local_id_table);
+       get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
        idr_pre_get(&cm.local_id_table, GFP_KERNEL);
 
        cm.wq = create_workqueue("ib_cm");
-- 
1.4.2
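
For anyone reviewing the ID handling above: the idr still hands out small
sequential indices, and only the value carried in the MAD is XORed with the
random operand.  Here is a minimal stand-alone sketch of that round trip
(plain user-space C, not part of the patch; the names only mirror the
kernel ones):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

static uint32_t random_id_operand;      /* stands in for cm.random_id_operand */

/* idr index -> comm ID placed in the REQ, as cm_alloc_id() does now */
static uint32_t index_to_id(uint32_t idr_index)
{
        return idr_index ^ random_id_operand;
}

/* comm ID from the wire -> idr index, as cm_free_id()/cm_get_id() do */
static uint32_t id_to_index(uint32_t local_id)
{
        return local_id ^ random_id_operand;
}

int main(void)
{
        srand((unsigned) time(NULL));
        random_id_operand = (uint32_t) rand();  /* get_random_bytes() in the kernel */

        for (uint32_t idx = 0; idx < 3; idx++) {
                uint32_t id = index_to_id(idx);
                printf("idr index %u -> comm ID 0x%08x -> index %u\n",
                       idx, id, id_to_index(id));
        }
        return 0;
}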

From 5cabb459ff4a8f22ab6b44e0704ee8b2e28e4104 Mon Sep 17 00:00:00 2001
From: Sean Hefty <[EMAIL PROTECTED]>
Date: Mon, 28 Aug 2006 11:55:52 -0700
Subject: [PATCH] Enable atomic operations along with RDMA reads

Enable atomic operations along with RDMA reads if a local RDMA read/atomic
depth is provided by the user.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/cm.c |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0de335b..0df1454 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3125,7 +3125,8 @@ static int cm_init_qp_init_attr(struct c
                qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_WRITE;
                if (cm_id_priv->responder_resources)
-                       qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+                       qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
+                                                   IB_ACCESS_REMOTE_ATOMIC;
                qp_attr->pkey_index = cm_id_priv->av.pkey_index;
                qp_attr->port_num = cm_id_priv->av.port->port_num;
                ret = 0;
-- 
1.4.2
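
For context (not part of the patch): a consumer picks these attributes up
through ib_cm_init_qp_attr() before modifying its QP, so the atomic flag
flows through without any API change.  A rough sketch of that call sequence,
under my reading of the verbs/CM headers (the helper name is made up):

#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>

/* Illustrative only: move a QP to INIT using the CM-provided attributes. */
static int move_qp_to_init(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask;
        int ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        /* qp_attr.qp_access_flags now includes IB_ACCESS_REMOTE_ATOMIC
         * whenever responder_resources is non-zero for this connection. */
        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}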

From 03138e447d67e0c154823cfd67cad7ed85481ff8 Mon Sep 17 00:00:00 2001
From: Sean Hefty <[EMAIL PROTECTED]>
Date: Mon, 28 Aug 2006 11:57:42 -0700
Subject: [PATCH] Set the reject code properly for a request with an invalid GID

Set the reject code properly when rejecting a request that contains an
invalid GID.  A suitable GID is returned by the IB CM in the additional
reject information (ARI).  This is a spec compliance issue.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/cm.c |   32 +++++++++++++++++++++-----------
 1 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0df1454..1aad33e 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1354,7 +1354,7 @@ static int cm_req_handler(struct cm_work
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
-               goto error1;
+               goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
@@ -1363,7 +1363,8 @@ static int cm_req_handler(struct cm_work
        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
-               goto error2;
+               kfree(cm_id_priv->timewait_info);
+               goto destroy;
        }
 
        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1373,12 +1374,22 @@ static int cm_req_handler(struct cm_work
 
        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
-       if (ret)
-               goto error3;
+       if (ret) {
+               ib_get_cached_gid(work->port->cm_dev->device,
+                                 work->port->port_num, 0, &work->path[0].sgid);
+               ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+                              &work->path[0].sgid, sizeof work->path[0].sgid,
+                              NULL, 0);
+               goto rejected;
+       }
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
-               if (ret)
-                       goto error3;
+               if (ret) {
+                       ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+                                      &work->path[0].sgid,
+                                      sizeof work->path[0].sgid, NULL, 0);
+                       goto rejected;
+               }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
@@ -1400,12 +1411,11 @@ static int cm_req_handler(struct cm_work
        cm_deref_id(listen_cm_id_priv);
        return 0;
 
-error3:        atomic_dec(&cm_id_priv->refcount);
+rejected:
+       atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
-       cm_cleanup_timewait(cm_id_priv->timewait_info);
-error2:        kfree(cm_id_priv->timewait_info);
-       cm_id_priv->timewait_info = NULL;
-error1:        ib_destroy_cm_id(&cm_id_priv->id);
+destroy:
+       ib_destroy_cm_id(cm_id);
        return ret;
 }
 
-- 
1.4.2
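
As an illustration of the intent (again, not part of the patch): an active
side that gets the REJ can recover the suggested GID from the ARI.  The
event field names below reflect my reading of <rdma/ib_cm.h>, and the
handler name is made up, so treat this strictly as a sketch:

#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>

static void handle_rej(struct ib_cm_event *event)
{
        struct ib_cm_rej_event_param *rej = &event->param.rej_rcvd;
        union ib_gid suggested_gid;

        if (rej->reason == IB_CM_REJ_INVALID_GID &&
            rej->ari_length >= sizeof(suggested_gid)) {
                /* The passive side placed a usable SGID in the ARI. */
                memcpy(&suggested_gid, rej->ari, sizeof(suggested_gid));
                /* ...re-resolve the path toward suggested_gid and retry... */
        }
}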

From 69fd025f17b20b2fa3de8bf435f4601f9feeb0fa Mon Sep 17 00:00:00 2001
From: Sean Hefty <[EMAIL PROTECTED]>
Date: Mon, 28 Aug 2006 15:10:32 -0700
Subject: [PATCH] Add support for dual-sided RMPP transfers.

The implementation assumes that any RMPP request that requires a response uses
DS RMPP.  Based on the RMPP start-up scenarios defined by the spec, this should
be a valid assumption.  That is, there is no start-up scenario defined where an
RMPP request is followed by a non-RMPP response.  This assumption lets us
avoid any API changes.

In order for a node that supports DS RMPP to communicate with one that does not,
RMPP responses assume a new window size of 1 if a DS ACK has not been received.
(By DS ACK, I'm referring to the turn-around ACK after the final ACK of the
request.)  This is a slight spec deviation, but is necessary to allow
communication with nodes that do not generate the DS ACK.  It also handles the
case when a response is sent after the request state has been discarded.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/mad_rmpp.c |   90 +++++++++++++++++++++++++++++++++++-
 1 files changed, 87 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index ebcd5b1..26469c1 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -60,6 +60,7 @@ struct mad_rmpp_recv {
        int last_ack;
        int seg_num;
        int newwin;
+       int repwin;
 
        __be64 tid;
        u32 src_qp;
@@ -170,6 +171,32 @@ static struct ib_mad_send_buf *alloc_res
        return msg;
 }
 
+static void ack_ds_ack(struct ib_mad_agent_private *agent,
+                      struct ib_mad_recv_wc *recv_wc)
+{
+       struct ib_mad_send_buf *msg;
+       struct ib_rmpp_mad *rmpp_mad;
+       int ret;
+
+       msg = alloc_response_msg(&agent->agent, recv_wc);
+       if (IS_ERR(msg))
+               return;
+
+       rmpp_mad = msg->mad;
+       memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
+
+       rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+       ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+       rmpp_mad->rmpp_hdr.seg_num = 0;
+       rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);
+
+       ret = ib_post_send_mad(msg, NULL);
+       if (ret) {
+               ib_destroy_ah(msg->ah);
+               ib_free_send_mad(msg);
+       }
+}
+
 void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
        struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
@@ -271,6 +298,7 @@ create_rmpp_recv(struct ib_mad_agent_pri
        rmpp_recv->newwin = 1;
        rmpp_recv->seg_num = 1;
        rmpp_recv->last_ack = 0;
+       rmpp_recv->repwin = 1;
 
        mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
        rmpp_recv->tid = mad_hdr->tid;
@@ -591,6 +619,16 @@ static inline void adjust_last_ack(struc
                        break;
 }
 
+static void process_ds_ack(struct ib_mad_agent_private *agent,
+                          struct ib_mad_recv_wc *mad_recv_wc, int newwin)
+{
+       struct mad_rmpp_recv *rmpp_recv;
+
+       rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
+       if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
+               rmpp_recv->repwin = newwin;
+}
+
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
                             struct ib_mad_recv_wc *mad_recv_wc)
 {
@@ -616,8 +654,18 @@ static void process_rmpp_ack(struct ib_m
 
        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
-       if (!mad_send_wr)
-               goto out;       /* Unmatched ACK */
+       if (!mad_send_wr) {
+               if (!seg_num)
+                       process_ds_ack(agent, mad_recv_wc, newwin);
+               goto out;       /* Unmatched or DS RMPP ACK */
+       }
+
+       if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
+           (mad_send_wr->timeout)) {
+               spin_unlock_irqrestore(&agent->lock, flags);
+               ack_ds_ack(agent, mad_recv_wc);
+               return;         /* Repeated ACK for DS RMPP transaction */
+       }
 
        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
@@ -656,6 +704,9 @@ static void process_rmpp_ack(struct ib_m
                if (mad_send_wr->refcount == 1)
                        ib_reset_mad_timeout(mad_send_wr,
                                             mad_send_wr->send_buf.timeout_ms);
+               spin_unlock_irqrestore(&agent->lock, flags);
+               ack_ds_ack(agent, mad_recv_wc);
+               return;
        } else if (mad_send_wr->refcount == 1 &&
                   mad_send_wr->seg_num < mad_send_wr->newwin &&
                   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
@@ -772,6 +823,39 @@ out:
        return NULL;
 }
 
+static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
+{
+       struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
+       struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
+       struct mad_rmpp_recv *rmpp_recv;
+       struct ib_ah_attr ah_attr;
+       unsigned long flags;
+       int newwin = 1;
+
+       if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
+               goto out;
+
+       spin_lock_irqsave(&agent->lock, flags);
+       list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+               if (rmpp_recv->tid != mad_hdr->tid ||
+                   rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
+                   rmpp_recv->class_version != mad_hdr->class_version ||
+                   (rmpp_recv->method & IB_MGMT_METHOD_RESP))
+                       continue;
+               
+               if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
+                       continue;
+
+               if (rmpp_recv->slid == ah_attr.dlid) {
+                       newwin = rmpp_recv->repwin;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&agent->lock, flags);
+out:
+       return newwin;
+}
+
 int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
        struct ib_rmpp_mad *rmpp_mad;
@@ -787,7 +871,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_
                return IB_RMPP_RESULT_INTERNAL;
        }
 
-       mad_send_wr->newwin = 1;
+       mad_send_wr->newwin = init_newwin(mad_send_wr);
 
        /* We need to wait for the final ACK even if there isn't a response */
        mad_send_wr->refcount += (mad_send_wr->timeout == 0);
-- 
1.4.2
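
To restate the window handling in plain terms (a stand-alone model, not
kernel code): the responder remembers the window advertised by the DS ACK
against its completed receive, and starts its reply with that value, or
with 1 if no DS ACK was ever seen:

#include <stdio.h>

struct rmpp_recv_model {
        int complete;   /* request fully received and ACKed */
        int repwin;     /* window advertised by the requester's DS ACK */
};

/* Responder side: a DS ACK (seg_num == 0) arrives for a completed receive. */
static void ds_ack_received(struct rmpp_recv_model *recv, int newwin)
{
        if (recv->complete)
                recv->repwin = newwin;
}

/* Responder side: initial send window for the RMPP response. */
static int response_start_window(const struct rmpp_recv_model *recv)
{
        return recv ? recv->repwin : 1; /* window of 1 for non-DS peers */
}

int main(void)
{
        struct rmpp_recv_model recv = { .complete = 1, .repwin = 1 };

        printf("before DS ACK: start window %d\n", response_start_window(&recv));
        ds_ack_received(&recv, 16);
        printf("after DS ACK:  start window %d\n", response_start_window(&recv));
        return 0;
}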

From e13596c5861d2e8371bfbb4f8dc980134234f26d Mon Sep 17 00:00:00 2001
From: Sean Hefty <[EMAIL PROTECTED]>
Date: Mon, 28 Aug 2006 15:12:04 -0700
Subject: [PATCH] Remove unnecessary kthread.h include from mad_priv.h

The ib_mad module does not use a kthread function, but mad_priv.h includes
kthread.h.  Remove the include.

Signed-off-by: James Lentini <[EMAIL PROTECTED]>
Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/mad_priv.h |    1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d147f3b..1da9adb 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -39,7 +39,6 @@ #define __IB_MAD_PRIV_H__
 
 #include <linux/completion.h>
 #include <linux/pci.h>
-#include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
-- 
1.4.2

From 526b67f066a3afafc6459217d8e5820641d3af26 Mon Sep 17 00:00:00 2001
From: Sean Hefty <[EMAIL PROTECTED]>
Date: Mon, 28 Aug 2006 15:13:07 -0700
Subject: [PATCH] Remove unnecessary dma-mapping.h include from mad_rmpp.c.

Signed-off-by: James Lentini <[EMAIL PROTECTED]>
Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
---
 drivers/infiniband/core/mad_rmpp.c |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 26469c1..f2becdf 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -33,8 +33,6 @@
  * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
  */
 
-#include <linux/dma-mapping.h>
-
 #include "mad_priv.h"
 #include "mad_rmpp.h"
 
-- 
1.4.2
