This patch separates listen requests from peer-to-peer connection requests.
Peer-to-peer requests are disabled as a result (they were broken).
Listen requests were slightly optimized.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>

Index: core/cm.c
===================================================================
--- core/cm.c   (revision 1607)
+++ core/cm.c   (working copy)
@@ -60,7 +60,8 @@ static struct ib_client cm_client = {
 
 static struct ib_cm {
        spinlock_t lock;
-       struct rb_root service_table;
+       struct rb_root listen_service_table;
+       /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
@@ -109,6 +110,7 @@ struct cm_id_private {
        int timeout_ms;
        u8 max_cm_retries;
        u8 passive;
+       u8 peer_to_peer;
 };
 
 struct cm_recv_work {
@@ -270,9 +272,9 @@ static struct cm_id_private * cm_acquire
        return cm_id_priv;
 }
 
-static void cm_insert_service(struct cm_id_private *cm_id_priv)
+static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 {
-       struct rb_node **link = &cm.service_table.rb_node;
+       struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        u64 service_id = cm_id_priv->id.service_id;
@@ -281,18 +283,23 @@ static void cm_insert_service(struct cm_
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
+               if ((cur_cm_id_priv->id.service_mask & service_id) ==
+                   (cur_cm_id_priv->id.service_mask &
+                    cur_cm_id_priv->id.service_id))
+                       return cm_id_priv;
                if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
-       rb_insert_color(&cm_id_priv->service_node, &cm.service_table);
+       rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
+       return NULL;
 }
 
-static struct cm_id_private * cm_find_service(u64 service_id)
+static struct cm_id_private * cm_find_listen(u64 service_id)
 {
-       struct rb_node *node = cm.service_table.rb_node;
+       struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
 
        while (node) {
@@ -300,7 +307,6 @@ static struct cm_id_private * cm_find_se
                if ((cm_id_priv->id.service_mask & service_id) ==
                    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
                        return cm_id_priv;
-
                if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else
@@ -521,7 +527,7 @@ retest:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
-               rb_erase(&cm_id_priv->service_node, &cm.service_table);
+               rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
@@ -578,23 +584,25 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
                 u64 service_id,
                 u64 service_mask)
 {
-       struct cm_id_private *cm_id_priv;
+       struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;
 
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        BUG_ON(cm_id->state != IB_CM_IDLE);
 
+       cm_id->state = IB_CM_LISTEN;
        cm_id->service_id = service_id;
        cm_id->service_mask = service_mask ? service_mask : ~0ULL;
 
        spin_lock_irqsave(&cm.lock, flags);
-       if (!cm_find_service(service_id)) {
-               cm_id->state = IB_CM_LISTEN;
-               cm_insert_service(cm_id_priv);
-       } else
-               ret = -EBUSY;
+       cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);
+
+       if (cur_cm_id_priv) {
+               cm_id->state = IB_CM_IDLE;
+               ret = -EBUSY;
+       }
        return ret;
 }
 EXPORT_SYMBOL(ib_cm_listen);
@@ -736,6 +744,8 @@ int ib_send_cm_req(struct ib_cm_id *cm_i
        if (ret)
                goto out;
 
+       cm_id->service_id = param->service_id;
+       cm_id->service_mask = ~0ULL;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
@@ -759,9 +769,8 @@ int ib_send_cm_req(struct ib_cm_id *cm_i
         * simplifies error recovery if the send fails.
         */
        if (param->peer_to_peer) {
-               spin_lock_irqsave(&cm.lock, flags);
-               cm_insert_service(cm_id_priv);
-               spin_unlock_irqrestore(&cm.lock, flags);
+               ret = -EINVAL;
+               goto out;
        }
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -770,11 +779,9 @@ int ib_send_cm_req(struct ib_cm_id *cm_i
 
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               if (param->peer_to_peer) {
-                       spin_lock_irqsave(&cm.lock, flags);
-                       rb_erase(&cm_id_priv->service_node, &cm.service_table);
-                       spin_unlock_irqrestore(&cm.lock, flags);
-               }
+               /* if (param->peer_to_peer) {
+                       cleanup peer_service_table
+               } */
                cm_free_msg(cm_id_priv->msg);
                goto out;
        }
@@ -842,7 +849,7 @@ static void cm_req_handler(struct cm_rec
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        struct cm_req_msg *req_msg;
-       unsigned long flags, flags2;
+       unsigned long flags;
        struct ib_cm_req_event_param *param;
        struct ib_wc *wc;
        int ret;
@@ -865,18 +872,10 @@ static void cm_req_handler(struct cm_rec
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
-       /* Find matching listen/peer request. */
-       cur_cm_id_priv = cm_find_service(req_msg->service_id);
-       if (!cur_cm_id_priv) {
-               spin_unlock_irqrestore(&cm.lock, flags);
-               /* todo: reject with no match */
-               goto out; /* No match. */
-       }
-       spin_lock_irqsave(&cur_cm_id_priv->lock, flags2);
-       switch (cur_cm_id_priv->id.state) {
-       case IB_CM_LISTEN:
+       /* Find matching listen request. */
+       cur_cm_id_priv = cm_find_listen(req_msg->service_id);
+       if (cur_cm_id_priv) {
                atomic_inc(&cur_cm_id_priv->refcount);
-               spin_unlock_irqrestore(&cur_cm_id_priv->lock, flags2);
                cm_insert_remote_id(cm_id_priv);
                spin_unlock_irqrestore(&cm.lock, flags);
 
@@ -885,9 +884,14 @@ static void cm_req_handler(struct cm_rec
                cm_id_priv->id.service_id = req_msg->service_id;
                cm_id_priv->id.service_mask = ~0ULL;
                cm_id_priv->id.state = IB_CM_REQ_RCVD;
-               break;
-       case IB_CM_REQ_SENT:
-               /* Process peer requests. */
+       } else {
+               /* Search for a peer request. */
+                /* todo: fix peer-to-peer */
+               if (!cur_cm_id_priv) {
+                       spin_unlock_irqrestore(&cm.lock, flags);
+                       /* todo: reject with no match */
+                       goto out;
+               }
                if (cm_is_active_peer(recv_work->port->ca_guid,
                                      req_msg->local_ca_guid,
                                      cur_cm_id_priv->local_qpn,
@@ -897,22 +901,17 @@ static void cm_req_handler(struct cm_rec
                }
                atomic_inc(&cur_cm_id_priv->refcount);
                cur_cm_id_priv->id.state = IB_CM_REQ_RCVD;
-               spin_unlock_irqrestore(&cur_cm_id_priv->lock, flags2);
-               rb_erase(&cur_cm_id_priv->service_node, &cm.service_table);
+               /*rb_erase(&cur_cm_id_priv->service_node, &cm.service_table);*/
+               cur_cm_id_priv->peer_to_peer = 0;
                cur_cm_id_priv->remote_ca_guid = req_msg->local_ca_guid;
                cur_cm_id_priv->id.remote_id = req_msg->local_comm_id;
                cm_insert_remote_id(cur_cm_id_priv);
                spin_unlock_irqrestore(&cm.lock, flags);
 
                ib_cancel_mad(recv_work->port->mad_agent,
-                             (unsigned long) cur_cm_id_priv->msg);
+                       (unsigned long) cur_cm_id_priv->msg);
                ib_destroy_cm_id(&cm_id_priv->id);
                cm_id_priv = cur_cm_id_priv;
-               break;
-       default:
-               spin_unlock_irqrestore(&cm.lock, flags);
-               /* todo: reject with no match */
-               goto out; /* No match. */
        }
        cm_id_priv->port = recv_work->port;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
@@ -1151,6 +1150,15 @@ static void cm_rep_handler(struct cm_rec
        ib_cancel_mad(recv_work->port->mad_agent,
                      (unsigned long) cm_id_priv->msg);
 
+       /* todo: handle peer_to_peer
+       if (cm_id_priv->peer_to_peer) {
+               cm_id_priv->peer_to_peer = 0;
+               spin_lock_irqsave(&cm.lock, flags);
+               rb_erase(&cm_id_priv->service_node, &cm.service_table);
+               spin_unlock_irqrestore(&cm.lock, flags);
+       }
+       */
+
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
@@ -1917,6 +1925,8 @@ int ib_send_cm_sidr_req(struct ib_cm_id 
        if (ret)
                goto out;
 
+       cm_id->service_id = param->service_id;
+       cm_id->service_mask = ~0ULL;
        cm_id_priv->timeout_ms = param->timeout_ms;
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_set_ah_attr(&cm_id_priv->ah_attr, cm_id_priv->port->port_num,
@@ -1980,8 +1990,8 @@ static void cm_sidr_req_handler(struct c
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
-       cur_cm_id_priv = cm_find_service(sidr_req_msg->service_id);
-       if (!cur_cm_id_priv || cur_cm_id_priv->id.state != IB_CM_LISTEN) {
+       cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
+       if (!cur_cm_id_priv) {
                rb_erase(&cm_id_priv->remote_id_node, &cm.remote_sidr_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                /* todo: reject with no match */
@@ -2405,7 +2415,7 @@ static int __init ib_cm_init(void)
 
        memset(&cm, 0, sizeof cm);
        spin_lock_init(&cm.lock);
-       cm.service_table = RB_ROOT;
+       cm.listen_service_table = RB_ROOT;
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
_______________________________________________
openib-general mailing list
[email protected]
http://openib.org/mailman/listinfo/openib-general

To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general

Reply via email to