rdma_join_multicast() allocates struct cma_multicast and then proceeds to join
a multicast group. However, the join operation completes in another context,
and the allocated struct could be released if the user destroys either the
rdma_id object or decides to leave the multicast group while the join is in
progress. This patch uses reference counting to avoid such a situation. It
also protects removal from id_priv->mc_list in cma_leave_mc_groups().

Signed-off-by: Eli Cohen <[email protected]>
---
 drivers/infiniband/core/cma.c |   23 +++++++++++++++++++----
 1 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 851de83..8fee477 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -157,6 +157,7 @@ struct cma_multicast {
        struct list_head        list;
        void                    *context;
        struct sockaddr_storage addr;
+       atomic_t                refcount;
 };
 
 struct cma_work {
@@ -290,6 +291,12 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
                complete(&cma_dev->comp);
 }
 
+void cma_deref_mc(struct cma_multicast *mc)
+{
+       if (atomic_dec_and_test(&mc->refcount))
+               kfree(mc);
+}
+
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 {
        list_del(&id_priv->list);
@@ -822,13 +829,17 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 {
        struct cma_multicast *mc;
 
+       spin_lock_irq(&id_priv->lock);
        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
+               spin_unlock_irq(&id_priv->lock);
                ib_sa_free_multicast(mc->multicast.ib);
-               kfree(mc);
+               cma_deref_mc(mc);
+               spin_lock_irq(&id_priv->lock);
        }
+       spin_unlock_irq(&id_priv->lock);
 }
 
 void rdma_destroy_id(struct rdma_cm_id *id)
@@ -2643,7 +2654,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        id_priv = mc->id_priv;
        if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
            cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
-               return 0;
+               goto out;
 
        mutex_lock(&id_priv->qp_mutex);
        if (!status && id_priv->id.qp)
@@ -2669,10 +2680,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
                cma_exch(id_priv, CMA_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
-               return 0;
+               goto out;
        }
 
        mutex_unlock(&id_priv->handler_mutex);
+out:
+       cma_deref_mc(mc);
        return 0;
 }
 
@@ -2759,11 +2772,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
        memcpy(&mc->addr, addr, ip_addr_size(addr));
        mc->context = context;
        mc->id_priv = id_priv;
+       atomic_set(&mc->refcount, 1);
 
        spin_lock(&id_priv->lock);
        list_add(&mc->list, &id_priv->mc_list);
        spin_unlock(&id_priv->lock);
 
+       atomic_inc(&mc->refcount);
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = cma_join_ib_multicast(id_priv, mc);
@@ -2800,7 +2815,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
                                                &mc->multicast.ib->rec.mgid,
                                                mc->multicast.ib->rec.mlid);
                        ib_sa_free_multicast(mc->multicast.ib);
-                       kfree(mc);
+                       cma_deref_mc(mc);
                        return;
                }
        }
-- 
1.6.3.3

_______________________________________________
general mailing list
[email protected]
http://lists.openfabrics.org/cgi-bin/mailman/listinfo/general

To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general

Reply via email to