Here's the updated implementation.  It compiles, but that's it.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
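
For reviewers, here's a rough sketch of the active-side call sequence I have
in mind.  It's untested and leans on assumptions not in this patch:
rdma_cma_event_handler is taken to return void, my_handler/my_connect and the
conn_param values are placeholders, and since rdma_cma_get_route() still
returns -ENOSYS the connect path can't actually run yet.

/* Rough usage sketch -- untested; for discussion only. */

static void my_handler(struct rdma_cma_id *cma_id,
                       struct rdma_cma_event *event)
{
        if (event->event == RDMA_CMA_EVENT_ESTABLISHED)
                printk(KERN_DEBUG "connected, QP 0x%x ready\n",
                       cma_id->qp->qp_num);
}

static int my_connect(struct ib_device *device, struct ib_qp *qp,
                      struct sockaddr *src, struct sockaddr *dst)
{
        struct rdma_cma_conn_param param = {
                .qp = qp,
                .responder_resources = 1,
                .initiator_depth = 1,
                .retry_count = 5,
                .rnr_retry_count = 7,
        };
        struct rdma_cma_id *id;
        int ret;

        id = rdma_cma_create_id(device, NULL, my_handler);
        if (IS_ERR(id))
                return PTR_ERR(id);

        ret = rdma_cma_get_route(id, src, dst);   /* -ENOSYS for now */
        if (!ret)
                ret = rdma_cma_connect(id, &param);
        if (ret)
                rdma_cma_destroy_id(id);
        return ret;
}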


/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <rdma/rdma_cma.h>
#include <rdma/ib_cm.h>

MODULE_AUTHOR("Guy German");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define PFX "rdma_cma: "

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 3

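/*
 * Private state behind each rdma_cma_id handed to consumers: the
 * underlying IB CM id plus a lock for future state tracking.
 */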
struct cma_id_private {
        struct rdma_cma_id cma_id;
        struct ib_cm_id *cm_id;

        /* TODO: add state if needed */
        /* TODO: might need refcount for route queries */
        /* atomic_t refcount; */
        spinlock_t lock;
};

static struct cma_id_private *cma_alloc_id(struct ib_device *device,
                                           void *context,
                                           rdma_cma_event_handler event_handler)
{
        struct cma_id_private *cma_id_priv;

        cma_id_priv = kmalloc(sizeof *cma_id_priv, GFP_KERNEL);
        if (!cma_id_priv)
                return NULL;
        memset(cma_id_priv, 0, sizeof *cma_id_priv);

        cma_id_priv->cma_id.device = device;
        cma_id_priv->cma_id.context = context;
        cma_id_priv->cma_id.event_handler = event_handler;
        spin_lock_init(&cma_id_priv->lock);

        return cma_id_priv;
}

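/*
 * Move the connection's QP to RTR, using the attributes the IB CM
 * derives from the connection parameters exchanged so far.
 */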
static int cma_modify_ib_qp_rtr(struct cma_id_private *cma_id_priv)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cma_id_priv->cm_id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.rq_psn = cma_id_priv->cma_id.qp->qp_num;
        return ib_modify_qp(cma_id_priv->cma_id.qp, &qp_attr, qp_attr_mask);
}

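/* Move the connection's QP from RTR to RTS. */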
static int cma_modify_ib_qp_rts(struct cma_id_private *cma_id_priv)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cma_id_priv->cm_id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(cma_id_priv->cma_id.qp, &qp_attr, qp_attr_mask);
}

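/*
 * A REQ arrived on a listening id: allocate a new id for the incoming
 * connection, inheriting the listener's context and event handler.
 */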
static struct cma_id_private *cma_req_recv(struct cma_id_private *listen_id,
                                           struct ib_cm_event *ib_event)
{
        struct cma_id_private *cma_id_priv;
        struct rdma_route *route;

        cma_id_priv = cma_alloc_id(listen_id->cma_id.device,
                                   listen_id->cma_id.context,
                                   listen_id->cma_id.event_handler);
        if (!cma_id_priv)
                return NULL;

        route = kmalloc(sizeof *route, GFP_KERNEL);
        if (!route)
                goto err;
        memset(route, 0, sizeof *route);

        /* TODO: get route information from private data */
        route->path_rec = *ib_event->param.req_rcvd.primary_path;
        cma_id_priv->cma_id.route = route;

        return cma_id_priv;
err:
        kfree(cma_id_priv);
        return NULL;
}

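/*
 * Active side: a REP arrived, so transition the QP to RTR then RTS and
 * confirm with an RTU.  Any failure rejects the connection.
 */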
static enum rdma_cma_event_type cma_rep_recv(struct cma_id_private *cma_id_priv)
{
        int ret;

        ret = cma_modify_ib_qp_rtr(cma_id_priv);
        if (ret)
                goto reject;

        ret = cma_modify_ib_qp_rts(cma_id_priv);
        if (ret)
                goto reject;
        
        ret = ib_send_cm_rtu(cma_id_priv->cm_id, NULL, 0);
        if (ret)
                goto reject;

        return RDMA_CMA_EVENT_ESTABLISHED;
reject:
        /* TODO: set QP state to ERROR? INIT? RESET? */
        rdma_cma_reject(&cma_id_priv->cma_id, NULL, 0);
        return RDMA_CMA_EVENT_CONNECT_ERROR;
}

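/* Passive side: the RTU arrived, so move the QP to RTS. */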
static enum rdma_cma_event_type cma_rtu_recv(struct cma_id_private *cma_id_priv)
{
        int ret;

        ret = cma_modify_ib_qp_rts(cma_id_priv);
        if (ret)
                goto reject;

        return RDMA_CMA_EVENT_ESTABLISHED;
reject:
        /* TODO: set QP state to ERROR? INIT? RESET? */
        rdma_cma_reject(&cma_id_priv->cma_id, NULL, 0);
        return RDMA_CMA_EVENT_CONNECT_ERROR;
}

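/*
 * IB CM callback: translate IB CM events into RDMA CM events and hand
 * them to the consumer's event handler.
 */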
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct cma_id_private *cma_id_priv;
        struct rdma_cma_event event;

        cma_id_priv = cm_id->context;

        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CMA_EVENT_UNREACHABLE;
                break;
        case IB_CM_REQ_RECEIVED:
                cma_id_priv = cma_req_recv(cma_id_priv, ib_event);
                if (!cma_id_priv)
                        return -ENOMEM;
                /* Bind the new id to the cm_id the IB CM created for
                   this REQ, so accept/reject find the right cm_id. */
                cma_id_priv->cm_id = cm_id;
                cm_id->context = cma_id_priv;
                event.event = RDMA_CMA_EVENT_CONNECT_REQUEST;
                break;
        case IB_CM_REP_RECEIVED:
                event.event = cma_rep_recv(cma_id_priv);
                break;
        case IB_CM_RTU_RECEIVED:
                event.event = cma_rtu_recv(cma_id_priv);
                break;
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREQ_ERROR:
        case IB_CM_DREP_RECEIVED:
                event.event = RDMA_CMA_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
        case IB_CM_MRA_RECEIVED:
                /* Ignore; don't report an uninitialized event upward. */
                return 0;
        case IB_CM_REJ_RECEIVED:
                /* TODO: set QP state to ERROR? INIT? RESET? */
                event.event = RDMA_CMA_EVENT_REJECTED;
                break;
        default:
                printk(KERN_ERR PFX "unexpected IB CM event: %d\n",
                       ib_event->event);
                return 0;
        }

        event.private_data = ib_event->private_data;
        cma_id_priv->cma_id.event_handler(&cma_id_priv->cma_id, &event);
        return 0;
}

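/*
 * Allocate a communication identifier.  Only IB CAs are supported so
 * far; other node types return -ENOSYS.
 */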
struct rdma_cma_id *rdma_cma_create_id(struct ib_device *device, void *context,
                                       rdma_cma_event_handler event_handler)
{
        struct cma_id_private *cma_id_priv;
        int ret;

        cma_id_priv = cma_alloc_id(device, context, event_handler);
        if (!cma_id_priv)
                return ERR_PTR(-ENOMEM);

        switch (device->node_type) {
        case IB_NODE_CA:
                cma_id_priv->cm_id = ib_create_cm_id(device, cma_ib_handler,
                                                     cma_id_priv);
                ret = IS_ERR(cma_id_priv->cm_id) ?
                      PTR_ERR(cma_id_priv->cm_id) : 0;
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto err;

        return &cma_id_priv->cma_id;
err:
        kfree(cma_id_priv);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(rdma_cma_create_id);

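/* Release a communication identifier and any route attached to it. */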
void rdma_cma_destroy_id(struct rdma_cma_id *cma_id)
{
        struct cma_id_private *cma_id_priv;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);
        
        /* TODO: cancel route lookup if active */

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ib_destroy_cm_id(cma_id_priv->cm_id);
                break;
        default:
                break;
        }

        kfree(cma_id->route);
        kfree(cma_id_priv);
}
EXPORT_SYMBOL(rdma_cma_destroy_id);

static __be64 cma_get_service_id(struct sockaddr *addr)
{
        /* TODO: write me */
        return cpu_to_be64(42);
}

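/*
 * Listen for connection requests on the given local address.  The
 * address is mapped to an IB service id (stubbed out for now).
 */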
int rdma_cma_listen(struct rdma_cma_id *cma_id, struct sockaddr *addr)
{
        struct cma_id_private *cma_id_priv;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        cma_id->route = kmalloc(sizeof *cma_id->route, GFP_KERNEL);
        if (!cma_id->route)
                return -ENOMEM;
        memset(cma_id->route, 0, sizeof *cma_id->route);

        cma_id->route->src_addr = *addr;

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = ib_cm_listen(cma_id_priv->cm_id,
                                   cma_get_service_id(addr), 0);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto err;

        return 0;
err:
        kfree(cma_id->route);
        cma_id->route = NULL;
        return ret;
}
EXPORT_SYMBOL(rdma_cma_listen);

/*
static void cma_path_handler(u64 req_id, void *context, int rec_num)
{
        struct cma_context *cma_id = context;
        enum ib_cma_event event;
        int status = 0;

        if (rec_num <= 0) {
                event = IB_CMA_EVENT_UNREACHABLE;
                goto error;
        }

        cma_id->cma_param.primary_path = &cma_id->cma_path;
        cma_id->cma_param.alternate_path = NULL;

        printk(KERN_DEBUG PFX "%s: dlid=%d slid=%d pkey=%d mtu=%d sid=%llx "
                "qpn=%d qpt=%d psn=%d prd=%s respres=%d rcm=%d flc=%d "
                "cmt=%d rtrc=%d rntrtr=%d maxcm=%d \n",__func__,
                cma_id->cma_param.primary_path->dlid ,
                cma_id->cma_param.primary_path->slid ,
                cma_id->cma_param.primary_path->pkey ,
                cma_id->cma_param.primary_path->mtu ,
                cma_id->cma_param.service_id,
                cma_id->cma_param.qp_num,
                cma_id->cma_param.qp_type,
                cma_id->cma_param.starting_psn,
                (char *)cma_id->cma_param.private_data,
                cma_id->cma_param.responder_resources,
                cma_id->cma_param.remote_cm_response_timeout,
                cma_id->cma_param.flow_control,
                cma_id->cma_param.local_cm_response_timeout,
                cma_id->cma_param.retry_count,
                cma_id->cma_param.rnr_retry_count,
                cma_id->cma_param.max_cm_retries);

        status = ib_send_cm_req(cma_id->cm_id, &cma_id->cma_param);
        if (status) {
                printk(KERN_ERR PFX "%s: cm_req failed %d\n",__func__, status);
                event = IB_CMA_EVENT_REJECTED;
                goto error;
        }

        return;

error:
        printk(KERN_ERR PFX "%s: return error %d \n",__func__, status);
        cma_connection_callback(cma_id, event, NULL);
}

static void cma_route_handler(u64 req_id, void *context, int rec_num)
{
        struct cma_context *cma_id = context;
        enum ib_cma_event event;
        int status = 0;
        
        if (rec_num <= 0) {
                event = IB_CMA_EVENT_UNREACHABLE;
                goto error;
        }
        cma_id->ibat_comp.fn = &cma_path_handler;
        cma_id->ibat_comp.context = cma_id;

        status = ib_at_paths_by_route(&cma_id->cma_route, 0,
                                      &cma_id->cma_path, 1,
                                      &cma_id->ibat_comp);

        if (status) {
                event = IB_CMA_EVENT_DISCONNECTED;
                goto error;
        }
        return;

error:
        printk(KERN_ERR PFX "%s: return error %d \n",__func__, status);
        cma_connection_callback(cma_id, event ,NULL);
}
*/

static int cma_get_route_ib(struct cma_id_private *cma_id_priv,
                            struct sockaddr *src_addr,
                            struct sockaddr *dest_addr)
{
        /* TODO: Get remote GID from ARP table, query for path record */
        return -ENOSYS;
}

int rdma_cma_get_route(struct rdma_cma_id *cma_id,
                       struct sockaddr *src_addr, struct sockaddr *dest_addr)
{
        struct cma_id_private *cma_id_priv;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_get_route_ib(cma_id_priv, src_addr, dest_addr);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_cma_get_route);

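/*
 * Build and send an IB CM REQ from the connection parameters and the
 * route attached to the id.  The QP number doubles as starting PSN.
 */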
static int cma_connect_ib(struct cma_id_private *cma_id_priv,
                          struct rdma_cma_conn_param *conn_param)
{
        struct ib_cm_req_param req;
        struct rdma_route *route;

        route = cma_id_priv->cma_id.route;

        memset(&req, 0, sizeof req);
        req.primary_path = &route->path_rec;
        req.service_id = cma_get_service_id(&route->dest_addr);
        req.qp_num = conn_param->qp->qp_num;
        req.qp_type = IB_QPT_RC;
        req.starting_psn = req.qp_num;
        req.private_data = conn_param->private_data;
        req.private_data_len = conn_param->private_data_len;
        req.responder_resources = conn_param->responder_resources;
        req.initiator_depth = conn_param->initiator_depth;
        req.flow_control = conn_param->flow_control;
        req.retry_count = conn_param->retry_count;
        req.rnr_retry_count = conn_param->rnr_retry_count;
        req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.max_cm_retries = CMA_MAX_CM_RETRIES;
        req.srq = conn_param->qp->srq ? 1 : 0;

        return ib_send_cm_req(cma_id_priv->cm_id, &req);
}

int rdma_cma_connect(struct rdma_cma_id *cma_id,
                     struct rdma_cma_conn_param *conn_param)
{
        struct cma_id_private *cma_id_priv;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        cma_id->qp = conn_param->qp;

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_connect_ib(cma_id_priv, conn_param);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_cma_connect);

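/*
 * Passive side accept: move the QP to RTR, then send the REP.  The QP
 * goes to RTS when the RTU comes back (cma_rtu_recv).
 */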
static int cma_accept_ib(struct cma_id_private *cma_id_priv,
                         struct rdma_cma_conn_param *conn_param)
{
        struct ib_cm_rep_param rep;
        int ret;

        ret = cma_modify_ib_qp_rtr(cma_id_priv);
        if (ret)
                return ret;

        memset(&rep, 0, sizeof rep);
        rep.qp_num = conn_param->qp->qp_num;
        rep.starting_psn = rep.qp_num;
        rep.private_data = conn_param->private_data;
        rep.private_data_len = conn_param->private_data_len;
        rep.responder_resources = conn_param->responder_resources;
        rep.initiator_depth = conn_param->initiator_depth;
        rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
        rep.failover_accepted = 0;
        rep.flow_control = conn_param->flow_control;
        rep.rnr_retry_count = conn_param->rnr_retry_count;
        rep.srq = conn_param->qp->srq ? 1 : 0;

        return ib_send_cm_rep(cma_id_priv->cm_id, &rep);
}

int rdma_cma_accept(struct rdma_cma_id *cma_id,
                     struct rdma_cma_conn_param *conn_param)
{
        struct cma_id_private *cma_id_priv;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        cma_id->qp = conn_param->qp;

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_accept_ib(cma_id_priv, conn_param);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                goto reject;

        return 0;
reject:
        /* TODO: set QP state to ERROR? INIT? RESET? */
        rdma_cma_reject(cma_id, NULL, 0);
        return ret;
}
EXPORT_SYMBOL(rdma_cma_accept);

int rdma_cma_reject(struct rdma_cma_id *cma_id,
                    const void *private_data, u8 private_data_len)
{
        struct cma_id_private *cma_id_priv;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = ib_send_cm_rej(cma_id_priv->cm_id,
                                     IB_CM_REJ_CONSUMER_DEFINED,
                                     NULL, 0, private_data, private_data_len);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_cma_reject);

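/*
 * Tear down the connection: move the QP to the error state, then send
 * a DREQ, or a DREP if the remote side already initiated disconnect.
 */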
int rdma_cma_disconnect(struct rdma_cma_id *cma_id)
{
        struct cma_id_private *cma_id_priv;
        struct ib_qp_attr qp_attr;
        int ret;

        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);

        /* TODO: Should we transition here?  Compare with error handling
           processing accept or a CM reply. */
        qp_attr.qp_state = IB_QPS_ERR;
        ret = ib_modify_qp(cma_id_priv->cma_id.qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                /* Initiate or respond to a disconnect. */
                ret = ib_send_cm_dreq(cma_id_priv->cm_id, NULL, 0);
                if (ret)
                        ib_send_cm_drep(cma_id_priv->cm_id, NULL, 0);
                break;
        default:
                break;
        }

        return 0;
}
EXPORT_SYMBOL(rdma_cma_disconnect);

static int cma_init(void)
{
        return 0;
}

static void cma_cleanup(void)
{
}

module_init(cma_init);
module_exit(cma_cleanup);


