>  drivers/infiniband/sw/rdmavt/qp.h |    5 -
>  include/rdma/rdma_vt.h            |  233 +++++++++++++++++++++++++++++++++++++
>  2 files changed, 233 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
> index 4e4709f..c80d326 100644
> --- a/drivers/infiniband/sw/rdmavt/qp.h
> +++ b/drivers/infiniband/sw/rdmavt/qp.h
> @@ -53,11 +53,6 @@
> 
>  #include <rdma/rdma_vt.h>
> 
> -struct rvt_qp {
> -     struct ib_qp *ibqp;
> -     /* Other stuff */
> -};
> -
>  struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
>                           struct ib_qp_init_attr *init_attr,
>                           struct ib_udata *udata);
> diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
> index 39a0737..8d3a41a 100644
> --- a/include/rdma/rdma_vt.h
> +++ b/include/rdma/rdma_vt.h
> @@ -109,6 +109,239 @@ struct rvt_lkey_table {
>  /* End Memmory Region */
> 
>  /*
> + * Things needed for the Queue Pair definition. Like the MR stuff above, the
> + * following should probably get moved to qp.h once drivers stop trying to
> + * make and manipulate their own QPs. For the few instances where a driver
> + * may need to look into a queue pair there should be a pointer to a driver
> + * private data structure that they can look at.
> + */
> +
> +/*
> + * These keep track of the copy progress within a memory region.
> + * Used by the verbs layer.
> + */
> +struct rvt_sge {
> +     struct rvt_mregion *mr;
> +     void *vaddr;            /* kernel virtual address of segment */
> +     u32 sge_length;         /* length of the SGE */
> +     u32 length;             /* remaining length of the segment */
> +     u16 m;                  /* current index: mr->map[m] */

Rename to cur_map?

> +     u16 n;                  /* current index: mr->map[m]->segs[n] */

cur_seg?

> +};
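
For my own clarity, this is roughly how I read the m/n pair being used once
a copy finishes the current segment. rvt_mregion isn't part of this hunk, so
the map[]->segs[] layout and the segs_per_map bound below are assumptions
lifted from the field comments above, not the real helper:

/* Hedged sketch only: advance an rvt_sge to the next segment of its MR.
 * The map[m]->segs[n] layout with per-segment vaddr/length, and the
 * segs_per_map bound, are assumed from the comments on m and n.
 */
static void rvt_sge_next_seg(struct rvt_sge *sge, u16 segs_per_map)
{
	if (++sge->n >= segs_per_map) {
		/* ran off the end of mr->map[m], move to the next map */
		sge->m++;
		sge->n = 0;
	}
	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
}
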
> +
> +/*
> + * Send work request queue entry.
> + * The size of the sg_list is determined when the QP is created and stored
> + * in qp->s_max_sge.
> + */
> +struct rvt_swqe {
> +     union {
> +             struct ib_send_wr wr;   /* don't use wr.sg_list */
> +             struct ib_ud_wr ud_wr;
> +             struct ib_reg_wr reg_wr;
> +             struct ib_rdma_wr rdma_wr;
> +             struct ib_atomic_wr atomic_wr;
> +     };
> +     u32 psn;                /* first packet sequence number */
> +     u32 lpsn;               /* last packet sequence number */
> +     u32 ssn;                /* send sequence number */
> +     u32 length;             /* total length of data in sg_list */
> +     struct rvt_sge sg_list[0];
> +};
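
Since sg_list[] is a flexible array, the create path has to size every entry
by s_max_sge. A minimal sketch of the per-WQE stride I'd expect it to use
(illustration only; the real sizing lives wherever rvt_create_qp allocates
the send queue):

/* Sketch: bytes per send WQE when each entry carries max_sge SGEs.
 * sizeof(struct rvt_swqe) covers the union and counters; the flexible
 * sg_list[] adds max_sge rvt_sge entries on top.
 */
static size_t rvt_swqe_size(u32 max_sge)
{
	return sizeof(struct rvt_swqe) + max_sge * sizeof(struct rvt_sge);
}
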
> +
> +/*
> + * Receive work request queue entry.
> + * The size of the sg_list is determined when the QP (or SRQ) is created
> + * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
> + */
> +struct rvt_rwqe {
> +     u64 wr_id;
> +     u8 num_sge;
> +     struct ib_sge sg_list[0];
> +};
> +
> +/*
> + * This structure is used to contain the head pointer, tail pointer,
> + * and receive work queue entries as a single memory allocation so
> + * it can be mmap'ed into user space.
> + * Note that the wq array elements are variable size so you can't
> + * just index into the array to get the N'th element;
> + * use get_rwqe_ptr() instead.

Can you add/use an entry_size field? Sketch of what I mean below the struct.


> + */
> +struct rvt_rwq {
> +     u32 head;               /* new work requests posted to the head */
> +     u32 tail;               /* receives pull requests from here. */
> +     struct rvt_rwqe wq[0];
> +};
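
To make the entry_size suggestion concrete, this is the shape of lookup I
have in mind; get_rwqe_ptr() itself isn't in this patch, so treat the helper
below as a sketch rather than the actual implementation:

/* Sketch: index into the variable-size wq[] array.  Every entry is an
 * rvt_rwqe followed by max_sge ib_sge entries, so the stride must be
 * computed; caching it in an entry_size field would save the multiply
 * on every lookup.
 */
static struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, u32 n)
{
	size_t entry_size = sizeof(struct rvt_rwqe) +
			    rq->max_sge * sizeof(struct ib_sge);

	return (struct rvt_rwqe *)((char *)rq->wq->wq + n * entry_size);
}
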
> +
> +struct rvt_rq {
> +     struct rvt_rwq *wq;
> +     u32 size;               /* size of RWQE array */
> +     u8 max_sge;
> +     /* protect changes in this struct */
> +     spinlock_t lock ____cacheline_aligned_in_smp;
> +};
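
And for the ring itself, here's how I'd expect the producer side to use
head/tail with the lock. Purely a sketch (ring-full check, barriers for the
user-mmap'ed case, and error handling left out), reusing the hypothetical
rvt_get_rwqe_ptr() from above:

/* Sketch: post one receive WQE at head under rq->lock; the consumer
 * side drains from tail.  Overflow checks and memory barriers are
 * intentionally omitted here.
 */
static void rvt_post_one_recv(struct rvt_rq *rq, struct ib_recv_wr *wr)
{
	struct rvt_rwqe *wqe;
	unsigned long flags;
	u32 next;
	int i;

	spin_lock_irqsave(&rq->lock, flags);
	wqe = rvt_get_rwqe_ptr(rq, rq->wq->head);
	wqe->wr_id = wr->wr_id;
	wqe->num_sge = wr->num_sge;
	for (i = 0; i < wr->num_sge; i++)
		wqe->sg_list[i] = wr->sg_list[i];

	next = rq->wq->head + 1;
	if (next >= rq->size)
		next = 0;
	rq->wq->head = next;
	spin_unlock_irqrestore(&rq->lock, flags);
}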