This patch makes some general cleanup/consistency improvements to RMPP handling.

* Updates MAD API documentation.
* Saves RMPP information when a MAD is created, avoiding the need to
  re-calculate it when it is sent.
* Hides RMPP segmentation details from the user.  Makes ib_rmpp_segment
  an internal data structure, and changes ib_get_rmpp_segment to return
  the data buffer directly.
* ib_get_rmpp_segment is updated to work with all segment numbers, rather
  than just segments >= 2.
* Fixes RMPP usage of ib_create_send_mad when allocating non-data MADs.
* Overall, this reduces about 50 lines of code.

This patch is applied on top of the patch that removes the receive-side data
copy for userspace, as it depends on that change.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>

---

Index: core/mad.c
===================================================================
--- core/mad.c  (revision 5552)
+++ core/mad.c  (working copy)
@@ -765,18 +765,16 @@ out:
        return ret;
 }
 
-static int get_buf_length(int hdr_len, int data_len)
+static int get_pad_size(int hdr_len, int data_len)
 {
        int seg_size, pad;
 
        seg_size = sizeof(struct ib_mad) - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
-               if (pad == seg_size)
-                       pad = 0;
+               return pad == seg_size ? 0 : pad;
        } else
-               pad = seg_size;
-       return hdr_len + data_len + pad;
+               return seg_size;
 }
 
 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
@@ -789,40 +787,44 @@ static void free_send_rmpp_list(struct i
        }
 }
 
-static inline int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
-                                      int message_size, int hdr_len,
-                                      int data_len, u8 rmpp_version,
-                                      gfp_t gfp_mask)
-{
-       struct ib_rmpp_segment *seg;
-       struct ib_rmpp_mad *rmpp_mad = send_wr->send_buf.mad;
-       int seg_size, i = 2;
-
-       rmpp_mad->rmpp_hdr.paylen_newwin =
-                       cpu_to_be32(hdr_len - IB_MGMT_RMPP_HDR + data_len);
-       rmpp_mad->rmpp_hdr.rmpp_version = rmpp_version;
-       rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
-       ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
-       send_wr->total_length = message_size;
-       /* allocate RMPP buffers */
-       message_size -= sizeof(struct ib_mad);
-       seg_size = sizeof(struct ib_mad) - hdr_len;
-       while (message_size > 0) {
-               seg = kmalloc(sizeof(struct ib_rmpp_segment) + seg_size,
-                                    gfp_mask);
+static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
+                               gfp_t gfp_mask)
+{
+       struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
+       struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
+       struct ib_rmpp_segment *seg = NULL;
+       int left, seg_size, pad;
+
+       send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
+       seg_size = send_buf->seg_size;
+       pad = send_wr->pad;
+
+       /* Allocate data segments. */
+       for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
+               seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
                if (!seg) {
-                       printk(KERN_ERR "ib_create_send_mad: RMPP mem "
+                       printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
                               "alloc failed for len %zd, gfp %#x\n",
-                              sizeof(struct ib_rmpp_segment) + seg_size,
-                              gfp_mask);
+                              sizeof (*seg) + seg_size, gfp_mask);
                        free_send_rmpp_list(send_wr);
                        return -ENOMEM;
                }
-               seg->size = seg_size;
-               seg->num = i++;
+               seg->num = ++send_buf->seg_count;
                list_add_tail(&seg->list, &send_wr->rmpp_list);
-               message_size -= seg_size;
        }
+
+       /* Zero any padding */
+       if (pad)
+               memset(seg->data + seg_size - pad, 0, pad);
+
+       rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
+                                         agent.rmpp_version;
+       rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
+       ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+
+       send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
+                                       struct ib_rmpp_segment, list);
+       send_wr->last_ack_seg = send_wr->cur_seg;
        return 0;
 }
 
@@ -834,28 +836,30 @@ struct ib_mad_send_buf * ib_create_send_
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
-       int length, message_size, ret;
+       int pad, message_size, ret, size;
        void *buf;
 
        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
-       message_size = get_buf_length(hdr_len, data_len);
+       pad = get_pad_size(hdr_len, data_len);
+       message_size = hdr_len + data_len + pad;
 
        if ((!mad_agent->rmpp_version &&
             (rmpp_active || message_size > sizeof(struct ib_mad))) ||
            (!rmpp_active && message_size > sizeof(struct ib_mad)))
                return ERR_PTR(-EINVAL);
 
-       length = sizeof *mad_send_wr + message_size;
-       buf = kzalloc(sizeof *mad_send_wr + sizeof(struct ib_mad), gfp_mask);
-
+       size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
+       buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
        if (!buf)
                return ERR_PTR(-ENOMEM);
 
-       mad_send_wr = buf + sizeof(struct ib_mad);
+       mad_send_wr = buf + size;
        INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
        mad_send_wr->send_buf.mad = buf;
-       mad_send_wr->mad_payload = buf + hdr_len;
+       mad_send_wr->send_buf.hdr_len = hdr_len;
+       mad_send_wr->send_buf.data_len = data_len;
+       mad_send_wr->pad = pad;
 
        mad_send_wr->mad_agent_priv = mad_agent_priv;
        mad_send_wr->sg_list[0].length = hdr_len;
@@ -871,13 +875,9 @@ struct ib_mad_send_buf * ib_create_send_
        mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
        mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
        mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
-       mad_send_wr->last_ack_seg = NULL;
-       mad_send_wr->cur_seg = NULL;
 
        if (rmpp_active) {
-               ret = alloc_send_rmpp_list(mad_send_wr, message_size, hdr_len,
-                                          data_len, mad_agent->rmpp_version,
-                                          gfp_mask);
+               ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
                if (ret) {
                        kfree(buf);
                        return ERR_PTR(ret);
@@ -890,55 +890,37 @@ struct ib_mad_send_buf * ib_create_send_
 }
 EXPORT_SYMBOL(ib_create_send_mad);
 
-struct ib_rmpp_segment *ib_get_segment(struct ib_mad_send_wr_private *mad_send_wr,
-                                      int seg_num)
+void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 {
-       struct ib_rmpp_segment *seg;
+       struct ib_mad_send_wr_private *mad_send_wr;
+       struct list_head *list;
 
-       if (seg_num == 2) {
-               mad_send_wr->cur_seg = container_of(mad_send_wr->rmpp_list.next,
-                                                   struct ib_rmpp_segment, list);
-               return mad_send_wr->cur_seg;
-       }
-
-       /* get first list entry if was not already done */
-       if (!mad_send_wr->cur_seg)
-               mad_send_wr->cur_seg = container_of(mad_send_wr->rmpp_list.next,
-                                                   struct ib_rmpp_segment, list);
-
-       if (mad_send_wr->cur_seg->num == seg_num)
-               return mad_send_wr->cur_seg;
-       else if (mad_send_wr->cur_seg->num < seg_num) {
-               list_for_each_entry(seg, &mad_send_wr->cur_seg->list, list) {
-                       if (seg->num == seg_num) {
-                               mad_send_wr->cur_seg = seg;
-                               return mad_send_wr->cur_seg;
-                       }
-               }
-       } else {
-               list_for_each_entry_reverse(seg, &mad_send_wr->cur_seg->list,
-                                           list) {
-                       if (seg->num == seg_num) {
-                               mad_send_wr->cur_seg = seg;
-                               return mad_send_wr->cur_seg;
-                       }
-               }
+       mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
+                                  send_buf);
+       list = &mad_send_wr->cur_seg->list;
+
+       if (mad_send_wr->cur_seg->num < seg_num) {
+               list_for_each_entry(mad_send_wr->cur_seg, list, list)
+                       if (mad_send_wr->cur_seg->num == seg_num)
+                               break;
+       } else if (mad_send_wr->cur_seg->num > seg_num) {
+               list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
+                       if (mad_send_wr->cur_seg->num == seg_num)
+                               break;
        }
-       return NULL;
+       return mad_send_wr->cur_seg->data;
 }
+EXPORT_SYMBOL(ib_get_rmpp_segment);
 
-struct ib_rmpp_segment *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf,
-                                           int seg_num)
+static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
 {
-       struct ib_mad_send_wr_private *wr;
-
-       if (seg_num < 2)
-               return NULL;
-
-       wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf);
-       return ib_get_segment(wr, seg_num);
+       if (mad_send_wr->send_buf.seg_count)
+               return ib_get_rmpp_segment(&mad_send_wr->send_buf,
+                                          mad_send_wr->seg_num);
+       else
+               return mad_send_wr->send_buf.mad +
+                      mad_send_wr->send_buf.hdr_len;
 }
-EXPORT_SYMBOL(ib_get_rmpp_segment);
 
 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
 {
@@ -981,7 +963,7 @@ int ib_send_mad(struct ib_mad_send_wr_pr
        pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
 
        sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-                                    mad_send_wr->mad_payload,
+                                    ib_get_payload(mad_send_wr),
                                     sge[1].length,
                                     DMA_TO_DEVICE);
        pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
@@ -1005,7 +987,6 @@ int ib_send_mad(struct ib_mad_send_wr_pr
                dma_unmap_single(mad_agent->device->dma_device,
                                 pci_unmap_addr(mad_send_wr, header_mapping),
                                 sge[0].length, DMA_TO_DEVICE);
-
                dma_unmap_single(mad_agent->device->dma_device,
                                 pci_unmap_addr(mad_send_wr, payload_mapping),
                                 sge[1].length, DMA_TO_DEVICE);
Index: core/mad_priv.h
===================================================================
--- core/mad_priv.h     (revision 5552)
+++ core/mad_priv.h     (working copy)
@@ -85,6 +85,12 @@ struct ib_mad_private {
        } mad;
 } __attribute__ ((packed));
 
+struct ib_rmpp_segment {
+       struct list_head list;
+       u32 num;
+       u8 data[0];
+};
+
 struct ib_mad_agent_private {
        struct list_head agent_list;
        struct ib_mad_agent agent;
@@ -124,7 +130,6 @@ struct ib_mad_send_wr_private {
        struct ib_send_wr send_wr;
        struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
        __be64 tid;
-       void *mad_payload; /* RMPP: changed per segment */
        unsigned long timeout;
        int retries;
        int retry;
@@ -138,9 +143,6 @@ struct ib_mad_send_wr_private {
        int last_ack;
        int seg_num;
        int newwin;
-       int total_length;
-       int total_seg;
-       int data_offset;
        int pad;
 };
 
@@ -225,7 +227,4 @@ void ib_mark_mad_done(struct ib_mad_send
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
                          int timeout_ms);
 
-struct ib_rmpp_segment *ib_get_segment(struct ib_mad_send_wr_private *mad_send_wr,
-                                      int seg_num);
-
 #endif /* __IB_MAD_PRIV_H__ */
Index: core/mad_rmpp.c
===================================================================
--- core/mad_rmpp.c     (revision 5552)
+++ core/mad_rmpp.c     (working copy)
@@ -111,14 +111,14 @@ static int data_offset(u8 mgmt_class)
                return IB_MGMT_RMPP_HDR;
 }
 
-static void format_ack(struct ib_rmpp_mad *ack,
+static void format_ack(struct ib_mad_send_buf *msg,
                       struct ib_rmpp_mad *data,
                       struct mad_rmpp_recv *rmpp_recv)
 {
+       struct ib_rmpp_mad *ack = msg->mad;
        unsigned long flags;
 
-       memcpy(&ack->mad_hdr, &data->mad_hdr,
-              data_offset(data->mad_hdr.mgmt_class));
+       memcpy(ack, &data->mad_hdr, msg->hdr_len);
 
        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
@@ -135,16 +135,16 @@ static void ack_recv(struct mad_rmpp_rec
                     struct ib_mad_recv_wc *recv_wc)
 {
        struct ib_mad_send_buf *msg;
-       int ret;
+       int ret, hdr_len;
 
+       hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-                                recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
-                                IB_MGMT_RMPP_DATA, GFP_KERNEL);
+                                recv_wc->wc->pkey_index, 1, hdr_len,
+                                0, GFP_KERNEL);
        if (!msg)
                return;
 
-       format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
-                  rmpp_recv);
-       format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
        msg->ah = rmpp_recv->ah;
        ret = ib_post_send_mad(msg, NULL);
        if (ret)
@@ -156,16 +156,17 @@ static struct ib_mad_send_buf *alloc_res
 {
        struct ib_mad_send_buf *msg;
        struct ib_ah *ah;
+       int hdr_len;
 
        ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
                                  recv_wc->recv_buf.grh, agent->port_num);
        if (IS_ERR(ah))
                return (void *) ah;
 
+       hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1,
-                                IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
-                                GFP_KERNEL);
+                                hdr_len, 0, GFP_KERNEL);
        if (IS_ERR(msg))
                ib_destroy_ah(ah);
        else
@@ -195,8 +196,7 @@ static void nack_recv(struct ib_mad_agen
                return;
 
        rmpp_mad = msg->mad;
-       memcpy(rmpp_mad, recv_wc->recv_buf.mad,
-              data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
+       memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
 
        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
@@ -535,42 +535,30 @@ start_rmpp(struct ib_mad_agent_private *
 static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 {
        struct ib_rmpp_mad *rmpp_mad;
-       struct ib_rmpp_segment *seg;
        int timeout;
-       u32 paylen;
+       u32 paylen = 0;
 
        rmpp_mad = mad_send_wr->send_buf.mad;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
-       rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
+       rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);
 
        if (mad_send_wr->seg_num == 1) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
-               paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
+               paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
                         mad_send_wr->pad;
-               rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
-       } else {
-               seg = ib_get_segment(mad_send_wr, mad_send_wr->seg_num);
-               if (!seg) {
-                       printk(KERN_ERR PFX "send_next_seg: "
-                              "could not find segment %d\n",
-                              mad_send_wr->seg_num);
-                       return -EINVAL;
-               }
-               mad_send_wr->mad_payload = seg->data;
-               rmpp_mad->rmpp_hdr.paylen_newwin = 0;
        }
 
-       if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
+       if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
                paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
-               rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
        }
+       rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
 
        /* 2 seconds for an ACK until we can find the packet lifetime */
        timeout = mad_send_wr->send_buf.timeout_ms;
        if (!timeout || timeout > 2000)
                mad_send_wr->timeout = msecs_to_jiffies(2000);
-       mad_send_wr->seg_num++;
+
        return ib_send_mad(mad_send_wr);
 }
 
@@ -586,7 +574,7 @@ static void abort_send(struct ib_mad_age
        if (!mad_send_wr)
                goto out;       /* Unmatched send */
 
-       if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+       if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */
 
@@ -602,26 +590,16 @@ out:
        spin_unlock_irqrestore(&agent->lock, flags);
 }
 
-static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr)
+static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
+                                  int seg_num)
 {
-       struct ib_rmpp_segment *seg;
+       struct list_head *list;
 
-       if (wr->last_ack < 2)
-               return;
-       else if (!wr->last_ack_seg)
-               list_for_each_entry(seg, &wr->rmpp_list, list) {
-                       if (wr->last_ack == seg->num) {
-                               wr->last_ack_seg = seg;
-                               break;
-                       }
-               }
-       else
-               list_for_each_entry(seg, &wr->last_ack_seg->list, list) {
-                       if (wr->last_ack == seg->num) {
-                               wr->last_ack_seg = seg;
-                               break;
-                       }
-               }
+       wr->last_ack = seg_num;
+       list = &wr->last_ack_seg->list;
+       list_for_each_entry(wr->last_ack_seg, list, list)
+               if (wr->last_ack_seg->num == seg_num)
+                       break;
 }
 
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
@@ -652,11 +630,12 @@ static void process_rmpp_ack(struct ib_m
        if (!mad_send_wr)
                goto out;       /* Unmatched ACK */
 
-       if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+       if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */
 
-       if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
+       if (seg_num > mad_send_wr->send_buf.seg_count ||
+           seg_num > mad_send_wr->newwin) {
                spin_unlock_irqrestore(&agent->lock, flags);
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
@@ -667,12 +646,11 @@ static void process_rmpp_ack(struct ib_m
                goto out;       /* Old ACK */
 
        if (seg_num > mad_send_wr->last_ack) {
-               mad_send_wr->last_ack = seg_num;
-               adjust_last_ack(mad_send_wr);
+               adjust_last_ack(mad_send_wr, seg_num);
                mad_send_wr->retries = mad_send_wr->send_buf.retries;
        }
        mad_send_wr->newwin = newwin;
-       if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
+       if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                /* If no response is expected, the ACK completes the send */
                if (!mad_send_wr->send_buf.timeout_ms) {
                        struct ib_mad_send_wc wc;
@@ -691,7 +669,7 @@ static void process_rmpp_ack(struct ib_m
                                             mad_send_wr->send_buf.timeout_ms);
        } else if (mad_send_wr->refcount == 1 &&
                   mad_send_wr->seg_num < mad_send_wr->newwin &&
-                  mad_send_wr->seg_num <= mad_send_wr->total_seg) {
+                  mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
                /* Send failure will just result in a timeout/retry */
                ret = send_next_seg(mad_send_wr);
                if (ret)
@@ -816,20 +794,12 @@ int ib_send_rmpp_mad(struct ib_mad_send_
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED;
 
-       if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
+       if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
+               mad_send_wr->seg_num = 1;
                return IB_RMPP_RESULT_INTERNAL;
+       }
 
-       if (mad_send_wr->send_wr.num_sge != 2)
-               return -EINVAL;
-
-       mad_send_wr->seg_num = 1;
        mad_send_wr->newwin = 1;
-       mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);
-
-       mad_send_wr->total_seg = (mad_send_wr->total_length - mad_send_wr->data_offset) /
-                       (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
-       mad_send_wr->pad = mad_send_wr->total_length - IB_MGMT_RMPP_HDR -
-                          be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 
        /* We need to wait for the final ACK even if there isn't a response */
        mad_send_wr->refcount += (mad_send_wr->timeout == 0);
@@ -860,14 +830,14 @@ int ib_process_rmpp_send_wc(struct ib_ma
        if (!mad_send_wr->timeout)
                return IB_RMPP_RESULT_PROCESSED; /* Response received */
 
-       if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
+       if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                mad_send_wr->timeout =
                        msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
                return IB_RMPP_RESULT_PROCESSED; /* Send done */
        }
 
-       if (mad_send_wr->seg_num > mad_send_wr->newwin ||
-           mad_send_wr->seg_num > mad_send_wr->total_seg)
+       if (mad_send_wr->seg_num == mad_send_wr->newwin ||
+           mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */
 
        ret = send_next_seg(mad_send_wr);
@@ -888,10 +858,10 @@ int ib_retry_rmpp(struct ib_mad_send_wr_
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
 
-       if (mad_send_wr->last_ack == mad_send_wr->total_seg)
+       if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED;
 
-       mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
+       mad_send_wr->seg_num = mad_send_wr->last_ack;
        mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;
 
        ret = send_next_seg(mad_send_wr);
Index: core/user_mad.c
===================================================================
--- core/user_mad.c     (revision 5552)
+++ core/user_mad.c     (working copy)
@@ -121,9 +121,9 @@ struct ib_umad_file {
 
 struct ib_umad_packet {
        struct ib_mad_send_buf *msg;
+       struct ib_mad_recv_wc  *recv_wc;
        struct list_head   list;
        int                length;
-       struct list_head   seg_list;
        struct ib_user_mad mad;
 };
 
@@ -188,62 +188,6 @@ static int data_offset(u8 mgmt_class)
                return IB_MGMT_RMPP_HDR;
 }
 
-static int copy_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
-                        struct ib_umad_packet *packet)
-{
-       struct ib_mad_recv_buf *seg_buf;
-       struct ib_rmpp_mad *rmpp_mad;
-       void *data;
-       struct ib_rmpp_segment *seg;
-       int size, len, offset;
-       u8 flags;
-
-       len = mad_recv_wc->mad_len;
-       if (len <= sizeof(struct ib_mad)) {
-               memcpy(&packet->mad.data, mad_recv_wc->recv_buf.mad, len);
-               return 0;
-       }
-
-       /* Multipacket (RMPP) MAD */
-       offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
-
-       list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
-               rmpp_mad = (struct ib_rmpp_mad *) seg_buf->mad;
-               flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);
-
-               if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
-                       size = sizeof(*rmpp_mad);
-                       memcpy(&packet->mad.data, rmpp_mad, size);
-               } else {
-                       data = (void *) rmpp_mad + offset;
-                       if (flags & IB_MGMT_RMPP_FLAG_LAST)
-                               size = len;
-                       else
-                               size = sizeof(*rmpp_mad) - offset;
-                       seg = kmalloc(sizeof(struct ib_rmpp_segment) +
-                                     sizeof(struct ib_rmpp_mad) - offset,
-                                     GFP_KERNEL);
-                       if (!seg)
-                               return -ENOMEM;
-                       memcpy(seg->data, data, size);
-                       list_add_tail(&seg->list, &packet->seg_list);
-               }
-               len -= size;
-       }
-       return 0;
-}
-
-static void free_packet(struct ib_umad_packet *packet)
-{
-       struct ib_rmpp_segment *seg, *tmp;
-
-       list_for_each_entry_safe(seg, tmp, &packet->seg_list, list) {
-               list_del(&seg->list);
-               kfree(seg);
-       }
-       kfree(packet);
-}
-
 static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *send_wc)
 {
@@ -268,25 +212,20 @@ static void recv_handler(struct ib_mad_a
 {
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet;
-       int length;
 
        if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
-               goto out;
+               goto err1;
 
-       length = mad_recv_wc->mad_len;
-       packet = kzalloc(sizeof *packet + sizeof(struct ib_mad), GFP_KERNEL);
+       packet = kzalloc(sizeof *packet, GFP_KERNEL);
        if (!packet)
-               goto out;
-       INIT_LIST_HEAD(&packet->seg_list);
-       packet->length = length;
+               goto err1;
 
-       if (copy_recv_mad(mad_recv_wc, packet)) {
-               free_packet(packet);
-               goto out;
-       }
+       packet->length = mad_recv_wc->mad_len;
+       packet->recv_wc = mad_recv_wc;
 
        packet->mad.hdr.status    = 0;
-       packet->mad.hdr.length    = length + sizeof (struct ib_user_mad);
+       packet->mad.hdr.length    = sizeof (struct ib_user_mad) +
+                                   mad_recv_wc->mad_len;
        packet->mad.hdr.qpn       = cpu_to_be32(mad_recv_wc->wc->src_qp);
        packet->mad.hdr.lid       = cpu_to_be16(mad_recv_wc->wc->slid);
        packet->mad.hdr.sl        = mad_recv_wc->wc->sl;
@@ -302,21 +241,87 @@ static void recv_handler(struct ib_mad_a
        }
 
        if (queue_packet(file, agent, packet))
-               free_packet(packet);
+               goto err2;
+       return;
 
-out:
+err2:
+       kfree(packet);
+err1:
        ib_free_recv_mad(mad_recv_wc);
 }
 
+static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
+                            size_t count)
+{
+       struct ib_mad_recv_buf *recv_buf;
+       int left, seg_payload, offset, max_seg_payload;
+
+       /* We need enough room to copy the first (or only) MAD segment. */
+       recv_buf = &packet->recv_wc->recv_buf;
+       if ((packet->length <= sizeof (*recv_buf->mad) &&
+            count < sizeof (packet->mad) + packet->length) ||
+           (packet->length > sizeof (*recv_buf->mad) &&
+            count < sizeof (packet->mad) + sizeof (*recv_buf->mad)))
+               return -EINVAL;
+
+       if (copy_to_user(buf, &packet->mad, sizeof (packet->mad)))
+               return -EFAULT;
+
+       buf += sizeof (packet->mad);
+       seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
+       if (copy_to_user(buf, recv_buf->mad, seg_payload))
+               return -EFAULT;
+
+       if (seg_payload < packet->length) {
+               /*
+                * Multipacket RMPP MAD message. Copy remainder of message.
+                * Note that last segment may have a shorter payload.
+                */
+               if (count < sizeof (packet->mad) + packet->length) {
+                       /*
+                       * The buffer is too small, return the first RMPP segment,
+                       * which includes the RMPP message length.
+                       */
+                       return -ENOSPC;
+               }
+               offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+               max_seg_payload = sizeof (struct ib_mad) - offset;
+
+               for (left = packet->length - seg_payload, buf += seg_payload;
+                    left; left -= seg_payload, buf += seg_payload) {
+                       recv_buf = container_of(recv_buf->list.next,
+                                               struct ib_mad_recv_buf, list);
+                       seg_payload = min(left, max_seg_payload);
+                       if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
+                                        seg_payload))
+                               return -EFAULT;
+               }
+       }
+       return sizeof (packet->mad) + packet->length;
+}
+
+static ssize_t copy_send_mad(char __user *buf, struct ib_umad_packet *packet,
+                            size_t count)
+{
+       ssize_t size = sizeof (packet->mad) + packet->length;
+
+       if (count < size)
+               return -EINVAL;
+
+       if (copy_to_user(buf, &packet->mad, size))
+               return -EFAULT;
+
+       return size;
+}
+
 static ssize_t ib_umad_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *pos)
 {
        struct ib_umad_file *file = filp->private_data;
-       struct ib_rmpp_segment *seg;
        struct ib_umad_packet *packet;
-       ssize_t ret, size;
+       ssize_t ret;
 
-       if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
+       if (count < sizeof (struct ib_user_mad))
                return -EINVAL;
 
        spin_lock_irq(&file->recv_lock);
@@ -339,55 +344,44 @@ static ssize_t ib_umad_read(struct file 
 
        spin_unlock_irq(&file->recv_lock);
 
-       size = min_t(int, sizeof (struct ib_mad), packet->length);
-       if (copy_to_user(buf, &packet->mad,
-                        sizeof(struct ib_user_mad) + size)) {
-               ret = -EFAULT;
-               goto err;
-       }
+       if (packet->recv_wc)
+               ret = copy_recv_mad(buf, packet, count);
+       else
+               ret = copy_send_mad(buf, packet, count);
 
-       if (count < packet->length + sizeof (struct ib_user_mad))
-               /*
-                * User buffer too small. Return first RMPP segment (which
-                * includes RMPP message length).
-                */
-               ret = -ENOSPC;
-       else if (packet->length <= sizeof(struct ib_mad))
-               ret = packet->length + sizeof(struct ib_user_mad);
-       else {
-               int len = packet->length - sizeof(struct ib_mad);
-               struct ib_rmpp_mad *rmpp_mad =
-                               (struct ib_rmpp_mad *) packet->mad.data;
-               int max_seg_payload = sizeof(struct ib_mad) -
-                                     data_offset(rmpp_mad->mad_hdr.mgmt_class);
-               int seg_payload;
-               /*
-                * Multipacket RMPP MAD message. Copy remainder of message.
-                * Note that last segment may have a shorter payload.
-                */
-               buf += sizeof(struct ib_user_mad) + sizeof(struct ib_mad);
-               list_for_each_entry(seg, &packet->seg_list, list) {
-                       seg_payload = min_t(int, len, max_seg_payload);
-                       if (copy_to_user(buf, seg->data, seg_payload)) {
-                               ret = -EFAULT;
-                               goto err;
-                       }
-                       buf += seg_payload;
-                       len -= seg_payload;
-               }
-               ret = packet->length + sizeof (struct ib_user_mad);
-       }
-err:
        if (ret < 0) {
                /* Requeue packet */
                spin_lock_irq(&file->recv_lock);
                list_add(&packet->list, &file->recv_list);
                spin_unlock_irq(&file->recv_lock);
-       } else
-               free_packet(packet);
+       } else {
+               if (packet->recv_wc)
+                       ib_free_recv_mad(packet->recv_wc);
+               kfree(packet);
+       }
        return ret;
 }
 
+static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
+{
+       int left, seg;
+
+       /* Copy class specific header */
+       if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
+           copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
+                          msg->hdr_len - IB_MGMT_RMPP_HDR))
+               return -EFAULT;
+
+       /* All headers are in place.  Copy data segments. */
+       for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
+            seg++, left -= msg->seg_size, buf += msg->seg_size) {
+               if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
+                                  min(left, msg->seg_size)))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
 static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
 {
@@ -399,16 +393,12 @@ static ssize_t ib_umad_write(struct file
        struct ib_rmpp_mad *rmpp_mad;
        u8 method;
        __be64 *tid;
-       int ret, length, hdr_len, copy_offset;
-       int rmpp_active, has_rmpp_header;
-       int s, seg_num;
-       struct ib_rmpp_segment *seg;
+       int ret, data_len, hdr_len, copy_offset, rmpp_active;
 
        if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
                return -EINVAL;
 
-       length = count - sizeof (struct ib_user_mad);
-       packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+       packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
        if (!packet)
                return -ENOMEM;
 
@@ -455,40 +445,25 @@ static ssize_t ib_umad_write(struct file
        if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
                hdr_len = IB_MGMT_SA_HDR;
                copy_offset = IB_MGMT_RMPP_HDR;
-               has_rmpp_header = 1;
+               rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+                             IB_MGMT_RMPP_FLAG_ACTIVE;
        } else if (rmpp_mad->mad_hdr.mgmt_class >= 
IB_MGMT_CLASS_VENDOR_RANGE2_START &&
                   rmpp_mad->mad_hdr.mgmt_class <= 
IB_MGMT_CLASS_VENDOR_RANGE2_END) {
-                       hdr_len = IB_MGMT_VENDOR_HDR;
-                       copy_offset = IB_MGMT_RMPP_HDR;
-                       has_rmpp_header = 1;
+               hdr_len = IB_MGMT_VENDOR_HDR;
+               copy_offset = IB_MGMT_RMPP_HDR;
+               rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+                             IB_MGMT_RMPP_FLAG_ACTIVE;
        } else {
                hdr_len = IB_MGMT_MAD_HDR;
                copy_offset = IB_MGMT_MAD_HDR;
-               has_rmpp_header = 0;
-       }
-
-       if (has_rmpp_header)
-               rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-                             IB_MGMT_RMPP_FLAG_ACTIVE;
-       else
                rmpp_active = 0;
-
-       /* Validate that the management class can support RMPP */
-       if (rmpp_active && !agent->rmpp_version) {
-               ret = -EINVAL;
-               goto err_ah;
-       }
-
-       if (!rmpp_active && length > sizeof(struct ib_mad)) {
-               ret = -EINVAL;
-               goto err_ah;
        }
 
+       data_len = count - sizeof (struct ib_user_mad) - hdr_len;
        packet->msg = ib_create_send_mad(agent,
                                         be32_to_cpu(packet->mad.hdr.qpn),
-                                        0, rmpp_active,
-                                        hdr_len, length - hdr_len,
-                                        GFP_KERNEL);
+                                        0, rmpp_active, hdr_len,
+                                        data_len, GFP_KERNEL);
        if (IS_ERR(packet->msg)) {
                ret = PTR_ERR(packet->msg);
                goto err_ah;
@@ -499,32 +474,21 @@ static ssize_t ib_umad_write(struct file
        packet->msg->retries    = packet->mad.hdr.retries;
        packet->msg->context[0] = packet;
 
-       /* Copy MAD headers (RMPP header in place) */
+       /* Copy MAD header.  Any RMPP header is already in place. */
        memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
-       /* complete copying first 256 bytes of message into send buffer */
-       if (copy_from_user(packet->msg->mad + copy_offset,
-                          buf + sizeof (struct ib_user_mad) + copy_offset,
-                          min_t(int, length, sizeof(struct ib_mad)) - copy_offset)) {
-               ret = -EFAULT;
-               goto err_msg;
-       }
+       buf += sizeof (struct ib_user_mad);
 
-       /* if RMPP, copy rest of send message from user to multipacket list */
-       length -= sizeof(struct ib_mad);
-       if (length > 0) {
-               buf +=  sizeof (struct ib_user_mad) + sizeof(struct ib_mad);
-               for (seg_num = 2; length > 0; ++seg_num, buf += s, length -= s) {
-                       seg = ib_get_rmpp_segment(packet->msg, seg_num);
-                       BUG_ON(!seg);
-                       s = min_t(int, length, seg->size);
-                       if (copy_from_user(seg->data, buf, s)) {
-                               ret = -EFAULT;
-                               goto err_msg;
-                       }
+       if (!rmpp_active) {
+               if (copy_from_user(packet->msg->mad + copy_offset,
+                                  buf + copy_offset,
+                                  hdr_len + data_len - copy_offset)) {
+                       ret = -EFAULT;
+                       goto err_msg;
                }
-               /* Pad last segment with zeroes. */
-               if (seg->size - s)
-                       memset(seg->data + s, 0, seg->size - s);
+       } else {
+               ret = copy_rmpp_mad(packet->msg, buf);
+               if (ret)
+                       goto err_msg;
        }
 
        /*
@@ -548,18 +512,14 @@ static ssize_t ib_umad_write(struct file
                goto err_msg;
 
        up_read(&file->port->mutex);
-
        return count;
 
 err_msg:
        ib_free_send_mad(packet->msg);
-
 err_ah:
        ib_destroy_ah(ah);
-
 err_up:
        up_read(&file->port->mutex);
-
 err:
        kfree(packet);
        return ret;
Index: include/rdma/ib_mad.h
===================================================================
--- include/rdma/ib_mad.h       (revision 5552)
+++ include/rdma/ib_mad.h       (working copy)
@@ -141,13 +141,6 @@ struct ib_rmpp_hdr {
        __be32  paylen_newwin;
 };
 
-struct ib_rmpp_segment {
-       struct list_head list;
-       u32 num;
-       u16 size;
-       u8 data[0];
-};
-
 typedef u64 __bitwise ib_sa_comp_mask;
 
 #define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
@@ -215,15 +208,23 @@ struct ib_class_port_info
 /**
  * ib_mad_send_buf - MAD data buffer and work request for sends.
  * @next: A pointer used to chain together MADs for posting.
- * @mad: References an allocated MAD data buffer.
+ * @mad: References an allocated MAD data buffer for MADs that do not have
+ *   RMPP active.  For MADs using RMPP, references the common and management
+ *   class specific headers.
  * @mad_agent: MAD agent that allocated the buffer.
  * @ah: The address handle to use when sending the MAD.
  * @context: User-controlled context fields.
+ * @hdr_len: Indicates the size of the data header of the MAD.  This length
+ *   includes the common MAD, RMPP, and class specific headers.
+ * @data_len: Indicates the total size of user-transferred data.
+ * @seg_count: The number of RMPP segments allocated for this send.
+ * @seg_size: Size of each RMPP segment.
  * @timeout_ms: Time to wait for a response.
  * @retries: Number of times to retry a request for a response.
  *
  * Users are responsible for initializing the MAD buffer itself, with the
- * exception of specifying the payload length field in any RMPP MAD.
+ * exception of any RMPP header.  Additional segment buffer space allocated
+ * beyond data_len is padding.
  */
 struct ib_mad_send_buf {
        struct ib_mad_send_buf  *next;
@@ -231,6 +232,10 @@ struct ib_mad_send_buf {
        struct ib_mad_agent     *mad_agent;
        struct ib_ah            *ah;
        void                    *context[2];
+       int                     hdr_len;
+       int                     data_len;
+       int                     seg_count;
+       int                     seg_size;
        int                     timeout_ms;
        int                     retries;
 };
@@ -586,9 +591,10 @@ int ib_process_mad_wc(struct ib_mad_agen
  * with an initialized work request structure.  Users may modify the returned
  * MAD data buffer before posting the send.
  *
- * The returned data buffer will be cleared.  Users are responsible for
- * initializing the common MAD and any class specific headers.  If @rmpp_active
- * is set, the RMPP header will be initialized for sending.
+ * The returned MAD header, class specific headers, and any padding will be
+ * cleared.  Users are responsible for initializing the common MAD header,
+ * any class specific header, and MAD data area.
+ * If @rmpp_active is set, the RMPP header will be initialized for sending.
  */
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                            u32 remote_qpn, u16 pkey_index,
@@ -597,14 +603,14 @@ struct ib_mad_send_buf * ib_create_send_
                                            gfp_t gfp_mask);
 
 /**
- * *ib_get_rmpp_segment - returns a given RMPP segment.
+ * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
  * @send_buf: Previously allocated send data buffer.
  * @seg_num: number of segment to return
  *
- * This routine returns a pointer to a segment of an RMPP message.
+ * This routine returns a pointer to the data buffer of an RMPP MAD.
+ * Users must provide synchronization to @send_buf around this call.
  */
-struct ib_rmpp_segment *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf,
-                                           int seg_num);
+void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
 
 /**
  * ib_free_send_mad - Returns data buffers used to send a MAD.



_______________________________________________
openib-general mailing list
[email protected]
http://openib.org/mailman/listinfo/openib-general

To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general

Reply via email to