Hello Vlad,

Here is the IPoIB 4K MTU patch for the OFED-1.3-RC3 release against the 2.6.24 kernel. I have also attached it, since my email client has some problems. Regarding the backport, one line needs to be adjusted for priv->stats vs. dev->stats. I don't have the backport patch; if you could help with that, it would be nice. If it is any trouble, I will ask Nam to help out.
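To be concrete, here is a minimal sketch of the one-line difference I mean, assuming the backported kernel still keeps the IPoIB counters in the driver private structure (priv->stats) rather than in struct net_device (dev->stats); the exact field layout in the backport tree may differ:

	/* 2.6.24 and later: counters are embedded in struct net_device */
	++dev->stats.rx_dropped;

	/* older kernels (backport): counters live in struct ipoib_dev_priv,
	 * so the equivalent line would be something like:
	 */
	++priv->stats.rx_dropped;

The rest of the patch should apply as-is.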
I have touch-tested mthca with a 2K MTU using the updated patch. More tests are in progress.

Thanks,
Shirley

Signed-off-by: Shirley Ma <[EMAIL PROTECTED]>
---
 drivers/infiniband/ulp/ipoib/ipoib.h           |   28 +++-
 drivers/infiniband/ulp/ipoib/ipoib_ib.c        |  218 +++++++++++++++++-------
 drivers/infiniband/ulp/ipoib/ipoib_main.c      |   19 ++-
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c |    3 +-
 drivers/infiniband/ulp/ipoib/ipoib_verbs.c     |   16 ++-
 5 files changed, 212 insertions(+), 72 deletions(-)

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8eb6aa2..cb3aeab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,11 +56,11 @@
 /* constants */

 enum {
-	IPOIB_PACKET_SIZE = 2048,
-	IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
 	IPOIB_ENCAP_LEN = 4,

+	IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+	IPOIB_UD_RX_SG = 2, /* for 4K MTU */
+
 	IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
 	IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
 	IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -135,9 +135,9 @@ struct ipoib_mcast {
 	struct net_device *dev;
 };

-struct ipoib_rx_buf {
+struct ipoib_sg_rx_buf {
 	struct sk_buff *skb;
-	u64 mapping;
+	u64 mapping[IPOIB_UD_RX_SG];
 };

 struct ipoib_tx_buf {
@@ -286,7 +286,7 @@ struct ipoib_dev_priv {
 	unsigned int admin_mtu;
 	unsigned int mcast_mtu;

-	struct ipoib_rx_buf *rx_ring;
+	struct ipoib_sg_rx_buf *rx_ring;

 	spinlock_t tx_lock;
 	struct ipoib_tx_buf *tx_ring;
@@ -315,6 +315,9 @@ struct ipoib_dev_priv {
 	struct dentry *mcg_dentry;
 	struct dentry *path_dentry;
 #endif
+	int max_ib_mtu;
+	struct ib_sge rx_sge[IPOIB_UD_RX_SG];
+	struct ib_recv_wr rx_wr;
 };

 struct ipoib_ah {
@@ -355,6 +358,19 @@ struct ipoib_neigh {
 	struct list_head list;
 };

+#define IPOIB_UD_MTU(ib_mtu)		(ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu)	(ib_mtu + IB_GRH_BYTES)
+static inline int ipoib_ud_need_sg(int ib_mtu)
+{
+	return (IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE) ? 1 : 0;
+}
+static inline void ipoib_sg_dma_unmap_rx(struct ipoib_dev_priv *priv,
+					 u64 mapping[IPOIB_UD_RX_SG])
+{
+	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
+	ib_dma_unmap_single(priv->ca, mapping[1], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
 /*
  * We stash a pointer to our private neighbour information after our
  * hardware address in neigh->ha.  The ALIGN() expression here makes
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5063dd5..6c9eefe 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -87,32 +87,93 @@ void ipoib_free_ah(struct kref *kref)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }

+/* Adjust length of skb with fragments to match received data */
+static void ipoib_ud_skb_put_frags(struct sk_buff *skb, unsigned int length,
+				   struct sk_buff *toskb)
+{
+	unsigned int size;
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+	/* put header into skb */
+	size = min(length, (unsigned)IPOIB_UD_HEAD_SIZE);
+	skb->tail += size;
+	skb->len += size;
+	length -= size;
+
+	if (length == 0) {
+		/* don't need this page */
+		skb_fill_page_desc(toskb, 0, frag->page, 0, PAGE_SIZE);
+		--skb_shinfo(skb)->nr_frags;
+	} else {
+		size = min(length, (unsigned) PAGE_SIZE);
+		frag->size = size;
+		skb->data_len += size;
+		skb->truesize += size;
+		skb->len += size;
+		length -= size;
+	}
+}
+
+static struct sk_buff *ipoib_sg_alloc_rx_skb(struct net_device *dev,
+					     int id, u64 mapping[IPOIB_UD_RX_SG])
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct page *page;
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(IPOIB_UD_HEAD_SIZE);
+
+	if (unlikely(!skb))
+		return NULL;
+
+	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_UD_HEAD_SIZE,
+				       DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	page = alloc_page(GFP_ATOMIC);
+	if (!page)
+		goto partial_error;
+
+	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+	mapping[1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+				     0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+		goto partial_error;
+
+	priv->rx_ring[id].skb = skb;
+	return skb;
+
+partial_error:
+	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ib_sge list;
-	struct ib_recv_wr param;
 	struct ib_recv_wr *bad_wr;
 	int ret;

-	list.addr = priv->rx_ring[id].mapping;
-	list.length = IPOIB_BUF_SIZE;
-	list.lkey = priv->mr->lkey;
+	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

-	param.next = NULL;
-	param.wr_id = id | IPOIB_OP_RECV;
-	param.sg_list = &list;
-	param.num_sge = 1;
-
-	ret = ib_post_recv(priv->qp, &param, &bad_wr);
+	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
 	if (unlikely(ret)) {
+		if (ipoib_ud_need_sg(priv->max_ib_mtu))
+			ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
+		else
+			ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping[0],
+					    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+					    DMA_FROM_DEVICE);
 		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
-				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(priv->rx_ring[id].skb);
 		priv->rx_ring[id].skb = NULL;
 	}
-
 	return ret;
 }

@@ -122,7 +183,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	struct sk_buff *skb;
 	u64 addr;

-	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
+	skb = dev_alloc_skb(IPOIB_UD_BUF_SIZE(priv->max_ib_mtu) + 4);
 	if (!skb)
 		return -ENOMEM;

@@ -133,7 +194,8 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	 */
 	skb_reserve(skb, 4);

-	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+	addr = ib_dma_map_single(priv->ca, skb->data,
+				 IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
 				 DMA_FROM_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
 		dev_kfree_skb_any(skb);
@@ -141,7 +203,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	}

 	priv->rx_ring[id].skb = skb;
-	priv->rx_ring[id].mapping = addr;
+	priv->rx_ring[id].mapping[0] = addr;

 	return 0;
 }

@@ -149,10 +211,15 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 static int ipoib_ib_post_receives(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int i;
+	int i, ret;

 	for (i = 0; i < ipoib_recvq_size; ++i) {
-		if (ipoib_alloc_rx_skb(dev, i)) {
+		if (ipoib_ud_need_sg(priv->max_ib_mtu))
+			ret = !(ipoib_sg_alloc_rx_skb(dev, i,
+						      priv->rx_ring[i].mapping));
+		else
+			ret = ipoib_alloc_rx_skb(dev, i);
+		if (ret) {
 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 			return -ENOMEM;
 		}
@@ -170,7 +237,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
 	struct sk_buff *skb;
-	u64 addr;
+	u64 mapping[IPOIB_UD_RX_SG];

 	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
 		       wr_id, wc->status);
@@ -182,42 +249,73 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	}

 	skb = priv->rx_ring[wr_id].skb;
-	addr = priv->rx_ring[wr_id].mapping;
+	/* duplicate the code to save fast path condition check times */
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+		struct sk_buff *newskb;
+		if (unlikely(wc->status != IB_WC_SUCCESS)) {
+			if (wc->status != IB_WC_WR_FLUSH_ERR)
+				ipoib_warn(priv, "failed recv event "
+					   "(status=%d, wrid=%d vend_err %x)\n",
+					   wc->status, wr_id, wc->vendor_err);
+			ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+			dev_kfree_skb_any(skb);
+			priv->rx_ring[wr_id].skb = NULL;
+			return;
+		}
+		/*
+		 * Drop packets that this interface sent, ie multicast packets
+		 * that the HCA has replicated.
+		 */
+		if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
+			goto repost;
+		newskb = ipoib_sg_alloc_rx_skb(dev, wr_id, mapping);
+		if (unlikely(!newskb)) {
+			++dev->stats.rx_dropped;
+			goto repost;
+		}
+		ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+			       wc->byte_len, wc->slid);
+		ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+		memcpy(priv->rx_ring[wr_id].mapping, mapping,
+		       IPOIB_UD_RX_SG * sizeof *mapping);
+		ipoib_ud_skb_put_frags(skb, wc->byte_len, newskb);
+	} else {
+		u64 addr = priv->rx_ring[wr_id].mapping[0];
+		if (unlikely(wc->status != IB_WC_SUCCESS)) {
+			if (wc->status != IB_WC_WR_FLUSH_ERR)
+				ipoib_warn(priv, "failed recv event "
+					   "(status=%d, wrid=%d vend_err %x)\n",
+					   wc->status, wr_id, wc->vendor_err);
+			ib_dma_unmap_single(priv->ca, addr,
+					    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+					    DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+			priv->rx_ring[wr_id].skb = NULL;
+			return;
+		}
+		/*
+		 * Drop packets that this interface sent, ie multicast packets
+		 * that the HCA has replicated.
+		 */
+		if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
+			goto repost;
+
+		/*
+		 * If we can't allocate a new RX buffer, dump
+		 * this packet and reuse the old buffer.
+		 */
+		if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+			++dev->stats.rx_dropped;
+			goto repost;
+		}

-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
-		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			ipoib_warn(priv, "failed recv event "
-				   "(status=%d, wrid=%d vend_err %x)\n",
-				   wc->status, wr_id, wc->vendor_err);
+		ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+			       wc->byte_len, wc->slid);
 		ib_dma_unmap_single(priv->ca, addr,
-				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		priv->rx_ring[wr_id].skb = NULL;
-		return;
+				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+				    DMA_FROM_DEVICE);
+		skb_put(skb, wc->byte_len);
 	}
-
-	/*
-	 * Drop packets that this interface sent, ie multicast packets
-	 * that the HCA has replicated.
-	 */
-	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
-		goto repost;
-
-	/*
-	 * If we can't allocate a new RX buffer, dump
-	 * this packet and reuse the old buffer.
-	 */
-	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-		++dev->stats.rx_dropped;
-		goto repost;
-	}
-
-	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
-		       wc->byte_len, wc->slid);
-
-	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-
-	skb_put(skb, wc->byte_len);
 	skb_pull(skb, IB_GRH_BYTES);

 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -625,15 +723,19 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 		}

 		for (i = 0; i < ipoib_recvq_size; ++i) {
-			struct ipoib_rx_buf *rx_req;
+			struct ipoib_sg_rx_buf *rx_req;

 			rx_req = &priv->rx_ring[i];
 			if (!rx_req->skb)
 				continue;
-			ib_dma_unmap_single(priv->ca,
-					    rx_req->mapping,
-					    IPOIB_BUF_SIZE,
-					    DMA_FROM_DEVICE);
+			if (ipoib_ud_need_sg(priv->max_ib_mtu))
+				ipoib_sg_dma_unmap_rx(priv,
+						      priv->rx_ring[i].mapping);
+			else
+				ib_dma_unmap_single(priv->ca,
+						    rx_req->mapping[0],
+						    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+						    DMA_FROM_DEVICE);
 			dev_kfree_skb_any(rx_req->skb);
 			rx_req->skb = NULL;
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 46b67f8..9f18a9e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -193,7 +193,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 		return 0;
 	}

-	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
 		return -EINVAL;

 	priv->admin_mtu = new_mtu;
@@ -977,10 +977,6 @@ static void ipoib_setup(struct net_device *dev)
 	dev->tx_queue_len = ipoib_sendq_size * 2;
 	dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

-	/* MTU will be reset when mcast join happens */
-	dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-	priv->mcast_mtu = priv->admin_mtu = dev->mtu;
-
 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

 	netif_carrier_off(dev);
@@ -1112,6 +1108,7 @@ static struct net_device *ipoib_add_port(const char *format,
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
+	struct ib_port_attr attr;
 	int result = -ENOMEM;

 	priv = ipoib_intf_alloc(format);
@@ -1120,6 +1117,18 @@ static struct net_device *ipoib_add_port(const char *format,

 	SET_NETDEV_DEV(priv->dev, hca->dma_device);

+	if (!ib_query_port(hca, port, &attr))
+		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+	else {
+		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+		       hca->name, port);
+		goto device_init_failed;
+	}
+
+	/* MTU will be reset when mcast join happens */
+	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
 	result = ib_query_pkey(hca, port, 0, &priv->pkey);
 	if (result) {
 		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 9bcfc7a..19b1895 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
 		return;
 	}

-	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
-		IPOIB_ENCAP_LEN;
+	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

 	if (!ipoib_cm_admin_enabled(dev))
 		dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 3c6e45d..5654ebc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 			.max_send_wr = ipoib_sendq_size,
 			.max_recv_wr = ipoib_recvq_size,
 			.max_send_sge = 1,
-			.max_recv_sge = 1
+			.max_recv_sge = IPOIB_UD_RX_SG
 		},
 		.sq_sig_type = IB_SIGNAL_ALL_WR,
 		.qp_type = IB_QPT_UD
@@ -204,6 +204,20 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	priv->tx_wr.num_sge = 1;
 	priv->tx_wr.send_flags = IB_SEND_SIGNALED;

+	priv->rx_sge[0].lkey = priv->mr->lkey;
+	priv->rx_sge[1].lkey = priv->mr->lkey;
+	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+		priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
+		priv->rx_sge[1].length = PAGE_SIZE;
+		priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
+	} else {
+		priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+		priv->rx_sge[1].length = 0;
+		priv->rx_wr.num_sge = 1;
+	}
+	priv->rx_wr.next = NULL;
+	priv->rx_wr.sg_list = priv->rx_sge;
+
 	return 0;

 out_free_cq: