When DMA mapping an RX buffer fails because of the device's DMA addressing
limitation, retry the allocation in ZONE_DMA. When the network stack hands us
a TX buffer that cannot be mapped for the same reason, allocate a bounce
buffer in ZONE_DMA and copy the packet into it.
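
For reference, the TX path is the classic bounce-buffer pattern. A minimal
sketch of the idea, using only the standard skb helpers (__dev_alloc_skb(),
skb_put(), dev_kfree_skb_any()); the helper name bounce_to_zone_dma() is
illustrative and not part of this patch:

	/* Illustrative sketch only: copy an unmappable skb into a ZONE_DMA
	 * replacement so that a second mapping attempt can succeed. */
	static struct sk_buff *bounce_to_zone_dma(struct sk_buff *skb)
	{
		struct sk_buff *bounce;

		/* GFP_DMA restricts the allocation to ZONE_DMA, which is
		 * addressable by devices with a limited DMA mask. */
		bounce = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce)
			return NULL;
		memcpy(skb_put(bounce, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);	/* original skb is consumed */
		return bounce;
	}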

Signed-off-by: Will Dyson <[EMAIL PROTECTED]>
---
 .../net/wireless/mac80211/bcm43xx/bcm43xx_dma.c    |   71 ++++++++++++++++++-
 1 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
index d93e219..51a2def 100644
--- a/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
@@ -525,8 +525,23 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data,
                                 ring->rx_buffersize, 0);
-       if (dma_mapping_error(dmaaddr))
+       if (dma_mapping_error(dmaaddr)) {
+               /* Mapping failed; retry the allocation in ZONE_DMA. */
+               gfp_flags |= GFP_DMA;
+
+               dev_kfree_skb_any(skb);
+
+               skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
+               if (unlikely(!skb))
+                       return -ENOMEM;
+               dmaaddr = map_descbuffer(ring, skb->data,
+                                        ring->rx_buffersize, 0);
+       }
+
+       if (dma_mapping_error(dmaaddr)) {
+               dev_kfree_skb_any(skb);
                return -EIO;
+       }
 
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
@@ -731,6 +746,7 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;
+       dma_addr_t dma_test;
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
@@ -750,6 +766,32 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
                                            GFP_KERNEL);
                if (!ring->txhdr_cache)
                        goto err_kfree_meta;
+
+               /* Test whether txhdr_cache can be DMA-mapped. */
+               dma_test = dma_map_single(dev->dev->dev,
+                               ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
+                               DMA_TO_DEVICE);
+
+               if (dma_mapping_error(dma_test)) {
+                       /* Mapping failed; reallocate in ZONE_DMA. */
+                       kfree(ring->txhdr_cache);
+                       ring->txhdr_cache = kcalloc(nr_slots,
+                                                       sizeof(struct bcm43xx_txhdr_fw4),
+                                                       GFP_KERNEL | GFP_DMA);
+                       if (!ring->txhdr_cache)
+                               goto err_kfree_meta;
+
+                       dma_test = dma_map_single(dev->dev->dev,
+                                       ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
+                                       DMA_TO_DEVICE);
+
+                       if (dma_mapping_error(dma_test))
+                               goto err_kfree_txhdr_cache;
+               }
+
+               dma_unmap_single(dev->dev->dev,
+                               dma_test, sizeof(struct bcm43xx_txhdr_fw4),
+                               DMA_TO_DEVICE);
        }
 
        ring->dev = dev;
@@ -1030,9 +1072,11 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
        const struct bcm43xx_dma_ops *ops = ring->ops;
        u8 *header;
        int slot;
+       int err;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_dmadesc_meta *meta_hdr;
+       struct sk_buff *bounce_skb;
 
 #define SLOTS_PER_PACKET  2
        assert(skb_shinfo(skb)->nr_frags == 0);
@@ -1062,9 +1106,26 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
        memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
        meta->skb = skb;
        meta->is_last_fragment = 1;
+
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-       if (dma_mapping_error(meta->dmaaddr))
-               goto out_unmap_hdr;
+       /* On mapping failure, create a bounce buffer in ZONE_DMA. */
+       if (dma_mapping_error(meta->dmaaddr)) {
+               bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+               if (!bounce_skb) {
+                       err = -ENOMEM;
+                       goto out_unmap_hdr;
+               }
+
+               memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+               dev_kfree_skb_any(skb);
+               skb = bounce_skb;
+               meta->skb = skb;
+               meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+               if (dma_mapping_error(meta->dmaaddr)) {
+                       err = -EIO;
+                       goto out_free_bounce;
+               }
+       }
 
        ops->fill_descriptor(ring, desc, meta->dmaaddr,
                             skb->len, 0, 1, 1);
@@ -1074,10 +1135,12 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;
 
+out_free_bounce:
+       dev_kfree_skb_any(skb);
 out_unmap_hdr:
        unmap_descbuffer(ring, meta_hdr->dmaaddr,
                        sizeof(struct bcm43xx_txhdr_fw4), 1);
-       return -EIO;
+       return err;
 }
 
 int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
-- 
1.5.1
