Currently swiotlb is the only consumer of swiotlb_bounce.  Since that is the
case, it doesn't make much sense to be exporting it, so make it a static
function only.

In addition we can save a few more lines of code by making it so that it
accepts the DMA address as a physical address instead of a virtual one.  This
is the last piece in essentially pushing all of the DMA address values to use
physical addresses in swiotlb.

Signed-off-by: Alexander Duyck <alexander.h.du...@intel.com>
---

 include/linux/swiotlb.h |    3 ---
 lib/swiotlb.c           |   30 +++++++++++++-----------------
 2 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ba1bd38..8e635d1 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -53,9 +53,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
                                    enum dma_sync_target target);
 
 /* Accessory functions. */
-extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-                          enum dma_data_direction dir);
-
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flags);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 7cfe850..a2ad781 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -351,10 +351,11 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-                   enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t phys, phys_addr_t dma_addr,
+                          size_t size, enum dma_data_direction dir)
 {
        unsigned long pfn = PFN_DOWN(phys);
+       unsigned char *vaddr = phys_to_virt(dma_addr);
 
        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
@@ -369,25 +370,23 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
-                               memcpy(dma_addr, buffer + offset, sz);
+                               memcpy(vaddr, buffer + offset, sz);
                        else
-                               memcpy(buffer + offset, dma_addr, sz);
+                               memcpy(buffer + offset, vaddr, sz);
                        kunmap_atomic(buffer);
                        local_irq_restore(flags);
 
                        size -= sz;
                        pfn++;
-                       dma_addr += sz;
+                       vaddr += sz;
                        offset = 0;
                }
+       } else if (dir == DMA_TO_DEVICE) {
+               memcpy(vaddr, phys_to_virt(phys), size);
        } else {
-               if (dir == DMA_TO_DEVICE)
-                       memcpy(dma_addr, phys_to_virt(phys), size);
-               else
-                       memcpy(phys_to_virt(phys), dma_addr, size);
+               memcpy(phys_to_virt(phys), vaddr, size);
        }
 }
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                   dma_addr_t tbl_dma_addr,
@@ -489,7 +488,7 @@ found:
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-               swiotlb_bounce(phys, phys_to_virt(dma_addr), size, DMA_TO_DEVICE);
+               swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 
        return dma_addr;
 }
@@ -522,8 +521,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t dma_addr,
         * First, sync the memory before unmapping the entry
         */
        if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-               swiotlb_bounce(phys, phys_to_virt(dma_addr),
-                              size, DMA_FROM_DEVICE);
+               swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 
        /*
         * Return the buffer to the free list by setting the corresponding
@@ -564,15 +562,13 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t dma_addr,
        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(phys, phys_to_virt(dma_addr),
-                                      size, DMA_FROM_DEVICE);
+                       swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(phys, phys_to_virt(dma_addr),
-                                      size, DMA_TO_DEVICE);
+                       swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to