This change makes swiotlb_tbl_map_single return a physical address instead
of a virtual address.  The advantage, once again, is that we avoid a number
of virt_to_phys and phys_to_virt translations by working with everything as
a physical address.
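
As an illustration (a sketch, not part of the patch), the Xen caller's flow
changes roughly like this, using the names from the hunks below:

	/* Before: the mapping came back virtual and had to be translated. */
	void *vaddr = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	dev_addr = xen_virt_to_bus(vaddr);	/* virt -> phys -> bus */

	/* After: the address stays physical all the way to the bus address. */
	phys_addr_t map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	dev_addr = xen_phys_to_bus(map);	/* phys -> bus, no virt round trip */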

One change I had to make to support physical addresses is that I could no
longer trust 0 to be an invalid physical address on all platforms.  So
instead ~0 is returned on error.  This should never be a valid return value,
as it would imply that only one byte would be available for use.
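
For reference, callers now check for the new sentinel roughly like this
(again just a sketch, based on the definitions introduced in the patch):

	#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)

	phys_addr_t map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;	/* no bounce buffer slots available */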

Signed-off-by: Alexander Duyck <alexander.h.du...@intel.com>
---

 drivers/xen/swiotlb-xen.c |   22 +++++++-------
 include/linux/swiotlb.h   |   11 +++++--
 lib/swiotlb.c             |   73 +++++++++++++++++++++++----------------------
 3 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 58db6df..8a6035a 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -338,9 +338,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
 {
-       phys_addr_t phys = page_to_phys(page) + offset;
+       phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);
-       void *map;
 
        BUG_ON(dir == DMA_NONE);
        /*
@@ -356,16 +355,16 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
-       if (!map)
+       if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
-       dev_addr = xen_virt_to_bus(map);
+       dev_addr = xen_phys_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
         */
        if (!dma_capable(dev, dev_addr, size)) {
-               swiotlb_tbl_unmap_single(dev, map, size, dir);
+               swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
                dev_addr = 0;
        }
        return dev_addr;
@@ -494,11 +493,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
-                       void *map = swiotlb_tbl_map_single(hwdev,
-                                                          start_dma_addr,
-                                                          sg_phys(sg),
-                                                          sg->length, dir);
-                       if (!map) {
+                       phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+                                                                start_dma_addr,
+                                                                sg_phys(sg),
+                                                                sg->length,
+                                                                dir);
+                       if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
@@ -506,7 +506,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                sgl[0].dma_length = 0;
                                return DMA_ERROR_CODE;
                        }
-                       sg->dma_address = xen_virt_to_bus(map);
+                       sg->dma_address = xen_phys_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 8d08b3e..1995f3e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -34,9 +34,14 @@ enum dma_sync_target {
        SYNC_FOR_CPU = 0,
        SYNC_FOR_DEVICE = 1,
 };
-extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-                                   phys_addr_t phys, size_t size,
-                                   enum dma_data_direction dir);
+
+/* define the last possible byte of physical address space as a mapping error */
+#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
+
+extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+                                         dma_addr_t tbl_dma_addr,
+                                         phys_addr_t phys, size_t size,
+                                         enum dma_data_direction dir);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
                                     size_t size, enum dma_data_direction dir);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 62848fb..55e052e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -389,12 +389,13 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 }
 EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-                            phys_addr_t phys, size_t size,
-                            enum dma_data_direction dir)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+                                  dma_addr_t tbl_dma_addr,
+                                  phys_addr_t phys, size_t size,
+                                  enum dma_data_direction dir)
 {
        unsigned long flags;
-       char *dma_addr;
+       phys_addr_t dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long mask;
@@ -458,7 +459,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
                                io_tlb_list[i] = 0;
                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
-                       dma_addr = (char *)phys_to_virt(io_tlb_start) + (index << IO_TLB_SHIFT);
+                       dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 
                        /*
                         * Update the indices to avoid searching in the next
@@ -476,7 +477,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 
 not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
-       return NULL;
+       return SWIOTLB_MAP_ERROR;
 found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
 
@@ -488,7 +489,7 @@ found:
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-               swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+               swiotlb_bounce(phys, phys_to_virt(dma_addr), size, DMA_TO_DEVICE);
 
        return dma_addr;
 }
@@ -498,9 +499,8 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-          enum dma_data_direction dir)
+static phys_addr_t map_single(struct device *hwdev, phys_addr_t phys,
+                             size_t size, enum dma_data_direction dir)
 {
        dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
@@ -594,12 +594,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                dma_mask = hwdev->coherent_dma_mask;
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
-               /*
-                * The allocated memory isn't reachable by the device.
-                */
-               free_pages((unsigned long) ret, order);
-               ret = NULL;
+       if (ret) {
+               dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+               if (dev_addr + size - 1 > dma_mask) {
+                       /*
+                        * The allocated memory isn't reachable by the device.
+                        */
+                       free_pages((unsigned long) ret, order);
+                       ret = NULL;
+               }
        }
        if (!ret) {
                /*
@@ -607,13 +610,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 * GFP_DMA memory; fall back on map_single(), which
                 * will grab memory from the lowest available address range.
                 */
-               ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
-               if (!ret)
+               phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+               if (paddr == SWIOTLB_MAP_ERROR)
                        return NULL;
-       }
 
-       memset(ret, 0, size);
-       dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+               ret = phys_to_virt(paddr);
+               dev_addr = phys_to_dma(hwdev, paddr);
+       }
 
        /* Confirm address can be DMA'd by device */
        if (dev_addr + size - 1 > dma_mask) {
@@ -625,7 +628,10 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                return NULL;
        }
+
        *dma_handle = dev_addr;
+       memset(ret, 0, size);
+
        return ret;
 }
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
@@ -682,9 +688,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            enum dma_data_direction dir,
                            struct dma_attrs *attrs)
 {
-       phys_addr_t phys = page_to_phys(page) + offset;
+       phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);
-       void *map;
 
        BUG_ON(dir == DMA_NONE);
        /*
@@ -695,22 +700,18 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
                return dev_addr;
 
-       /*
-        * Oh well, have to allocate and map a bounce buffer.
-        */
+       /* Oh well, have to allocate and map a bounce buffer. */
        map = map_single(dev, phys, size, dir);
-       if (!map) {
+       if (map == SWIOTLB_MAP_ERROR) {
                swiotlb_full(dev, size, dir, 1);
                return phys_to_dma(dev, io_tlb_overflow_buffer);
        }
 
-       dev_addr = swiotlb_virt_to_bus(dev, map);
+       dev_addr = phys_to_dma(dev, map);
 
-       /*
-        * Ensure that the address returned is DMA'ble
-        */
+       /* Ensure that the address returned is DMA'ble */
        if (!dma_capable(dev, dev_addr, size)) {
-               swiotlb_tbl_unmap_single(dev, map, size, dir);
+               swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
                return phys_to_dma(dev, io_tlb_overflow_buffer);
        }
 
@@ -836,9 +837,9 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length)) {
-                       void *map = map_single(hwdev, sg_phys(sg),
-                                              sg->length, dir);
-                       if (!map) {
+                       phys_addr_t map = map_single(hwdev, sg_phys(sg),
+                                                    sg->length, dir);
+                       if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
@@ -847,7 +848,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                                sgl[0].dma_length = 0;
                                return 0;
                        }
-                       sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+                       sg->dma_address = phys_to_dma(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
