On Fri, 2007-04-27 at 18:13 -0600, Alex Williamson wrote:
> We need to get a CONFIG_IA64_GENERIC kernel working, and the first
> step to doing that seems to be fixing our usage of the i386 swiotlb and
> pci-dma-xen.  The patch below paravirtualizes lib/swiotlb.c and rips out
> a lot of the code we had around for supporting the i386 versions.  It
> should be a little more straightforward to switch to the generic kernel
> flavor after this.  This hasn't had much testing, so please don't run
> this on a machine with data you care about.  I'm running it with
> swiotlb=force to try to exercise it, and it's holding up well.  Comments
> welcome.  Thanks,
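
(An aside on the test setup: swiotlb=force makes every DMA mapping go
through the bounce buffers rather than only those the device cannot
reach, so the new code paths are exercised on every transfer.  An
illustrative boot line, not taken from the original mail, would be
"console=ttyS0 root=/dev/sda2 swiotlb=force".)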

   FWIW, here are the changes from the stock swiotlb for paravirtualization.
Thanks,

        Alex
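
For the curious, the power-of-two rounding loop that appears in both
init paths of the patch can be sanity-checked in isolation.  Below is a
minimal standalone sketch; the helper name and the test harness are
illustrative and not part of the patch.  The trick: n & ~(n - 1)
isolates the lowest set bit, and adding it clears that bit and carries
upward, so the loop terminates at the next power of two >= n.

#include <assert.h>
#include <stdio.h>

/* Open-coded in the patch; wrapped in a (hypothetical) helper here. */
static unsigned long round_up_pow2(unsigned long n)
{
	while (n & (n - 1))		/* more than one bit set? */
		n += n & ~(n - 1);	/* add the lowest set bit */
	return n;
}

int main(void)
{
	assert(round_up_pow2(1) == 1);
	assert(round_up_pow2(3) == 4);
	assert(round_up_pow2(4096) == 4096);
	assert(round_up_pow2(5000) == 8192);
	printf("rounding loop ok\n");
	return 0;
}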

--- swiotlb.c   2006-09-19 21:42:06.000000000 -0600
+++ swiotlb.c   2007-04-27 17:43:14.000000000 -0600
@@ -32,11 +32,20 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 
+#ifdef CONFIG_XEN
+#define VIRT_TO_PHYS(x)                                virt_to_bus(x)
+#define PHYS_TO_VIRT(x)                                bus_to_virt(x)
+#else
+#define VIRT_TO_PHYS(x)                                virt_to_phys(x)
+#define PHYS_TO_VIRT(x)                                phys_to_virt(x)
+#define range_straddles_page_boundary(x, y)    (0)
+#endif
+
 #define OFFSET(val,align) ((unsigned long)     \
                           ( (val) & ( (align) - 1)))
 
 #define SG_ENT_VIRT_ADDRESS(sg)        (page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG)        virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(SG)        VIRT_TO_PHYS(SG_ENT_VIRT_ADDRESS(SG))
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -139,6 +148,13 @@
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
 
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               /* Round up to a power of two */
+               while (io_tlb_nslabs & (io_tlb_nslabs - 1))
+                       io_tlb_nslabs += io_tlb_nslabs & ~(io_tlb_nslabs - 1);
+       }
+#endif
        /*
         * Get IO TLB memory from the low pages
         */
@@ -147,6 +163,17 @@
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
 
+#ifdef CONFIG_XEN
+       /* TODO: add logic for trying to get lower pages */
+       for (i = 0 ; i < io_tlb_nslabs ; i += IO_TLB_SEGSIZE) {
+               if (xen_create_contiguous_region(
+                               (unsigned long)io_tlb_start +
+                               (i << IO_TLB_SHIFT),
+                               get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT), 32))
+                       panic("Failed to setup Xen contiguous region");
+       }
+#endif
+
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -162,6 +189,11 @@
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+#ifdef CONFIG_XEN
+       if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
+                                        get_order(io_tlb_overflow), 32))
+               panic("Failed to setup Xen contiguous region for overflow");
+#endif
        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
               virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
 }
@@ -188,6 +220,13 @@
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
 
+#ifdef CONFIG_XEN
+       if (is_running_on_xen()) {
+               /* Round up to a power of two */
+               while (io_tlb_nslabs & (io_tlb_nslabs - 1))
+                       io_tlb_nslabs += io_tlb_nslabs & ~(io_tlb_nslabs - 1);
+       }
+#endif
        /*
         * Get IO TLB memory from the low pages
         */
@@ -213,6 +252,16 @@
        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
        memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
 
+#ifdef CONFIG_XEN
+       /* TODO: add logic for trying to get lower pages */
+       for (i = 0 ; i < io_tlb_nslabs ; i += IO_TLB_SEGSIZE) {
+               if (xen_create_contiguous_region(
+                               (unsigned long)io_tlb_start +
+                               (i << IO_TLB_SHIFT),
+                               get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT), 32))
+                       panic("Failed to setup Xen contiguous region");
+       }
+#endif
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -242,6 +291,11 @@
        if (!io_tlb_overflow_buffer)
                goto cleanup4;
 
+#ifdef CONFIG_XEN
+       if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
+                                        get_order(io_tlb_overflow), 32))
+               panic("Failed to setup Xen contiguous region for overflow");
+#endif
        printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
               "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
               virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
@@ -445,7 +499,18 @@
        flags |= GFP_DMA;
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+#ifdef CONFIG_XEN
+       if (ret && is_running_on_xen()) {
+               u64 mask = hwdev->coherent_dma_mask ? hwdev->coherent_dma_mask :
+                                                     DMA_32BIT_MASK;
+               if (xen_create_contiguous_region((unsigned long)ret, order,
+                                                fls64(mask))) {
+                       free_pages((unsigned long)ret, order);
+                       ret = NULL;
+               }
+       }
+#endif
+       if (ret && address_needs_mapping(hwdev, VIRT_TO_PHYS(ret))) {
                /*
                 * The allocated memory isn't reachable by the device.
                 * Fall back on swiotlb_map_single().
@@ -465,11 +530,11 @@
                if (swiotlb_dma_mapping_error(handle))
                        return NULL;
 
-               ret = phys_to_virt(handle);
+               ret = PHYS_TO_VIRT(handle);
        }
 
        memset(ret, 0, size);
-       dev_addr = virt_to_phys(ret);
+       dev_addr = VIRT_TO_PHYS(ret);
 
        /* Confirm address can be DMA'd by device */
        if (address_needs_mapping(hwdev, dev_addr)) {
@@ -487,9 +552,14 @@
                      dma_addr_t dma_handle)
 {
        if (!(vaddr >= (void *)io_tlb_start
-                    && vaddr < (void *)io_tlb_end))
+                    && vaddr < (void *)io_tlb_end)) {
+#ifdef CONFIG_XEN
+               if (is_running_on_xen())
+                       xen_destroy_contiguous_region((unsigned long)vaddr,
+                                                     get_order(size));
+#endif
                free_pages((unsigned long) vaddr, get_order(size));
-       else
+       } else
                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }
@@ -525,7 +595,7 @@
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-       unsigned long dev_addr = virt_to_phys(ptr);
+       unsigned long dev_addr = VIRT_TO_PHYS(ptr);
        void *map;
 
        BUG_ON(dir == DMA_NONE);
@@ -534,7 +604,8 @@
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+       if (!range_straddles_page_boundary(ptr, size) &&
+           !address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
                return dev_addr;
 
        /*
@@ -546,7 +617,7 @@
                map = io_tlb_overflow_buffer;
        }
 
-       dev_addr = virt_to_phys(map);
+       dev_addr = VIRT_TO_PHYS(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -588,7 +659,7 @@
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
                     int dir)
 {
-       char *dma_addr = phys_to_virt(dev_addr);
+       char *dma_addr = PHYS_TO_VIRT(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -611,7 +682,7 @@
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
 {
-       char *dma_addr = phys_to_virt(dev_addr);
+       char *dma_addr = PHYS_TO_VIRT(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -642,7 +713,7 @@
                          unsigned long offset, size_t size,
                          int dir, int target)
 {
-       char *dma_addr = phys_to_virt(dev_addr) + offset;
+       char *dma_addr = PHYS_TO_VIRT(dev_addr) + offset;
 
        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
@@ -695,7 +766,7 @@
 
        for (i = 0; i < nelems; i++, sg++) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
-               dev_addr = virt_to_phys(addr);
+               dev_addr = VIRT_TO_PHYS(addr);
                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
                        void *map = map_single(hwdev, addr, sg->length, dir);
                        sg->dma_address = virt_to_bus(map);
@@ -728,7 +799,7 @@
 
        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                       unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+                       unmap_single(hwdev, (void *) PHYS_TO_VIRT(sg->dma_address), sg->dma_length, dir);
                else if (dir == DMA_FROM_DEVICE)
                        mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
@@ -771,7 +842,7 @@
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
-       return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+       return (dma_addr == VIRT_TO_PHYS(io_tlb_overflow_buffer));
 }
 
 /*
@@ -783,7 +854,7 @@
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-       return (virt_to_phys (io_tlb_end) - 1) <= mask;
+       return (VIRT_TO_PHYS(io_tlb_end) - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
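
A note on the range_straddles_page_boundary() hook: natively it
compiles away to 0, since a virtually contiguous buffer below the DMA
mask is always safe to hand to the device.  Under Xen, pages that are
adjacent in guest pseudo-physical space need not be adjacent in machine
memory, so a buffer crossing a page boundary may have to be bounced
even when address_needs_mapping() says it is reachable.  A simplified,
conservative sketch of the boundary test follows; the real Xen helper
lives in the Xen headers and can additionally prove machine contiguity,
whereas this version only checks the boundary and assumes size > 0.

#include <stddef.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed for illustration */

/* True if [p, p + size) touches more than one page and therefore
 * might not be machine-contiguous under Xen. */
static inline int straddles_page_boundary(const void *p, size_t size)
{
	unsigned long start = (unsigned long)p;
	unsigned long last  = start + size - 1;

	return (start >> PAGE_SHIFT) != (last >> PAGE_SHIFT);
}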


