Author: kib
Date: Tue Feb 12 16:57:20 2013
New Revision: 246713
URL: http://svnweb.freebsd.org/changeset/base/246713

Log:
  Reform the busdma API so that new types may be added without modifying
  every architecture's busdma_machdep.c.  This is done by unifying the
  bus_dmamap_load_buffer() routines so that they may be called from MI
  code.  The MD busdma is then given a chance to do any final processing
  in the complete() callback.
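  
  As a sketch of the resulting MI-side dispatch (a minimal sketch, not
  the committed code: the struct memdesc field and constant names below
  are illustrative assumptions, since the diff only shows the MD half),
  the unified MD loaders can be driven like this:
  
	/*
	 * Hypothetical condensed MI loader.  _bus_dmamap_load_buffer(),
	 * _bus_dmamap_load_phys() and the "-1 means first segment"
	 * convention are taken from the MD diff below; the memdesc
	 * internals (md_type, u.md_vaddr, u.md_paddr, md_len) are
	 * assumptions.
	 */
	static int
	load_mem_sketch(bus_dma_tag_t dmat, bus_dmamap_t map,
	    struct memdesc *mem, int flags, bus_dma_segment_t *segs,
	    int *nsegs)
	{
		int error;
  
		*nsegs = -1;	/* _bus_dmamap_addseg() treats -1 as "first". */
		switch (mem->md_type) {			/* assumed field */
		case MEMDESC_VADDR:			/* assumed constant */
			error = _bus_dmamap_load_buffer(dmat, map,
			    mem->u.md_vaddr, mem->md_len, kernel_pmap,
			    flags, segs, nsegs);
			break;
		case MEMDESC_PADDR:			/* assumed constant */
			error = _bus_dmamap_load_phys(dmat, map,
			    mem->u.md_paddr, mem->md_len, flags, segs, nsegs);
			break;
		default:
			error = EINVAL;
			break;
		}
		(*nsegs)++;	/* Convert the last segment index to a count. */
		return (error);
	}
  
  On success the MI code can then call the MD _bus_dmamap_complete()
  hook (visible in the diff below) so each architecture gets a chance to
  post-process the segment list.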
  
  The cam changes unify the bus_dmamap_load* handling in cam drivers.
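  
  For a SIM driver the practical effect is that a single CCB-aware load
  call replaces the old open-coded vaddr/paddr/sglist branches.  A
  hedged driver-side sketch (the loader name bus_dmamap_load_ccb() and
  all my_*, sc-> and req-> names are assumptions here; the cam portion
  of the diff is truncated below):
  
	/* Hypothetical SIM I/O start path. */
	static void
	my_sim_start_io(struct my_softc *sc, struct my_req *req,
	    union ccb *ccb)
	{
		int error;
  
		error = bus_dmamap_load_ccb(sc->dmat, req->map, ccb,
		    my_dma_cb, req, BUS_DMA_NOWAIT);
		if (error == EINPROGRESS)
			return;		/* Deferred; my_dma_cb() fires later. */
		if (error != 0)
			my_fail_req(req, error);
	}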
  
  The arm and mips implementations are updated to track virtual
  addresses for sync().  Previously this was done in a type-specific
  way.  Now it is done generically by recording the list of virtual
  ranges in the map, as sketched below.
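  
  Concretely, each map now embeds an array of sync_list records plus a
  sync_count; the load path appends or coalesces virtual ranges there,
  and sync() walks slist[0..sync_count) instead of reconstructing the
  ranges per type.  Condensed from the arm v6 diff below (the vaddr
  comment is reworded here: it holds the client KVA, not the bounce
  buffer):
  
	struct sync_list {
		vm_offset_t	vaddr;		/* kva of the client data */
		bus_addr_t	busaddr;	/* physical address */
		bus_size_t	datacount;	/* client data count */
	};
  
	/* Record [vaddr, vaddr + sgsize), merging with the previous
	 * entry when the virtual range is contiguous with it. */
	sl = &map->slist[map->sync_count - 1];
	if (map->sync_count == 0 || vaddr != sl->vaddr + sl->datacount) {
		if (++map->sync_count > dmat->nsegments)
			goto cleanup;	/* no room: map unloads, EFBIG */
		sl++;
		sl->vaddr = vaddr;
		sl->datacount = sgsize;
		sl->busaddr = curaddr;
	} else
		sl->datacount += sgsize;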
  
  Submitted by: jeff (sponsored by EMC/Isilon)
  Reviewed by:  kan (previous version), scottl,
        mjacob (isp(4), no objections for target mode changes)
  Discussed with: ian (arm changes)
  Tested by:    sparc64 (marius), mips (jmallet), isci(4) on x86 (jharris),
        amd64 (Fabian Keil <freebsd-lis...@fabiankeil.de>)

Added:
  head/sys/kern/subr_bus_dma.c   (contents, props changed)
  head/sys/sys/memdesc.h   (contents, props changed)
Modified:
  head/sys/arm/arm/busdma_machdep-v6.c
  head/sys/arm/arm/busdma_machdep.c
  head/sys/cam/cam_ccb.h
  head/sys/cam/cam_xpt.c
  head/sys/cam/ctl/ctl_frontend_cam_sim.c
  head/sys/cam/ctl/scsi_ctl.c
  head/sys/cam/scsi/scsi_pass.c
  head/sys/cam/scsi/scsi_target.c
  head/sys/conf/files
  head/sys/dev/aac/aac_cam.c
  head/sys/dev/advansys/advansys.c
  head/sys/dev/advansys/adwcam.c
  head/sys/dev/aha/aha.c
  head/sys/dev/ahb/ahb.c
  head/sys/dev/ahci/ahci.c
  head/sys/dev/aic/aic.c
  head/sys/dev/aic7xxx/aic79xx_osm.c
  head/sys/dev/aic7xxx/aic7xxx_osm.c
  head/sys/dev/amr/amr_cam.c
  head/sys/dev/arcmsr/arcmsr.c
  head/sys/dev/ata/ata-dma.c
  head/sys/dev/ata/atapi-cam.c
  head/sys/dev/buslogic/bt.c
  head/sys/dev/ciss/ciss.c
  head/sys/dev/ciss/cissvar.h
  head/sys/dev/dpt/dpt_scsi.c
  head/sys/dev/firewire/sbp.c
  head/sys/dev/hpt27xx/osm_bsd.c
  head/sys/dev/hptiop/hptiop.c
  head/sys/dev/hptmv/entry.c
  head/sys/dev/hptrr/hptrr_osm_bsd.c
  head/sys/dev/iir/iir.c
  head/sys/dev/isci/isci_io_request.c
  head/sys/dev/isp/isp_pci.c
  head/sys/dev/isp/isp_sbus.c
  head/sys/dev/mfi/mfi.c
  head/sys/dev/mfi/mfi_cam.c
  head/sys/dev/mfi/mfivar.h
  head/sys/dev/mly/mly.c
  head/sys/dev/mly/mlyvar.h
  head/sys/dev/mps/mps.c
  head/sys/dev/mps/mps_sas.c
  head/sys/dev/mps/mpsvar.h
  head/sys/dev/mpt/mpt_cam.c
  head/sys/dev/mvs/mvs.c
  head/sys/dev/siis/siis.c
  head/sys/dev/sym/sym_hipd.c
  head/sys/dev/trm/trm.c
  head/sys/dev/twa/tw_osl.h
  head/sys/dev/twa/tw_osl_cam.c
  head/sys/dev/twa/tw_osl_freebsd.c
  head/sys/dev/tws/tws.h
  head/sys/dev/tws/tws_cam.c
  head/sys/dev/virtio/scsi/virtio_scsi.c
  head/sys/dev/wds/wd7000.c
  head/sys/ia64/ia64/busdma_machdep.c
  head/sys/kern/subr_uio.c
  head/sys/mips/mips/busdma_machdep.c
  head/sys/powerpc/powerpc/busdma_machdep.c
  head/sys/powerpc/ps3/ps3cdrom.c
  head/sys/sparc64/include/bus_dma.h
  head/sys/sparc64/sparc64/bus_machdep.c
  head/sys/sparc64/sparc64/iommu.c
  head/sys/sys/bus_dma.h
  head/sys/sys/uio.h
  head/sys/x86/x86/busdma_machdep.c

Modified: head/sys/arm/arm/busdma_machdep-v6.c
==============================================================================
--- head/sys/arm/arm/busdma_machdep-v6.c        Tue Feb 12 16:51:43 2013        (r246712)
+++ head/sys/arm/arm/busdma_machdep-v6.c        Tue Feb 12 16:57:20 2013        (r246713)
@@ -46,11 +46,11 @@ __FBSDID("$FreeBSD$");
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
+#include <sys/memdesc.h>
 #include <sys/proc.h>
 #include <sys/mutex.h>
-#include <sys/mbuf.h>
-#include <sys/uio.h>
 #include <sys/sysctl.h>
+#include <sys/uio.h>
 
 #include <vm/vm.h>
 #include <vm/vm_page.h>
@@ -111,6 +111,7 @@ struct bounce_page {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
+       bus_addr_t      dataaddr;       /* client physical address */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
 };
@@ -119,7 +120,6 @@ struct sync_list {
        vm_offset_t     vaddr;          /* kva of bounce buffer */
        bus_addr_t      busaddr;        /* Physical address */
        bus_size_t      datacount;      /* client data count */
-       STAILQ_ENTRY(sync_list) slinks;
 };
 
 int busdma_swi_pending;
@@ -156,15 +156,15 @@ struct bus_dmamap {
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
-       void                  *buf;             /* unmapped buffer pointer */
-       bus_size_t             buflen;          /* unmapped buffer length */
+       struct memdesc         mem;
        pmap_t                 pmap;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        int                   flags;
 #define DMAMAP_COHERENT                (1 << 0)
        STAILQ_ENTRY(bus_dmamap) links;
-       STAILQ_HEAD(,sync_list) slist;
+       int                    sync_count;
+       struct sync_list       slist[];
 };
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -176,11 +176,16 @@ static int alloc_bounce_pages(bus_dma_ta
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-                                  vm_offset_t vaddr, bus_size_t size);
+                                 vm_offset_t vaddr, bus_addr_t addr,
+                                 bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
-static int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     void *buf, bus_size_t buflen, int flags);
+static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int flags);
+static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+    int flags);
 
 static busdma_bufalloc_t coherent_allocator;   /* Cache of coherent buffers */
 static busdma_bufalloc_t standard_allocator;   /* Cache of standard buffers */
@@ -493,17 +498,18 @@ out:
 int
 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
+       int mapsize;
        int error;
 
        error = 0;
 
-       *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
-                                            M_NOWAIT | M_ZERO);
+       mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
+       *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
                return (ENOMEM);
        }
-       STAILQ_INIT(&((*mapp)->slist));
+       (*mapp)->sync_count = 0;
 
        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
@@ -578,8 +584,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 int
 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
-       if (STAILQ_FIRST(&map->bpages) != NULL ||
-           STAILQ_FIRST(&map->slist) != NULL) {
+       if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
                CTR3(KTR_BUSDMA, "%s: tag %p error %d",
                    __func__, dmat, EBUSY);
                return (EBUSY);
@@ -606,6 +611,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
        struct busdma_bufzone *bufzone;
        vm_memattr_t memattr;
        int mflags;
+       int mapsize;
 
        if (flags & BUS_DMA_NOWAIT)
                mflags = M_NOWAIT;
@@ -614,15 +620,15 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
 
        /* ARM non-snooping caches need a map for the VA cache sync structure */
 
-       *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
-                                            M_NOWAIT | M_ZERO);
+       mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
+       *mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
                    __func__, dmat, dmat->flags, ENOMEM);
                return (ENOMEM);
        }
 
-       STAILQ_INIT(&((*mapp)->slist));
+       (*mapp)->sync_count = 0;
 
        if (dmat->segments == NULL) {
                dmat->segments = (bus_dma_segment_t *)malloc(
@@ -733,7 +739,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void
        CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
 }
 
-static int
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+    bus_size_t buflen, int flags)
+{
+       bus_addr_t curaddr;
+       bus_size_t sgsize;
+
+       if (map->pagesneeded == 0) {
+               CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
+                   " map= %p, pagesneeded= %d",
+                   dmat->lowaddr, dmat->boundary, dmat->alignment,
+                   map, map->pagesneeded);
+               /*
+                * Count the number of bounce pages
+                * needed in order to complete this transfer
+                */
+               curaddr = buf;
+               while (buflen != 0) {
+                       sgsize = MIN(buflen, dmat->maxsegsz);
+                       if (run_filter(dmat, curaddr) != 0) {
+                               sgsize = MIN(sgsize, PAGE_SIZE);
+                               map->pagesneeded++;
+                       }
+                       curaddr += sgsize;
+                       buflen -= sgsize;
+               }
+               CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
+       }
+}
+
+static void
 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     void *buf, bus_size_t buflen, int flags)
 {
@@ -754,12 +790,11 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
                vendaddr = (vm_offset_t)buf + buflen;
 
                while (vaddr < vendaddr) {
-                       if (__predict_true(map->pmap == pmap_kernel()))
+                       if (__predict_true(map->pmap == kernel_pmap))
                                paddr = pmap_kextract(vaddr);
                        else
                                paddr = pmap_extract(map->pmap, vaddr);
-                       if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
-                           run_filter(dmat, paddr) != 0) {
+                       if (run_filter(dmat, paddr) != 0) {
                                map->pagesneeded++;
                        }
                        vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
@@ -767,72 +802,190 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
                }
                CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
        }
+}
+
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
+{
 
        /* Reserve Necessary Bounce Pages */
-       if (map->pagesneeded != 0) {
-               mtx_lock(&bounce_lock);
-               if (flags & BUS_DMA_NOWAIT) {
-                       if (reserve_bounce_pages(dmat, map, 0) != 0) {
-                               map->pagesneeded = 0;
-                               mtx_unlock(&bounce_lock);
-                               return (ENOMEM);
-                       }
-               } else {
-                       if (reserve_bounce_pages(dmat, map, 1) != 0) {
-                               /* Queue us for resources */
-                               map->dmat = dmat;
-                               map->buf = buf;
-                               map->buflen = buflen;
-                               STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
-                                   map, links);
-                               mtx_unlock(&bounce_lock);
-                               return (EINPROGRESS);
-                       }
+       mtx_lock(&bounce_lock);
+       if (flags & BUS_DMA_NOWAIT) {
+               if (reserve_bounce_pages(dmat, map, 0) != 0) {
+                       map->pagesneeded = 0;
+                       mtx_unlock(&bounce_lock);
+                       return (ENOMEM);
+               }
+       } else {
+               if (reserve_bounce_pages(dmat, map, 1) != 0) {
+                       /* Queue us for resources */
+                       STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+                       mtx_unlock(&bounce_lock);
+                       return (EINPROGRESS);
                }
-               mtx_unlock(&bounce_lock);
        }
+       mtx_unlock(&bounce_lock);
 
        return (0);
 }
 
 /*
- * Utility function to load a linear buffer. lastaddrp holds state
- * between invocations (for multiple-buffer loads).  segp contains
+ * Add a single contiguous physical range to the segment list.
+ */
+static int
+_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+                  bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+       bus_addr_t baddr, bmask;
+       int seg;
+
+       /*
+        * Make sure we don't cross any boundaries.
+        */
+       bmask = ~(dmat->boundary - 1);
+       if (dmat->boundary > 0) {
+               baddr = (curaddr + dmat->boundary) & bmask;
+               if (sgsize > (baddr - curaddr))
+                       sgsize = (baddr - curaddr);
+       }
+
+       if (dmat->ranges) {
+               struct arm32_dma_range *dr;
+
+               dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
+                   curaddr);
+               if (dr == NULL) {
+                       _bus_dmamap_unload(dmat, map);
+                       return (EINVAL);
+               }
+               /*
+                * In a valid DMA range.  Translate the physical
+                * memory address to an address in the DMA window.
+                */
+               curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
+       }
+
+       /*
+        * Insert chunk into a segment, coalescing with
+        * previous segment if possible.
+        */
+       seg = *segp;
+       if (seg == -1) {
+               seg = 0;
+               segs[seg].ds_addr = curaddr;
+               segs[seg].ds_len = sgsize;
+       } else {
+               if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+                   (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+                   (dmat->boundary == 0 ||
+                    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+                       segs[seg].ds_len += sgsize;
+               else {
+                       if (++seg >= dmat->nsegments)
+                               return (0);
+                       segs[seg].ds_addr = curaddr;
+                       segs[seg].ds_len = sgsize;
+               }
+       }
+       *segp = seg;
+       return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer.  segp contains
  * the starting segment on entrance, and the ending segment on exit.
- * first indicates if this is the first invocation of this function.
  */
-static __inline int
+int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat,
+                     bus_dmamap_t map,
+                     vm_paddr_t buf, bus_size_t buflen,
+                     int flags,
+                     bus_dma_segment_t *segs,
+                     int *segp)
+{
+       bus_addr_t curaddr;
+       bus_size_t sgsize;
+       int error;
+
+       if (segs == NULL)
+               segs = dmat->segments;
+
+       if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
+               _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+               if (map->pagesneeded != 0) {
+                       error = _bus_dmamap_reserve_pages(dmat, map, flags);
+                       if (error)
+                               return (error);
+               }
+       }
+
+       while (buflen > 0) {
+               curaddr = buf;
+               sgsize = MIN(buflen, dmat->maxsegsz);
+               if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+                   map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
+                       sgsize = MIN(sgsize, PAGE_SIZE);
+                       curaddr = add_bounce_page(dmat, map, 0, curaddr,
+                                                 sgsize);
+               }
+               sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+                   segp);
+               if (sgsize == 0)
+                       break;
+               buf += sgsize;
+               buflen -= sgsize;
+       }
+
+       /*
+        * Did we fit?
+        */
+       if (buflen != 0) {
+               _bus_dmamap_unload(dmat, map);
+               return (EFBIG); /* XXX better return value here? */
+       }
+       return (0);
+}
+
+/*
+ * Utility function to load a linear buffer.  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+int
 _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        void *buf, bus_size_t buflen,
+                       pmap_t pmap,
                        int flags,
-                       bus_addr_t *lastaddrp,
                        bus_dma_segment_t *segs,
-                       int *segp,
-                       int first)
+                       int *segp)
 {
        bus_size_t sgsize;
-       bus_addr_t curaddr, lastaddr, baddr, bmask;
+       bus_addr_t curaddr;
        vm_offset_t vaddr;
        struct sync_list *sl;
-       int seg, error;
+       int error;
+
+       if (segs == NULL)
+               segs = dmat->segments;
 
        if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
-               error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
-               if (error)
-                       return (error);
+               _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
+               if (map->pagesneeded != 0) {
+                       error = _bus_dmamap_reserve_pages(dmat, map, flags);
+                       if (error)
+                               return (error);
+               }
        }
 
        sl = NULL;
        vaddr = (vm_offset_t)buf;
-       lastaddr = *lastaddrp;
-       bmask = ~(dmat->boundary - 1);
+       map->pmap = pmap;
 
-       for (seg = *segp; buflen > 0 ; ) {
+       while (buflen > 0) {
                /*
                 * Get the physical address for this segment.
                 */
-               if (__predict_true(map->pmap == pmap_kernel()))
+               if (__predict_true(map->pmap == kernel_pmap))
                        curaddr = pmap_kextract(vaddr);
                else
                        curaddr = pmap_extract(map->pmap, vaddr);
@@ -846,259 +999,63 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
                if (buflen < sgsize)
                        sgsize = buflen;
 
-               /*
-                * Make sure we don't cross any boundaries.
-                */
-               if (dmat->boundary > 0) {
-                       baddr = (curaddr + dmat->boundary) & bmask;
-                       if (sgsize > (baddr - curaddr))
-                               sgsize = (baddr - curaddr);
-               }
-
                if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
                    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
-                       curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+                       curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+                                                 sgsize);
                } else {
-                       /* add_sync_list(dmat, map, vaddr, sgsize, cflag); */
-                       sl = (struct sync_list *)malloc(sizeof(struct sync_list),
-                                               M_DEVBUF, M_NOWAIT | M_ZERO);
-                       if (sl == NULL)
-                               goto cleanup;
-                       STAILQ_INSERT_TAIL(&(map->slist), sl, slinks);
-                       sl->vaddr = vaddr;
-                       sl->datacount = sgsize;
-                       sl->busaddr = curaddr;
-               }
-
-
-               if (dmat->ranges) {
-                       struct arm32_dma_range *dr;
-
-                       dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
-                           curaddr);
-                       if (dr == NULL) {
-                               _bus_dmamap_unload(dmat, map);
-                               return (EINVAL);
-                       }
-                       /*
-                        * In a valid DMA range.  Translate the physical
-                        * memory address to an address in the DMA window.
-                        */
-                       curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
-               }
-
-               /*
-                * Insert chunk into a segment, coalescing with
-                * previous segment if possible.
-                */
-               if (first) {
-                       segs[seg].ds_addr = curaddr;
-                       segs[seg].ds_len = sgsize;
-                       first = 0;
-               } else {
-                       if (curaddr == lastaddr &&
-                           (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
-                           (dmat->boundary == 0 ||
-                            (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
-                               segs[seg].ds_len += sgsize;
-                       else {
-                               if (++seg >= dmat->nsegments)
-                                       break;
-                               segs[seg].ds_addr = curaddr;
-                               segs[seg].ds_len = sgsize;
-                       }
+                       sl = &map->slist[map->sync_count - 1];
+                       if (map->sync_count == 0 ||
+                           vaddr != sl->vaddr + sl->datacount) {
+                               if (++map->sync_count > dmat->nsegments)
+                                       goto cleanup;
+                               sl++;
+                               sl->vaddr = vaddr;
+                               sl->datacount = sgsize;
+                               sl->busaddr = curaddr;
+                       } else
+                               sl->datacount += sgsize;
                }
-
-               lastaddr = curaddr + sgsize;
+               sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+                                           segp);
+               if (sgsize == 0)
+                       break;
                vaddr += sgsize;
                buflen -= sgsize;
        }
 
-       *segp = seg;
-       *lastaddrp = lastaddr;
 cleanup:
        /*
         * Did we fit?
         */
        if (buflen != 0) {
                _bus_dmamap_unload(dmat, map);
-               return(EFBIG); /* XXX better return value here? */
+               return (EFBIG); /* XXX better return value here? */
        }
        return (0);
 }
 
-/*
- * Map the buffer buf into bus space using the dmamap map.
- */
-int
-bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
-               bus_size_t buflen, bus_dmamap_callback_t *callback,
-               void *callback_arg, int flags)
+
+void
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+                   struct memdesc *mem, bus_dmamap_callback_t *callback,
+                   void *callback_arg)
 {
-       bus_addr_t              lastaddr = 0;
-       int                     error, nsegs = 0;
 
-       flags |= BUS_DMA_WAITOK;
+       map->mem = *mem;
+       map->dmat = dmat;
        map->callback = callback;
        map->callback_arg = callback_arg;
-       map->pmap = kernel_pmap;
-
-       error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, flags,
-                    &lastaddr, dmat->segments, &nsegs, 1);
-
-       CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
-           __func__, dmat, dmat->flags, error, nsegs + 1);
-
-       if (error == EINPROGRESS) {
-               return (error);
-       }
-
-       if (error)
-               (*callback)(callback_arg, dmat->segments, 0, error);
-       else
-               (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
-
-       /*
-        * Return ENOMEM to the caller so that it can pass it up the stack.
-        * This error only happens when NOWAIT is set, so deferral is disabled.
-        */
-       if (error == ENOMEM)
-               return (error);
-
-       return (0);
-}
-
-
-/*
- * Like _bus_dmamap_load(), but for mbufs.
- */
-static __inline int
-_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
-                       struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
-                       int flags)
-{
-       int error;
-
-       M_ASSERTPKTHDR(m0);
-       map->pmap = kernel_pmap;
-
-       flags |= BUS_DMA_NOWAIT;
-       *nsegs = 0;
-       error = 0;
-       if (m0->m_pkthdr.len <= dmat->maxsize) {
-               int first = 1;
-               bus_addr_t lastaddr = 0;
-               struct mbuf *m;
-
-               for (m = m0; m != NULL && error == 0; m = m->m_next) {
-                       if (m->m_len > 0) {
-                               error = _bus_dmamap_load_buffer(dmat, map,
-                                               m->m_data, m->m_len,
-                                               flags, &lastaddr,
-                                               segs, nsegs, first);
-                               first = 0;
-                       }
-               }
-       } else {
-               error = EINVAL;
-       }
-
-       /* XXX FIXME: Having to increment nsegs is really annoying */
-       ++*nsegs;
-       CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
-           __func__, dmat, dmat->flags, error, *nsegs);
-       return (error);
-}
-
-int
-bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
-                    struct mbuf *m0,
-                    bus_dmamap_callback2_t *callback, void *callback_arg,
-                    int flags)
-{
-       int nsegs, error;
-
-       error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
-                   flags);
-
-       if (error) {
-               /* force "no valid mappings" in callback */
-               (*callback)(callback_arg, dmat->segments, 0, 0, error);
-       } else {
-               (*callback)(callback_arg, dmat->segments,
-                           nsegs, m0->m_pkthdr.len, error);
-       }
-       CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
-           __func__, dmat, dmat->flags, error, nsegs);
-
-       return (error);
 }
 
-int
-bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
-                       struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
-                       int flags)
+bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+                    bus_dma_segment_t *segs, int nsegs, int error)
 {
-       return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
-}
-
-/*
- * Like _bus_dmamap_load(), but for uios.
- */
-int
-bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
-                   struct uio *uio,
-                   bus_dmamap_callback2_t *callback, void *callback_arg,
-                   int flags)
-{
-       bus_addr_t lastaddr;
-       int nsegs, error, first, i;
-       bus_size_t resid;
-       struct iovec *iov;
-
-       flags |= BUS_DMA_NOWAIT;
-       resid = uio->uio_resid;
-       iov = uio->uio_iov;
-
-       if (uio->uio_segflg == UIO_USERSPACE) {
-               KASSERT(uio->uio_td != NULL,
-                       ("bus_dmamap_load_uio: USERSPACE but no proc"));
-               map->pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
-       } else
-               map->pmap = kernel_pmap;
 
-       nsegs = 0;
-       error = 0;
-       first = 1;
-       lastaddr = (bus_addr_t) 0;
-       for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
-               /*
-                * Now at the first iovec to load.  Load each iovec
-                * until we have exhausted the residual count.
-                */
-               bus_size_t minlen =
-                       resid < iov[i].iov_len ? resid : iov[i].iov_len;
-               caddr_t addr = (caddr_t) iov[i].iov_base;
-
-               if (minlen > 0) {
-                       error = _bus_dmamap_load_buffer(dmat, map,
-                                       addr, minlen, flags, &lastaddr,
-                                       dmat->segments, &nsegs, first);
-                       first = 0;
-                       resid -= minlen;
-               }
-       }
-
-       if (error) {
-               /* force "no valid mappings" in callback */
-               (*callback)(callback_arg, dmat->segments, 0, 0, error);
-       } else {
-               (*callback)(callback_arg, dmat->segments,
-                           nsegs+1, uio->uio_resid, error);
-       }
-       CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
-           __func__, dmat, dmat->flags, error, nsegs + 1);
-       return (error);
+       if (segs == NULL)
+               segs = dmat->segments;
+       return (segs);
 }
 
 /*
@@ -1109,12 +1066,6 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, b
 {
        struct bounce_page *bpage;
        struct bounce_zone *bz;
-       struct sync_list *sl;
-
-        while ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
-                STAILQ_REMOVE_HEAD(&map->slist, slinks);
-                free(sl, M_DEVBUF);
-        }
 
        if ((bz = dmat->bounce_zone) != NULL) {
                while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
@@ -1128,6 +1079,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, b
                map->pagesreserved = 0;
                map->pagesneeded = 0;
        }
+       map->sync_count = 0;
 }
 
 #ifdef notyetbounceuser
@@ -1187,15 +1139,13 @@ void
 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 {
        struct bounce_page *bpage;
-       struct sync_list *sl;
+       struct sync_list *sl, *end;
        bus_size_t len, unalign;
        vm_offset_t buf, ebuf;
 #ifdef FIX_DMAP_BUS_DMASYNC_POSTREAD
        vm_offset_t bbuf;
        char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
 #endif
-       int listcount = 0;
-
                /* if buffer was from user space, it is possible that this
                 * is not the same vm map. The fix is to map each page in
                 * the buffer into the current address space (KVM) and then
@@ -1215,9 +1165,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 
                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
-                               bcopy((void *)bpage->datavaddr,
-                                     (void *)bpage->vaddr,
-                                     bpage->datacount);
+                               if (bpage->datavaddr != 0)
+                                       bcopy((void *)bpage->datavaddr,
+                                             (void *)bpage->vaddr,
+                                             bpage->datacount);
+                               else
+                                       physcopyout(bpage->dataaddr,
+                                             (void *)bpage->vaddr,
+                                             bpage->datacount);
                                cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
                                        bpage->datacount);
                                l2cache_wb_range((vm_offset_t)bpage->vaddr,
@@ -1254,9 +1209,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
                                            arm_dcache_align;
                                cpu_dcache_inv_range(startv, len);
                                l2cache_inv_range(startv, startp, len);
-                               bcopy((void *)bpage->vaddr,
-                                     (void *)bpage->datavaddr,
-                                     bpage->datacount);
+                               if (bpage->datavaddr != 0)
+                                       bcopy((void *)bpage->vaddr,
+                                             (void *)bpage->datavaddr,
+                                             bpage->datacount);
+                               else
+                                       physcopyin((void *)bpage->vaddr,
+                                             bpage->dataaddr,
+                                             bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        dmat->bounce_zone->total_bounced++;
@@ -1265,29 +1225,26 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
        if (map->flags & DMAMAP_COHERENT)
                return;
 
-       sl = STAILQ_FIRST(&map->slist);
-       while (sl) {
-               listcount++;
-               sl = STAILQ_NEXT(sl, slinks);
-       }
-       if ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
+       if (map->sync_count != 0) {
                /* ARM caches are not self-snooping for dma */
 
+               sl = &map->slist[0];
+               end = &map->slist[map->sync_count];
                CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
                    "performing sync", __func__, dmat, dmat->flags, op);
 
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
-                       while (sl != NULL) {
+                       while (sl != end) {
                            cpu_dcache_wb_range(sl->vaddr, sl->datacount);
                            l2cache_wb_range(sl->vaddr, sl->busaddr,
                                sl->datacount);
-                           sl = STAILQ_NEXT(sl, slinks);
+                           sl++;
                        }
                        break;
 
                case BUS_DMASYNC_PREREAD:
-                       while (sl != NULL) {
+                       while (sl != end) {
                                        /* write back the unaligned portions */
                                vm_paddr_t physaddr = sl->busaddr, ephysaddr;
                                buf = sl->vaddr;
@@ -1327,16 +1284,16 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
                                        cpu_dcache_inv_range(buf, len);
                                        l2cache_inv_range(buf, physaddr, len);
                                }
-                               sl = STAILQ_NEXT(sl, slinks);
+                               sl++;
                        }
                        break;
 
                case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
-                       while (sl != NULL) {
+                       while (sl != end) {
                                cpu_dcache_wbinv_range(sl->vaddr, 
sl->datacount);
                                l2cache_wbinv_range(sl->vaddr,
                                    sl->busaddr, sl->datacount);
-                               sl = STAILQ_NEXT(sl, slinks);
+                               sl++;
                        }
                        break;
 
@@ -1344,7 +1301,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
                case BUS_DMASYNC_POSTREAD:
                        if (!pmap_dmap_iscurrent(map->pmap))
                             panic("_bus_dmamap_sync: wrong user map. apply 
fix");
-                       while (sl != NULL) {
+                       while (sl != end) {
                                        /* write back the unaligned portions */
                                vm_paddr_t physaddr;
                                register_t s = 0;
@@ -1391,7 +1348,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 
                                        intr_restore(s);
                                }
-                               sl = STAILQ_NEXT(sl, slinks);
+                               sl++;
                        }
                                break;
 #endif /* FIX_DMAP_BUS_DMASYNC_POSTREAD */
@@ -1559,7 +1516,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat,
 
 static bus_addr_t
 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-               bus_size_t size)
+               bus_addr_t addr, bus_size_t size)
 {
        struct bounce_zone *bz;
        struct bounce_page *bpage;
@@ -1593,6 +1550,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_
                bpage->busaddr |= vaddr & PAGE_MASK;
        }
        bpage->datavaddr = vaddr;
+       bpage->dataaddr = addr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
@@ -1646,8 +1604,8 @@ busdma_swi(void)
                mtx_unlock(&bounce_lock);
                dmat = map->dmat;
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
-               bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
-                               map->callback, map->callback_arg, /*flags*/0);
+               bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
+                                   map->callback_arg, BUS_DMA_WAITOK);
                (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
                mtx_lock(&bounce_lock);
        }

Modified: head/sys/arm/arm/busdma_machdep.c
==============================================================================
--- head/sys/arm/arm/busdma_machdep.c   Tue Feb 12 16:51:43 2013        (r246712)
+++ head/sys/arm/arm/busdma_machdep.c   Tue Feb 12 16:57:20 2013        (r246713)
@@ -61,12 +61,12 @@ __FBSDID("$FreeBSD$");
 #include <sys/interrupt.h>
 #include <sys/lock.h>
 #include <sys/proc.h>
+#include <sys/memdesc.h>
 #include <sys/mutex.h>
-#include <sys/mbuf.h>
-#include <sys/uio.h>
 #include <sys/ktr.h>
 #include <sys/kernel.h>
 #include <sys/sysctl.h>
+#include <sys/uio.h>
 
 #include <vm/uma.h>
 #include <vm/vm.h>
@@ -125,10 +125,17 @@ struct bounce_page {
        vm_offset_t     vaddr_nocache;  /* kva of bounce buffer uncached */
        bus_addr_t      busaddr;        /* Physical address */
        vm_offset_t     datavaddr;      /* kva of client data */
+       bus_addr_t      dataaddr;       /* client physical address */
        bus_size_t      datacount;      /* client data count */
        STAILQ_ENTRY(bounce_page) links;
 };
 
+struct sync_list {
+       vm_offset_t     vaddr;          /* kva of bounce buffer */
+       bus_addr_t      busaddr;        /* Physical address */
+       bus_size_t      datacount;      /* client data count */
+};
+
 int busdma_swi_pending;
 
 struct bounce_zone {
@@ -158,24 +165,21 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma
 SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
           "Total bounce pages");
 
-#define DMAMAP_LINEAR          0x1
-#define DMAMAP_MBUF            0x2
-#define DMAMAP_UIO             0x4
-#define DMAMAP_CACHE_ALIGNED   0x10
-#define DMAMAP_TYPE_MASK       (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
 #define DMAMAP_COHERENT                0x8
+#define DMAMAP_CACHE_ALIGNED   0x10
+
 struct bus_dmamap {
        struct bp_list  bpages;
        int             pagesneeded;
        int             pagesreserved;
         bus_dma_tag_t  dmat;
+       struct memdesc  mem;
        int             flags;
-       void            *buffer;
-       int             len;
        STAILQ_ENTRY(bus_dmamap) links;
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
-
+       int                    sync_count;
+       struct sync_list       *slist;
 };
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -191,7 +195,8 @@ static int alloc_bounce_pages(bus_dma_ta
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-                                  vm_offset_t vaddr, bus_size_t size);
+                                 vm_offset_t vaddr, bus_addr_t addr,
+                                 bus_size_t size);
 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 
 /* Default tag, as most drivers provide no parent tag. */
@@ -564,13 +569,20 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
 int
 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
+       struct sync_list *slist;
        bus_dmamap_t map;
        int error = 0;
 
+       slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
+       if (slist == NULL)
+               return (ENOMEM);
+
        map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
        *mapp = map;
-       if (map == NULL)
+       if (map == NULL) {
+               free(slist, M_DEVBUF);
                return (ENOMEM);
+       }
 
        /*
         * If the tag's segments haven't been allocated yet we need to do it
@@ -580,6 +592,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
                dmat->segments = malloc(dmat->nsegments * 
                    sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT);
                if (dmat->segments == NULL) {
+                       free(slist, M_DEVBUF);
                        uma_zfree(dmamap_zone, map);
                        *mapp = NULL;
                        return (ENOMEM);
@@ -599,6 +612,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
 
                if (dmat->bounce_zone == NULL) {
                        if ((error = alloc_bounce_zone(dmat)) != 0) {
+                               free(slist, M_DEVBUF);
                                uma_zfree(dmamap_zone, map);
                                *mapp = NULL;
                                return (error);
@@ -633,6 +647,8 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
                }
                bz->map_count++;
        }
+       map->sync_count = 0;
+       map->slist = slist;
        CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
            __func__, dmat, dmat->flags, error);
 
@@ -647,11 +663,12 @@ int

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***