Expose the DMA operations functions. Until now, only the dma_ops
structs as a whole, or a few individual DMA operations, were exposed.
This patch exposes all of the coherent DMA operations so that they can
be reused when an architecture or a driver needs to create its own set
of dma_map_ops, as sketched in the note below.

Signed-off-by: Gregory CLEMENT <[email protected]>
---
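Note (not for the changelog): a minimal, hypothetical sketch of the
intended reuse. A platform could build its own struct dma_map_ops from
the helpers exported by this patch, overriding only the hooks it needs.
The my_plat_* names are invented for illustration, and this assumes
arm_dma_alloc()/arm_dma_free() are likewise declared in
<asm/dma-mapping.h>:

  #include <linux/dma-mapping.h>

  static dma_addr_t my_plat_dma_map_page(struct device *dev,
  		struct page *page, unsigned long offset, size_t size,
  		enum dma_data_direction dir, struct dma_attrs *attrs)
  {
  	/* Perform the standard ARM cache maintenance first... */
  	dma_addr_t handle = arm_dma_map_page(dev, page, offset, size,
  					     dir, attrs);

  	/* ...then apply any platform-specific fixup to the bus
  	 * address here (none is needed in this sketch). */
  	return handle;
  }

  static struct dma_map_ops my_plat_dma_ops = {
  	.alloc			= arm_dma_alloc,
  	.free			= arm_dma_free,
  	.map_page		= my_plat_dma_map_page,
  	.unmap_page		= arm_dma_unmap_page,
  	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
  	.sync_single_for_device	= arm_dma_sync_single_for_device,
  	.set_dma_mask		= arm_dma_set_mask,
  };

A driver or platform would then attach these with
set_dma_ops(dev, &my_plat_dma_ops) before any mapping takes place.
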
 arch/arm/include/asm/dma-mapping.h |   48 ++++++++++++++++++++++++++++++++++++
 arch/arm/mm/dma-mapping.c          |   25 ++++---------------
 2 files changed, 53 insertions(+), 20 deletions(-)

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2300484..b12d7c0 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -112,6 +112,54 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 extern int dma_supported(struct device *dev, u64 mask);
 
 /**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+extern dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+                                   unsigned long offset, size_t size,
+                                   enum dma_data_direction dir,
+                                   struct dma_attrs *attrs);
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+                               size_t size, enum dma_data_direction dir,
+                               struct dma_attrs *attrs);
+
+extern void arm_dma_sync_single_for_cpu(struct device *dev,
+                                       dma_addr_t handle, size_t size,
+                                       enum dma_data_direction dir);
+
+extern void arm_dma_sync_single_for_device(struct device *dev,
+                                       dma_addr_t handle, size_t size,
+                                       enum dma_data_direction dir);
+
+extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+
+/**
  * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: required memory size
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 58bc3e4..dbb67ce 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -56,20 +56,13 @@ static void __dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);
 
 /**
- * arm_dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
  * Ensure that any data held in the cache is appropriately discarded
  * or written back.
  *
  * The device owns this memory once this call has completed.  The CPU
  * can regain ownership by calling dma_unmap_page().
  */
-static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
 {
@@ -86,12 +79,6 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag
 }
 
 /**
- * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
  * Unmap a page streaming mode DMA translation.  The handle and size
  * must match what was provided in the previous dma_map_page() call.
  * All other usages are undefined.
@@ -99,7 +86,7 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
 {
@@ -108,7 +95,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                      handle & ~PAGE_MASK, size, dir);
 }
 
-static void arm_dma_sync_single_for_cpu(struct device *dev,
+void arm_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
        unsigned int offset = handle & (PAGE_SIZE - 1);
@@ -116,7 +103,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
        __dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
-static void arm_dma_sync_single_for_device(struct device *dev,
+void arm_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
        unsigned int offset = handle & (PAGE_SIZE - 1);
@@ -124,8 +111,6 @@ static void arm_dma_sync_single_for_device(struct device *dev,
        __dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
-
 struct dma_map_ops arm_dma_ops = {
        .alloc                  = arm_dma_alloc,
        .free                   = arm_dma_free,
@@ -971,7 +956,7 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
-- 
1.7.9.5
