Re: [PATCH 1/2] drivers: dma-contiguous: clean source code and prepare for device tree

2013-02-14 Thread Laura Abbott

Hi,

On 2/14/2013 4:45 AM, Marek Szyprowski wrote:

This patch cleans the initialization of dma contiguous framework. The
all-in-one dma_declare_contiguous() function is now separated into
dma_contiguous_reserve_area() which only steals the memory from
memblock allocator and dma_contiguous_add_device() function, which
assigns given device to the specified reserved memory area. This improves
the flexibility in defining contiguous memory areas and assigning device
to them, because now it is possible to assign more than one device to
the given contiguous memory area. This split in initialization is also
required for upcoming device tree support.

Signed-off-by: Marek Szyprowski 
Acked-by: Kyungmin Park 
---
  drivers/base/dma-contiguous.c|  210 +-
  include/asm-generic/dma-contiguous.h |4 +-
  include/linux/dma-contiguous.h   |   32 +-
  3 files changed, 161 insertions(+), 85 deletions(-)

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca5442..085389c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -39,7 +39,33 @@ struct cma {
unsigned long   *bitmap;
  };

-struct cma *dma_contiguous_default_area;
+static DEFINE_MUTEX(cma_mutex);
+
+struct cma *dma_contiguous_def_area;
+phys_addr_t dma_contiguous_def_base;
+
+static struct cma_area {
+   phys_addr_t base;
+   unsigned long size;
+   struct cma *cma;
+} cma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned cma_area_count __initdata;
+


cma_areas and cma_area_count are accessed from cma_get_area which gets 
called from cma_assign_device_from_dt. You need to drop the __initdata 
since the notifier can be called at any time.


Thanks,
Laura

--
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
hosted by The Linux Foundation
___
devicetree-discuss mailing list
devicetree-discuss@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/devicetree-discuss


[PATCH 1/2] drivers: dma-contiguous: clean source code and prepare for device tree

2013-02-14 Thread Marek Szyprowski
This patch cleans the initialization of dma contiguous framework. The
all-in-one dma_declare_contiguous() function is now separated into
dma_contiguous_reserve_area() which only steals the memory from
memblock allocator and dma_contiguous_add_device() function, which
assigns given device to the specified reserved memory area. This improves
the flexibility in defining contiguous memory areas and assigning device
to them, because now it is possible to assign more than one device to
the given contiguous memory area. This split in initialization is also
required for upcoming device tree support.

Signed-off-by: Marek Szyprowski 
Acked-by: Kyungmin Park 
---
 drivers/base/dma-contiguous.c|  210 +-
 include/asm-generic/dma-contiguous.h |4 +-
 include/linux/dma-contiguous.h   |   32 +-
 3 files changed, 161 insertions(+), 85 deletions(-)

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca5442..085389c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -39,7 +39,33 @@ struct cma {
unsigned long   *bitmap;
 };
 
-struct cma *dma_contiguous_default_area;
+static DEFINE_MUTEX(cma_mutex);
+
+struct cma *dma_contiguous_def_area;
+phys_addr_t dma_contiguous_def_base;
+
+static struct cma_area {
+   phys_addr_t base;
+   unsigned long size;
+   struct cma *cma;
+} cma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned cma_area_count __initdata;
+
+
+static struct cma_map {
+   phys_addr_t base;
+   struct device *dev;
+} cma_maps[MAX_CMA_AREAS] __initdata;
+static unsigned cma_map_count __initdata;
+
+static struct cma *cma_get_area(phys_addr_t base)
+{
+   int i;
+   for (i = 0; i < cma_area_count; i++)
+   if (cma_areas[i].base == base)
+   return cma_areas[i].cma;
+   return NULL;
+}
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -95,45 +121,6 @@ static inline __maybe_unused phys_addr_t 
cma_early_percent_memory(void)
 
 #endif
 
-/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory.
- */
-void __init dma_contiguous_reserve(phys_addr_t limit)
-{
-   phys_addr_t selected_size = 0;
-
-   pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
-
-   if (size_cmdline != -1) {
-   selected_size = size_cmdline;
-   } else {
-#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
-   selected_size = size_bytes;
-#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
-   selected_size = cma_early_percent_memory();
-#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
-   selected_size = min(size_bytes, cma_early_percent_memory());
-#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
-   selected_size = max(size_bytes, cma_early_percent_memory());
-#endif
-   }
-
-   if (selected_size) {
-   pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-(unsigned long)selected_size / SZ_1M);
-
-   dma_declare_contiguous(NULL, selected_size, 0, limit);
-   }
-};
-
-static DEFINE_MUTEX(cma_mutex);
-
 static __init int cma_activate_area(unsigned long base_pfn, unsigned long 
count)
 {
unsigned long pfn = base_pfn;
@@ -190,55 +177,73 @@ no_mem:
return ERR_PTR(ret);
 }
 
-static struct cma_reserved {
-   phys_addr_t start;
-   unsigned long size;
-   struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
-
-static int __init cma_init_reserved_areas(void)
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It reserves contiguous areas for global, device independent
+ * allocations and (optionally) all areas defined in device tree structures.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
 {
-   struct cma_reserved *r = cma_reserved;
-   unsigned i = cma_reserved_count;
+   phys_addr_t sel_size = 0;
 
-   pr_debug("%s()\n", __func__);
+   pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
-   for (; i; --i, ++r) {
-   struct cma *cma;
-   cma = cma_create_area(PFN_DOWN(r->start),
- r->size >> PAGE_SHIFT);
-   if (!IS_ERR(cma))
-   d