Factor out an internal dma_contiguous_init_reserved_mem() function, which creates a CMA area from a previously reserved memory region, and add support for handling 'shared-dma-pool' reserved-memory device tree nodes.
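With this in place, a reserved-memory node that is compatible with "shared-dma-pool" and marked "reusable" gets a CMA area created for it at early boot, and a device attached to that region allocates its coherent DMA buffers from it. Roughly, a driver would consume such a region as follows (an illustrative sketch only: the of_reserved_mem_device_init() helper comes from the reserved-memory framework this patch builds on, not from this diff, and the foo_* names are made up):

	#include <linux/dma-mapping.h>
	#include <linux/of_reserved_mem.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		dma_addr_t dma_handle;
		void *vaddr;

		/*
		 * Attach the reserved region referenced by the device node;
		 * for a "shared-dma-pool" region the rmem_cma_ops added by
		 * this patch end up calling dev_set_cma_area() for the device.
		 */
		of_reserved_mem_device_init(dev);

		/* Coherent allocations are now backed by the per-device CMA area. */
		vaddr = dma_alloc_coherent(dev, SZ_1M, &dma_handle, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* ... use the buffer ... */

		dma_free_coherent(dev, SZ_1M, vaddr, dma_handle);
		return 0;
	}

A region additionally marked "linux,cma-default" is installed as the default CMA pool for devices that are not attached to a specific region.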
Based on previous code provided by Josh Cartwright <jo...@codeaurora.org>

Signed-off-by: Marek Szyprowski <m.szyprow...@samsung.com>
---
 drivers/base/dma-contiguous.c |  129 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 107 insertions(+), 22 deletions(-)

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 165c2c299e57..0efdf1986990 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -182,6 +182,49 @@ static int __init cma_init_reserved_areas(void)
 core_initcall(cma_init_reserved_areas);
 
 /**
+ * dma_contiguous_init_reserved_mem() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
+ */
+static int __init dma_contiguous_init_reserved_mem(phys_addr_t size,
+				phys_addr_t base, struct cma **res_cma)
+{
+	struct cma *cma = &cma_areas[cma_area_count];
+	phys_addr_t alignment;
+
+	/* Sanity checks */
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+
+	if (!size || !memblock_is_region_reserved(base, size))
+		return -EINVAL;
+
+	/* Sanitise input arguments */
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+		return -EINVAL;
+
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	*res_cma = cma;
+	cma_area_count++;
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(base, size);
+	return 0;
+}
+
+/**
  * dma_contiguous_reserve_area() - reserve custom contiguous area
  * @size: Size of the reserved area (in bytes),
  * @base: Base address of the reserved area optional, use 0 for any
@@ -197,7 +240,6 @@ core_initcall(cma_init_reserved_areas);
 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 				       phys_addr_t limit, struct cma **res_cma)
 {
-	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
 	int ret = 0;
 
@@ -205,12 +247,6 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 		 (unsigned long)size, (unsigned long)base,
 		 (unsigned long)limit);
 
-	/* Sanity checks */
-	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-		pr_err("Not enough slots for CMA reserved regions!\n");
-		return -ENOSPC;
-	}
-
 	if (!size)
 		return -EINVAL;
 
@@ -241,21 +277,12 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 		}
 	}
 
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	*res_cma = cma;
-	cma_area_count++;
-
-	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
-
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
-	return 0;
+	ret = dma_contiguous_init_reserved_mem(size, base, res_cma);
+	if (ret == 0) {
+		pr_info("CMA: reserved %ld MiB at %08lx\n",
+			(unsigned long)size / SZ_1M, (unsigned long)base);
+		return 0;
+	}
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
@@ -357,3 +384,61 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	return true;
 }
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	struct cma *cma = rmem->priv;
+	dev_set_cma_area(dev, cma);
+}
+
+static const struct reserved_mem_ops rmem_cma_ops = {
+	.device_init	= rmem_cma_device_init,
+};
+
+static int __init rmem_cma_setup(struct reserved_mem *rmem,
+				 unsigned long node,
+				 const char *uname)
+{
+	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	phys_addr_t mask = align - 1;
+	struct cma *cma;
+	int err;
+
+	if (!of_get_flat_dt_prop(node, "reusable", NULL))
+		return -EINVAL;
+
+	if ((rmem->base & mask) || (rmem->size & mask)) {
+		pr_err("Reserved memory: incorrect alignment of CMA region\n");
+		return -EINVAL;
+	}
+
+	err = dma_contiguous_init_reserved_mem(rmem->size, rmem->base, &cma);
+	if (err) {
+		pr_err("Reserved memory: unable to setup CMA region\n");
+		return err;
+	}
+
+	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+		dma_contiguous_set_default(cma);
+
+	rmem->ops = &rmem_cma_ops;
+	rmem->priv = cma;
+
+	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+#endif
-- 
1.7.9.5
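For context (not part of the patch): rmem_cma_device_init() only needs to call dev_set_cma_area() because the allocation path already resolves the pool per device. dev_get_cma_area() in include/linux/dma-contiguous.h reads roughly as below, which is also why dma_contiguous_set_default() is all the "linux,cma-default" handling has to do:

	/* Simplified lookup used by dma_alloc_from_contiguous() and friends. */
	static inline struct cma *dev_get_cma_area(struct device *dev)
	{
		if (dev && dev->cma_area)
			return dev->cma_area;
		return dma_contiguous_default_area;
	}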