Re: [PATCH 9/9] csky: use the generic remapping dma alloc implementation

2018-11-08 Thread Christoph Hellwig
On Tue, Nov 06, 2018 at 03:01:41PM +0800, Guo Ren wrote:
> > +   return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
> >  }
> >  postcore_initcall(atomic_pool_init);
> It seems atomic_pool_init could also be removed from csky; why not put it in
> common code?

The bulk of the code did move to common code, but the architecture still needs
to pick the gfp mask (GFP_DMA32 on arm vs. GFP_KERNEL on csky, for example)
and the pgprot it needs for uncached remappings.
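
To illustrate, a minimal sketch of the per-architecture hook after the
conversion (it mirrors the csky hunk quoted above; arm would pass GFP_DMA32
and its own pgprot instead):

  /* The arch only supplies the GFP mask and the uncached pgprot; the
   * atomic pool itself now lives in the common DMA remap code. */
  static int __init atomic_pool_init(void)
  {
          return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
  }
  postcore_initcall(atomic_pool_init);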

> Reviewed-by: Guo Ren 
> 
> Compiles OK, qemu boots OK. The functions are the same, just moved to common code.

Thanks for your review!


Re: [PATCH 9/9] csky: use the generic remapping dma alloc implementation

2018-11-05 Thread Guo Ren
On Mon, Nov 05, 2018 at 01:19:31PM +0100, Christoph Hellwig wrote:
> The csky code was largely copied from arm/arm64, so switch to the
> generic arm64-based implementation instead.
> 
> Signed-off-by: Christoph Hellwig 
> ---
>  arch/csky/Kconfig  |   2 +-
>  arch/csky/mm/dma-mapping.c | 142 +
>  2 files changed, 3 insertions(+), 141 deletions(-)
> 
> diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
> index c0cf8e948821..ea74f3a9eeaf 100644
> --- a/arch/csky/Kconfig
> +++ b/arch/csky/Kconfig
> @@ -8,7 +8,7 @@ config CSKY
>   select CLKSRC_MMIO
>   select CLKSRC_OF
>   select DMA_DIRECT_OPS
> - select DMA_REMAP
> + select DMA_DIRECT_REMAP
>   select IRQ_DOMAIN
>   select HANDLE_DOMAIN_IRQ
>   select DW_APB_TIMER_OF
> diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
> index ad4046939713..80783bb71c5c 100644
> --- a/arch/csky/mm/dma-mapping.c
> +++ b/arch/csky/mm/dma-mapping.c
> @@ -14,73 +14,13 @@
>  #include 
>  #include 
>  
> -static struct gen_pool *atomic_pool;
> -static size_t atomic_pool_size __initdata = SZ_256K;
> -
> -static int __init early_coherent_pool(char *p)
> -{
> - atomic_pool_size = memparse(p, &p);
> - return 0;
> -}
> -early_param("coherent_pool", early_coherent_pool);
> -
>  static int __init atomic_pool_init(void)
>  {
> - struct page *page;
> - size_t size = atomic_pool_size;
> - void *ptr;
> - int ret;
> -
> - atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
> - if (!atomic_pool)
> - BUG();
> -
> - page = alloc_pages(GFP_KERNEL, get_order(size));
> - if (!page)
> - BUG();
> -
> - ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
> -   pgprot_noncached(PAGE_KERNEL),
> -   __builtin_return_address(0));
> - if (!ptr)
> - BUG();
> -
> - ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
> - page_to_phys(page), atomic_pool_size, -1);
> - if (ret)
> - BUG();
> -
> - gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
> -
> - pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
> - atomic_pool_size / 1024);
> -
> - pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
> - page_to_phys(page));
> -
> - return 0;
> + return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
>  }
>  postcore_initcall(atomic_pool_init);
It seems atomic_pool_init could also be removed from csky; why not put it in
common code?

>  
> -static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
> -dma_addr_t *dma_handle)
> -{
> - unsigned long addr;
> -
> - addr = gen_pool_alloc(atomic_pool, size);
> - if (addr)
> - *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
> -
> - return (void *)addr;
> -}
> -
> -static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
> -  dma_addr_t dma_handle, unsigned long attrs)
> -{
> - gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
> -}
> -
> -static void __dma_clear_buffer(struct page *page, size_t size)
> +void arch_dma_prep_coherent(struct page *page, size_t size)
>  {
>   if (PageHighMem(page)) {
>   unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> @@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
>   }
>  }
>  
> -static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
> -   dma_addr_t *dma_handle, gfp_t gfp,
> -   unsigned long attrs)
> -{
> - void  *vaddr;
> - struct page *page;
> - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -
> - if (DMA_ATTR_NON_CONSISTENT & attrs) {
> - pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
> - return NULL;
> - }
> -
> - if (IS_ENABLED(CONFIG_DMA_CMA))
> - page = dma_alloc_from_contiguous(dev, count, get_order(size),
> -  gfp);
> - else
> - page = alloc_pages(gfp, get_order(size));
> -
> - if (!page) {
> - pr_err("csky %s no more free pages.\n", __func__);
> - return NULL;
> - }
> -
> - *dma_handle = page_to_phys(page);
> -
> - __dma_clear_buffer(page, size);
> -
> - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
> - return page;
> -
> - vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
> - pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
> - if (!vaddr)
> - BUG();
> -
> - return vaddr;
> -}
> -
> -static void csky_dma_free_nonatomic(
> - struct device *dev,
> - size_t size,
> - void *vaddr,
> -

[PATCH 9/9] csky: use the generic remapping dma alloc implementation

2018-11-05 Thread Christoph Hellwig
The csky code was largely copied from arm/arm64, so switch to the
generic arm64-based implementation instead.

Signed-off-by: Christoph Hellwig 
---
 arch/csky/Kconfig  |   2 +-
 arch/csky/mm/dma-mapping.c | 142 +
 2 files changed, 3 insertions(+), 141 deletions(-)

diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index c0cf8e948821..ea74f3a9eeaf 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -8,7 +8,7 @@ config CSKY
select CLKSRC_MMIO
select CLKSRC_OF
select DMA_DIRECT_OPS
-   select DMA_REMAP
+   select DMA_DIRECT_REMAP
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
select DW_APB_TIMER_OF
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index ad4046939713..80783bb71c5c 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -14,73 +14,13 @@
 #include 
 #include 
 
-static struct gen_pool *atomic_pool;
-static size_t atomic_pool_size __initdata = SZ_256K;
-
-static int __init early_coherent_pool(char *p)
-{
-   atomic_pool_size = memparse(p, &p);
-   return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
 static int __init atomic_pool_init(void)
 {
-   struct page *page;
-   size_t size = atomic_pool_size;
-   void *ptr;
-   int ret;
-
-   atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-   if (!atomic_pool)
-   BUG();
-
-   page = alloc_pages(GFP_KERNEL, get_order(size));
-   if (!page)
-   BUG();
-
-   ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
- pgprot_noncached(PAGE_KERNEL),
- __builtin_return_address(0));
-   if (!ptr)
-   BUG();
-
-   ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
-   page_to_phys(page), atomic_pool_size, -1);
-   if (ret)
-   BUG();
-
-   gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
-   pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
-   atomic_pool_size / 1024);
-
-   pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
-   page_to_phys(page));
-
-   return 0;
+   return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
 }
 postcore_initcall(atomic_pool_init);
 
-static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
-  dma_addr_t *dma_handle)
-{
-   unsigned long addr;
-
-   addr = gen_pool_alloc(atomic_pool, size);
-   if (addr)
-   *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
-
-   return (void *)addr;
-}
-
-static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
-dma_addr_t dma_handle, unsigned long attrs)
-{
-   gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
-}
-
-static void __dma_clear_buffer(struct page *page, size_t size)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
if (PageHighMem(page)) {
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
}
 }
 
-static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
-   void  *vaddr;
-   struct page *page;
-   unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-   if (DMA_ATTR_NON_CONSISTENT & attrs) {
-   pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
-   return NULL;
-   }
-
-   if (IS_ENABLED(CONFIG_DMA_CMA))
-   page = dma_alloc_from_contiguous(dev, count, get_order(size),
-gfp);
-   else
-   page = alloc_pages(gfp, get_order(size));
-
-   if (!page) {
-   pr_err("csky %s no more free pages.\n", __func__);
-   return NULL;
-   }
-
-   *dma_handle = page_to_phys(page);
-
-   __dma_clear_buffer(page, size);
-
-   if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-   return page;
-
-   vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
-   pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
-   if (!vaddr)
-   BUG();
-
-   return vaddr;
-}
-
-static void csky_dma_free_nonatomic(
-   struct device *dev,
-   size_t size,
-   void *vaddr,
-   dma_addr_t dma_handle,
-   unsigned long attrs
-   )
-{
-   struct page *page = phys_to_page(dma_handle);
-   unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-   if ((unsigned int)vaddr >= VMALLOC_START)
-   dma_common_free_remap(vaddr, size, VM_USERMAP);