Re: [RFC] switch nds32 to use the generic remapping DMA allocator

2019-06-14 Thread Greentime Hu
Christoph Hellwig  於 2019年6月14日 週五 下午6:09寫道:
>
> Hi Greentime and Vicent,
>
> can you take a look at the (untested) patch below?  It converts nds32
> to use the generic remapping DMA allocator, which is also used by
> arm64 and csky.

Hi Christoph,

It looks good to me. I just verified it on the nds32 platform and it works fine.
Should I put it in my next tree, or will you pick it up in your tree? :)

Tested-by: Greentime Hu 
Reviewed-by: Greentime Hu 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Re: [RFC] switch nds32 to use the generic remapping DMA allocator

2019-06-16 Thread Greentime Hu
Christoph Hellwig  於 2019年6月14日 週五 下午8:22寫道:
>
> On Fri, Jun 14, 2019 at 07:35:29PM +0800, Greentime Hu wrote:
> > It looks good to me. I just verified it on the nds32 platform and it works fine.
> > Should I put it in my next tree, or will you pick it up in your tree? :)
>
> Either way works for me, let me know what you prefer.

I prefer to put it in your tree. Thanks. :)
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Re: [PATCH 3/3] nds32: use generic dma_noncoherent_ops

2018-05-29 Thread Greentime Hu
_t size, enum dma_data_direction dir)
>  {
> switch (dir) {
> case DMA_TO_DEVICE:
> break;
> case DMA_FROM_DEVICE:
> case DMA_BIDIRECTIONAL:
> -   cache_op(handle, size, cpu_dma_inval_range);
> +   cache_op(paddr, size, cpu_dma_inval_range);
> break;
> default:
> BUG();
> }
>  }
> -
> -static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
> -unsigned long offset, size_t size,
> -enum dma_data_direction dir,
> -unsigned long attrs)
> -{
> -   dma_addr_t dma_addr = page_to_phys(page) + offset;
> -
> -   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -   nds32_dma_sync_single_for_device(dev, handle, size, dir);
> -   return dma_addr;
> -}
> -
> -static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
> -size_t size, enum dma_data_direction dir,
> -unsigned long attrs)
> -{
> -   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -   nds32_dma_sync_single_for_cpu(dev, handle, size, dir);
> -}
> -
> -static void
> -nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
> -int nents, enum dma_data_direction dir)
> -{
> -   int i;
> -
> -   for (i = 0; i < nents; i++, sg++) {
> -   nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
> -   sg->length, dir);
> -   }
> -}
> -
> -static void
> -nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int 
> nents,
> - enum dma_data_direction dir)
> -{
> -   int i;
> -
> -   for (i = 0; i < nents; i++, sg++) {
> -   nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
> -   sg->length, dir);
> -   }
> -}
> -
> -static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
> -   int nents, enum dma_data_direction dir,
> -   unsigned long attrs)
> -{
> -   int i;
> -
> -   for (i = 0; i < nents; i++, sg++) {
> -   nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
> -   sg->length, dir);
> -   }
> -   return nents;
> -}
> -
> -static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
> -  int nhwentries, enum dma_data_direction dir,
> -  unsigned long attrs)
> -{
> -   int i;
> -
> -   for (i = 0; i < nhwentries; i++, sg++) {
> -   nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
> -       sg->length, dir);
> -   }
> -}
> -
> -struct dma_map_ops nds32_dma_ops = {
> -   .alloc = nds32_dma_alloc_coherent,
> -   .free = nds32_dma_free,
> -   .map_page = nds32_dma_map_page,
> -   .unmap_page = nds32_dma_unmap_page,
> -   .map_sg = nds32_dma_map_sg,
> -   .unmap_sg = nds32_dma_unmap_sg,
> -   .sync_single_for_device = nds32_dma_sync_single_for_device,
> -   .sync_single_for_cpu = nds32_dma_sync_single_for_cpu,
> -   .sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu,
> -   .sync_sg_for_device = nds32_dma_sync_sg_for_device,
> -};
> -
> -EXPORT_SYMBOL(nds32_dma_ops);

Acked-by: Greentime Hu 
Tested-by: Greentime Hu 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 1/3] nds32: consolidate DMA cache maintenance routines

2018-05-29 Thread Greentime Hu
IZE - offset;
> +   }
> +
> +   addr = kmap_atomic(page);
> +   start = (unsigned long)(addr + offset);
> +   fn(start, start + len);
> +   kunmap_atomic(addr);
> } else {
> -   if (sg->offset > PAGE_SIZE)
> -   panic("sg->offset:%08x > PAGE_SIZE\n",
> - sg->offset);
> -   virt = page_address(page) + sg->offset;
> -   consistent_sync(virt, sg->length, dir, FOR_CPU);
> +   start = (unsigned long)phys_to_virt(paddr);
> +   fn(start, start + size);
> }
> -   }
> -   return nents;
> +   offset = 0;
> +   page++;
> +   left -= len;
> +   } while (left);
>  }
>
> -static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
> -  int nhwentries, enum dma_data_direction dir,
> -  unsigned long attrs)
> +static void
> +nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
> +size_t size, enum dma_data_direction dir)
>  {
> +   switch (dir) {
> +   case DMA_FROM_DEVICE:
> +   break;
> +   case DMA_TO_DEVICE:
> +   case DMA_BIDIRECTIONAL:
> +   cache_op(handle, size, cpu_dma_wb_range);
> +   break;
> +   default:
> +   BUG();
> +   }
>  }
>
>  static void
>  nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
>   size_t size, enum dma_data_direction dir)
>  {
> -   consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
> +   switch (dir) {
> +   case DMA_TO_DEVICE:
> +   break;
> +   case DMA_FROM_DEVICE:
> +   case DMA_BIDIRECTIONAL:
> +   cache_op(handle, size, cpu_dma_inval_range);
> +   break;
> +   default:
> +   BUG();
> +   }
> +}
> +
> +static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
> +unsigned long offset, size_t size,
> +enum dma_data_direction dir,
> +unsigned long attrs)
> +{
> +   dma_addr_t dma_addr = page_to_phys(page) + offset;
> +
> +   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +   nds32_dma_sync_single_for_device(dev, handle, size, dir);
> +   return dma_addr;
> +}
> +
> +static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
> +size_t size, enum dma_data_direction dir,
> +unsigned long attrs)
> +{
> +   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> +   nds32_dma_sync_single_for_cpu(dev, handle, size, dir);
>  }
>
>  static void
> -nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
> -size_t size, enum dma_data_direction dir)
> +nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
> +int nents, enum dma_data_direction dir)
>  {
> -   consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
> +   int i;
> +
> +   for (i = 0; i < nents; i++, sg++) {
> +   nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
> +   sg->length, dir);
> +   }
>  }
>
>  static void
> @@ -442,23 +436,28 @@ nds32_dma_sync_sg_for_cpu(struct device *dev, struct 
> scatterlist *sg, int nents,
> int i;
>
> for (i = 0; i < nents; i++, sg++) {
> -   char *virt =
> -   page_address((struct page *)sg->page_link) + sg->offset;
> -   consistent_sync(virt, sg->length, dir, FOR_CPU);
> +   nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
> +   sg->length, dir);
> }
>  }
>
> -static void
> -nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
> -int nents, enum dma_data_direction dir)
> +static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
> +       int nents, enum dma_data_direction dir,
> +   unsigned long attrs)
>  {
> int i;
>
> for (i = 0; i < nents; i++, sg++) {
> -   char *virt =
> -   page_address((struct page *)sg->page_link) + sg->offset;
> -   consistent_sync(virt, sg->length, dir, FOR_DEVICE);
> +   nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
> +   sg->length, dir);
> }
> +   return nents;
> +}
> +
> +static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
> +  int nhwentries, enum dma_data_direction dir,
> +  unsigned long attrs)
> +{
>  }
>
>  struct dma_map_ops nds32_dma_ops = {

Acked-by: Greentime Hu 
Tested-by: Greentime Hu 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 2/3] nds32: implement the unmap_sg DMA operation

2018-05-29 Thread Greentime Hu
2018-05-29 17:48 GMT+08:00 Christoph Hellwig :
> This matches the implementation of the more commonly used unmap_single
> routines and the sync_sg_for_cpu method, which should provide equivalent
> cache maintenance.
>
> Signed-off-by: Christoph Hellwig 
> ---
>  arch/nds32/kernel/dma.c | 6 ++
>  1 file changed, 6 insertions(+)
>
> diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
> index e0c94a2889c5..b9973317c734 100644
> --- a/arch/nds32/kernel/dma.c
> +++ b/arch/nds32/kernel/dma.c
> @@ -458,6 +458,12 @@ static void nds32_dma_unmap_sg(struct device *dev, 
> struct scatterlist *sg,
>int nhwentries, enum dma_data_direction dir,
>unsigned long attrs)
>  {
> +   int i;
> +
> +   for (i = 0; i < nhwentries; i++, sg++) {
> +   nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
> +   sg->length, dir);
> +   }
>  }
>
>  struct dma_map_ops nds32_dma_ops = {

Acked-by: Greentime Hu 
Tested-by: Greentime Hu 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu