Re: [PATCH v11 09/12] swiotlb: Add restricted DMA alloc/free support

2021-06-15 Thread Claire Chang
On Wed, Jun 16, 2021 at 12:59 PM Christoph Hellwig wrote:
>
> On Wed, Jun 16, 2021 at 12:04:16PM +0800, Claire Chang wrote:
> > Just noticed that after propagating swiotlb_force setting into
> > io_tlb_default_mem->force, the memory allocation behavior for
> > swiotlb_force will change (i.e. always skipping arch_dma_alloc and
> > dma_direct_alloc_from_pool).
>
> Yes, I think we need to split a "use_for_alloc" flag from the force flag.

How about splitting is_dev_swiotlb_force into is_swiotlb_force_bounce
(io_tlb_mem->force_bounce) and is_swiotlb_force_alloc
(io_tlb_mem->force_alloc)?
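
Something like the following, perhaps (just a sketch of the idea; it
assumes struct io_tlb_mem grows separate force_bounce and force_alloc
flags, set from swiotlb_force and from the restricted DMA pool setup
respectively, with dev->dma_io_tlb_mem as in this series):

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	/* Bouncing is forced: swiotlb=force or a restricted pool. */
	return mem && mem->force_bounce;
}

static inline bool is_swiotlb_force_alloc(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	/* Allocations must come from the pool: restricted pool only. */
	return mem && mem->force_alloc;
}

The mapping path would then test is_swiotlb_force_bounce(), and only
the dma_direct_alloc() paths would test is_swiotlb_force_alloc().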


Re: [PATCH v11 09/12] swiotlb: Add restricted DMA alloc/free support

2021-06-15 Thread Claire Chang
On Wed, Jun 16, 2021 at 11:54 AM Claire Chang wrote:
>
> Add the functions swiotlb_{alloc,free} to support memory allocation
> from the restricted DMA pool.
>
> The restricted DMA pool is preferred if available.
>
> Note that since coherent allocation needs remapping, one must set up
> another device coherent pool via shared-dma-pool and use
> dma_alloc_from_dev_coherent() instead for atomic coherent allocation.
>
> Signed-off-by: Claire Chang 
> Reviewed-by: Christoph Hellwig 
> ---
>  include/linux/swiotlb.h | 15 +
>  kernel/dma/direct.c     | 50 ++---
>  kernel/dma/swiotlb.c    | 45 +++--
>  3 files changed, 95 insertions(+), 15 deletions(-)
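
(For context on the atomic coherent allocation note above: the
per-device coherent pool is consulted before dma_direct_alloc() ever
runs. A simplified sketch of the dma_alloc_attrs() ordering, not the
exact code:)

void *dma_alloc_attrs(struct device *dev, size_t size,
		      dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *cpu_addr;

	/* A pool declared via shared-dma-pool is tried first, which is
	 * why a restricted-DMA device that needs atomic coherent
	 * allocations must declare one. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* Only then does the allocation fall through to
	 * dma_direct_alloc() and the changes in this patch. */
	return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
}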
>
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index efcd56e3a16c..2d5ec670e064 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -156,4 +156,19 @@ static inline void swiotlb_adjust_size(unsigned long size)
>  extern void swiotlb_print_info(void);
>  extern void swiotlb_set_max_segment(unsigned int);
>
> +#ifdef CONFIG_DMA_RESTRICTED_POOL
> +struct page *swiotlb_alloc(struct device *dev, size_t size);
> +bool swiotlb_free(struct device *dev, struct page *page, size_t size);
> +#else
> +static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
> +{
> +   return NULL;
> +}
> +static inline bool swiotlb_free(struct device *dev, struct page *page,
> +   size_t size)
> +{
> +   return false;
> +}
> +#endif /* CONFIG_DMA_RESTRICTED_POOL */
> +
>  #endif /* __LINUX_SWIOTLB_H */
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 3713461d6fe0..da0e09621230 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -75,6 +75,15 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
> min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
>  }
>
> +static void __dma_direct_free_pages(struct device *dev, struct page *page,
> +   size_t size)
> +{
> +   if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
> +   swiotlb_free(dev, page, size))
> +   return;
> +   dma_free_contiguous(dev, page, size);
> +}
> +
>  static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
> gfp_t gfp)
>  {
> @@ -86,7 +95,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
>
> gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, &phys_limit);
> -   page = dma_alloc_contiguous(dev, size, gfp);
> +   if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL)) {
> +   page = swiotlb_alloc(dev, size);
> +   if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
> +   __dma_direct_free_pages(dev, page, size);
> +   return NULL;
> +   }
> +   }
> +
> +   if (!page)
> +   page = dma_alloc_contiguous(dev, size, gfp);
> if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
> dma_free_contiguous(dev, page, size);
> page = NULL;
> @@ -142,7 +160,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
> gfp |= __GFP_NOWARN;
>
> if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
> -   !force_dma_unencrypted(dev)) {
> +   !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
> page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
> if (!page)
> return NULL;
> @@ -155,18 +173,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
> }
>
> if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> -   !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
> -   !dev_is_dma_coherent(dev))
> +   !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
> +   !is_dev_swiotlb_force(dev))
> return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

Just noticed that after propagating swiotlb_force setting into
io_tlb_default_mem->force, the memory allocation behavior for
swiotlb_force will change (i.e. always skipping arch_dma_alloc and
dma_direct_alloc_from_pool).
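
To spell it out, with a single flag the helper ends up as roughly (a
sketch, assuming swiotlb_force is propagated into
io_tlb_default_mem->force as described):

static inline bool is_dev_swiotlb_force(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	/* Now true on any swiotlb=force system, not only for devices
	 * with a restricted DMA pool. */
	return mem && mem->force;
}

so plain swiotlb=force kernels would also take the restricted-DMA
branches below and always skip arch_dma_alloc() and
dma_direct_alloc_from_pool().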

>
> /*
>  * Remapping or decrypting memory may block. If either is required and
>  * we can't block, allocate the memory from the atomic pools.
> +* If restricted DMA (i.e., is_dev_swiotlb_force) is required, one must
> +* set up another device coherent pool by shared-dma-pool and use
> +* dma_alloc_from_dev_coherent instead.
>  */
> if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
> !gfpflags_allow_blocking(gfp) &&
> (force_dma_unencrypted(dev) ||
> -(IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && 
>