On Tue, 19 Oct 2021, Christoph Hellwig wrote:

> Factor out helpers that make dealing with memory encryption a little less
> cumbersome.
> 
> Signed-off-by: Christoph Hellwig <h...@lst.de>
> ---
>  kernel/dma/direct.c | 55 +++++++++++++++++++++------------------------
>  1 file changed, 25 insertions(+), 30 deletions(-)
> 
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 4c6c5e0635e34..96f02248e7fa2 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
>               min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
>  }
>  
> +static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
> +{
> +     if (!force_dma_unencrypted(dev))
> +             return 0;
> +     return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
> +}
> +
> +static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
> +{
> +     if (!force_dma_unencrypted(dev))
> +             return 0;
> +     return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
> +}
> +
>  static void __dma_direct_free_pages(struct device *dev, struct page *page,
>                                   size_t size)
>  {
> @@ -216,12 +230,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>                               __builtin_return_address(0));
>               if (!ret)
>                       goto out_free_pages;
> -             if (force_dma_unencrypted(dev)) {
> -                     err = set_memory_decrypted((unsigned long)ret,
> -                                                1 << get_order(size));
> -                     if (err)
> -                             goto out_free_pages;
> -             }
> +             err = dma_set_decrypted(dev, ret, size);

This should be

        if (dma_set_decrypted(dev, ret, size))
                goto out_free_pages;

shouldn't it?  As written, the return value of dma_set_decrypted() is
assigned to err but never checked, so a failure to decrypt the memory is
silently ignored and the allocation proceeds anyway.

>               memset(ret, 0, size);
>               goto done;
>       }
> @@ -238,13 +247,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>       }
>  
>       ret = page_address(page);
> -     if (force_dma_unencrypted(dev)) {
> -             err = set_memory_decrypted((unsigned long)ret,
> -                                        1 << get_order(size));
> -             if (err)
> -                     goto out_free_pages;
> -     }
> -
> +     err = dma_set_decrypted(dev, ret, size);
> +     if (err)
> +             goto out_free_pages;
>       memset(ret, 0, size);
>  
>       if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
> @@ -259,13 +264,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>       return ret;
>  
>  out_encrypt_pages:
> -     if (force_dma_unencrypted(dev)) {
> -             err = set_memory_encrypted((unsigned long)page_address(page),
> -                                        1 << get_order(size));
> -             /* If memory cannot be re-encrypted, it must be leaked */
> -             if (err)
> -                     return NULL;
> -     }
> +     /* If memory cannot be re-encrypted, it must be leaked */
> +     if (dma_set_encrypted(dev, page_address(page), size))
> +             return NULL;
>  out_free_pages:
>       __dma_direct_free_pages(dev, page, size);
>       return NULL;
> @@ -304,8 +305,7 @@ void dma_direct_free(struct device *dev, size_t size,
>           dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
>               return;
>  
> -     if (force_dma_unencrypted(dev))
> -             set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
> +     dma_set_encrypted(dev, cpu_addr, 1 << page_order);
>  
>       if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
>               vunmap(cpu_addr);
> @@ -341,11 +341,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>       }
>  
>       ret = page_address(page);
> -     if (force_dma_unencrypted(dev)) {
> -             if (set_memory_decrypted((unsigned long)ret,
> -                             1 << get_order(size)))
> -                     goto out_free_pages;
> -     }
> +     if (dma_set_decrypted(dev, ret, size))
> +             goto out_free_pages;
>       memset(ret, 0, size);
>       *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
>       return page;
> @@ -366,9 +363,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
>           dma_free_from_pool(dev, vaddr, size))
>               return;
>  
> -     if (force_dma_unencrypted(dev))
> -             set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
> -
> +     dma_set_encrypted(dev, vaddr, 1 << page_order);
>       __dma_direct_free_pages(dev, page, size);
>  }
>  
> -- 
> 2.30.2
> 
> 
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to