Re: [PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers

2021-11-04 Thread Robin Murphy

On 2021-10-21 10:06, Christoph Hellwig wrote:

Factor out helpers to make dealing with memory encryption a little less
cumbersome.


Reviewed-by: Robin Murphy 


Signed-off-by: Christoph Hellwig 
---
  kernel/dma/direct.c | 56 +++++++++++++++++++++++++-------------------------------
  1 file changed, 25 insertions(+), 31 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e34..d4d54af31a341 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
  }
  
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+   if (!force_dma_unencrypted(dev))
+   return 0;
+   return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+   if (!force_dma_unencrypted(dev))
+   return 0;
+   return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
  static void __dma_direct_free_pages(struct device *dev, struct page *page,
size_t size)
  {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
  {
struct page *page;
void *ret;
-   int err;
  
	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
-   if (force_dma_unencrypted(dev)) {
-   err = set_memory_decrypted((unsigned long)ret,
-  1 << get_order(size));
-   if (err)
-   goto out_free_pages;
-   }
+   if (dma_set_decrypted(dev, ret, size))
+   goto out_free_pages;
memset(ret, 0, size);
goto done;
}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
}
  
	ret = page_address(page);
-   if (force_dma_unencrypted(dev)) {
-   err = set_memory_decrypted((unsigned long)ret,
-  1 << get_order(size));
-   if (err)
-   goto out_free_pages;
-   }
-
+   if (dma_set_decrypted(dev, ret, size))
+   goto out_free_pages;
memset(ret, 0, size);
  
	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return ret;
  
 out_encrypt_pages:
-   if (force_dma_unencrypted(dev)) {
-   err = set_memory_encrypted((unsigned long)page_address(page),
-  1 << get_order(size));
-   /* If memory cannot be re-encrypted, it must be leaked */
-   if (err)
-   return NULL;
-   }
+   /* If memory cannot be re-encrypted, it must be leaked */
+   if (dma_set_encrypted(dev, page_address(page), size))
+   return NULL;
  out_free_pages:
__dma_direct_free_pages(dev, page, size);
return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
return;
  
-   if (force_dma_unencrypted(dev))
-   set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+   dma_set_encrypted(dev, cpu_addr, size);
  
	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
}
  
	ret = page_address(page);
-   if (force_dma_unencrypted(dev)) {
-   if (set_memory_decrypted((unsigned long)ret,
-   1 << get_order(size)))
-   goto out_free_pages;
-   }
+   if (dma_set_decrypted(dev, ret, size))
+   goto out_free_pages;
memset(ret, 0, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
dma_free_from_pool(dev, vaddr, size))
return;
  
-   if (force_dma_unencrypted(dev))
-   set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+   dma_set_encrypted(dev, vaddr, size);
__dma_direct_free_pages(dev, page, size);
  }
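
To make the shape of the new helpers easier to see outside diff context, below is a minimal, self-contained userspace model of the pattern being factored out. Only dma_set_decrypted() mirrors the helper added by the patch; force_dma_unencrypted(), set_memory_decrypted(), get_order(), and the struct device here are simplified stand-ins for illustration, not the kernel implementations.

/* Minimal model of the dma_set_decrypted() helper from the patch above.
 * All the stubs are simplified stand-ins, not the kernel versions. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

struct device { bool needs_unencrypted_dma; };

/* Stand-in for the arch hook: must this device use unencrypted memory? */
static bool force_dma_unencrypted(struct device *dev)
{
	return dev->needs_unencrypted_dma;
}

/* Stand-in for the arch page-attribute call; note that it takes a
 * page count, not a size in bytes. */
static int set_memory_decrypted(unsigned long addr, int numpages)
{
	printf("decrypting %d page(s) at 0x%lx\n", numpages, addr);
	return 0;	/* 0 on success, as in the kernel */
}

/* Smallest order such that (1 << order) pages cover size bytes. */
static int get_order(size_t size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

/* The helper itself, as in the patch: callers hand over a virtual
 * address and a size in bytes; the policy check and the bytes-to-pages
 * conversion are hidden here instead of being open-coded per call site. */
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

int main(void)
{
	struct device dev = { .needs_unencrypted_dma = true };
	static char buf[4 * 4096];

	/* One call replaces the old open-coded if/err/goto sequence. */
	if (dma_set_decrypted(&dev, buf, sizeof(buf)))
		return 1;
	return 0;
}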
  


[PATCH 01/10] dma-direct: factor out dma_set_{de,en}crypted helpers

2021-10-21 Thread Christoph Hellwig
Factor out helpers to make dealing with memory encryption a little less
cumbersome.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/direct.c | 56 +++++++++++++++++++++++++-------------------------------
 1 file changed, 25 insertions(+), 31 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e34..d4d54af31a341 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+   if (!force_dma_unencrypted(dev))
+   return 0;
+   return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+   if (!force_dma_unencrypted(dev))
+   return 0;
+   return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
size_t size)
 {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
struct page *page;
void *ret;
-   int err;
 
size = PAGE_ALIGN(size);
if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
-   if (force_dma_unencrypted(dev)) {
-   err = set_memory_decrypted((unsigned long)ret,
-  1 << get_order(size));
-   if (err)
-   goto out_free_pages;
-   }
+   if (dma_set_decrypted(dev, ret, size))
+   goto out_free_pages;
memset(ret, 0, size);
goto done;
}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
}
 
ret = page_address(page);
-   if (force_dma_unencrypted(dev)) {
-   err = set_memory_decrypted((unsigned long)ret,
-  1 << get_order(size));
-   if (err)
-   goto out_free_pages;
-   }
-
+   if (dma_set_decrypted(dev, ret, size))
+   goto out_free_pages;
memset(ret, 0, size);
 
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return ret;
 
 out_encrypt_pages:
-   if (force_dma_unencrypted(dev)) {
-   err = set_memory_encrypted((unsigned long)page_address(page),
-  1 << get_order(size));
-   /* If memory cannot be re-encrypted, it must be leaked */
-   if (err)
-   return NULL;
-   }
+   /* If memory cannot be re-encrypted, it must be leaked */
+   if (dma_set_encrypted(dev, page_address(page), size))
+   return NULL;
 out_free_pages:
__dma_direct_free_pages(dev, page, size);
return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
return;
 
-   if (force_dma_unencrypted(dev))
-   set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+   dma_set_encrypted(dev, cpu_addr, size);
 
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
}
 
ret = page_address(page);
-   if (force_dma_unencrypted(dev)) {
-   if (set_memory_decrypted((unsigned long)ret,
-   1 << get_order(size)))
-   goto out_free_pages;
-   }
+   if (dma_set_decrypted(dev, ret, size))
+   goto out_free_pages;
memset(ret, 0, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
dma_free_from_pool(dev, vaddr, size))
return;
 
-   if (force_dma_unencrypted(dev))
-   set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+   dma_set_encrypted(dev, vaddr, size);
__dma_direct_free_pages(dev, page, size);
 }
 
-- 
2.30.2
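
One subtlety of the new helpers is worth spelling out: dma_set_decrypted() and dma_set_encrypted() take their third argument as a size in bytes and derive the page count internally via 1 << get_order(size). That is why the free paths above pass size rather than the page count 1 << page_order: feeding a page count back in would be misread as a tiny byte length. A quick worked check follows, using the same simplified get_order() stand-in as in the sketch earlier; it is a hedged illustration assuming 4 KiB pages, not kernel code.

/* Demonstrates the bytes-vs-pages contract of dma_set_{de,en}crypted().
 * get_order() is a simplified stand-in assuming 4 KiB pages. */
#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12

static int get_order(size_t size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	size_t size = 64 * 1024;		/* a 64 KiB allocation */
	int page_order = get_order(size);	/* 4, i.e. 16 pages */

	/* Passing the size in bytes covers all 16 pages... */
	assert((1 << get_order(size)) == 16);

	/* ...but passing the page count (1 << page_order == 16) as if it
	 * were a byte length would collapse the range to a single page. */
	assert((1 << get_order(1 << page_order)) == 1);
	return 0;
}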
