[PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc

2021-10-21 Thread Christoph Hellwig
Add a local variable to track if we want to remap the returned address
using vmap and use that to simplify the code flow.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/direct.c | 47 +++--
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 60cb75aa6778e..a6b6fe72af4d1 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, 
size_t size,
 void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+   bool remap = false;
struct page *page;
void *ret;
 
@@ -217,9 +218,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,
if (!page)
return NULL;
 
-   if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-!dev_is_dma_coherent(dev)) ||
-   (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+   if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+   remap = true;
+   } else if (PageHighMem(page)) {
+   /*
+* Depending on the cma= arguments and per-arch setup,
+* dma_alloc_contiguous could return highmem pages.
+* Without remapping there is no way to return them here, so
+* log an error and fail.
+*/
+   if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+   dev_info(dev, "Rejecting highmem page from CMA.\n");
+   goto out_free_pages;
+   }
+   remap = true;
+   }
+
+   if (remap) {
/* remove any dirty cache lines on the kernel alias */
arch_dma_prep_coherent(page, size);
 
@@ -229,36 +244,22 @@ void *dma_direct_alloc(struct device *dev, size_t size,
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
-   if (dma_set_decrypted(dev, ret, size))
-   goto out_unmap_pages;
-   memset(ret, 0, size);
-   goto done;
-   }
-
-   if (PageHighMem(page)) {
-   /*
-* Depending on the cma= arguments and per-arch setup
-* dma_alloc_contiguous could return highmem pages.
-* Without remapping there is no way to return them here,
-* so log an error and fail.
-*/
-   dev_info(dev, "Rejecting highmem page from CMA.\n");
-   goto out_free_pages;
+   } else {
+   ret = page_address(page);
}
 
-   ret = page_address(page);
if (dma_set_decrypted(dev, ret, size))
-   goto out_free_pages;
+   goto out_unmap_pages;
memset(ret, 0, size);
 
-   if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-   !dev_is_dma_coherent(dev)) {
+   if (!dev_is_dma_coherent(dev) && !remap &&
+   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED)) {
arch_dma_prep_coherent(page, size);
ret = arch_dma_set_uncached(ret, size);
if (IS_ERR(ret))
goto out_encrypt_pages;
}
-done:
+
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return ret;
 
-- 
2.30.2

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc

2021-11-04 Thread Robin Murphy

On 2021-10-21 10:06, Christoph Hellwig wrote:

Add a local variable to track if we want to remap the returned address
using vmap and use that to simplify the code flow.

Signed-off-by: Christoph Hellwig 
---
  kernel/dma/direct.c | 47 +++--
  1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 60cb75aa6778e..a6b6fe72af4d1 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, 
size_t size,
  void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
  {
+   bool remap = false;


How about also adding a "bool set_uncached = false"...


struct page *page;
void *ret;
  
@@ -217,9 +218,23 @@ void *dma_direct_alloc(struct device *dev, size_t size,

if (!page)
return NULL;
  
-	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&

-!dev_is_dma_coherent(dev)) ||
-   (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+   if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+   remap = true;
+   } else if (PageHighMem(page)) {
+   /*
+* Depending on the cma= arguments and per-arch setup,
+* dma_alloc_contiguous could return highmem pages.
+* Without remapping there is no way to return them here, so
+* log an error and fail.
+*/
+   if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+   dev_info(dev, "Rejecting highmem page from CMA.\n");
+   goto out_free_pages;
+   }
+   remap = true;
+   }


...then "else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
set_uncached = true;"...


+
+   if (remap) {
/* remove any dirty cache lines on the kernel alias */
arch_dma_prep_coherent(page, size);
  
@@ -229,36 +244,22 @@ void *dma_direct_alloc(struct device *dev, size_t size,

__builtin_return_address(0));
if (!ret)
goto out_free_pages;
-   if (dma_set_decrypted(dev, ret, size))
-   goto out_unmap_pages;
-   memset(ret, 0, size);
-   goto done;
-   }
-
-   if (PageHighMem(page)) {
-   /*
-* Depending on the cma= arguments and per-arch setup
-* dma_alloc_contiguous could return highmem pages.
-* Without remapping there is no way to return them here,
-* so log an error and fail.
-*/
-   dev_info(dev, "Rejecting highmem page from CMA.\n");
-   goto out_free_pages;
+   } else {
+   ret = page_address(page);


As before, I'm thinking that dma_set_decrypted() probably belongs in here.


}
  
-	ret = page_address(page);

if (dma_set_decrypted(dev, ret, size))
-   goto out_free_pages;
+   goto out_unmap_pages;
memset(ret, 0, size);
  
-	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&

-   !dev_is_dma_coherent(dev)) {
+   if (!dev_is_dma_coherent(dev) && !remap &&
+   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED)) {


...then from earlier we'd have just a nice "if (set_uncached)" here?


arch_dma_prep_coherent(page, size);
ret = arch_dma_set_uncached(ret, size);
if (IS_ERR(ret))
goto out_encrypt_pages;


From a quick Kconfig survey, this is a purely theoretical exercise in 
dead code generation. Let's just be pragmatic, stick in a
"BUILD_BUG_ON(IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 
IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT));" and leave this re-encryption 
case for theoretical future arch maintainers to worry about.


All that said, I'm also now wondering why the arch_dma_set_uncached() 
call is down here in the first place. If we could do it as an else 
case of the remapping stage (given that it's a semantically equivalent 
operation), the complexity inherently falls away.



}
-done:
+
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return ret;
  



If the "out_unmap_pages" step is even still necessary, I think the 
condition there should now simplify down to "if (remap)" as well.


Robin.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 04/10] dma-direct: clean up the remapping checks in dma_direct_alloc

2021-11-09 Thread Christoph Hellwig
On Thu, Nov 04, 2021 at 12:35:59PM +, Robin Murphy wrote:
>> @@ -166,6 +166,7 @@ static void *dma_direct_alloc_from_pool(struct device 
>> *dev, size_t size,
>>   void *dma_direct_alloc(struct device *dev, size_t size,
>>  dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
>>   {
>> +bool remap = false;
>
> How about also adding a "bool set_uncached = false"...

Done.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu