On 2021-11-11 06:50, Christoph Hellwig wrote:
Add two local variables to track whether we want to remap the returned
address using vmap or call dma_set_uncached(), and use them to simplify
the code flow.

I still wonder about the asymmetry between the remap and set_uncached cases WRT the memset(), which stands out even more the further we clean things up, but that's another matter.
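
Concretely, as I read the result (error unwinding trimmed, and the remap call's arguments elided since the quoted hunks don't show them; the helper should be dma_common_contiguous_remap() in mainline), the tail of dma_direct_alloc() now reads roughly as:

        if (remap) {
                /* flush the cached kernel alias, then create the vmap alias */
                arch_dma_prep_coherent(page, size);
                ret = dma_common_contiguous_remap(page, size, ...);
        } else {
                /* cached linear-map alias */
                ret = page_address(page);
                if (dma_set_decrypted(dev, ret, size))
                        goto out_free_pages;
        }

        memset(ret, 0, size);   /* remap case: zeroing goes through the new mapping */

        if (set_uncached) {
                /* flush the lines the memset() above just dirtied... */
                arch_dma_prep_coherent(page, size);
                /* ...and only then create the uncached alias */
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }

i.e. the remap path zeroes the buffer through the new mapping after its cache maintenance has been done, while the set_uncached path zeroes through the cached alias first and relies on that second arch_dma_prep_coherent() to clean it out before the uncached alias is created.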

Reviewed-by: Robin Murphy <robin.mur...@arm.com>

Signed-off-by: Christoph Hellwig <h...@lst.de>
---
  kernel/dma/direct.c | 48 ++++++++++++++++++++++++---------------------
  1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d7a489be48470..3d1718dc077e9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -171,6 +171,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
  void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
  {
+       bool remap = false, set_uncached = false;
        struct page *page;
        void *ret;
@@ -222,9 +223,25 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        if (!page)
                return NULL;
-       if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-            !dev_is_dma_coherent(dev)) ||
-           (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+       if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+               remap = true;
+       } else if (PageHighMem(page)) {
+               /*
+                * Depending on the cma= arguments and per-arch setup,
+                * dma_alloc_contiguous could return highmem pages.
+                * Without remapping there is no way to return them here, so
+                * log an error and fail.
+                */
+               if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+                       dev_info(dev, "Rejecting highmem page from CMA.\n");
+                       goto out_free_pages;
+               }
+               remap = true;
+       } else if (!dev_is_dma_coherent(dev) &&
+                  IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+               set_uncached = true;
+
+       if (remap) {
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);
@@ -234,34 +251,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
-               memset(ret, 0, size);
-               goto done;
-       }
-
-       if (PageHighMem(page)) {
-               /*
-                * Depending on the cma= arguments and per-arch setup
-                * dma_alloc_contiguous could return highmem pages.
-                * Without remapping there is no way to return them here,
-                * so log an error and fail.
-                */
-               dev_info(dev, "Rejecting highmem page from CMA.\n");
-               goto out_free_pages;
+       } else {
+               ret = page_address(page);
+               if (dma_set_decrypted(dev, ret, size))
+                       goto out_free_pages;
        }
-       ret = page_address(page);
-       if (dma_set_decrypted(dev, ret, size))
-               goto out_free_pages;
        memset(ret, 0, size);
-       if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-           !dev_is_dma_coherent(dev)) {
+       if (set_uncached) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }
-done:
+
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;