This ensures dma_direct_alloc_pages will use the right gfp mask, and keeps
the code for that common between both callers.
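
For reference, a condensed sketch of the new shared path (the full hunks are
in the diff below): both dma_direct_alloc() and dma_direct_alloc_pages() now
start with

	if (dma_should_alloc_from_pool(dev, gfp, attrs /* or 0 */))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

where dma_direct_alloc_from_pool() applies dma_direct_optimal_gfp_mask()
against dev->coherent_dma_mask before calling dma_alloc_from_pool(), so the
atomic pool allocation sees the correct GFP zone flags in both paths, and
*dma_handle is filled in from the returned page.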

Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 kernel/dma/direct.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index b5d56810130b22..ace9159c992f65 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -147,6 +147,22 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
        return page;
 }
 
+static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp)
+{
+       struct page *page;
+       u64 phys_mask;
+       void *ret;
+
+       gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+                                          &phys_mask);
+       page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
+       if (!page)
+               return NULL;
+       *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+       return ret;
+}
+
 void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -163,17 +179,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;
 
-       if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-               u64 phys_mask;
-
-               gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-                               &phys_mask);
-               page = dma_alloc_from_pool(dev, size, &ret, gfp,
-                               dma_coherent_ok);
-               if (!page)
-                       return NULL;
-               goto done;
-       }
+       if (dma_should_alloc_from_pool(dev, gfp, attrs))
+               return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
@@ -297,15 +304,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
        struct page *page;
 
-       if (dma_should_alloc_from_pool(dev, gfp, 0)) {
-               void *ret;
-
-               page = dma_alloc_from_pool(dev, size, &ret, gfp,
-                               dma_coherent_ok);
-               if (!page)
-                       return NULL;
-               goto done;
-       }
+       if (dma_should_alloc_from_pool(dev, gfp, 0))
+               return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        page = __dma_direct_alloc_pages(dev, size, gfp | __GFP_ZERO);
        if (!page)
@@ -326,7 +326,6 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                                1 << get_order(size)))
                        goto out_free_pages;
        }
-done:
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
 out_free_pages:
-- 
2.28.0
