Convert drivers/iommu/rockchip-iommu.c to use the new page allocation
functions provided in iommu-pages.h, and add the iommu-pages.h include
to drivers/iommu/iommufd/iova_bitmap.c as well.
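
For illustration, the conversion follows the pattern below (a minimal
sketch, assuming iommu_alloc_page() returns the virtual address of a
zeroed page and iommu_free_page() frees by that virtual address, which
is what the hunks below rely on):

	/* Before: raw page allocator, casting to/from unsigned long */
	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
	if (!pt)
		return ERR_PTR(-ENOMEM);
	...
	free_page((unsigned long)pt);

	/* After: iommu-pages.h helpers take and return the virtual
	 * address directly, so no casts are needed.
	 */
	u32 *pt = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
	if (!pt)
		return ERR_PTR(-ENOMEM);
	...
	iommu_free_page(pt);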

Signed-off-by: Pasha Tatashin <pasha.tatas...@soleen.com>
---
 drivers/iommu/iommufd/iova_bitmap.c |  2 ++
 drivers/iommu/rockchip-iommu.c      | 14 ++++++++------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 6b8575b93f17..4d5d1be807fe 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -8,6 +8,8 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 
+#include "../iommu-pages.h"
+
 #define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
 
 /*
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2685861c0a12..e04f22d481d0 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#include "iommu-pages.h"
+
 /** MMU register offsets */
 #define RK_MMU_DTE_ADDR                0x00    /* Directory table address */
 #define RK_MMU_STATUS          0x04
@@ -727,14 +729,14 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
        if (rk_dte_is_pt_valid(dte))
                goto done;
 
-       page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
+       page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
        if (!page_table)
                return ERR_PTR(-ENOMEM);
 
        pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, pt_dma)) {
                dev_err(dma_dev, "DMA mapping error while allocating page table\n");
-               free_page((unsigned long)page_table);
+               iommu_free_page(page_table);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1061,7 +1063,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
         * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
         * Allocate one 4 KiB page for each table.
         */
-       rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
+       rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
        if (!rk_domain->dt)
                goto err_free_domain;
 
@@ -1083,7 +1085,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
        return &rk_domain->domain;
 
 err_free_dt:
-       free_page((unsigned long)rk_domain->dt);
+       iommu_free_page(rk_domain->dt);
 err_free_domain:
        kfree(rk_domain);
 
@@ -1104,13 +1106,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
                        u32 *page_table = phys_to_virt(pt_phys);
                        dma_unmap_single(dma_dev, pt_phys,
                                         SPAGE_SIZE, DMA_TO_DEVICE);
-                       free_page((unsigned long)page_table);
+                       iommu_free_page(page_table);
                }
        }
 
        dma_unmap_single(dma_dev, rk_domain->dt_dma,
                         SPAGE_SIZE, DMA_TO_DEVICE);
-       free_page((unsigned long)rk_domain->dt);
+       iommu_free_page(rk_domain->dt);
 
        kfree(rk_domain);
 }
-- 
2.43.0.rc2.451.g8631bc7472-goog

