Add "struct mtk_iommu_data *" in the "struct mtk_iommu_domain",
reduce the call mtk_iommu_get_m4u_data().
No functional change.

Signed-off-by: Yong Wu <yong...@mediatek.com>
---
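Note: the affected callbacks already recover the domain with the
existing to_mtk_domain() helper (a container_of() wrapper around the
embedded iommu_domain), so dom->data is reachable from any
"struct iommu_domain *" once mtk_iommu_domain_finalise() has set it.
A rough sketch of the pattern, assuming the helper's usual
container_of() form:

        /* The generic iommu_domain is embedded in the driver domain. */
        struct mtk_iommu_domain {
                struct io_pgtable_cfg   cfg;
                struct io_pgtable_ops   *iop;

                struct mtk_iommu_data   *data;  /* per-M4U data, set in finalise */
                struct iommu_domain     domain;
        };

        static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
        {
                return container_of(dom, struct mtk_iommu_domain, domain);
        }

        /* e.g. in the flush_iotlb_all callback: */
        static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
        {
                struct mtk_iommu_domain *dom = to_mtk_domain(domain);

                mtk_iommu_tlb_flush_all(dom->data);
        }
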
 drivers/iommu/mtk_iommu.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index f3666b0d7577..f1941608ccb7 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -126,6 +126,7 @@ struct mtk_iommu_domain {
        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;
 
+       struct mtk_iommu_data           *data;
        struct iommu_domain             domain;
 };
 
@@ -351,6 +352,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
                return -EINVAL;
        }
 
+       dom->data = data;
        /* Update our support page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
        return 0;
@@ -442,10 +444,9 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-       struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
 
        /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
-       if (data->enable_4GB)
+       if (dom->data->enable_4GB)
                paddr |= BIT_ULL(32);
 
        /* Synchronize with the tlb_lock */
@@ -468,36 +469,37 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
 
 static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
-       mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
+       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+
+       mtk_iommu_tlb_flush_all(dom->data);
 }
 
 static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
                                 struct iommu_iotlb_gather *gather)
 {
-       struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        size_t length = gather->end - gather->start + 1;
 
        mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
-                                      data);
+                                      dom->data);
 }
 
 static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
 {
-       struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+       struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-       mtk_iommu_tlb_flush_range_sync(iova, size, size, data);
+       mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
 }
 
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-       struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        phys_addr_t pa;
 
        pa = dom->iop->iova_to_phys(dom->iop, iova);
-       if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+       if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
                pa &= ~BIT_ULL(32);
 
        return pa;
-- 
2.18.0
