To keep naming consistent, stick with the *iotlb* prefix. Rename the
remaining functions: iommu_tlb_sync() becomes iommu_iotlb_sync() and
iommu_flush_tlb_all() becomes iommu_flush_iotlb_all().

Signed-off-by: Tom Murphy <murph...@tcd.ie>
---
 drivers/iommu/dma-iommu.c       |  2 +-
 drivers/iommu/iommu.c           |  4 ++--
 drivers/vfio/vfio_iommu_type1.c |  2 +-
 include/linux/io-pgtable.h      |  2 +-
 include/linux/iommu.h           | 10 +++++-----
 5 files changed, 10 insertions(+), 10 deletions(-)
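
A quick usage sketch (not part of the patch; domain, iova and size are
placeholders) showing the renamed helpers in the usual batched
unmap-then-sync flow:

        struct iommu_iotlb_gather gather;
        size_t unmapped;

        /* Start with an empty gather range. */
        iommu_iotlb_gather_init(&gather);

        /* Unmap without flushing; the range accumulates in 'gather'. */
        unmapped = iommu_unmap_fast(domain, iova, size, &gather);

        /* Issue one deferred invalidation for everything gathered. */
        iommu_iotlb_sync(domain, &gather);      /* was iommu_tlb_sync() */

        /* Or drop the whole IOTLB for the domain in one go: */
        iommu_flush_iotlb_all(domain);          /* was iommu_flush_tlb_all() */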

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 79e6d8d799a3..59adb1a0aefc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -503,7 +503,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                        domain->ops->flush_iotlb_range(domain, dma_addr, size,
                                        freelist);
                else
-                       iommu_tlb_sync(domain, &iotlb_gather);
+                       iommu_iotlb_sync(domain, &iotlb_gather);
        }
 
        iommu_dma_free_iova(cookie, dma_addr, size, freelist);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9065127d7e9c..70a85f41876f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 
        }
 
-       iommu_flush_tlb_all(domain);
+       iommu_flush_iotlb_all(domain);
 
 out:
        iommu_put_resv_regions(dev, &mappings);
@@ -2317,7 +2317,7 @@ size_t iommu_unmap(struct iommu_domain *domain,
        if (ops->flush_iotlb_range)
                ops->flush_iotlb_range(domain, iova, ret, freelist);
        else
-               iommu_tlb_sync(domain, &iotlb_gather);
+               iommu_iotlb_sync(domain, &iotlb_gather);
 
        return ret;
 }
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 570ebf878fea..d550ceb7b2aa 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -774,7 +774,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
        long unlocked = 0;
        struct vfio_regions *entry, *next;
 
-       iommu_tlb_sync(domain->domain, iotlb_gather);
+       iommu_iotlb_sync(domain->domain, iotlb_gather);
 
        list_for_each_entry_safe(entry, next, regions, list) {
                unlocked += vfio_unpin_pages_remote(dma,
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 53d53c6c2be9..d3f2bd4a3ac4 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -31,7 +31,7 @@ enum io_pgtable_fmt {
  *                  single page.  IOMMUs that cannot batch TLB invalidation
  *                  operations efficiently will typically issue them here, but
  *                  others may decide to update the iommu_iotlb_gather structure
- *                  and defer the invalidation until iommu_tlb_sync() instead.
+ *                  and defer the invalidation until iommu_iotlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 77e773d03f22..7b363f24bf99 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -542,7 +542,7 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                              unsigned long iova, int flags);
 
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        if (domain->ops->flush_iotlb_all)
                domain->ops->flush_iotlb_all(domain);
@@ -556,7 +556,7 @@ static inline void flush_iotlb_range(struct iommu_domain *domain,
                domain->ops->flush_iotlb_range(domain, iova, size, freelist);
 }
 
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *iotlb_gather)
 {
        if (domain->ops->iotlb_sync)
@@ -579,7 +579,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
        if (gather->pgsize != size ||
            end < gather->start || start > gather->end) {
                if (gather->pgsize)
-                       iommu_tlb_sync(domain, gather);
+                       iommu_iotlb_sync(domain, gather);
                gather->pgsize = size;
        }
 
@@ -762,11 +762,11 @@ static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
        return 0;
 }
 
-static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 }
 
-static inline void iommu_tlb_sync(struct iommu_domain *domain,
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *iotlb_gather)
 {
 }
-- 
2.20.1
