These two functions provide support for mapping and
unmapping physical addresses to I/O virtual addresses. The
difference from the iommu_(un)map_range() functions is that the
new functions take a gfp_order parameter instead of a size. This
makes it easier for the IOMMU backend implementations to detect
whether a given range can be mapped by larger page sizes.
These new functions should replace the old ones in the long
term.

Signed-off-by: Joerg Roedel <joerg.roe...@amd.com>
---
 drivers/base/iommu.c  |   31 +++++++++++++++++++++++++++++++
 include/linux/iommu.h |   16 ++++++++++++++++
 2 files changed, 47 insertions(+), 0 deletions(-)

diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index f4c86c4..cf7cbec 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -107,3 +107,34 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
        return iommu_ops->domain_has_cap(domain, cap);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+             phys_addr_t paddr, int gfp_order, int prot)
+{
+       unsigned long invalid_mask;
+       size_t size;
+
+       size         = 0x1000UL << gfp_order;
+       invalid_mask = size - 1;
+
+       BUG_ON((iova | paddr) & invalid_mask);
+
+       return iommu_ops->map_range(domain, iova, paddr, size, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map);
+
+int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+{
+       unsigned long invalid_mask;
+       size_t size;
+
+       size         = 0x1000UL << gfp_order;
+       invalid_mask = size - 1;
+
+       BUG_ON(iova & invalid_mask);
+
+       iommu_ops->unmap_range(domain, iova, size);
+
+       return gfp_order;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0f18f37..6d0035b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -60,6 +60,10 @@ extern int iommu_map_range(struct iommu_domain *domain, 
unsigned long iova,
                           phys_addr_t paddr, size_t size, int prot);
 extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
                              size_t size);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+                    phys_addr_t paddr, int gfp_order, int prot);
+extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+                      int gfp_order);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                                      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -108,6 +112,18 @@ static inline void iommu_unmap_range(struct iommu_domain 
*domain,
 {
 }
 
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+                           phys_addr_t paddr, int gfp_order, int prot)
+{
+       return -ENODEV;
+}
+
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+                             int gfp_order)
+{
+       return -ENODEV;
+}
+
 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                                             unsigned long iova)
 {
-- 
1.6.6


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to