__IOMMU_DOMAIN_LP (large pages) indicates that a domain can handle
configurations where PAGE_SIZE is smaller than the IOMMU page size.
Always allow attaching trusted devices to such domains, and set the flag for
the DMA API domain types (IOMMU_DOMAIN_DMA and IOMMU_DOMAIN_DMA_FQ), which
can now handle this situation.

Note that untrusted devices are not yet supported: they require
additional changes so that swiotlb bounce buffers can be aligned to
granularities larger than PAGE_SIZE.
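
For illustration only (not part of the patch), here is a standalone sketch of
the coverage check that iommu_check_page_size() performs and that this change
relaxes for trusted devices on large-page domains. It assumes a hypothetical
4 KiB CPU page size and a hypothetical helper name; the kernel function tests
domain->pgsize_bitmap the same way:

	/*
	 * Standalone sketch: an IOMMU can represent CPU pages exactly only if
	 * it supports at least one page size <= PAGE_SIZE. All such sizes fall
	 * within the mask PAGE_SIZE | (PAGE_SIZE - 1).
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define EXAMPLE_PAGE_SIZE 0x1000UL	/* assume a 4 KiB CPU page */

	static bool covers_cpu_pages(unsigned long pgsize_bitmap)
	{
		return pgsize_bitmap &
		       (EXAMPLE_PAGE_SIZE | (EXAMPLE_PAGE_SIZE - 1));
	}

	int main(void)
	{
		/* IOMMU supporting 4 KiB pages: CPU pages map exactly. */
		printf("4K-capable IOMMU: %d\n", covers_cpu_pages(0x1000));
		/*
		 * IOMMU supporting only 16 KiB pages: the check fails, so
		 * attaching is only allowed on an __IOMMU_DOMAIN_LP domain
		 * with a trusted device.
		 */
		printf("16K-only IOMMU:   %d\n", covers_cpu_pages(0x4000));
		return 0;
	}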

Signed-off-by: Sven Peter <s...@svenpeter.dev>
---
 drivers/iommu/iommu.c |  9 +++++++--
 include/linux/iommu.h | 13 +++++++++++--
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 28896739964b..66bba6a6bb28 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1976,10 +1976,15 @@ void iommu_domain_free(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
-static int iommu_check_page_size(struct iommu_domain *domain)
+static int iommu_check_page_size(struct iommu_domain *domain,
+                               struct device *dev)
 {
+       bool trusted = !(dev_is_pci(dev) && to_pci_dev(dev)->untrusted);
+
        if (!iommu_is_paging_domain(domain))
                return 0;
+       if (iommu_is_large_pages_domain(domain) && trusted)
+               return 0;
 
        if (!(domain->pgsize_bitmap & (PAGE_SIZE | (PAGE_SIZE - 1)))) {
                pr_warn("IOMMU pages cannot exactly represent CPU pages.\n");
@@ -2007,7 +2012,7 @@ static int __iommu_attach_device(struct iommu_domain *domain,
         * only limit domain->pgsize_bitmap after having attached the first
         * device.
         */
-       ret = iommu_check_page_size(domain);
+       ret = iommu_check_page_size(domain, dev);
        if (ret) {
                __iommu_detach_device(domain, dev);
                return ret;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index cabd25879613..1f1af59d0522 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -62,6 +62,8 @@ struct iommu_domain_geometry {
                                              implementation              */
 #define __IOMMU_DOMAIN_PT      (1U << 2)  /* Domain is identity mapped   */
 #define __IOMMU_DOMAIN_DMA_FQ  (1U << 3)  /* DMA-API uses flush queue    */
+#define __IOMMU_DOMAIN_LP      (1U << 4)  /* Support for PAGE_SIZE smaller
+                                             than IOMMU page size        */
 
 /*
  * This are the possible domain-types
@@ -81,10 +83,12 @@ struct iommu_domain_geometry {
 #define IOMMU_DOMAIN_IDENTITY  (__IOMMU_DOMAIN_PT)
 #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
 #define IOMMU_DOMAIN_DMA       (__IOMMU_DOMAIN_PAGING |        \
-                                __IOMMU_DOMAIN_DMA_API)
+                                __IOMMU_DOMAIN_DMA_API |       \
+                                __IOMMU_DOMAIN_LP)
 #define IOMMU_DOMAIN_DMA_FQ    (__IOMMU_DOMAIN_PAGING |        \
                                 __IOMMU_DOMAIN_DMA_API |       \
-                                __IOMMU_DOMAIN_DMA_FQ)
+                                __IOMMU_DOMAIN_DMA_FQ |        \
+                                __IOMMU_DOMAIN_LP)
 
 struct iommu_domain {
        unsigned type;
@@ -106,6 +110,11 @@ static inline bool iommu_is_paging_domain(struct iommu_domain *domain)
        return domain->type & __IOMMU_DOMAIN_PAGING;
 }
 
+static inline bool iommu_is_large_pages_domain(struct iommu_domain *domain)
+{
+       return domain->type & __IOMMU_DOMAIN_LP;
+}
+
 enum iommu_cap {
        IOMMU_CAP_CACHE_COHERENCY,      /* IOMMU can enforce cache coherent DMA
                                           transactions */
-- 
2.25.1
