IO_PGTABLE_QUIRK_NON_STRICT was never a very comfortable fit, since it's
not a quirk of the pagetable format itself. Now that we have a more
appropriate way to convey non-strict unmaps (the caller's
iommu_iotlb_gather now carries a "queued" flag), this last of the
non-quirk quirks can also go, and with the flush queue code also now
enforcing its own ordering we can have a lovely cleanup all round.

Signed-off-by: Robin Murphy <robin.mur...@arm.com>

---

v3: New
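
For context (not part of the patch): the "more appropriate way" mentioned
above is the caller-supplied iommu_iotlb_gather, whose "queued" flag the
io-pgtable code now consults instead of a format-level quirk. Below is a
minimal sketch of the caller side; example_unmap() is a hypothetical
wrapper added purely for illustration, while the gather helpers and the
unmap op signature are the existing kernel interfaces this series touches.

#include <linux/io-pgtable.h>
#include <linux/iommu.h>

/*
 * Illustrative only: the caller, not the pagetable format, decides
 * whether leaf TLB invalidations are deferred to a flush queue.
 */
static size_t example_unmap(struct io_pgtable_ops *ops, unsigned long iova,
                            size_t size, bool using_flush_queue)
{
        struct iommu_iotlb_gather gather;

        iommu_iotlb_gather_init(&gather);
        /*
         * Replaces IO_PGTABLE_QUIRK_NON_STRICT: marking the gather as
         * queued makes the unmap path skip io_pgtable_tlb_add_page().
         */
        gather.queued = using_flush_queue;

        return ops->unmap(ops, iova, size, &gather);
}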
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  3 ---
 drivers/iommu/arm/arm-smmu/arm-smmu.c       |  3 ---
 drivers/iommu/io-pgtable-arm-v7s.c          | 12 ++----------
 drivers/iommu/io-pgtable-arm.c              | 12 ++----------
 include/linux/io-pgtable.h                  |  5 -----
 5 files changed, 4 insertions(+), 31 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 4c648da447bf..d9c93d8d193d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2174,9 +2174,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
                .iommu_dev      = smmu->dev,
        };
 
-       if (!iommu_get_dma_strict(domain))
-               pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops)
                return -ENOMEM;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 970d9e4dcd69..a325d4769c17 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -765,9 +765,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                .iommu_dev      = smmu->dev,
        };
 
-       if (!iommu_get_dma_strict(domain))
-               pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-
        if (smmu->impl && smmu->impl->init_context) {
                ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
                if (ret)
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 5db90d7ce2ec..e84478d39705 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -700,14 +700,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                                                ARM_V7S_BLOCK_SIZE(lvl + 1));
                                ptep = iopte_deref(pte[i], lvl, data);
                                __arm_v7s_free_table(ptep, lvl + 1, data);
-                       } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
-                               /*
-                                * Order the PTE update against queueing the IOVA, to
-                                * guarantee that a flush callback from a different CPU
-                                * has observed it before the TLBIALL can be issued.
-                                */
-                               smp_wmb();
-                       } else {
+                       } else if (!gather->queued) {
                                io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
                        }
                        iova += blk_size;
@@ -791,8 +784,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
                            IO_PGTABLE_QUIRK_NO_PERMS |
-                           IO_PGTABLE_QUIRK_ARM_MTK_EXT |
-                           IO_PGTABLE_QUIRK_NON_STRICT))
+                           IO_PGTABLE_QUIRK_ARM_MTK_EXT))
                return NULL;
 
        /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 053df4048a29..48a5bd8f571d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -638,14 +638,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                                io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
                                                          ARM_LPAE_GRANULE(data));
                                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
-                       } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
-                               /*
-                                * Order the PTE update against queueing the IOVA, to
-                                * guarantee that a flush callback from a different CPU
-                                * has observed it before the TLBIALL can be issued.
-                                */
-                               smp_wmb();
-                       } else {
+                       } else if (!gather->queued) {
                                io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
                        }
 
@@ -825,7 +818,6 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
        bool tg1;
 
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
-                           IO_PGTABLE_QUIRK_NON_STRICT |
                            IO_PGTABLE_QUIRK_ARM_TTBR1 |
                            IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
                return NULL;
@@ -929,7 +921,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
        /* The NS quirk doesn't apply at stage 2 */
-       if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
+       if (cfg->quirks)
                return NULL;
 
        data = arm_lpae_alloc_pgtable(cfg);
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index c43f3b899d2a..9ba6d9ea316e 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -73,10 +73,6 @@ struct io_pgtable_cfg {
         *      to support up to 35 bits PA where the bit32, bit33 and bit34 are
         *      encoded in the bit9, bit4 and bit5 of the PTE respectively.
         *
-        * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
-        *      on unmap, for DMA domains using the flush queue mechanism for
-        *      delayed invalidation.
-        *
         * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
         *      for use in the upper half of a split address space.
         *
@@ -86,7 +82,6 @@ struct io_pgtable_cfg {
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)
        #define IO_PGTABLE_QUIRK_ARM_MTK_EXT    BIT(3)
-       #define IO_PGTABLE_QUIRK_NON_STRICT     BIT(4)
        #define IO_PGTABLE_QUIRK_ARM_TTBR1      BIT(5)
        #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
        unsigned long                   quirks;
-- 
2.25.1
