From: Kunkun Jiang <jiangkun...@huawei.com>

After the dirty log is retrieved, the user should clear it to re-enable
dirty log tracking for the dirtied pages. This clears the dirty state of
the leaf TTDs specified by the user-provided bitmap (as HTTU is enabled
for stage 1 only, clearing means setting the AP[2] bit).

Co-developed-by: Keqian Zhu <zhukeqi...@huawei.com>
Signed-off-by: Kunkun Jiang <jiangkun...@huawei.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 25 ++++++
 drivers/iommu/io-pgtable-arm.c              | 95 +++++++++++++++++++++
 include/linux/io-pgtable.h                  |  4 +
 3 files changed, 124 insertions(+)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c 
b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 9eb209a07acc..59bb1d198631 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2656,6 +2656,30 @@ static int arm_smmu_sync_dirty_log(struct iommu_domain 
*domain,
                                   bitmap_pgshift);
 }
 
+static int arm_smmu_clear_dirty_log(struct iommu_domain *domain,
+                                   unsigned long iova, size_t size,
+                                   unsigned long *bitmap,
+                                   unsigned long base_iova,
+                                   unsigned long bitmap_pgshift)
+{
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+       if (!(smmu->features & ARM_SMMU_FEAT_HD))
+               return -ENODEV;
+       if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+               return -EINVAL;
+
+       if (!ops || !ops->clear_dirty_log) {
+               pr_err("io-pgtable don't realize clear dirty log\n");
+               return -ENODEV;
+       }
+
+       return ops->clear_dirty_log(ops, iova, size, bitmap, base_iova,
+                                   bitmap_pgshift);
+}
+
 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
        return iommu_fwspec_add_ids(dev, args->args, 1);
@@ -2759,6 +2783,7 @@ static struct iommu_ops arm_smmu_ops = {
        .merge_page             = arm_smmu_merge_page,
        .switch_dirty_log       = arm_smmu_switch_dirty_log,
        .sync_dirty_log         = arm_smmu_sync_dirty_log,
+       .clear_dirty_log        = arm_smmu_clear_dirty_log,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
        .put_resv_regions       = generic_iommu_put_resv_regions,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 67a208a05ab2..e3ef0f50611c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -966,6 +966,100 @@ static int arm_lpae_sync_dirty_log(struct io_pgtable_ops 
*ops,
                                         bitmap, base_iova, bitmap_pgshift);
 }
 
+static int __arm_lpae_clear_dirty_log(struct arm_lpae_io_pgtable *data,
+                                     unsigned long iova, size_t size,
+                                     int lvl, arm_lpae_iopte *ptep,
+                                     unsigned long *bitmap,
+                                     unsigned long base_iova,
+                                     unsigned long bitmap_pgshift)
+{
+       arm_lpae_iopte pte;
+       struct io_pgtable *iop = &data->iop;
+       unsigned long offset;
+       size_t base, next_size;
+       int nbits, ret, i;
+
+       if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
+               return -EINVAL;
+
+       ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+       pte = READ_ONCE(*ptep);
+       if (WARN_ON(!pte))
+               return -EINVAL;
+
+       if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
+               if (iopte_leaf(pte, lvl, iop->fmt)) {
+                       if (pte & ARM_LPAE_PTE_AP_RDONLY)
+                               return 0;
+
+                       /* Ensure all corresponding bits are set */
+                       nbits = size >> bitmap_pgshift;
+                       offset = (iova - base_iova) >> bitmap_pgshift;
+                       for (i = offset; i < offset + nbits; i++) {
+                               if (!test_bit(i, bitmap))
+                                       return 0;
+                       }
+
+                       /* Race does not exist */
+                       pte |= ARM_LPAE_PTE_AP_RDONLY;
+                       __arm_lpae_set_pte(ptep, pte, &iop->cfg);
+                       return 0;
+               } else {
+                       /* To traverse next level */
+                       next_size = ARM_LPAE_BLOCK_SIZE(lvl + 1, data);
+                       ptep = iopte_deref(pte, data);
+                       for (base = 0; base < size; base += next_size) {
+                               ret = __arm_lpae_clear_dirty_log(data,
+                                               iova + base, next_size, lvl + 1,
+                                               ptep, bitmap, base_iova,
+                                               bitmap_pgshift);
+                               if (ret)
+                                       return ret;
+                       }
+                       return 0;
+               }
+       } else if (iopte_leaf(pte, lvl, iop->fmt)) {
+               /* Though the size is too small, it is already clean */
+               if (pte & ARM_LPAE_PTE_AP_RDONLY)
+                       return 0;
+
+               return -EINVAL;
+       }
+
+       /* Keep on walking */
+       ptep = iopte_deref(pte, data);
+       return __arm_lpae_clear_dirty_log(data, iova, size, lvl + 1, ptep,
+                       bitmap, base_iova, bitmap_pgshift);
+}
+
+static int arm_lpae_clear_dirty_log(struct io_pgtable_ops *ops,
+                                   unsigned long iova, size_t size,
+                                   unsigned long *bitmap,
+                                   unsigned long base_iova,
+                                   unsigned long bitmap_pgshift)
+{
+       struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+       arm_lpae_iopte *ptep = data->pgd;
+       int lvl = data->start_level;
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
+       long iaext = (s64)iova >> cfg->ias;
+
+       if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+               return -EINVAL;
+
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+               iaext = ~iaext;
+       if (WARN_ON(iaext))
+               return -EINVAL;
+
+       if (data->iop.fmt != ARM_64_LPAE_S1 &&
+           data->iop.fmt != ARM_32_LPAE_S1)
+               return -EINVAL;
+
+       return __arm_lpae_clear_dirty_log(data, iova, size, lvl, ptep,
+                       bitmap, base_iova, bitmap_pgshift);
+}
+
 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 {
        unsigned long granule, page_sizes;
@@ -1047,6 +1141,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
                .split_block    = arm_lpae_split_block,
                .merge_page     = arm_lpae_merge_page,
                .sync_dirty_log = arm_lpae_sync_dirty_log,
+               .clear_dirty_log = arm_lpae_clear_dirty_log,
        };
 
        return data;
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 9e2757d12066..ad65b9099be6 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -175,6 +175,10 @@ struct io_pgtable_ops {
                              unsigned long iova, size_t size,
                              unsigned long *bitmap, unsigned long base_iova,
                              unsigned long bitmap_pgshift);
+       int (*clear_dirty_log)(struct io_pgtable_ops *ops,
+                              unsigned long iova, size_t size,
+                              unsigned long *bitmap, unsigned long base_iova,
+                              unsigned long bitmap_pgshift);
 };
 
 /**
-- 
2.19.1

Reply via email to