From: Kunkun Jiang <[email protected]>

This implements switch_dirty_log by invoking iommu_split_block() and
iommu_merge_page(): enabling the dirty log splits block mappings so that
dirty state is tracked at page granularity, and disabling it merges the
pages back into block mappings. The HTTU HD feature is required.
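
As a rough illustration only (the wrapper below is not part of this
patch; its name and body are assumed from the op prototype added here),
the core layer is expected to reach the new callback roughly like this:

	int iommu_switch_dirty_log(struct iommu_domain *domain, bool enable,
				   unsigned long iova, size_t size, int prot)
	{
		/* Fall back cleanly if the driver does not provide the op. */
		if (unlikely(!domain->ops->switch_dirty_log))
			return -ENODEV;

		return domain->ops->switch_dirty_log(domain, enable, iova,
						     size, prot);
	}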

Co-developed-by: Keqian Zhu <[email protected]>
Signed-off-by: Kunkun Jiang <[email protected]>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 38 +++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 4d8495d88be2..52c6f3e74d6f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2590,6 +2590,43 @@ static int arm_smmu_merge_page(struct iommu_domain *domain,
        return 0;
 }
 
+static int arm_smmu_switch_dirty_log(struct iommu_domain *domain, bool enable,
+                                    unsigned long iova, size_t size, int prot)
+{
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+       if (!(smmu->features & ARM_SMMU_FEAT_HD))
+               return -ENODEV;
+       if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+               return -EINVAL;
+
+       if (enable) {
+               /*
+                * For SMMU, hardware dirty management is always enabled if the
+                * hardware supports HTTU HD. The action to start dirty log
+                * tracking is splitting block mappings.
+                *
+                * We don't return an error even if the split operation fails,
+                * as we can still track dirty pages at block granularity, which
+                * is still a much better choice than a full-dirty policy.
+                */
+               iommu_split_block(domain, iova, size);
+       } else {
+               /*
+                * For SMMU, hardware dirty management is always enabled if the
+                * hardware supports HTTU HD. The action to stop dirty log
+                * tracking is merging page mappings.
+                *
+                * We don't return an error even if the merge operation fails,
+                * as it only affects the performance of DMA transactions.
+                */
+               iommu_merge_page(domain, iova, size, prot);
+       }
+
+       return 0;
+}
+
 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
        return iommu_fwspec_add_ids(dev, args->args, 1);
@@ -2691,6 +2728,7 @@ static struct iommu_ops arm_smmu_ops = {
        .domain_set_attr        = arm_smmu_domain_set_attr,
        .split_block            = arm_smmu_split_block,
        .merge_page             = arm_smmu_merge_page,
+       .switch_dirty_log       = arm_smmu_switch_dirty_log,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
        .put_resv_regions       = generic_iommu_put_resv_regions,
-- 
2.19.1
