Implement domain-selective, PASID-selective and page-selective
IOTLB invalidations.

Signed-off-by: Eric Auger <eric.au...@redhat.com>

---
v14 -> v15:
- remove the redundant arm_smmu_cmdq_issue_sync(smmu)
  in IOMMU_INV_GRANU_ADDR case (Zenghui)
- if RIL is not supported by the host, make sure the granule_size
  that is passed by the userspace is supported or fix it
  (Chenxiang)

v13 -> v14:
- Add domain invalidation
- do global inval when asid is not provided with addr
  granularity

v7 -> v8:
- ASID based invalidation using iommu_inv_pasid_info
- check ARCHID/PASID flags in addr based invalidation
- use __arm_smmu_tlb_inv_context and __arm_smmu_tlb_inv_range_nosync

v6 -> v7:
- check the uapi version

v3 -> v4:
- adapt to changes in the uapi
- add support for leaf parameter
- do not use arm_smmu_tlb_inv_range_nosync or arm_smmu_tlb_inv_context
  anymore

v2 -> v3:
- replace __arm_smmu_tlb_sync by arm_smmu_cmdq_issue_sync

v1 -> v2:
- properly pass the asid
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 89 +++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c 
b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 56a301fbe75a..bfc112cc0d38 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2961,6 +2961,94 @@ static void arm_smmu_detach_pasid_table(struct 
iommu_domain *domain)
        mutex_unlock(&smmu_domain->init_mutex);
 }
 
+/*
+ * arm_smmu_cache_invalidate() - propagate guest stage-1 TLB invalidations
+ * to the physical SMMU, for a nested (2-stage) domain.
+ *
+ * Supports domain-selective, ASID-selective (PASID granularity with ARCHID)
+ * and address-range-selective IOTLB invalidations. Only IOTLB cache type
+ * invalidations are handled; PASID-cache and device-IOTLB requests return
+ * -ENOENT. Returns 0 on success, negative errno on failure.
+ */
+static int
+arm_smmu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+                         struct iommu_cache_invalidate_info *inv_info)
+{
+       struct arm_smmu_cmdq_ent cmd = {.opcode = CMDQ_OP_TLBI_NSNH_ALL};
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+       /* Only meaningful when the guest owns stage 1 (nested mode) */
+       if (smmu_domain->stage != ARM_SMMU_DOMAIN_NESTED)
+               return -EINVAL;
+
+       if (!smmu)
+               return -EINVAL;
+
+       if (inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+               return -EINVAL;
+
+       /* PASID-cache and device-IOTLB invalidation are not implemented */
+       if (inv_info->cache & IOMMU_CACHE_INV_TYPE_PASID ||
+           inv_info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) {
+               return -ENOENT;
+       }
+
+       if (!(inv_info->cache & IOMMU_CACHE_INV_TYPE_IOTLB))
+               return -EINVAL;
+
+       /* IOTLB invalidation */
+
+       switch (inv_info->granularity) {
+       case IOMMU_INV_GRANU_PASID:
+       {
+               struct iommu_inv_pasid_info *info =
+                       &inv_info->granu.pasid_info;
+
+               /*
+                * pasid_info carries IOMMU_INV_PASID_FLAGS_* flags (not the
+                * IOMMU_INV_ADDR_FLAGS_* namespace). PASID-selective
+                * invalidation is not supported; an ASID (archid) is required.
+                */
+               if (info->flags & IOMMU_INV_PASID_FLAGS_PASID)
+                       return -ENOENT;
+               if (!(info->flags & IOMMU_INV_PASID_FLAGS_ARCHID))
+                       return -EINVAL;
+
+               __arm_smmu_tlb_inv_context(smmu_domain, info->archid);
+               return 0;
+       }
+       case IOMMU_INV_GRANU_ADDR:
+       {
+               struct iommu_inv_addr_info *info = &inv_info->granu.addr_info;
+               size_t granule_size = info->granule_size;
+               size_t size = info->nb_granules * info->granule_size;
+               bool leaf = info->flags & IOMMU_INV_ADDR_FLAGS_LEAF;
+               int tg;
+
+               if (info->flags & IOMMU_INV_ADDR_FLAGS_PASID)
+                       return -ENOENT;
+
+               /* Without an ASID, fall through to global S1 invalidation */
+               if (!(info->flags & IOMMU_INV_ADDR_FLAGS_ARCHID))
+                       break;
+
+               /* granule_size must be a power of two */
+               tg = __ffs(granule_size);
+               if (granule_size & ~((size_t)1 << tg))
+                       return -EINVAL;
+               /*
+                * When RIL is not supported, make sure the granule size that is
+                * passed is supported. In RIL mode, this is enforced in
+                * __arm_smmu_tlb_inv_range()
+                */
+               if (!(smmu->features & ARM_SMMU_FEAT_RANGE_INV) &&
+                   !(granule_size & smmu_domain->domain.pgsize_bitmap)) {
+                       tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+                       granule_size = 1 << tg;
+                       /*
+                        * size stays in bytes: the byte range to invalidate
+                        * is unchanged, only the granule used to walk it is.
+                        */
+               }
+
+               arm_smmu_tlb_inv_range_domain(info->addr, size,
+                                             granule_size, leaf,
+                                             info->archid, smmu_domain);
+               return 0;
+       }
+       case IOMMU_INV_GRANU_DOMAIN:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Global S1 invalidation, scoped by the domain's VMID */
+       cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
+       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+       arm_smmu_cmdq_issue_sync(smmu);
+       return 0;
+}
+
 static bool arm_smmu_dev_has_feature(struct device *dev,
                                     enum iommu_dev_features feat)
 {
@@ -3060,6 +3148,7 @@ static struct iommu_ops arm_smmu_ops = {
        .put_resv_regions       = generic_iommu_put_resv_regions,
        .attach_pasid_table     = arm_smmu_attach_pasid_table,
        .detach_pasid_table     = arm_smmu_detach_pasid_table,
+       .cache_invalidate       = arm_smmu_cache_invalidate,
        .dev_has_feat           = arm_smmu_dev_has_feature,
        .dev_feat_enabled       = arm_smmu_dev_feature_enabled,
        .dev_enable_feat        = arm_smmu_dev_enable_feature,
-- 
2.26.3

Reply via email to