Introduce helpers to invalidate a given asid/vmid, or to invalidate
address ranges associated with a given asid/vmid.

The S1 helpers will be used to invalidate stage-1 caches upon
userspace request, in nested mode.

Signed-off-by: Eric Auger <eric.au...@redhat.com>

---
 drivers/iommu/arm-smmu-v3.c | 98 ++++++++++++++++++++++++++++---------
 1 file changed, 74 insertions(+), 24 deletions(-)
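
A note on intended usage (illustration only, not part of this patch):
in nested mode, a stage-1 invalidation requested by userspace would be
tagged with the vmid of the stage-2 context. A minimal sketch, assuming
a hypothetical caller name and a domain that carries a valid s2_cfg:

    /*
     * Hypothetical call site: flush all stage-1 TLB entries tagged
     * with a guest asid inside the domain's stage-2 context. Only
     * __arm_smmu_tlb_inv_asid() comes from this patch; the wrapper
     * name is illustrative.
     */
    static void nested_inv_asid(struct arm_smmu_domain *smmu_domain, u16 asid)
    {
            __arm_smmu_tlb_inv_asid(smmu_domain,
                                    smmu_domain->s2_cfg->vmid, asid);
    }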

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index d770977bfc92..724b86ab9a80 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1601,20 +1601,15 @@ static void arm_smmu_tlb_sync(void *cookie)
        arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
 }
 
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void __arm_smmu_tlb_inv_asid(struct arm_smmu_domain *smmu_domain,
+                                   u16 vmid, u16 asid)
 {
-       struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
-       struct arm_smmu_cmdq_ent cmd;
+       struct arm_smmu_cmdq_ent cmd = {};
 
-       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-               cmd.opcode      = CMDQ_OP_TLBI_NH_ASID;
-               cmd.tlbi.asid   = smmu_domain->s1_cfg->cd.asid;
-               cmd.tlbi.vmid   = 0;
-       } else {
-               cmd.opcode      = CMDQ_OP_TLBI_S12_VMALL;
-               cmd.tlbi.vmid   = smmu_domain->s2_cfg->vmid;
-       }
+       cmd.opcode      = CMDQ_OP_TLBI_NH_ASID;
+       cmd.tlbi.vmid   = vmid;
+       cmd.tlbi.asid   = asid;
 
        /*
         * NOTE: when io-pgtable is in non-strict mode, we may get here with
@@ -1626,32 +1621,87 @@ static void arm_smmu_tlb_inv_context(void *cookie)
        arm_smmu_cmdq_issue_sync(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-                                         size_t granule, bool leaf, void *cookie)
+static void __arm_smmu_tlb_inv_vmid(struct arm_smmu_domain *smmu_domain,
+                                   u16 vmid)
+{
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cmdq_ent cmd = {};
+
+       cmd.opcode      = CMDQ_OP_TLBI_S12_VMALL;
+       cmd.tlbi.vmid   = vmid;
+
+       /* See DSB related comment in __arm_smmu_tlb_inv_asid */
+       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+       arm_smmu_cmdq_issue_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       struct arm_smmu_cmdq_ent cmd = {
-               .tlbi = {
-                       .leaf   = leaf,
-                       .addr   = iova,
-               },
-       };
 
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-               cmd.opcode      = CMDQ_OP_TLBI_NH_VA;
-               cmd.tlbi.asid   = smmu_domain->s1_cfg->cd.asid;
+               __arm_smmu_tlb_inv_asid(smmu_domain, 0,
+                                       smmu_domain->s1_cfg->cd.asid);
        } else {
-               cmd.opcode      = CMDQ_OP_TLBI_S2_IPA;
-               cmd.tlbi.vmid   = smmu_domain->s2_cfg->vmid;
+               __arm_smmu_tlb_inv_vmid(smmu_domain,
+                                       smmu_domain->s2_cfg->vmid);
        }
+}
 
+static void
+__arm_smmu_tlb_inv_s1_range_nosync(struct arm_smmu_domain *smmu_domain,
+                                  u16 vmid, u16 asid, unsigned long iova,
+                                  size_t size, size_t granule, bool leaf)
+{
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cmdq_ent cmd = {};
+
+       cmd.opcode      = CMDQ_OP_TLBI_NH_VA;
+       cmd.tlbi.vmid   = vmid;
+       cmd.tlbi.asid   = asid;
+       cmd.tlbi.addr   = iova;
+       cmd.tlbi.leaf   = leaf;
        do {
                arm_smmu_cmdq_issue_cmd(smmu, &cmd);
                cmd.tlbi.addr += granule;
        } while (size -= granule);
 }
 
+static void
+__arm_smmu_tlb_inv_s2_range_nosync(struct arm_smmu_domain *smmu_domain,
+                                  u16 vmid, unsigned long iova, size_t size,
+                                  size_t granule, bool leaf)
+{
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cmdq_ent cmd = {};
+
+       cmd.opcode      = CMDQ_OP_TLBI_S2_IPA;
+       cmd.tlbi.vmid   = vmid;
+       cmd.tlbi.addr   = iova;
+       cmd.tlbi.leaf   = leaf;
+       do {
+               arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+               cmd.tlbi.addr += granule;
+       } while (size -= granule);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+                                         size_t granule, bool leaf,
+                                         void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+
+       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+               __arm_smmu_tlb_inv_s1_range_nosync(smmu_domain, 0,
+                                                  smmu_domain->s1_cfg->cd.asid,
+                                                  iova, size, granule, leaf);
+       } else {
+               __arm_smmu_tlb_inv_s2_range_nosync(smmu_domain,
+                                                  smmu_domain->s2_cfg->vmid,
+                                                  iova, size, granule, leaf);
+       }
+}
+
 static const struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
-- 
2.20.1
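
As a usage illustration (the call site and size/granule values are
assumptions, not part of the patch), invalidating a 2MB IPA range
mapped with 4K pages at stage 2 walks the range one granule at a time
and must be followed by a sync:

    /* hypothetical call site; SZ_2M/SZ_4K chosen for illustration */
    __arm_smmu_tlb_inv_s2_range_nosync(smmu_domain,
                                       smmu_domain->s2_cfg->vmid,
                                       iova, SZ_2M, SZ_4K, true);
    /*
     * 512 CMDQ_OP_TLBI_S2_IPA commands are queued; the invalidation
     * only takes effect once the caller issues a CMD_SYNC, e.g. via
     * arm_smmu_tlb_sync().
     */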
