With nested stage support, we will soon need to invalidate S1 contexts
and ranges tagged with an unmanaged ASID, i.e. an ASID allocated and
managed by the guest. So let's introduce two helpers that allow
invalidation with an externally managed ASID.
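
For illustration only, a minimal sketch (not part of this patch) of how
a nested-stage invalidation path could call the two helpers, passing the
guest-owned ASID as ext_asid. The wrapper function, its name and its
parameters are assumptions; only the two helper signatures come from
this patch:

static void example_guest_s1_inval(struct arm_smmu_domain *smmu_domain,
				   int guest_asid, unsigned long iova,
				   size_t size, size_t granule, bool leaf)
{
	if (!size) {
		/* whole-context invalidation, tagged with the guest ASID */
		__arm_smmu_tlb_inv_context(smmu_domain, guest_asid);
	} else {
		/* range invalidation, tagged with the guest ASID */
		arm_smmu_tlb_inv_range_domain(iova, size, granule, leaf,
					      guest_asid, smmu_domain);
	}
}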

Signed-off-by: Eric Auger <eric.au...@redhat.com>

---

v13 -> v14
- Actually send the NH_ASID command (reported by Xingang Wang)
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 38 ++++++++++++++++-----
 1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5579ec4fccc8..4c19a1114de4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1843,9 +1843,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
 }
 
 /* IO_PGTABLE API */
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void __arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain,
+                                      int ext_asid)
 {
-       struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cmdq_ent cmd;
 
@@ -1856,7 +1856,13 @@ static void arm_smmu_tlb_inv_context(void *cookie)
         * insertion to guarantee those are observed before the TLBI. Do be
         * careful, 007.
         */
-       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+       if (ext_asid >= 0) { /* guest stage 1 invalidation */
+               cmd.opcode      = CMDQ_OP_TLBI_NH_ASID;
+               cmd.tlbi.asid   = ext_asid;
+               cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
+               arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+               arm_smmu_cmdq_issue_sync(smmu);
+       } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
        } else {
                cmd.opcode      = CMDQ_OP_TLBI_S12_VMALL;
@@ -1867,6 +1873,13 @@ static void arm_smmu_tlb_inv_context(void *cookie)
        arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
 
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+
+       __arm_smmu_tlb_inv_context(smmu_domain, -1);
+}
+
 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
                                     unsigned long iova, size_t size,
                                     size_t granule,
@@ -1926,9 +1939,10 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
        arm_smmu_cmdq_batch_submit(smmu, &cmds);
 }
 
-static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
-                                         size_t granule, bool leaf,
-                                         struct arm_smmu_domain *smmu_domain)
+static void
+arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+                             size_t granule, bool leaf, int ext_asid,
+                             struct arm_smmu_domain *smmu_domain)
 {
        struct arm_smmu_cmdq_ent cmd = {
                .tlbi = {
@@ -1936,7 +1950,12 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
                },
        };
 
-       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+       if (ext_asid >= 0) {  /* guest stage 1 invalidation */
+               cmd.opcode      = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+                                 CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
+               cmd.tlbi.asid   = ext_asid;
+               cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
+       } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                cmd.opcode      = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
                                  CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
                cmd.tlbi.asid   = smmu_domain->s1_cfg.cd.asid;
@@ -1944,6 +1963,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
                cmd.opcode      = CMDQ_OP_TLBI_S2_IPA;
                cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
        }
+
        __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
 
        /*
@@ -1982,7 +2002,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
                                  size_t granule, void *cookie)
 {
-       arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
+       arm_smmu_tlb_inv_range_domain(iova, size, granule, false, -1, cookie);
 }
 
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2523,7 +2543,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 
        arm_smmu_tlb_inv_range_domain(gather->start,
                                      gather->end - gather->start + 1,
-                                     gather->pgsize, true, smmu_domain);
+                                     gather->pgsize, true, -1, smmu_domain);
 }
 
 static phys_addr_t
-- 
2.26.2
