With Shared Virtual Addressing (SVA), we need to mirror the CPU's TTBR,
TCR, MAIR and ASIDs in SMMU contexts. Each SMMU has a single ASID space,
split into two sets: shared ASIDs, which correspond to those obtained
from the arch ASID allocator, and private ASIDs, which are used for
"classic" map/unmap DMA.

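For reference, the __maybe_unused annotations indicate that the callers
of these helpers arrive in a later patch of the series. The intended flow
looks roughly like the sketch below; it is only an illustration, and the
bind/unbind call sites shown are hypothetical:

	/* bind: pin the mm's ASID and build a CD mirroring the CPU context */
	struct arm_smmu_ctx_desc *cd = arm_smmu_alloc_shared_cd(mm);

	if (IS_ERR(cd))
		return PTR_ERR(cd);
	/* ... install it in the CD table with arm_smmu_write_ctx_desc() ... */

	/* unbind: drop the reference; the last user unpins the ASID and frees */
	arm_smmu_free_shared_cd(cd);
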
Cc: Suzuki K Poulose <suzuki.poul...@arm.com>
Signed-off-by: Jean-Philippe Brucker <jean-phili...@linaro.org>
---
 drivers/iommu/arm-smmu-v3.c | 161 +++++++++++++++++++++++++++++++++++-
 1 file changed, 157 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 96ee60002e85e..09f4f712fb103 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -22,6 +22,7 @@
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
+#include <linux/mmu_context.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -33,6 +34,8 @@
 
 #include <linux/amba/bus.h>
 
+#include "io-pgtable-arm.h"
+
 /* MMIO registers */
 #define ARM_SMMU_IDR0                  0x0
 #define IDR0_ST_LVL                    GENMASK(28, 27)
@@ -587,6 +590,9 @@ struct arm_smmu_ctx_desc {
        u64                             ttbr;
        u64                             tcr;
        u64                             mair;
+
+       refcount_t                      refs;
+       struct mm_struct                *mm;
 };
 
 struct arm_smmu_l1_ctx_desc {
@@ -1660,7 +1666,8 @@ static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
 #ifdef __BIG_ENDIAN
                        CTXDESC_CD_0_ENDI |
 #endif
-                       CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+                       CTXDESC_CD_0_R | CTXDESC_CD_0_A |
+                       (cd->mm ? 0 : CTXDESC_CD_0_ASET) |
                        CTXDESC_CD_0_AA64 |
                        FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
                        CTXDESC_CD_0_V;
@@ -1764,12 +1771,156 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
        cdcfg->cdtab = NULL;
 }
 
-static void arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
+static void arm_smmu_init_cd(struct arm_smmu_ctx_desc *cd)
 {
+       refcount_set(&cd->refs, 1);
+}
+
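+/*
+ * Drop a reference to @cd, ignoring CDs that never got an ASID. On the last
+ * put, remove the CD from the ASID space and return true so the caller can
+ * free it.
+ */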
+static bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
+{
+       bool free;
+       struct arm_smmu_ctx_desc *old_cd;
+
        if (!cd->asid)
-               return;
+               return false;
+
+       xa_lock(&asid_xa);
+       free = refcount_dec_and_test(&cd->refs);
+       if (free) {
+               old_cd = __xa_erase(&asid_xa, cd->asid);
+               WARN_ON(old_cd != cd);
+       }
+       xa_unlock(&asid_xa);
+       return free;
+}
+
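+/*
+ * Look for an existing user of @asid. Return the shared CD with an elevated
+ * refcount, NULL if the ASID is free, or -EEXIST if a private CD already
+ * owns it. Called with asid_xa's lock held.
+ */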
+static struct arm_smmu_ctx_desc *arm_smmu_share_asid(u16 asid)
+{
+       struct arm_smmu_ctx_desc *cd;
+
+       cd = xa_load(&asid_xa, asid);
+       if (!cd)
+               return NULL;
+
+       if (cd->mm) {
+       /*
+        * It's pretty common to find a stale CD when doing unbind-bind,
+        * given that the release happens after an RCU grace period.
+        * arm_smmu_free_asid() hasn't gone through yet, so reuse it.
+        */
+               refcount_inc(&cd->refs);
+               return cd;
+       }
+
+       /*
+        * Ouch, the ASID is already in use by a private CD.
+        * TODO: seize it.
+        */
+       return ERR_PTR(-EEXIST);
+}
+
+__maybe_unused
+static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
+{
+       u16 asid;
+       int ret = 0;
+       u64 tcr, par, reg;
+       struct arm_smmu_ctx_desc *cd;
+       struct arm_smmu_ctx_desc *old_cd = NULL;
+
+       asid = mm_context_get(mm);
+       if (!asid)
+               return ERR_PTR(-ESRCH);
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       if (!cd) {
+               ret = -ENOMEM;
+               goto err_put_context;
+       }
+
+       arm_smmu_init_cd(cd);
+
+       xa_lock(&asid_xa);
+       old_cd = arm_smmu_share_asid(asid);
+       if (!old_cd) {
+               old_cd = __xa_store(&asid_xa, asid, cd, GFP_ATOMIC);
+               /*
+                * Keep the error if there is one, clear valid pointers. An
+                * existing entry would already have been returned by
+                * arm_smmu_share_asid() above, so __xa_store() can only
+                * return NULL or an xa error here.
+                */
+               old_cd = ERR_PTR(xa_err(old_cd));
+               cd->asid = asid;
+       }
+       xa_unlock(&asid_xa);
+
+       if (IS_ERR(old_cd)) {
+               ret = PTR_ERR(old_cd);
+               goto err_free_cd;
+       } else if (old_cd) {
+               if (WARN_ON(old_cd->mm != mm)) {
+                       ret = -EINVAL;
+                       goto err_free_cd;
+               }
+               kfree(cd);
+               mm_context_put(mm);
+               return old_cd;
+       }
+
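+       /*
+        * Build a CD that mirrors the CPU's TTBR0 configuration: write-back
+        * write-allocate cacheable, inner shareable walks, matching page
+        * size, with TTBR1 walks disabled (EPD1).
+        */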
+       tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - VA_BITS) |
+             FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
+             FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
+             FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
+             CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
+
+       switch (PAGE_SIZE) {
+       case SZ_4K:
+               tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
+               break;
+       case SZ_16K:
+               tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
+               break;
+       case SZ_64K:
+               tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+               goto err_free_asid;
+       }
+
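+       /* Constrain the CD's IPA size to the PA range supported by the CPU */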
+       reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+       par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+       tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
+
+       cd->ttbr = virt_to_phys(mm->pgd);
+       cd->tcr = tcr;
+       /*
+        * The MAIR value is global and effectively constant, so we can just
+        * read it from the current CPU's register.
+        */
+       cd->mair = read_sysreg(mair_el1);
 
-       xa_erase(&asid_xa, cd->asid);
+       cd->mm = mm;
+
+       return cd;
+
+err_free_asid:
+       arm_smmu_free_asid(cd);
+err_free_cd:
+       kfree(cd);
+err_put_context:
+       mm_context_put(mm);
+       return ERR_PTR(ret);
+}
+
+__maybe_unused
+static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
+{
+       if (arm_smmu_free_asid(cd)) {
+               /* Unpin ASID */
+               mm_context_put(cd->mm);
+               kfree(cd);
+       }
 }
 
 /* Stream table manipulation functions */
@@ -2479,6 +2630,8 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
        typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
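+       /* Initialize the refcount, so private CDs use the same free path */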
+       arm_smmu_init_cd(&cfg->cd);
+
        ret = xa_alloc(&asid_xa, &asid, &cfg->cd,
                       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
        if (ret)
-- 
2.26.0
