Aggregate all sanity-checks for sharing CPU page tables with the SMMU
under a single ARM_SMMU_FEAT_SVA bit. For PCIe SVA, users also need to
check FEAT_ATS and FEAT_PRI. For platform SVA, they will most likely have
to check FEAT_STALLS.

Cc: Suzuki K Poulose <suzuki.poul...@arm.com>
Signed-off-by: Jean-Philippe Brucker <jean-phili...@linaro.org>
---
v4->v5: bump feature bit
---
 drivers/iommu/arm-smmu-v3.c | 72 +++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e7de8a7459fa4..d209d85402a83 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -657,6 +657,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_RANGE_INV                (1 << 15)
 #define ARM_SMMU_FEAT_E2H              (1 << 16)
 #define ARM_SMMU_FEAT_BTM              (1 << 17)
+#define ARM_SMMU_FEAT_SVA              (1 << 18)
        u32                             features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
@@ -3930,6 +3931,74 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
        return 0;
 }
 
+static bool arm_smmu_supports_sva(struct arm_smmu_device *smmu)
+{
+	unsigned long reg, fld;
+	unsigned long oas;
+	unsigned long asid_bits;
+
+	u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
+
+	if ((smmu->features & feat_mask) != feat_mask)
+		return false;
+
+	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
+		return false;
+
+	/*
+	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
+	 * not even pretending to support AArch32 here.
+	 */
+	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+	switch (fld) {
+	case 0x0:
+		oas = 32;
+		break;
+	case 0x1:
+		oas = 36;
+		break;
+	case 0x2:
+		oas = 40;
+		break;
+	case 0x3:
+		oas = 42;
+		break;
+	case 0x4:
+		oas = 44;
+		break;
+	case 0x5:
+		oas = 48;
+		break;
+	case 0x6:
+		oas = 52;
+		break;
+	default:
+		return false;
+	}
+
+	/* abort if MMU outputs addresses greater than what we support. */
+	if (smmu->oas < oas)
+		return false;
+
+	/* We can support bigger ASIDs than the CPU, but not smaller */
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+	asid_bits = fld ? 16 : 8;
+	if (smmu->asid_bits < asid_bits)
+		return false;
+
+	/*
+	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
+	 * generally the maximum number of bindable processes.
+	 */
+	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		asid_bits--;
+	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
+		num_possible_cpus() - 2);
+
+	return true;
+}
+
 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 {
        u32 reg;
@@ -4142,6 +4211,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 
        smmu->ias = max(smmu->ias, smmu->oas);
 
+       if (arm_smmu_supports_sva(smmu))
+               smmu->features |= ARM_SMMU_FEAT_SVA;
+
        dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
                 smmu->ias, smmu->oas, smmu->features);
        return 0;
-- 
2.26.0

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to