Add support for per-instance pagetables for a6xx targets. Add support
to handle split pagetables and create a new instance if the needed
IOMMU support exists and insert the necessary PM4 commands to trigger
a pagetable switch at the beginning of a user command.

Signed-off-by: Jordan Crouse <jcro...@codeaurora.org>
---

 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 124 ++++++++++++++++++++++++++++++++--
 drivers/gpu/drm/msm/adreno/a6xx_gpu.h |   1 +
 2 files changed, 121 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index f2e0800..57d1909 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,63 @@
 
 #include <linux/devfreq.h>
 
+static void a6xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer 
*ring,
+       struct msm_file_private *ctx)
+{
+       u64 ttbr;
+       u32 asid;
+
+       if (!msm_iommu_get_ptinfo(ctx->aspace->mmu, &ttbr, &asid))
+               return;
+
+       ttbr = ttbr | ((u64) asid) << 48;
+
+       /* Turn off protected mode */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 0);
+
+       /* Turn on APRIV mode to access critical regions */
+       OUT_PKT4(ring, REG_A6XX_CP_MISC_CNTL, 1);
+       OUT_RING(ring, 1);
+
+       /* Make sure the ME is synchronized before starting the update */
+       OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+       /* Execute the table update */
+       OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+       OUT_RING(ring, lower_32_bits(ttbr));
+       OUT_RING(ring, upper_32_bits(ttbr));
+       /* CONTEXTIDR is currently unused */
+       OUT_RING(ring, 0);
+       /* CONTEXTBANK is currently unused */
+       OUT_RING(ring, 0);
+
+       /*
+        * Write the new TTBR0 to the preemption records - this will be used to
+        * reload the pagetable if the current ring gets preempted out.
+        */
+       OUT_PKT7(ring, CP_MEM_WRITE, 4);
+       OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+       OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
+       OUT_RING(ring, lower_32_bits(ttbr));
+       OUT_RING(ring, upper_32_bits(ttbr));
+
+       /* Invalidate the draw state so we start off fresh */
+       OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+       OUT_RING(ring, 0x40000);
+       OUT_RING(ring, 1);
+       OUT_RING(ring, 0);
+
+       /* Turn off APRIV */
+       OUT_PKT4(ring, REG_A6XX_CP_MISC_CNTL, 1);
+       OUT_RING(ring, 0);
+
+       /* Turn protected mode back on */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 1);
+}
+
+
 static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -87,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct 
msm_gem_submit *submit,
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;
 
+       a6xx_set_pagetable(gpu, ring, ctx);
+
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_start));
 
@@ -783,21 +842,77 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+static struct msm_gem_address_space *a6xx_new_address_space(struct msm_gpu 
*gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       struct msm_gem_address_space *aspace;
+       int ret;
+
+       /* Return the default pagetable if per instance tables don't work */
+       if (!a6xx_gpu->per_instance_tables)
+               return gpu->aspace;
+
+       aspace = msm_gem_address_space_create_instance(&gpu->pdev->dev, "gpu",
+               0x100000000ULL, 0x1ffffffffULL);
+       if (IS_ERR(aspace))
+               return aspace;
+
+       ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+       if (ret) {
+               /* -ENODEV means that aux domains aren't supported */
+               if (ret == -ENODEV)
+                       return gpu->aspace;
+
+               return ERR_PTR(ret);
+       }
+
+       return aspace;
+}
+
 static struct msm_gem_address_space *
 a6xx_create_address_space(struct msm_gpu *gpu)
 {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       struct device *dev = &gpu->pdev->dev;
        struct msm_gem_address_space *aspace;
        struct iommu_domain *iommu;
-       int ret;
+       int ret, val = 1;
+
+       a6xx_gpu->per_instance_tables = false;
 
        iommu = iommu_domain_alloc(&platform_bus_type);
        if (!iommu)
                return NULL;
 
-       iommu->geometry.aperture_start = 0x100000000ULL;
-       iommu->geometry.aperture_end = 0x1ffffffffULL;
+       /* Try to enable split pagetables */
+       if (iommu_domain_set_attr(iommu, DOMAIN_ATTR_SPLIT_TABLES, &val)) {
+               /*
+                * If split pagetables aren't available we won't be able to do
+                * per-instance pagetables so set up the global va space at our
+                * usual location
+                */
+               iommu->geometry.aperture_start = 0x100000000ULL;
+               iommu->geometry.aperture_end = 0x1ffffffffULL;
+       } else {
+               /*
+                * If split pagetables are available then we might be able to do
+                * per-instance pagetables. Put the default va-space in TTBR1 to
+                * prepare
+                */
+               iommu->geometry.aperture_start = 0xfffffff100000000ULL;
+               iommu->geometry.aperture_end = 0xfffffff1ffffffffULL;
+
+               /*
+                * If both split pagetables and aux domains are supported we can
+                * do per_instance pagetables
+                */
+               a6xx_gpu->per_instance_tables =
+                       iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX);
+       }
 
-       aspace = msm_gem_address_space_create(&gpu->pdev->dev, iommu, "gpu");
+       aspace = msm_gem_address_space_create(dev, iommu, "gpu");
        if (IS_ERR(aspace)) {
                iommu_domain_free(iommu);
                DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
@@ -836,6 +951,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
                .create_address_space = a6xx_create_address_space,
+               .new_address_space = a6xx_new_address_space,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 528a4cf..4e9ce1c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -21,6 +21,7 @@ struct a6xx_gpu {
        struct msm_ringbuffer *cur_ring;
 
        struct a6xx_gmu gmu;
+       bool per_instance_tables;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
-- 
2.7.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to