Move the refcount manipulation of the MMU context to the point where the
hardware state is programmed. At that point it is also known whether a
previous MMU state is still present or whether the state needs to be
reprogrammed with a potentially different context.

Cc: sta...@vger.kernel.org # 5.4
Signed-off-by: Lucas Stach <l.st...@pengutronix.de>
Tested-by: Michael Walle <mich...@walle.cc>
---
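For clarity, a minimal sketch of the ownership rule this change establishes
(example_restore() below is a hypothetical stand-in for the per-MMU-version
restore implementations, not code from the patch): the context reference now
follows the hardware state, so each restore path drops the reference on the
previously programmed context and takes one on the context it is about to
program.

	/* Hypothetical stand-in for etnaviv_iommuv1_restore() and the
	 * etnaviv_iommuv2_restore_*() paths; mirrors the pattern added
	 * by the hunks below.
	 */
	static void example_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
	{
		/* drop the reference on the previously programmed context */
		if (gpu->mmu_context)
			etnaviv_iommu_context_put(gpu->mmu_context);
		/* take a reference on the context being programmed */
		gpu->mmu_context = etnaviv_iommu_context_get(context);

		/* ... program the MMU hardware state for @context ... */
	}
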
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c      | 24 +++++++++++-----------
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c    |  4 ++++
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c |  8 ++++++++
 3 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index f420c4f14657..1fa98ce870f7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -641,17 +641,19 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
        gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+                                         struct etnaviv_iommu_context *context)
 {
-       u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-                               &gpu->mmu_context->cmdbuf_mapping);
        u16 prefetch;
+       u32 address;
 
        /* setup the MMU */
-       etnaviv_iommu_restore(gpu, gpu->mmu_context);
+       etnaviv_iommu_restore(gpu, context);
 
        /* Start command processor */
        prefetch = etnaviv_buffer_init(gpu);
+       address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+                                       &gpu->mmu_context->cmdbuf_mapping);
 
        etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -1369,14 +1371,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                goto out_unlock;
        }
 
-       if (!gpu->fe_running) {
-               gpu->mmu_context = etnaviv_iommu_context_get(submit->mmu_context);
-               etnaviv_gpu_start_fe_idleloop(gpu);
-       } else {
-               if (submit->prev_mmu_context)
-                       etnaviv_iommu_context_put(submit->prev_mmu_context);
-               submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
-       }
+       if (!gpu->fe_running)
+               etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+       if (submit->prev_mmu_context)
+               etnaviv_iommu_context_put(submit->prev_mmu_context);
+       submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
        if (submit->nr_pmrs) {
                gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 1a7c89a67bea..afe5dd6a9925 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f8bf488e9d71..d664ae29ae20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)v2_context->mtlb_dma,
                                (u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
-- 
2.30.2
