Hi,
On Wed, Apr 17, 2019 at 03:50:19PM +0200, Lucas Stach wrote:
> This reworks the MMU handling to make it possible to have multiple
> MMU contexts, not one per GPU. This commit doesn't actually do anything
> with those contexts, aside from giving one of them to each GPU.
> 
> The code changes are pretty invasive, but there is no sane way to split
> this into smaller changes.

This one trips up checkpatch a bit: mixed tabs/spaces and some other minor
things.
Cheers,
 -- Guido

> 
> Signed-off-by: Lucas Stach <l.st...@pengutronix.de>
> ---
>  drivers/gpu/drm/etnaviv/etnaviv_buffer.c   |   8 +-
>  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c   |   8 +-
>  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h   |   6 +-
>  drivers/gpu/drm/etnaviv/etnaviv_drv.c      |   1 +
>  drivers/gpu/drm/etnaviv/etnaviv_drv.h      |   4 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.c      |  14 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gem.h      |   2 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.c      |  24 +-
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.h      |   3 +-
>  drivers/gpu/drm/etnaviv/etnaviv_iommu.c    | 150 ++++++------
>  drivers/gpu/drm/etnaviv/etnaviv_iommu.h    |  20 --
>  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 264 +++++++++------------
>  drivers/gpu/drm/etnaviv/etnaviv_mmu.c      | 231 ++++++++++--------
>  drivers/gpu/drm/etnaviv/etnaviv_mmu.h      |  88 +++++--
>  14 files changed, 416 insertions(+), 407 deletions(-)
>  delete mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.h
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
> index d52c01c195bd..e1347630fa11 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
> @@ -205,7 +205,7 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, 
> u32 mtlb_addr, u32 safe
>       return buffer->user_size / 8;
>  }
>  
> -u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
> +u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
>  {
>       struct etnaviv_cmdbuf *buffer = &gpu->buffer;
>  
> @@ -214,7 +214,7 @@ u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
>       buffer->user_size = 0;
>  
>       CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
> -                    VIVS_MMUv2_PTA_CONFIG_INDEX(0));
> +                    VIVS_MMUv2_PTA_CONFIG_INDEX(id));
>  
>       CMD_END(buffer);
>  
> @@ -336,7 +336,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 
> exec_state,
>  
>               /* flush command */
>               if (need_flush) {
> -                     if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
> +                     if (gpu->mmu->global->version == ETNAVIV_IOMMU_V1)
>                               extra_dwords += 1;
>                       else
>                               extra_dwords += 3;
> @@ -350,7 +350,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 
> exec_state,
>  
>               if (need_flush) {
>                       /* Add the MMU flush */
> -                     if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
> +                     if (gpu->mmu->global->version == ETNAVIV_IOMMU_V1) {
>                               CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
>                                              VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
>                                              VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
> index a01ae32dcd88..4d7a2341e11c 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
> @@ -57,18 +57,18 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev)
>  }
>  
>  int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
> -                             struct etnaviv_iommu *mmu,
> +                             struct etnaviv_iommu_context *context,
>                               struct etnaviv_vram_mapping *mapping,
>                               u32 memory_base)
>  {
> -     return etnaviv_iommu_get_suballoc_va(mmu, mapping, memory_base,
> +     return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
>                                            suballoc->paddr, SUBALLOC_SIZE);
>  }
>  
> -void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu,
> +void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
>                                  struct etnaviv_vram_mapping *mapping)
>  {
> -     etnaviv_iommu_put_suballoc_va(mmu, mapping);
> +     etnaviv_iommu_put_suballoc_va(context, mapping);
>  }
>  
>  void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc 
> *suballoc)
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
> index 7fdc2e3fea5f..b59dffb8d940 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
> @@ -9,7 +9,7 @@
>  #include <linux/types.h>
>  
>  struct device;
> -struct etnaviv_iommu;
> +struct etnaviv_iommu_context;
>  struct etnaviv_vram_mapping;
>  struct etnaviv_cmdbuf_suballoc;
>  
> @@ -27,10 +27,10 @@ struct etnaviv_cmdbuf_suballoc *
>  etnaviv_cmdbuf_suballoc_new(struct device *dev);
>  void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc 
> *suballoc);
>  int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
> -                             struct etnaviv_iommu *mmu,
> +                             struct etnaviv_iommu_context *context,
>                               struct etnaviv_vram_mapping *mapping,
>                               u32 memory_base);
> -void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu,
> +void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
>                                  struct etnaviv_vram_mapping *mapping);
>  
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> index 138025bc5376..f8043cb2f0bb 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> @@ -562,6 +562,7 @@ static void etnaviv_unbind(struct device *dev)
>  
>       component_unbind_all(dev, drm);
>  
> +     etnaviv_iommu_global_fini(priv->mmu_global);
>       etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
>  
>       dev->dma_parms = NULL;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> index 0291771e72fa..449a236bb00f 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> @@ -31,6 +31,7 @@ struct etnaviv_gpu;
>  struct etnaviv_mmu;
>  struct etnaviv_gem_object;
>  struct etnaviv_gem_submit;
> +struct etnaviv_iommu_global;
>  
>  struct etnaviv_file_private {
>       /*
> @@ -46,6 +47,7 @@ struct etnaviv_drm_private {
>       struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
>  
>       struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
> +     struct etnaviv_iommu_global *mmu_global;
>  
>       /* list of GEM objects: */
>       struct mutex gem_lock;
> @@ -79,7 +81,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct 
> drm_file *file,
>       uintptr_t ptr, u32 size, u32 flags, u32 *handle);
>  u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
>  u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 
> safe_addr);
> -u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
> +u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
>  void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
>  void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
>  void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index 5c48915f492d..76c26d8f8d63 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -222,12 +222,12 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, 
> u64 *offset)
>  
>  static struct etnaviv_vram_mapping *
>  etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
> -                          struct etnaviv_iommu *mmu)
> +                          struct etnaviv_iommu_context *context)
>  {
>       struct etnaviv_vram_mapping *mapping;
>  
>       list_for_each_entry(mapping, &obj->vram_list, obj_node) {
> -             if (mapping->mmu == mmu)
> +             if (mapping->context == context)
>                       return mapping;
>       }
>  
> @@ -277,7 +277,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
>                */
>               if (mapping->use == 0) {
>                       mutex_lock(&gpu->mmu->lock);
> -                     if (mapping->mmu == gpu->mmu)
> +                     if (mapping->context == gpu->mmu)
>                               mapping->use += 1;
>                       else
>                               mapping = NULL;
> @@ -314,7 +314,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
>               list_del(&mapping->obj_node);
>       }
>  
> -     mapping->mmu = gpu->mmu;
> +     mapping->context = gpu->mmu;
>       mapping->use = 1;
>  
>       ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
> @@ -536,12 +536,12 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
>  
>       list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
>                                obj_node) {
> -             struct etnaviv_iommu *mmu = mapping->mmu;
> +             struct etnaviv_iommu_context *context = mapping->context;
>  
>               WARN_ON(mapping->use);
>  
> -             if (mmu)
> -                     etnaviv_iommu_unmap_gem(mmu, mapping);
> +             if (context)
> +                     etnaviv_iommu_unmap_gem(context, mapping);
>  
>               list_del(&mapping->obj_node);
>               kfree(mapping);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index f0abb744ef95..cf94f0b24584 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -25,7 +25,7 @@ struct etnaviv_vram_mapping {
>       struct list_head scan_node;
>       struct list_head mmu_node;
>       struct etnaviv_gem_object *object;
> -     struct etnaviv_iommu *mmu;
> +     struct etnaviv_iommu_context *context;
>       struct drm_mm_node vram_node;
>       unsigned int use;
>       u32 iova;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> index a5eed14cec8d..dcf9f177c103 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> @@ -677,7 +677,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
>       etnaviv_gpu_setup_pulse_eater(gpu);
>  
>       /* setup the MMU */
> -     etnaviv_iommu_restore(gpu);
> +     etnaviv_iommu_restore(gpu, gpu->mmu);
>  
>       /* Start command processor */
>       prefetch = etnaviv_buffer_init(gpu);
> @@ -689,6 +689,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
>  
>  int etnaviv_gpu_init(struct etnaviv_drm_private *priv, struct etnaviv_gpu 
> *gpu)
>  {
> +     enum etnaviv_iommu_version mmu_version = ETNAVIV_IOMMU_V1;
>       int ret, i;
>  
>       ret = pm_runtime_get_sync(gpu->dev);
> @@ -749,7 +750,20 @@ int etnaviv_gpu_init(struct etnaviv_drm_private *priv, 
> struct etnaviv_gpu *gpu)
>               goto fail;
>       }
>  
> -     gpu->mmu = etnaviv_iommu_new(gpu);
> +     if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
> +             mmu_version = ETNAVIV_IOMMU_V2;
> +
> +     if (!priv->mmu_global)
> +             priv->mmu_global = etnaviv_iommu_global_init(gpu->drm->dev,
> +                                                          mmu_version);
> +
> +     if (!priv->mmu_global || priv->mmu_global->version != mmu_version) {
> +             ret = -ENXIO;
> +             dev_err(gpu->dev, "failed to init IOMMU global state\n");
> +             goto fail;
> +     }
> +
> +     gpu->mmu = etnaviv_iommu_context_init(priv->mmu_global);
>       if (IS_ERR(gpu->mmu)) {
>               dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
>               ret = PTR_ERR(gpu->mmu);
> @@ -772,7 +786,7 @@ int etnaviv_gpu_init(struct etnaviv_drm_private *priv, 
> struct etnaviv_gpu *gpu)
>               goto unmap_suballoc;
>       }
>  
> -     if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
> +     if (mmu_version == ETNAVIV_IOMMU_V1 &&
>           etnaviv_cmdbuf_get_va(&gpu->buffer, &gpu->cmdbuf_mapping) > 
> 0x80000000) {
>               ret = -EINVAL;
>               dev_err(gpu->dev,
> @@ -804,7 +818,7 @@ int etnaviv_gpu_init(struct etnaviv_drm_private *priv, 
> struct etnaviv_gpu *gpu)
>  unmap_suballoc:
>       etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping);
>  destroy_iommu:
> -     etnaviv_iommu_destroy(gpu->mmu);
> +     etnaviv_iommu_context_put(gpu->mmu);
>       gpu->mmu = NULL;
>  fail:
>       pm_runtime_mark_last_busy(gpu->dev);
> @@ -1683,7 +1697,7 @@ static void etnaviv_gpu_unbind(struct device *dev, 
> struct device *master,
>               etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping);
>  
>       if (gpu->mmu) {
> -             etnaviv_iommu_destroy(gpu->mmu);
> +             etnaviv_iommu_context_put(gpu->mmu);
>               gpu->mmu = NULL;
>       }
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> index 9ab0b4548e55..50f03ee55500 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> @@ -8,6 +8,7 @@
>  
>  #include "etnaviv_cmdbuf.h"
>  #include "etnaviv_gem.h"
> +#include "etnaviv_mmu.h"
>  #include "etnaviv_drv.h"
>  
>  struct etnaviv_gem_submit;
> @@ -135,7 +136,7 @@ struct etnaviv_gpu {
>       void __iomem *mmio;
>       int irq;
>  
> -     struct etnaviv_iommu *mmu;
> +     struct etnaviv_iommu_context *mmu;
>       unsigned int flush_seq;
>  
>       /* Power Control: */
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
> index b163bdbcb880..c93aa0b1ad9e 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
> @@ -11,7 +11,6 @@
>  
>  #include "etnaviv_gpu.h"
>  #include "etnaviv_mmu.h"
> -#include "etnaviv_iommu.h"
>  #include "state_hi.xml.h"
>  
>  #define PT_SIZE              SZ_2M
> @@ -19,113 +18,78 @@
>  
>  #define GPU_MEM_START        0x80000000
>  
> -struct etnaviv_iommuv1_domain {
> -     struct etnaviv_iommu_domain base;
> +struct etnaviv_iommuv1_context {
> +     struct etnaviv_iommu_context base;
>       u32 *pgtable_cpu;
>       dma_addr_t pgtable_dma;
>  };
>  
> -static struct etnaviv_iommuv1_domain *
> -to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
> +static struct etnaviv_iommuv1_context *
> +to_v1_context(struct etnaviv_iommu_context *context)
>  {
> -     return container_of(domain, struct etnaviv_iommuv1_domain, base);
> +     return container_of(context, struct etnaviv_iommuv1_context, base);
>  }
>  
> -static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain 
> *etnaviv_domain)
> +static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
>  {
> -     u32 *p;
> -     int i;
> -
> -     etnaviv_domain->base.bad_page_cpu =
> -                     dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
> -                                  &etnaviv_domain->base.bad_page_dma,
> -                                  GFP_KERNEL);
> -     if (!etnaviv_domain->base.bad_page_cpu)
> -             return -ENOMEM;
> -
> -     p = etnaviv_domain->base.bad_page_cpu;
> -     for (i = 0; i < SZ_4K / 4; i++)
> -             *p++ = 0xdead55aa;
> -
> -     etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
> -                                                PT_SIZE,
> -                                                &etnaviv_domain->pgtable_dma,
> -                                                GFP_KERNEL);
> -     if (!etnaviv_domain->pgtable_cpu) {
> -             dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                         etnaviv_domain->base.bad_page_cpu,
> -                         etnaviv_domain->base.bad_page_dma);
> -             return -ENOMEM;
> -     }
> -
> -     memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
> -              PT_ENTRIES);
> -
> -     return 0;
> -}
> +     struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>  
> -static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
> -{
> -     struct etnaviv_iommuv1_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     drm_mm_takedown(&context->mm);
>  
> -     dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
> -                 etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
> +     dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
> +                 v1_context->pgtable_dma);
>  
> -     dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                 etnaviv_domain->base.bad_page_cpu,
> -                 etnaviv_domain->base.bad_page_dma);
> +     context->global->v1.shared_context = NULL;
>  
> -     kfree(etnaviv_domain);
> +     kfree(v1_context);
>  }
>  
> -static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
> +static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
>                              unsigned long iova, phys_addr_t paddr,
>                              size_t size, int prot)
>  {
> -     struct etnaviv_iommuv1_domain *etnaviv_domain = 
> to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>       unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
>  
>       if (size != SZ_4K)
>               return -EINVAL;
>  
> -     etnaviv_domain->pgtable_cpu[index] = paddr;
> +     v1_context->pgtable_cpu[index] = paddr;
>  
>       return 0;
>  }
>  
> -static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
> +static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
>       unsigned long iova, size_t size)
>  {
> -     struct etnaviv_iommuv1_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>       unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
>  
>       if (size != SZ_4K)
>               return -EINVAL;
>  
> -     etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
> +     v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
>  
>       return SZ_4K;
>  }
>  
> -static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
> +static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context 
> *context)
>  {
>       return PT_SIZE;
>  }
>  
> -static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void 
> *buf)
> +static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
> +                              void *buf)
>  {
> -     struct etnaviv_iommuv1_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>  
> -     memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
> +     memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
>  }
>  
> -void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
> +                          struct etnaviv_iommu_context *context)
>  {
> -     struct etnaviv_iommuv1_domain *etnaviv_domain =
> -                     to_etnaviv_domain(gpu->mmu->domain);
> +     struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>       u32 pgtable;
>  
>       /* set base addresses */
> @@ -136,7 +100,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
>       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
>  
>       /* set page table address in MC */
> -     pgtable = (u32)etnaviv_domain->pgtable_dma;
> +     pgtable = (u32)v1_context->pgtable_dma;
>  
>       gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
>       gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
> @@ -145,39 +109,61 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
>       gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
>  }
>  
> -static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
> -     .free = etnaviv_iommuv1_domain_free,
> +
> +const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
> +     .free = etnaviv_iommuv1_free,
>       .map = etnaviv_iommuv1_map,
>       .unmap = etnaviv_iommuv1_unmap,
>       .dump_size = etnaviv_iommuv1_dump_size,
>       .dump = etnaviv_iommuv1_dump,
> +     .restore = etnaviv_iommuv1_restore,
>  };
>  
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
>  {
> -     struct etnaviv_iommuv1_domain *etnaviv_domain;
> -     struct etnaviv_iommu_domain *domain;
> -     int ret;
> +     struct etnaviv_iommuv1_context *v1_context;
> +     struct etnaviv_iommu_context *context;
> +
> +     mutex_lock(&global->lock);
> +
> +     /* MMUv1 does not support switching between different contexts without
> +      * a stop the world operation, so we only support a single shared
> +      * context with this version.
> +      */
> +     if (global->v1.shared_context) {
> +             context = global->v1.shared_context;
> +             etnaviv_iommu_context_get(context);
> +             mutex_unlock(&global->lock);
> +             return context;
> +     }
>  
> -     etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
> -     if (!etnaviv_domain)
> +     v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
> +     if (!v1_context)
>               return NULL;
>  
> -     domain = &etnaviv_domain->base;
> +     v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
> +                                            &v1_context->pgtable_dma,
> +                                            GFP_KERNEL);
> +     if (!v1_context->pgtable_cpu)
> +             goto out_free;
>  
> -     domain->dev = gpu->dev;
> -     domain->base = GPU_MEM_START;
> -     domain->size = PT_ENTRIES * SZ_4K;
> -     domain->ops = &etnaviv_iommuv1_ops;
> +     memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
>  
> -     ret = __etnaviv_iommu_init(etnaviv_domain);
> -     if (ret)
> -             goto out_free;
> +     context = &v1_context->base;
> +     context->global = global;
> +     kref_init(&context->refcount);
> +     mutex_init(&context->lock);
> +     INIT_LIST_HEAD(&context->mappings);
> +     drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
> +     context->global->v1.shared_context = context;
> +
> +     mutex_unlock(&global->lock);
>  
> -     return &etnaviv_domain->base;
> +     return context;
>  
>  out_free:
> -     kfree(etnaviv_domain);
> +     mutex_unlock(&global->lock);
> +     kfree(v1_context);
>       return NULL;
>  }
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h 
> b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
> deleted file mode 100644
> index b279404ce91a..000000000000
> --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
> +++ /dev/null
> @@ -1,20 +0,0 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> -/*
> - * Copyright (C) 2014-2018 Etnaviv Project
> - */
> -
> -#ifndef __ETNAVIV_IOMMU_H__
> -#define __ETNAVIV_IOMMU_H__
> -
> -struct etnaviv_gpu;
> -struct etnaviv_iommu_domain;
> -
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
> -void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
> -
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
> -void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
> -
> -#endif /* __ETNAVIV_IOMMU_H__ */
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 
> b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
> index f794e04be9e6..8b6b10354228 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
> @@ -12,7 +12,6 @@
>  #include "etnaviv_cmdbuf.h"
>  #include "etnaviv_gpu.h"
>  #include "etnaviv_mmu.h"
> -#include "etnaviv_iommu.h"
>  #include "state.xml.h"
>  #include "state_hi.xml.h"
>  
> @@ -27,11 +26,9 @@
>  
>  #define MMUv2_MAX_STLB_ENTRIES               1024
>  
> -struct etnaviv_iommuv2_domain {
> -     struct etnaviv_iommu_domain base;
> -     /* P(age) T(able) A(rray) */
> -     u64 *pta_cpu;
> -     dma_addr_t pta_dma;
> +struct etnaviv_iommuv2_context {
> +     struct etnaviv_iommu_context base;
> +     unsigned short id;
>       /* M(aster) TLB aka first level pagetable */
>       u32 *mtlb_cpu;
>       dma_addr_t mtlb_dma;
> @@ -40,41 +37,62 @@ struct etnaviv_iommuv2_domain {
>       dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
>  };
>  
> -static struct etnaviv_iommuv2_domain *
> -to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
> +static struct etnaviv_iommuv2_context *
> +to_v2_context(struct etnaviv_iommu_context *context)
>  {
> -     return container_of(domain, struct etnaviv_iommuv2_domain, base);
> +     return container_of(context, struct etnaviv_iommuv2_context, base);
>  }
>  
> +static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
> +{
> +     struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> +     int i;
> +
> +     drm_mm_takedown(&context->mm);
> +
> +     for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
> +             if (v2_context->stlb_cpu[i])
> +                     dma_free_wc(context->global->dev, SZ_4K,
> +                                 v2_context->stlb_cpu[i],
> +                                 v2_context->stlb_dma[i]);
> +     }
> +
> +     dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
> +                 v2_context->mtlb_dma);
> +
> +     clear_bit(v2_context->id, context->global->v2.pta_alloc);
> +
> +     vfree(v2_context);
> +}
>  static int
> -etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
> +etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
>                           int stlb)
>  {
> -     if (etnaviv_domain->stlb_cpu[stlb])
> +     if (v2_context->stlb_cpu[stlb])
>               return 0;
>  
> -     etnaviv_domain->stlb_cpu[stlb] =
> -                     dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
> -                                  &etnaviv_domain->stlb_dma[stlb],
> +     v2_context->stlb_cpu[stlb] =
> +                     dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
> +                                  &v2_context->stlb_dma[stlb],
>                                    GFP_KERNEL);
>  
> -     if (!etnaviv_domain->stlb_cpu[stlb])
> +     if (!v2_context->stlb_cpu[stlb])
>               return -ENOMEM;
>  
> -     memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
> +     memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
>                SZ_4K / sizeof(u32));
>  
> -     etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
> -                                                   MMUv2_PTE_PRESENT;
> +     v2_context->mtlb_cpu[stlb] =
> +                     v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
> +
>       return 0;
>  }
>  
> -static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
> +static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
>                              unsigned long iova, phys_addr_t paddr,
>                              size_t size, int prot)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
>       int mtlb_entry, stlb_entry, ret;
>       u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
>  
> @@ -90,20 +108,19 @@ static int etnaviv_iommuv2_map(struct 
> etnaviv_iommu_domain *domain,
>       mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
>       stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
>  
> -     ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
> +     ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
>       if (ret)
>               return ret;
>  
> -     etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
> +     v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
>  
>       return 0;
>  }
>  
> -static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
> +static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
>                                   unsigned long iova, size_t size)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
>       int mtlb_entry, stlb_entry;
>  
>       if (size != SZ_4K)
> @@ -117,118 +134,35 @@ static size_t etnaviv_iommuv2_unmap(struct 
> etnaviv_iommu_domain *domain,
>       return SZ_4K;
>  }
>  
> -static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain 
> *etnaviv_domain)
> -{
> -     int ret;
> -
> -     /* allocate scratch page */
> -     etnaviv_domain->base.bad_page_cpu =
> -                     dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
> -                                  &etnaviv_domain->base.bad_page_dma,
> -                                  GFP_KERNEL);
> -     if (!etnaviv_domain->base.bad_page_cpu) {
> -             ret = -ENOMEM;
> -             goto fail_mem;
> -     }
> -
> -     memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
> -              SZ_4K / sizeof(u32));
> -
> -     etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
> -                                            SZ_4K, &etnaviv_domain->pta_dma,
> -                                            GFP_KERNEL);
> -     if (!etnaviv_domain->pta_cpu) {
> -             ret = -ENOMEM;
> -             goto fail_mem;
> -     }
> -
> -     etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
> -                                             SZ_4K, 
> &etnaviv_domain->mtlb_dma,
> -                                             GFP_KERNEL);
> -     if (!etnaviv_domain->mtlb_cpu) {
> -             ret = -ENOMEM;
> -             goto fail_mem;
> -     }
> -
> -     memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
> -              MMUv2_MAX_STLB_ENTRIES);
> -
> -     return 0;
> -
> -fail_mem:
> -     if (etnaviv_domain->base.bad_page_cpu)
> -             dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                         etnaviv_domain->base.bad_page_cpu,
> -                         etnaviv_domain->base.bad_page_dma);
> -
> -     if (etnaviv_domain->pta_cpu)
> -             dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                         etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
> -
> -     if (etnaviv_domain->mtlb_cpu)
> -             dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                         etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
> -
> -     return ret;
> -}
> -
> -static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
> -{
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> -     int i;
> -
> -     dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                 etnaviv_domain->base.bad_page_cpu,
> -                 etnaviv_domain->base.bad_page_dma);
> -
> -     dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                 etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
> -
> -     dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                 etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
> -
> -     for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
> -             if (etnaviv_domain->stlb_cpu[i])
> -                     dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> -                                 etnaviv_domain->stlb_cpu[i],
> -                                 etnaviv_domain->stlb_dma[i]);
> -     }
> -
> -     vfree(etnaviv_domain);
> -}
> -
> -static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
> +static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
>       size_t dump_size = SZ_4K;
>       int i;
>  
>       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
> -             if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> +             if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
>                       dump_size += SZ_4K;
>  
>       return dump_size;
>  }
>  
> -static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
> +static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                     to_etnaviv_domain(domain);
> +     struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
>       int i;
>  
> -     memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
> +     memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
>       buf += SZ_4K;
>       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
> -             if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> -                     memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
> +             if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> +                     memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
>  }
>  
> -static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
> +     struct etnaviv_iommu_context *context)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                     to_etnaviv_domain(gpu->mmu->domain);
> +     struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
>       u16 prefetch;
>  
>       /* If the MMU is already enabled the state is still there. */
> @@ -236,8 +170,8 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
>               return;
>  
>       prefetch = etnaviv_buffer_config_mmuv2(gpu,
> -                             (u32)etnaviv_domain->mtlb_dma,
> -                             (u32)etnaviv_domain->base.bad_page_dma);
> +                             (u32)v2_context->mtlb_dma,
> +                             (u32)context->global->bad_page_dma);
>       etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
>                            prefetch);
>       etnaviv_gpu_wait_idle(gpu, 100);
> @@ -245,10 +179,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
>       gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
>  }
>  
> -static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
> +     struct etnaviv_iommu_context *context)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain =
> -                             to_etnaviv_domain(gpu->mmu->domain);
> +     struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
>       u16 prefetch;
>  
>       /* If the MMU is already enabled the state is still there. */
> @@ -256,26 +190,26 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
>               return;
>  
>       gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
> -               lower_32_bits(etnaviv_domain->pta_dma));
> +               lower_32_bits(context->global->v2.pta_dma));
>       gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
> -               upper_32_bits(etnaviv_domain->pta_dma));
> +               upper_32_bits(context->global->v2.pta_dma));
>       gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
>  
>       gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
> -               lower_32_bits(etnaviv_domain->base.bad_page_dma));
> +               lower_32_bits(context->global->bad_page_dma));
>       gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
> -               lower_32_bits(etnaviv_domain->base.bad_page_dma));
> +               lower_32_bits(context->global->bad_page_dma));
>       gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
>                 VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
> -               upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
> +               upper_32_bits(context->global->bad_page_dma)) |
>                 VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
> -               upper_32_bits(etnaviv_domain->base.bad_page_dma)));
> +               upper_32_bits(context->global->bad_page_dma)));
>  
> -     etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
> -                                  VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
> +     context->global->v2.pta_cpu[0] = v2_context->mtlb_dma |
> +                                      VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
>  
>       /* trigger a PTA load through the FE */
> -     prefetch = etnaviv_buffer_config_pta(gpu);
> +     prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
>       etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
>                            prefetch);
>       etnaviv_gpu_wait_idle(gpu, 100);
> @@ -283,14 +217,15 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
>       gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
>  }
>  
> -void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
> +                                 struct etnaviv_iommu_context *context)
>  {
>       switch (gpu->sec_mode) {
>       case ETNA_SEC_NONE:
> -             etnaviv_iommuv2_restore_nonsec(gpu);
> +             etnaviv_iommuv2_restore_nonsec(gpu, context);
>               break;
>       case ETNA_SEC_KERNEL:
> -             etnaviv_iommuv2_restore_sec(gpu);
> +             etnaviv_iommuv2_restore_sec(gpu, context);
>               break;
>       default:
>               WARN(1, "unhandled GPU security mode\n");
> @@ -298,39 +233,56 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
>       }
>  }
>  
> -static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
> -     .free = etnaviv_iommuv2_domain_free,
> +const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
> +     .free = etnaviv_iommuv2_free,
>       .map = etnaviv_iommuv2_map,
>       .unmap = etnaviv_iommuv2_unmap,
>       .dump_size = etnaviv_iommuv2_dump_size,
>       .dump = etnaviv_iommuv2_dump,
> +     .restore = etnaviv_iommuv2_restore,
>  };
>  
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
>  {
> -     struct etnaviv_iommuv2_domain *etnaviv_domain;
> -     struct etnaviv_iommu_domain *domain;
> -     int ret;
> +     struct etnaviv_iommuv2_context *v2_context;
> +     struct etnaviv_iommu_context *context;
>  
> -     etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
> -     if (!etnaviv_domain)
> +     v2_context = vzalloc(sizeof(*v2_context));
> +     if (!v2_context)
>               return NULL;
>  
> -     domain = &etnaviv_domain->base;
> +     mutex_lock(&global->lock);
> +     v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
> +                                          ETNAVIV_PTA_ENTRIES);
> +     if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
> +             set_bit(v2_context->id, global->v2.pta_alloc);
> +     } else {
> +             mutex_unlock(&global->lock);
> +             goto out_free;
> +     }
> +     mutex_unlock(&global->lock);
>  
> -     domain->dev = gpu->dev;
> -     domain->base = SZ_4K;
> -     domain->size = (u64)SZ_1G * 4 - SZ_4K;
> -     domain->ops = &etnaviv_iommuv2_ops;
> +     v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
> +                                         &v2_context->mtlb_dma, GFP_KERNEL);
> +     if (!v2_context->mtlb_cpu)
> +             goto out_free_id;
>  
> -     ret = etnaviv_iommuv2_init(etnaviv_domain);
> -     if (ret)
> -             goto out_free;
> +     memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
> +              MMUv2_MAX_STLB_ENTRIES);
> +
> +     context = &v2_context->base;
> +     context->global = global;
> +     kref_init(&context->refcount);
> +     mutex_init(&context->lock);
> +     INIT_LIST_HEAD(&context->mappings);
> +     drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
>  
> -     return &etnaviv_domain->base;
> +     return context;
>  
> +out_free_id:
> +     clear_bit(v2_context->id, global->v2.pta_alloc);
>  out_free:
> -     vfree(etnaviv_domain);
> +     vfree(v2_context);
>       return NULL;
>  }
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> index c4092c8def4f..a155755424f8 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> @@ -8,10 +8,9 @@
>  #include "etnaviv_drv.h"
>  #include "etnaviv_gem.h"
>  #include "etnaviv_gpu.h"
> -#include "etnaviv_iommu.h"
>  #include "etnaviv_mmu.h"
>  
> -static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
> +static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
>                                unsigned long iova, size_t size)
>  {
>       size_t unmapped_page, unmapped = 0;
> @@ -24,7 +23,8 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
>       }
>  
>       while (unmapped < size) {
> -             unmapped_page = domain->ops->unmap(domain, iova, pgsize);
> +             unmapped_page = context->global->ops->unmap(context, iova,
> +                                                         pgsize);
>               if (!unmapped_page)
>                       break;
>  
> @@ -33,7 +33,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
>       }
>  }
>  
> -static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
> +static int etnaviv_context_map(struct etnaviv_iommu_context *context,
>                             unsigned long iova, phys_addr_t paddr,
>                             size_t size, int prot)
>  {
> @@ -49,7 +49,8 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
>       }
>  
>       while (size) {
> -             ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
> +             ret = context->global->ops->map(context, iova, paddr, pgsize,
> +                                             prot);
>               if (ret)
>                       break;
>  
> @@ -60,21 +61,19 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
>  
>       /* unroll mapping in case something went wrong */
>       if (ret)
> -             etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
> +             etnaviv_context_unmap(context, orig_iova, orig_size - size);
>  
>       return ret;
>  }
>  
> -static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
> +static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
>                            struct sg_table *sgt, unsigned len, int prot)
> -{
> -     struct etnaviv_iommu_domain *domain = iommu->domain;
> -     struct scatterlist *sg;
> +{    struct scatterlist *sg;
>       unsigned int da = iova;
>       unsigned int i, j;
>       int ret;
>  
> -     if (!domain || !sgt)
> +     if (!context || !sgt)
>               return -EINVAL;
>  
>       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
> @@ -83,7 +82,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
>  
>               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
>  
> -             ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
> +             ret = etnaviv_context_map(context, da, pa, bytes, prot);
>               if (ret)
>                       goto fail;
>  
> @@ -98,16 +97,15 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
>       for_each_sg(sgt->sgl, sg, i, j) {
>               size_t bytes = sg_dma_len(sg) + sg->offset;
>  
> -             etnaviv_domain_unmap(domain, da, bytes);
> +             etnaviv_context_unmap(context, da, bytes);
>               da += bytes;
>       }
>       return ret;
>  }
>  
> -static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
> +static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
>                               struct sg_table *sgt, unsigned len)
>  {
> -     struct etnaviv_iommu_domain *domain = iommu->domain;
>       struct scatterlist *sg;
>       unsigned int da = iova;
>       int i;
> @@ -115,7 +113,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
>       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
>               size_t bytes = sg_dma_len(sg) + sg->offset;
>  
> -             etnaviv_domain_unmap(domain, da, bytes);
> +             etnaviv_context_unmap(context, da, bytes);
>  
>               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
>  
> @@ -125,24 +123,24 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
>       }
>  }
>  
> -static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
> +static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
>       struct etnaviv_vram_mapping *mapping)
>  {
>       struct etnaviv_gem_object *etnaviv_obj = mapping->object;
>  
> -     etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
> +     etnaviv_iommu_unmap(context, mapping->vram_node.start,
>                           etnaviv_obj->sgt, etnaviv_obj->base.size);
>       drm_mm_remove_node(&mapping->vram_node);
>  }
>  
> -static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> +static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
>                                  struct drm_mm_node *node, size_t size)
>  {
>       struct etnaviv_vram_mapping *free = NULL;
>       enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
>       int ret;
>  
> -     lockdep_assert_held(&mmu->lock);
> +     lockdep_assert_held(&context->lock);
>  
>       while (1) {
>               struct etnaviv_vram_mapping *m, *n;
> @@ -150,17 +148,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>               struct list_head list;
>               bool found;
>  
> -             ret = drm_mm_insert_node_in_range(&mmu->mm, node,
> +             ret = drm_mm_insert_node_in_range(&context->mm, node,
>                                                 size, 0, 0, 0, U64_MAX, mode);
>               if (ret != -ENOSPC)
>                       break;
>  
>               /* Try to retire some entries */
> -             drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
> +             drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
>  
>               found = 0;
>               INIT_LIST_HEAD(&list);
> -             list_for_each_entry(free, &mmu->mappings, mmu_node) {
> +             list_for_each_entry(free, &context->mappings, mmu_node) {
>                       /* If this vram node has not been used, skip this. */
>                       if (!free->vram_node.mm)
>                               continue;
> @@ -202,8 +200,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>                * this mapping.
>                */
>               list_for_each_entry_safe(m, n, &list, scan_node) {
> -                     etnaviv_iommu_remove_mapping(mmu, m);
> -                     m->mmu = NULL;
> +                     etnaviv_iommu_remove_mapping(context, m);
> +                     m->context = NULL;
>                       list_del_init(&m->mmu_node);
>                       list_del_init(&m->scan_node);
>               }
> @@ -219,7 +217,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>       return ret;
>  }
>  
> -int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
>       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
>       struct etnaviv_vram_mapping *mapping)
>  {
> @@ -229,17 +227,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
>  
>       lockdep_assert_held(&etnaviv_obj->lock);
>  
> -     mutex_lock(&mmu->lock);
> +     mutex_lock(&context->lock);
>  
>       /* v1 MMU can optimize single entry (contiguous) scatterlists */
> -     if (mmu->version == ETNAVIV_IOMMU_V1 &&
> +     if (context->global->version == ETNAVIV_IOMMU_V1 &&
>           sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
>               u32 iova;
>  
>               iova = sg_dma_address(sgt->sgl) - memory_base;
>               if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
>                       mapping->iova = iova;
> -                     list_add_tail(&mapping->mmu_node, &mmu->mappings);
> +                     list_add_tail(&mapping->mmu_node, &context->mappings);
>                       ret = 0;
>                       goto unlock;
>               }
> @@ -247,12 +245,12 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
>  
>       node = &mapping->vram_node;
>  
> -     ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
> +     ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->base.size);
>       if (ret < 0)
>               goto unlock;
>  
>       mapping->iova = node->start;
> -     ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
> +     ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
>                               ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
>  
>       if (ret < 0) {
> @@ -260,79 +258,58 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
>               goto unlock;
>       }
>  
> -     list_add_tail(&mapping->mmu_node, &mmu->mappings);
> -     mmu->flush_seq++;
> +     list_add_tail(&mapping->mmu_node, &context->mappings);
> +     context->flush_seq++;
>  unlock:
> -     mutex_unlock(&mmu->lock);
> +     mutex_unlock(&context->lock);
>  
>       return ret;
>  }
>  
> -void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
>       struct etnaviv_vram_mapping *mapping)
>  {
>       WARN_ON(mapping->use);
>  
> -     mutex_lock(&mmu->lock);
> +     mutex_lock(&context->lock);
>  
>       /* If the vram node is on the mm, unmap and remove the node */
> -     if (mapping->vram_node.mm == &mmu->mm)
> -             etnaviv_iommu_remove_mapping(mmu, mapping);
> +     if (mapping->vram_node.mm == &context->mm)
> +             etnaviv_iommu_remove_mapping(context, mapping);
>  
>       list_del(&mapping->mmu_node);
> -     mmu->flush_seq++;
> -     mutex_unlock(&mmu->lock);
> +     context->flush_seq++;
> +     mutex_unlock(&context->lock);
>  }
>  
> -void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
> +static void etnaviv_iommu_context_free(struct kref *kref)
>  {
> -     drm_mm_takedown(&mmu->mm);
> -     mmu->domain->ops->free(mmu->domain);
> -     kfree(mmu);
> -}
> +     struct etnaviv_iommu_context *context =
> +             container_of(kref, struct etnaviv_iommu_context, refcount);
>  
> -struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
> +     context->global->ops->free(context);
> +}
> +void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
>  {
> -     enum etnaviv_iommu_version version;
> -     struct etnaviv_iommu *mmu;
> -
> -     mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
> -     if (!mmu)
> -             return ERR_PTR(-ENOMEM);
> -
> -     if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
> -             mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
> -             version = ETNAVIV_IOMMU_V1;
> -     } else {
> -             mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
> -             version = ETNAVIV_IOMMU_V2;
> -     }
> -
> -     if (!mmu->domain) {
> -             dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
> -             kfree(mmu);
> -             return ERR_PTR(-ENOMEM);
> -     }
> -
> -     mmu->gpu = gpu;
> -     mmu->version = version;
> -     mutex_init(&mmu->lock);
> -     INIT_LIST_HEAD(&mmu->mappings);
> -
> -     drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
> -
> -     return mmu;
> +     kref_put(&context->refcount, etnaviv_iommu_context_free);
>  }
>  
> -void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
> +struct etnaviv_iommu_context *
> +etnaviv_iommu_context_init(struct etnaviv_iommu_global *global)
>  {
> -     if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
> -             etnaviv_iommuv1_restore(gpu);
> +     if (global->version == ETNAVIV_IOMMU_V1)
> +             return etnaviv_iommuv1_context_alloc(global);
>       else
> -             etnaviv_iommuv2_restore(gpu);
> +             return etnaviv_iommuv2_context_alloc(global);
> +}
> +
> +void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
> +                        struct etnaviv_iommu_context *context)
> +{
> +     context->global->ops->restore(gpu, context);
>  }
>  
> -int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
>                                 struct etnaviv_vram_mapping *mapping,
>                                 u32 memory_base, dma_addr_t paddr,
>                                 size_t size)
> @@ -340,23 +317,23 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
>       struct drm_mm_node *node;
>       int ret;
>  
> -     if (mmu->version == ETNAVIV_IOMMU_V1) {
> +     if (context->global->version == ETNAVIV_IOMMU_V1) {
>               mapping->iova = paddr - memory_base;
>               mapping->use = 1;
> -             list_add_tail(&mapping->mmu_node, &mmu->mappings);
> +             list_add_tail(&mapping->mmu_node, &context->mappings);
>               return 0;
>       }
>  
>       node = &mapping->vram_node;
>  
> -     mutex_lock(&mmu->lock);
> +     mutex_lock(&context->lock);
>  
> -     ret = etnaviv_iommu_find_iova(mmu, node, size);
> +     ret = etnaviv_iommu_find_iova(context, node, size);
>       if (ret < 0)
>               goto unlock;
>  
>       mapping->iova = node->start;
> -     ret = etnaviv_domain_map(mmu->domain, node->start, paddr, size,
> +     ret = etnaviv_context_map(context, node->start, paddr, size,
>                                ETNAVIV_PROT_READ);
>  
>       if (ret < 0) {
> @@ -364,36 +341,96 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
>               goto unlock;
>       }
>  
> -     list_add_tail(&mapping->mmu_node, &mmu->mappings);
> -     mmu->flush_seq++;
> +     list_add_tail(&mapping->mmu_node, &context->mappings);
> +     context->flush_seq++;
>  unlock:
> -     mutex_unlock(&mmu->lock);
> +     mutex_unlock(&context->lock);
>  
>       return ret;
>  }
>  
> -void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
>                 struct etnaviv_vram_mapping *mapping)
>  {
>       struct drm_mm_node *node = &mapping->vram_node;
>  
>       mapping->use = 0;
>  
> -     if (mmu->version == ETNAVIV_IOMMU_V1)
> +     if (context->global->version == ETNAVIV_IOMMU_V1)
>               return;
>  
> -     mutex_lock(&mmu->lock);
> -     etnaviv_domain_unmap(mmu->domain, node->start, node->size);
> +     mutex_lock(&context->lock);
> +     etnaviv_context_unmap(context, node->start, node->size);
>       drm_mm_remove_node(node);
> -     mutex_unlock(&mmu->lock);
> +     mutex_unlock(&context->lock);
>  }
>  
> -size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
> +size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
>  {
> -     return iommu->domain->ops->dump_size(iommu->domain);
> +     return context->global->ops->dump_size(context);
>  }
>  
> -void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
> +void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
> +{
> +     context->global->ops->dump(context, buf);
> +}
> +
> +extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
> +extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
> +
> +struct etnaviv_iommu_global *
> +etnaviv_iommu_global_init(struct device *dev,
> +                       enum etnaviv_iommu_version version)
>  {
> -     iommu->domain->ops->dump(iommu->domain, buf);
> +     struct etnaviv_iommu_global *global;
> +
> +     global = kzalloc(sizeof(*global), GFP_KERNEL);
> +     if (!global)
> +             return NULL;
> +
> +     global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
> +                                         GFP_KERNEL);
> +     if (!global->bad_page_cpu)
> +             goto free_global;
> +
> +     memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
> +
> +     if (version == ETNAVIV_IOMMU_V2) {
> +             global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
> +                                            &global->v2.pta_dma, GFP_KERNEL);
> +             if (!global->v2.pta_cpu)
> +                     goto free_bad_page;
> +     }
> +
> +     global->dev = dev;
> +     global->version = version;
> +     mutex_init(&global->lock);
> +
> +     if (version == ETNAVIV_IOMMU_V1)
> +             global->ops = &etnaviv_iommuv1_ops;
> +     else
> +             global->ops = &etnaviv_iommuv2_ops;
> +
> +     return global;
> +
> +free_bad_page:
> +     dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
> +free_global:
> +     kfree(global);
> +
> +     return NULL;
> +}
> +
> +void etnaviv_iommu_global_fini(struct etnaviv_iommu_global *global)
> +{
> +     if (global->v2.pta_cpu)
> +             dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
> +                         global->v2.pta_cpu, global->v2.pta_dma);
> +
> +     if (global->bad_page_cpu)
> +             dma_free_wc(global->dev, SZ_4K,
> +                         global->bad_page_cpu, global->bad_page_dma);
> +
> +     mutex_destroy(&global->lock);
> +     kfree(global);
>  }
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
> index 34afe25df9ca..fbcefce873ca 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
> @@ -16,33 +16,54 @@ enum etnaviv_iommu_version {
>  
>  struct etnaviv_gpu;
>  struct etnaviv_vram_mapping;
> -struct etnaviv_iommu_domain;
> +struct etnaviv_iommu_global;
> +struct etnaviv_iommu_context;
>  
> -struct etnaviv_iommu_domain_ops {
> -     void (*free)(struct etnaviv_iommu_domain *);
> -     int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
> +struct etnaviv_iommu_ops {
> +     struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
> +     void (*free)(struct etnaviv_iommu_context *);
> +     int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
>                  phys_addr_t paddr, size_t size, int prot);
> -     size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
> +     size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
>                       size_t size);
> -     size_t (*dump_size)(struct etnaviv_iommu_domain *);
> -     void (*dump)(struct etnaviv_iommu_domain *, void *);
> +     size_t (*dump_size)(struct etnaviv_iommu_context *);
> +     void (*dump)(struct etnaviv_iommu_context *, void *);
> +     void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
>  };
>  
> -struct etnaviv_iommu_domain {
> +#define ETNAVIV_PTA_SIZE     SZ_4K
> +#define ETNAVIV_PTA_ENTRIES  (ETNAVIV_PTA_SIZE / sizeof(u64))
> +
> +struct etnaviv_iommu_global {
>       struct device *dev;
> +     enum etnaviv_iommu_version version;
> +     const struct etnaviv_iommu_ops *ops;
> +     struct mutex lock;
> +
>       void *bad_page_cpu;
>       dma_addr_t bad_page_dma;
> -     u64 base;
> -     u64 size;
>  
> -     const struct etnaviv_iommu_domain_ops *ops;
> +     /*
> +      * This union holds members needed by either MMUv1 or MMUv2, which
> +      * can not exist at the same time.
> +      */
> +     union {
> +             struct {
> +                     struct etnaviv_iommu_context *shared_context;
> +             } v1;
> +             struct {
> +                     /* P(age) T(able) A(rray) */
> +                     u64 *pta_cpu;
> +                     dma_addr_t pta_dma;
> +                     struct spinlock pta_lock;
> +                     DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
> +             } v2;
> +     };
>  };
>  
> -struct etnaviv_iommu {
> -     struct etnaviv_gpu *gpu;
> -     struct etnaviv_iommu_domain *domain;
> -
> -     enum etnaviv_iommu_version version;
> +struct etnaviv_iommu_context {
> +     struct kref refcount;
> +     struct etnaviv_iommu_global *global;
>  
>       /* memory manager for GPU address area */
>       struct mutex lock;
> @@ -51,26 +72,41 @@ struct etnaviv_iommu {
>       unsigned int flush_seq;
>  };
>  
> +struct etnaviv_iommu_global *etnaviv_iommu_global_init(struct device *dev,
> +             enum etnaviv_iommu_version version);
> +void etnaviv_iommu_global_fini(struct etnaviv_iommu_global *global);
> +
>  struct etnaviv_gem_object;
>  
> -int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
>       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
>       struct etnaviv_vram_mapping *mapping);
> -void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
>       struct etnaviv_vram_mapping *mapping);
>  
> -int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
>                                 struct etnaviv_vram_mapping *mapping,
>                                 u32 memory_base, dma_addr_t paddr,
>                                 size_t size);
> -void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
>                                  struct etnaviv_vram_mapping *mapping);
>  
> -size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
> -void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
> -
> -struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
> -void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
> -void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
> +size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
> +void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
> +
> +struct etnaviv_iommu_context *
> +etnaviv_iommu_context_init(struct etnaviv_iommu_global *global);
> +static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
> +{
> +     kref_get(&ctx->refcount);
> +}
> +void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
> +void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
> +                        struct etnaviv_iommu_context *ctx);
> +
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
>  
>  #endif /* __ETNAVIV_MMU_H__ */
> -- 
> 2.20.1
> 
> _______________________________________________
> etnaviv mailing list
> etna...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/etnaviv
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to