[PATCH v3 5/8] drm/etnaviv: rework MMU handling
Guido Günther
agx at sigxcpu.org
Tue Aug 13 15:27:09 UTC 2019
Hi,
On Fri, Aug 09, 2019 at 02:04:21PM +0200, Lucas Stach wrote:
> This reworks the MMU handling to make it possible to have multiple MMU contexts.
> A context is basically one instance of GPU page tables. Currently we have one
> set of page tables per GPU, which isn't all that clever, as it has the
> following two consequences:
>
> 1. All GPU clients (aka processes) share the same pagetables, which means
> there is no isolation between clients, only between the GPU-assigned memory
> spaces and the rest of the system. Better than nothing, but also not great.
>
> 2. Clients operating on the same set of buffers with different etnaviv GPU
> cores, e.g. a workload using both the 2D and 3D GPU, need to map the
> buffers into the pagetable set of each GPU used.
>
> This patch reworks all the MMU handling to introduce the abstraction of the
> MMU context. A context can be shared across different GPU cores, as long as
> they have compatible MMU implementations, which is the case for all systems
> with Vivante GPUs seen in the wild.
>
> As MMUv1 is not able to change pagetables on the fly without a
> "stop the world" operation (stop the GPU, change the pagetables via the
> CPU, restart the GPU), the implementation introduces a shared context on
> MMUv1, which is returned whenever there is a request for a new context.
>
> This patch assigns an MMU context to each GPU, so on MMUv2 systems there is
> still one set of pagetables per GPU, but due to the shared context, MMUv1
> systems see a change in behavior as now a single pagetable set is used
> across all GPU cores.
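For readers jumping into the series here: with this scheme a request for
a new context on MMUv1 just takes another reference on the one shared
context. A minimal caller-side sketch, using the helpers this patch
introduces (illustrative only, not part of the diff):

    struct etnaviv_iommu_context *ctx;

    ctx = etnaviv_iommu_context_init(priv->mmu_global);
    /* MMUv1: returns global->v1.shared_context with an extra reference;
     * MMUv2: allocates a fresh pagetable set with its own PTA slot */

    /* ... map buffers, submit work ... */

    etnaviv_iommu_context_put(ctx); /* last put frees via ops->free() */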
>
> Signed-off-by: Lucas Stach <l.stach at pengutronix.de>
> Reviewed-by: Philipp Zabel <p.zabel at pengutronix.de>
Reviewed-by: Guido Günther <agx at sigxcpu.org>
> ---
> v3:
> - move ops declaration to header.
> - rename gpu struct mmu member to mmu_context for consistency
> ---
> drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 10 +-
> drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c | 8 +-
> drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h | 6 +-
> drivers/gpu/drm/etnaviv/etnaviv_drv.c | 6 +-
> drivers/gpu/drm/etnaviv/etnaviv_drv.h | 4 +-
> drivers/gpu/drm/etnaviv/etnaviv_dump.c | 12 +-
> drivers/gpu/drm/etnaviv/etnaviv_gem.c | 24 +-
> drivers/gpu/drm/etnaviv/etnaviv_gem.h | 2 +-
> drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 31 ++-
> drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 3 +-
> drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 151 ++++++------
> drivers/gpu/drm/etnaviv/etnaviv_iommu.h | 20 --
> drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 264 +++++++++------------
> drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 259 ++++++++++++--------
> drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 91 +++++--
> 15 files changed, 461 insertions(+), 430 deletions(-)
> delete mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.h
>
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
> index a3cdb20bfc5f..4324b098689f 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
> @@ -207,7 +207,7 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
> return buffer->user_size / 8;
> }
>
> -u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
> +u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
> {
> struct etnaviv_cmdbuf *buffer = &gpu->buffer;
>
> @@ -216,7 +216,7 @@ u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
> buffer->user_size = 0;
>
> CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
> - VIVS_MMUv2_PTA_CONFIG_INDEX(0));
> + VIVS_MMUv2_PTA_CONFIG_INDEX(id));
>
> CMD_END(buffer);
>
> @@ -315,7 +315,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
> u32 return_target, return_dwords;
> u32 link_target, link_dwords;
> bool switch_context = gpu->exec_state != exec_state;
> - unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
> + unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
> bool need_flush = gpu->flush_seq != new_flush_seq;
>
> lockdep_assert_held(&gpu->lock);
> @@ -339,7 +339,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
>
> /* flush command */
> if (need_flush) {
> - if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
> + if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
> extra_dwords += 1;
> else
> extra_dwords += 3;
> @@ -353,7 +353,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
>
> if (need_flush) {
> /* Add the MMU flush */
> - if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
> + if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
> CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
> VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
> VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
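The flush_seq tracking carries over unchanged, just one level deeper:
the context counts mapping changes under context->lock, and the GPU
remembers the last sequence it flushed. Condensed for illustration from
the hunks above and the etnaviv_mmu.c changes below:

    /* producer: bumped on every map/unmap, under context->lock */
    context->flush_seq++;

    /* consumer: compared lock-free while building the ring */
    unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
    bool need_flush = gpu->flush_seq != new_flush_seq;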
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
> index b0babc0f7230..f39430ba3593 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
> @@ -60,18 +60,18 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev)
> }
>
> int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
> - struct etnaviv_iommu *mmu,
> + struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping,
> u32 memory_base)
> {
> - return etnaviv_iommu_get_suballoc_va(mmu, mapping, memory_base,
> + return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
> suballoc->paddr, SUBALLOC_SIZE);
> }
>
> -void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu,
> +void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping)
> {
> - etnaviv_iommu_put_suballoc_va(mmu, mapping);
> + etnaviv_iommu_put_suballoc_va(context, mapping);
> }
>
> void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
> index a28668e46e26..ad6fd8eb0378 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
> @@ -9,7 +9,7 @@
> #include <linux/types.h>
>
> struct device;
> -struct etnaviv_iommu;
> +struct etnaviv_iommu_context;
> struct etnaviv_vram_mapping;
> struct etnaviv_cmdbuf_suballoc;
> struct etnaviv_perfmon_request;
> @@ -28,10 +28,10 @@ struct etnaviv_cmdbuf_suballoc *
> etnaviv_cmdbuf_suballoc_new(struct device *dev);
> void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
> int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
> - struct etnaviv_iommu *mmu,
> + struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping,
> u32 memory_base);
> -void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu,
> +void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping);
>
>
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> index 5fa3aa7bdbc5..eb0c23fe979a 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> @@ -119,9 +119,9 @@ static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
>
> seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
>
> - mutex_lock(&gpu->mmu->lock);
> - drm_mm_print(&gpu->mmu->mm, &p);
> - mutex_unlock(&gpu->mmu->lock);
> + mutex_lock(&gpu->mmu_context->lock);
> + drm_mm_print(&gpu->mmu_context->mm, &p);
> + mutex_unlock(&gpu->mmu_context->lock);
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> index e052d7db66ae..5f8db08f1c17 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> @@ -22,6 +22,7 @@ struct etnaviv_gpu;
> struct etnaviv_mmu;
> struct etnaviv_gem_object;
> struct etnaviv_gem_submit;
> +struct etnaviv_iommu_global;
>
> struct etnaviv_file_private {
> /*
> @@ -37,6 +38,7 @@ struct etnaviv_drm_private {
> struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
>
> struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
> + struct etnaviv_iommu_global *mmu_global;
>
> /* list of GEM objects: */
> struct mutex gem_lock;
> @@ -69,7 +71,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
> uintptr_t ptr, u32 size, u32 flags, u32 *handle);
> u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
> u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
> -u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
> +u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
> void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
> void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
> void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
> index 2ce60baa4ad9..7e6791517693 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
> @@ -93,7 +93,7 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
> }
>
> static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
> - struct etnaviv_iommu *mmu, size_t mmu_size)
> + struct etnaviv_iommu_context *mmu, size_t mmu_size)
> {
> etnaviv_iommu_dump(mmu, iter->data);
>
> @@ -125,9 +125,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
> return;
> etnaviv_dump_core = false;
>
> - mutex_lock(&gpu->mmu->lock);
> + mutex_lock(&gpu->mmu_context->lock);
>
> - mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
> + mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
>
> /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
> n_obj = 5;
> @@ -157,7 +157,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
> iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
> PAGE_KERNEL);
> if (!iter.start) {
> - mutex_unlock(&gpu->mmu->lock);
> + mutex_unlock(&gpu->mmu_context->lock);
> dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
> return;
> }
> @@ -169,7 +169,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
> memset(iter.hdr, 0, iter.data - iter.start);
>
> etnaviv_core_dump_registers(&iter, gpu);
> - etnaviv_core_dump_mmu(&iter, gpu->mmu, mmu_size);
> + etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
> etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
> gpu->buffer.size,
> etnaviv_cmdbuf_get_va(&gpu->buffer,
> @@ -221,7 +221,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
> obj->base.size);
> }
>
> - mutex_unlock(&gpu->mmu->lock);
> + mutex_unlock(&gpu->mmu_context->lock);
>
> etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
>
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index e199a6833ff0..0ccc3c4dffc4 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -223,12 +223,12 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
>
> static struct etnaviv_vram_mapping *
> etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
> - struct etnaviv_iommu *mmu)
> + struct etnaviv_iommu_context *context)
> {
> struct etnaviv_vram_mapping *mapping;
>
> list_for_each_entry(mapping, &obj->vram_list, obj_node) {
> - if (mapping->mmu == mmu)
> + if (mapping->context == context)
> return mapping;
> }
>
> @@ -256,7 +256,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
> int ret = 0;
>
> mutex_lock(&etnaviv_obj->lock);
> - mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
> + mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu_context);
> if (mapping) {
> /*
> * Holding the object lock prevents the use count changing
> @@ -265,12 +265,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
> * the MMU owns this mapping to close this race.
> */
> if (mapping->use == 0) {
> - mutex_lock(&gpu->mmu->lock);
> - if (mapping->mmu == gpu->mmu)
> + mutex_lock(&gpu->mmu_context->lock);
> + if (mapping->context == gpu->mmu_context)
> mapping->use += 1;
> else
> mapping = NULL;
> - mutex_unlock(&gpu->mmu->lock);
> + mutex_unlock(&gpu->mmu_context->lock);
> if (mapping)
> goto out;
> } else {
> @@ -303,11 +303,11 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
> list_del(&mapping->obj_node);
> }
>
> - mapping->mmu = gpu->mmu;
> + mapping->context = gpu->mmu_context;
> mapping->use = 1;
>
> - ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
> - mapping);
> + ret = etnaviv_iommu_map_gem(gpu->mmu_context, etnaviv_obj,
> + gpu->memory_base, mapping);
> if (ret < 0)
> kfree(mapping);
> else
> @@ -525,12 +525,12 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
>
> list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
> obj_node) {
> - struct etnaviv_iommu *mmu = mapping->mmu;
> + struct etnaviv_iommu_context *context = mapping->context;
>
> WARN_ON(mapping->use);
>
> - if (mmu)
> - etnaviv_iommu_unmap_gem(mmu, mapping);
> + if (context)
> + etnaviv_iommu_unmap_gem(context, mapping);
>
> list_del(&mapping->obj_node);
> kfree(mapping);
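The revalidation dance is unchanged here, only renamed from mmu to
context; condensed for illustration (this is just the hunk above,
reflowed):

    mutex_lock(&gpu->mmu_context->lock);
    if (mapping->context == gpu->mmu_context)
            mapping->use += 1;      /* mapping still owned: take a use */
    else
            mapping = NULL;         /* reaped meanwhile: remap below */
    mutex_unlock(&gpu->mmu_context->lock);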
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> index d7d8a835f379..5a004d5e4eaa 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
> @@ -25,7 +25,7 @@ struct etnaviv_vram_mapping {
> struct list_head scan_node;
> struct list_head mmu_node;
> struct etnaviv_gem_object *object;
> - struct etnaviv_iommu *mmu;
> + struct etnaviv_iommu_context *context;
> struct drm_mm_node vram_node;
> unsigned int use;
> u32 iova;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> index 179bc6c544ca..885ca8f92338 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> @@ -681,7 +681,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
> etnaviv_gpu_setup_pulse_eater(gpu);
>
> /* setup the MMU */
> - etnaviv_iommu_restore(gpu);
> + etnaviv_iommu_restore(gpu, gpu->mmu_context);
>
> /* Start command processor */
> prefetch = etnaviv_buffer_init(gpu);
> @@ -754,14 +754,19 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
> goto fail;
> }
>
> - gpu->mmu = etnaviv_iommu_new(gpu);
> - if (IS_ERR(gpu->mmu)) {
> - dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
> - ret = PTR_ERR(gpu->mmu);
> + ret = etnaviv_iommu_global_init(gpu);
> + if (ret)
> goto fail;
> +
> + gpu->mmu_context = etnaviv_iommu_context_init(priv->mmu_global);
> + if (IS_ERR(gpu->mmu_context)) {
> + dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
> + ret = PTR_ERR(gpu->mmu_context);
> + goto iommu_global_fini;
> }
>
> - ret = etnaviv_cmdbuf_suballoc_map(priv->cmdbuf_suballoc, gpu->mmu,
> + ret = etnaviv_cmdbuf_suballoc_map(priv->cmdbuf_suballoc,
> + gpu->mmu_context,
> &gpu->cmdbuf_mapping,
> gpu->memory_base);
> if (ret) {
> @@ -777,7 +782,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
> goto unmap_suballoc;
> }
>
> - if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
> + if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION) &&
> etnaviv_cmdbuf_get_va(&gpu->buffer, &gpu->cmdbuf_mapping) > 0x80000000) {
> ret = -EINVAL;
> dev_err(gpu->dev,
> @@ -808,9 +813,11 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
> free_buffer:
> etnaviv_cmdbuf_free(&gpu->buffer);
> unmap_suballoc:
> - etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping);
> + etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context, &gpu->cmdbuf_mapping);
> destroy_iommu:
> - etnaviv_iommu_destroy(gpu->mmu);
> + etnaviv_iommu_context_put(gpu->mmu_context);
> +iommu_global_fini:
> + etnaviv_iommu_global_fini(gpu);
> fail:
> pm_runtime_mark_last_busy(gpu->dev);
> pm_runtime_put_autosuspend(gpu->dev);
> @@ -1683,8 +1690,10 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
>
> if (gpu->initialized) {
> etnaviv_cmdbuf_free(&gpu->buffer);
> - etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping);
> - etnaviv_iommu_destroy(gpu->mmu);
> + etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context,
> + &gpu->cmdbuf_mapping);
> + etnaviv_iommu_context_put(gpu->mmu_context);
> + etnaviv_iommu_global_fini(gpu);
> gpu->initialized = false;
> }
>
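Teardown now mirrors init in reverse, which keeps the lifetimes easy to
follow. Illustrative ordering, matching the two hunks above:

    etnaviv_cmdbuf_free(&gpu->buffer);               /* ring buffer */
    etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context,  /* suballoc VA */
                                  &gpu->cmdbuf_mapping);
    etnaviv_iommu_context_put(gpu->mmu_context);     /* context ref */
    etnaviv_iommu_global_fini(gpu);                  /* global use count */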
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> index 96380942cd8c..c0bd6018d53b 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
> @@ -8,6 +8,7 @@
>
> #include "etnaviv_cmdbuf.h"
> #include "etnaviv_gem.h"
> +#include "etnaviv_mmu.h"
> #include "etnaviv_drv.h"
>
> struct etnaviv_gem_submit;
> @@ -136,7 +137,7 @@ struct etnaviv_gpu {
> void __iomem *mmio;
> int irq;
>
> - struct etnaviv_iommu *mmu;
> + struct etnaviv_iommu_context *mmu_context;
> unsigned int flush_seq;
>
> /* Power Control: */
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
> index 18c627c5cae1..a2f1ff151822 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
> @@ -11,7 +11,6 @@
>
> #include "etnaviv_gpu.h"
> #include "etnaviv_mmu.h"
> -#include "etnaviv_iommu.h"
> #include "state_hi.xml.h"
>
> #define PT_SIZE SZ_2M
> @@ -19,113 +18,78 @@
>
> #define GPU_MEM_START 0x80000000
>
> -struct etnaviv_iommuv1_domain {
> - struct etnaviv_iommu_domain base;
> +struct etnaviv_iommuv1_context {
> + struct etnaviv_iommu_context base;
> u32 *pgtable_cpu;
> dma_addr_t pgtable_dma;
> };
>
> -static struct etnaviv_iommuv1_domain *
> -to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
> +static struct etnaviv_iommuv1_context *
> +to_v1_context(struct etnaviv_iommu_context *context)
> {
> - return container_of(domain, struct etnaviv_iommuv1_domain, base);
> + return container_of(context, struct etnaviv_iommuv1_context, base);
> }
>
> -static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
> +static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
> {
> - u32 *p;
> - int i;
> -
> - etnaviv_domain->base.bad_page_cpu =
> - dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
> - &etnaviv_domain->base.bad_page_dma,
> - GFP_KERNEL);
> - if (!etnaviv_domain->base.bad_page_cpu)
> - return -ENOMEM;
> -
> - p = etnaviv_domain->base.bad_page_cpu;
> - for (i = 0; i < SZ_4K / 4; i++)
> - *p++ = 0xdead55aa;
> -
> - etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
> - PT_SIZE,
> - &etnaviv_domain->pgtable_dma,
> - GFP_KERNEL);
> - if (!etnaviv_domain->pgtable_cpu) {
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->base.bad_page_cpu,
> - etnaviv_domain->base.bad_page_dma);
> - return -ENOMEM;
> - }
> -
> - memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
> - PT_ENTRIES);
> -
> - return 0;
> -}
> + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>
> -static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
> -{
> - struct etnaviv_iommuv1_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + drm_mm_takedown(&context->mm);
>
> - dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
> - etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
> + dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
> + v1_context->pgtable_dma);
>
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->base.bad_page_cpu,
> - etnaviv_domain->base.bad_page_dma);
> + context->global->v1.shared_context = NULL;
>
> - kfree(etnaviv_domain);
> + kfree(v1_context);
> }
>
> -static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
> +static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
> unsigned long iova, phys_addr_t paddr,
> size_t size, int prot)
> {
> - struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
> + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
> unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
>
> if (size != SZ_4K)
> return -EINVAL;
>
> - etnaviv_domain->pgtable_cpu[index] = paddr;
> + v1_context->pgtable_cpu[index] = paddr;
>
> return 0;
> }
>
> -static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
> +static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
> unsigned long iova, size_t size)
> {
> - struct etnaviv_iommuv1_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
> unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
>
> if (size != SZ_4K)
> return -EINVAL;
>
> - etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
> + v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
>
> return SZ_4K;
> }
>
> -static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
> +static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
> {
> return PT_SIZE;
> }
>
> -static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
> +static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
> + void *buf)
> {
> - struct etnaviv_iommuv1_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
>
> - memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
> + memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
> }
>
> -void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
> + struct etnaviv_iommu_context *context)
> {
> - struct etnaviv_iommuv1_domain *etnaviv_domain =
> - to_etnaviv_domain(gpu->mmu->domain);
> + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
> u32 pgtable;
>
> /* set base addresses */
> @@ -136,7 +100,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
> gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
>
> /* set page table address in MC */
> - pgtable = (u32)etnaviv_domain->pgtable_dma;
> + pgtable = (u32)v1_context->pgtable_dma;
>
> gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
> gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
> @@ -145,39 +109,62 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
> gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
> }
>
> -static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
> - .free = etnaviv_iommuv1_domain_free,
> +
> +const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
> + .free = etnaviv_iommuv1_free,
> .map = etnaviv_iommuv1_map,
> .unmap = etnaviv_iommuv1_unmap,
> .dump_size = etnaviv_iommuv1_dump_size,
> .dump = etnaviv_iommuv1_dump,
> + .restore = etnaviv_iommuv1_restore,
> };
>
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
> {
> - struct etnaviv_iommuv1_domain *etnaviv_domain;
> - struct etnaviv_iommu_domain *domain;
> - int ret;
> + struct etnaviv_iommuv1_context *v1_context;
> + struct etnaviv_iommu_context *context;
> +
> + mutex_lock(&global->lock);
> +
> + /*
> + * MMUv1 does not support switching between different contexts without
> + * a stop the world operation, so we only support a single shared
> + * context with this version.
> + */
> + if (global->v1.shared_context) {
> + context = global->v1.shared_context;
> + etnaviv_iommu_context_get(context);
> + mutex_unlock(&global->lock);
> + return context;
> + }
>
> - etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
> - if (!etnaviv_domain)
> + v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
> + if (!v1_context)
> return NULL;
>
> - domain = &etnaviv_domain->base;
> + v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
> + &v1_context->pgtable_dma,
> + GFP_KERNEL);
> + if (!v1_context->pgtable_cpu)
> + goto out_free;
>
> - domain->dev = gpu->dev;
> - domain->base = GPU_MEM_START;
> - domain->size = PT_ENTRIES * SZ_4K;
> - domain->ops = &etnaviv_iommuv1_ops;
> + memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
>
> - ret = __etnaviv_iommu_init(etnaviv_domain);
> - if (ret)
> - goto out_free;
> + context = &v1_context->base;
> + context->global = global;
> + kref_init(&context->refcount);
> + mutex_init(&context->lock);
> + INIT_LIST_HEAD(&context->mappings);
> + drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
> + context->global->v1.shared_context = context;
> +
> + mutex_unlock(&global->lock);
>
> - return &etnaviv_domain->base;
> + return context;
>
> out_free:
> - kfree(etnaviv_domain);
> + mutex_unlock(&global->lock);
> + kfree(v1_context);
> return NULL;
> }
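The MMUv1 singleton above is the usual create-or-ref pattern, done
entirely under global->lock. Boiled down (illustrative only):

    mutex_lock(&global->lock);
    if (global->v1.shared_context) {
            context = global->v1.shared_context;
            etnaviv_iommu_context_get(context); /* just take a reference */
            mutex_unlock(&global->lock);
            return context;
    }
    /* first user: allocate, init and publish while still holding the lock */
    global->v1.shared_context = context;
    mutex_unlock(&global->lock);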
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
> deleted file mode 100644
> index b279404ce91a..000000000000
> --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
> +++ /dev/null
> @@ -1,20 +0,0 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> -/*
> - * Copyright (C) 2014-2018 Etnaviv Project
> - */
> -
> -#ifndef __ETNAVIV_IOMMU_H__
> -#define __ETNAVIV_IOMMU_H__
> -
> -struct etnaviv_gpu;
> -struct etnaviv_iommu_domain;
> -
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
> -void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
> -
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
> -void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
> -
> -#endif /* __ETNAVIV_IOMMU_H__ */
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
> index d7cc184da571..5ca2077c148d 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
> @@ -13,7 +13,6 @@
> #include "etnaviv_cmdbuf.h"
> #include "etnaviv_gpu.h"
> #include "etnaviv_mmu.h"
> -#include "etnaviv_iommu.h"
> #include "state.xml.h"
> #include "state_hi.xml.h"
>
> @@ -28,11 +27,9 @@
>
> #define MMUv2_MAX_STLB_ENTRIES 1024
>
> -struct etnaviv_iommuv2_domain {
> - struct etnaviv_iommu_domain base;
> - /* P(age) T(able) A(rray) */
> - u64 *pta_cpu;
> - dma_addr_t pta_dma;
> +struct etnaviv_iommuv2_context {
> + struct etnaviv_iommu_context base;
> + unsigned short id;
> /* M(aster) TLB aka first level pagetable */
> u32 *mtlb_cpu;
> dma_addr_t mtlb_dma;
> @@ -41,41 +38,62 @@ struct etnaviv_iommuv2_domain {
> dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
> };
>
> -static struct etnaviv_iommuv2_domain *
> -to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
> +static struct etnaviv_iommuv2_context *
> +to_v2_context(struct etnaviv_iommu_context *context)
> {
> - return container_of(domain, struct etnaviv_iommuv2_domain, base);
> + return container_of(context, struct etnaviv_iommuv2_context, base);
> }
>
> +static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
> +{
> + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> + int i;
> +
> + drm_mm_takedown(&context->mm);
> +
> + for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
> + if (v2_context->stlb_cpu[i])
> + dma_free_wc(context->global->dev, SZ_4K,
> + v2_context->stlb_cpu[i],
> + v2_context->stlb_dma[i]);
> + }
> +
> + dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
> + v2_context->mtlb_dma);
> +
> + clear_bit(v2_context->id, context->global->v2.pta_alloc);
> +
> + vfree(v2_context);
> +}
> static int
> -etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
> +etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
> int stlb)
> {
> - if (etnaviv_domain->stlb_cpu[stlb])
> + if (v2_context->stlb_cpu[stlb])
> return 0;
>
> - etnaviv_domain->stlb_cpu[stlb] =
> - dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
> - &etnaviv_domain->stlb_dma[stlb],
> + v2_context->stlb_cpu[stlb] =
> + dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
> + &v2_context->stlb_dma[stlb],
> GFP_KERNEL);
>
> - if (!etnaviv_domain->stlb_cpu[stlb])
> + if (!v2_context->stlb_cpu[stlb])
> return -ENOMEM;
>
> - memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
> + memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
> SZ_4K / sizeof(u32));
>
> - etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
> - MMUv2_PTE_PRESENT;
> + v2_context->mtlb_cpu[stlb] =
> + v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
> +
> return 0;
> }
>
> -static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
> +static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
> unsigned long iova, phys_addr_t paddr,
> size_t size, int prot)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> int mtlb_entry, stlb_entry, ret;
> u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
>
> @@ -91,20 +109,19 @@ static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
> mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
> stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
>
> - ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
> + ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
> if (ret)
> return ret;
>
> - etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
> + v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
>
> return 0;
> }
>
> -static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
> +static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
> unsigned long iova, size_t size)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
> int mtlb_entry, stlb_entry;
>
> if (size != SZ_4K)
> @@ -118,118 +135,35 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
> return SZ_4K;
> }
>
> -static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
> -{
> - int ret;
> -
> - /* allocate scratch page */
> - etnaviv_domain->base.bad_page_cpu =
> - dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
> - &etnaviv_domain->base.bad_page_dma,
> - GFP_KERNEL);
> - if (!etnaviv_domain->base.bad_page_cpu) {
> - ret = -ENOMEM;
> - goto fail_mem;
> - }
> -
> - memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
> - SZ_4K / sizeof(u32));
> -
> - etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
> - SZ_4K, &etnaviv_domain->pta_dma,
> - GFP_KERNEL);
> - if (!etnaviv_domain->pta_cpu) {
> - ret = -ENOMEM;
> - goto fail_mem;
> - }
> -
> - etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
> - SZ_4K, &etnaviv_domain->mtlb_dma,
> - GFP_KERNEL);
> - if (!etnaviv_domain->mtlb_cpu) {
> - ret = -ENOMEM;
> - goto fail_mem;
> - }
> -
> - memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
> - MMUv2_MAX_STLB_ENTRIES);
> -
> - return 0;
> -
> -fail_mem:
> - if (etnaviv_domain->base.bad_page_cpu)
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->base.bad_page_cpu,
> - etnaviv_domain->base.bad_page_dma);
> -
> - if (etnaviv_domain->pta_cpu)
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
> -
> - if (etnaviv_domain->mtlb_cpu)
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
> -
> - return ret;
> -}
> -
> -static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
> -{
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> - int i;
> -
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->base.bad_page_cpu,
> - etnaviv_domain->base.bad_page_dma);
> -
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
> -
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
> -
> - for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
> - if (etnaviv_domain->stlb_cpu[i])
> - dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
> - etnaviv_domain->stlb_cpu[i],
> - etnaviv_domain->stlb_dma[i]);
> - }
> -
> - vfree(etnaviv_domain);
> -}
> -
> -static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
> +static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> size_t dump_size = SZ_4K;
> int i;
>
> for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
> - if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> + if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> dump_size += SZ_4K;
>
> return dump_size;
> }
>
> -static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
> +static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(domain);
> + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> int i;
>
> - memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
> + memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
> buf += SZ_4K;
> for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
> - if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> - memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
> + if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
> + memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
> }
>
> -static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
> + struct etnaviv_iommu_context *context)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(gpu->mmu->domain);
> + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> u16 prefetch;
>
> /* If the MMU is already enabled the state is still there. */
> @@ -237,8 +171,8 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
> return;
>
> prefetch = etnaviv_buffer_config_mmuv2(gpu,
> - (u32)etnaviv_domain->mtlb_dma,
> - (u32)etnaviv_domain->base.bad_page_dma);
> + (u32)v2_context->mtlb_dma,
> + (u32)context->global->bad_page_dma);
> etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
> prefetch);
> etnaviv_gpu_wait_idle(gpu, 100);
> @@ -246,10 +180,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
> gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
> }
>
> -static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
> + struct etnaviv_iommu_context *context)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain =
> - to_etnaviv_domain(gpu->mmu->domain);
> + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
> u16 prefetch;
>
> /* If the MMU is already enabled the state is still there. */
> @@ -257,26 +191,26 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
> return;
>
> gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
> - lower_32_bits(etnaviv_domain->pta_dma));
> + lower_32_bits(context->global->v2.pta_dma));
> gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
> - upper_32_bits(etnaviv_domain->pta_dma));
> + upper_32_bits(context->global->v2.pta_dma));
> gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
>
> gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
> - lower_32_bits(etnaviv_domain->base.bad_page_dma));
> + lower_32_bits(context->global->bad_page_dma));
> gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
> - lower_32_bits(etnaviv_domain->base.bad_page_dma));
> + lower_32_bits(context->global->bad_page_dma));
> gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
> VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
> - upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
> + upper_32_bits(context->global->bad_page_dma)) |
> VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
> - upper_32_bits(etnaviv_domain->base.bad_page_dma)));
> + upper_32_bits(context->global->bad_page_dma)));
>
> - etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
> - VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
> + context->global->v2.pta_cpu[0] = v2_context->mtlb_dma |
> + VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
>
> /* trigger a PTA load through the FE */
> - prefetch = etnaviv_buffer_config_pta(gpu);
> + prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
> etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
> prefetch);
> etnaviv_gpu_wait_idle(gpu, 100);
> @@ -284,14 +218,15 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
> gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
> }
>
> -void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
> +static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
> + struct etnaviv_iommu_context *context)
> {
> switch (gpu->sec_mode) {
> case ETNA_SEC_NONE:
> - etnaviv_iommuv2_restore_nonsec(gpu);
> + etnaviv_iommuv2_restore_nonsec(gpu, context);
> break;
> case ETNA_SEC_KERNEL:
> - etnaviv_iommuv2_restore_sec(gpu);
> + etnaviv_iommuv2_restore_sec(gpu, context);
> break;
> default:
> WARN(1, "unhandled GPU security mode\n");
> @@ -299,39 +234,56 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
> }
> }
>
> -static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
> - .free = etnaviv_iommuv2_domain_free,
> +const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
> + .free = etnaviv_iommuv2_free,
> .map = etnaviv_iommuv2_map,
> .unmap = etnaviv_iommuv2_unmap,
> .dump_size = etnaviv_iommuv2_dump_size,
> .dump = etnaviv_iommuv2_dump,
> + .restore = etnaviv_iommuv2_restore,
> };
>
> -struct etnaviv_iommu_domain *
> -etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
> {
> - struct etnaviv_iommuv2_domain *etnaviv_domain;
> - struct etnaviv_iommu_domain *domain;
> - int ret;
> + struct etnaviv_iommuv2_context *v2_context;
> + struct etnaviv_iommu_context *context;
>
> - etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
> - if (!etnaviv_domain)
> + v2_context = vzalloc(sizeof(*v2_context));
> + if (!v2_context)
> return NULL;
>
> - domain = &etnaviv_domain->base;
> + mutex_lock(&global->lock);
> + v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
> + ETNAVIV_PTA_ENTRIES);
> + if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
> + set_bit(v2_context->id, global->v2.pta_alloc);
> + } else {
> + mutex_unlock(&global->lock);
> + goto out_free;
> + }
> + mutex_unlock(&global->lock);
>
> - domain->dev = gpu->dev;
> - domain->base = SZ_4K;
> - domain->size = (u64)SZ_1G * 4 - SZ_4K;
> - domain->ops = &etnaviv_iommuv2_ops;
> + v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
> + &v2_context->mtlb_dma, GFP_KERNEL);
> + if (!v2_context->mtlb_cpu)
> + goto out_free_id;
>
> - ret = etnaviv_iommuv2_init(etnaviv_domain);
> - if (ret)
> - goto out_free;
> + memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
> + MMUv2_MAX_STLB_ENTRIES);
> +
> + context = &v2_context->base;
> + context->global = global;
> + kref_init(&context->refcount);
> + mutex_init(&context->lock);
> + INIT_LIST_HEAD(&context->mappings);
> + drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
>
> - return &etnaviv_domain->base;
> + return context;
>
> +out_free_id:
> + clear_bit(v2_context->id, global->v2.pta_alloc);
> out_free:
> - vfree(etnaviv_domain);
> + vfree(v2_context);
> return NULL;
> }
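The PTA slot handling is a plain bitmap allocator; find_first_zero_bit()
returns a value >= ETNAVIV_PTA_ENTRIES (512 slots, SZ_4K / sizeof(u64))
when the map is full, which the error path above turns into a failed
context allocation. Sketch with a plain int for the slot (illustrative;
the patch stores it in an unsigned short):

    int id;

    mutex_lock(&global->lock);
    id = find_first_zero_bit(global->v2.pta_alloc, ETNAVIV_PTA_ENTRIES);
    if (id < ETNAVIV_PTA_ENTRIES)
            set_bit(id, global->v2.pta_alloc);  /* claim the slot */
    else
            id = -1;                            /* all 512 slots in use */
    mutex_unlock(&global->lock);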
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> index bbd1624a3df8..2f64eef773ed 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> @@ -3,6 +3,7 @@
> * Copyright (C) 2015-2018 Etnaviv Project
> */
>
> +#include <linux/dma-mapping.h>
> #include <linux/scatterlist.h>
>
> #include "common.xml.h"
> @@ -10,10 +11,9 @@
> #include "etnaviv_drv.h"
> #include "etnaviv_gem.h"
> #include "etnaviv_gpu.h"
> -#include "etnaviv_iommu.h"
> #include "etnaviv_mmu.h"
>
> -static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
> +static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
> unsigned long iova, size_t size)
> {
> size_t unmapped_page, unmapped = 0;
> @@ -26,7 +26,8 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
> }
>
> while (unmapped < size) {
> - unmapped_page = domain->ops->unmap(domain, iova, pgsize);
> + unmapped_page = context->global->ops->unmap(context, iova,
> + pgsize);
> if (!unmapped_page)
> break;
>
> @@ -35,7 +36,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
> }
> }
>
> -static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
> +static int etnaviv_context_map(struct etnaviv_iommu_context *context,
> unsigned long iova, phys_addr_t paddr,
> size_t size, int prot)
> {
> @@ -51,7 +52,8 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
> }
>
> while (size) {
> - ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
> + ret = context->global->ops->map(context, iova, paddr, pgsize,
> + prot);
> if (ret)
> break;
>
> @@ -62,21 +64,19 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
>
> /* unroll mapping in case something went wrong */
> if (ret)
> - etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
> + etnaviv_context_unmap(context, orig_iova, orig_size - size);
>
> return ret;
> }
>
> -static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
> +static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
> struct sg_table *sgt, unsigned len, int prot)
> -{
> - struct etnaviv_iommu_domain *domain = iommu->domain;
> - struct scatterlist *sg;
> +{
> +	struct scatterlist *sg;
> unsigned int da = iova;
> unsigned int i, j;
> int ret;
>
> - if (!domain || !sgt)
> + if (!context || !sgt)
> return -EINVAL;
>
> for_each_sg(sgt->sgl, sg, sgt->nents, i) {
> @@ -85,7 +85,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
>
> VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
>
> - ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
> + ret = etnaviv_context_map(context, da, pa, bytes, prot);
> if (ret)
> goto fail;
>
> @@ -100,16 +100,15 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
> for_each_sg(sgt->sgl, sg, i, j) {
> size_t bytes = sg_dma_len(sg) + sg->offset;
>
> - etnaviv_domain_unmap(domain, da, bytes);
> + etnaviv_context_unmap(context, da, bytes);
> da += bytes;
> }
> return ret;
> }
>
> -static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
> +static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
> struct sg_table *sgt, unsigned len)
> {
> - struct etnaviv_iommu_domain *domain = iommu->domain;
> struct scatterlist *sg;
> unsigned int da = iova;
> int i;
> @@ -117,7 +116,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
> for_each_sg(sgt->sgl, sg, sgt->nents, i) {
> size_t bytes = sg_dma_len(sg) + sg->offset;
>
> - etnaviv_domain_unmap(domain, da, bytes);
> + etnaviv_context_unmap(context, da, bytes);
>
> VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
>
> @@ -127,24 +126,24 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
> }
> }
>
> -static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
> +static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping)
> {
> struct etnaviv_gem_object *etnaviv_obj = mapping->object;
>
> - etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
> + etnaviv_iommu_unmap(context, mapping->vram_node.start,
> etnaviv_obj->sgt, etnaviv_obj->base.size);
> drm_mm_remove_node(&mapping->vram_node);
> }
>
> -static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> +static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
> struct drm_mm_node *node, size_t size)
> {
> struct etnaviv_vram_mapping *free = NULL;
> enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
> int ret;
>
> - lockdep_assert_held(&mmu->lock);
> + lockdep_assert_held(&context->lock);
>
> while (1) {
> struct etnaviv_vram_mapping *m, *n;
> @@ -152,17 +151,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> struct list_head list;
> bool found;
>
> - ret = drm_mm_insert_node_in_range(&mmu->mm, node,
> + ret = drm_mm_insert_node_in_range(&context->mm, node,
> size, 0, 0, 0, U64_MAX, mode);
> if (ret != -ENOSPC)
> break;
>
> /* Try to retire some entries */
> - drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
> + drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
>
> found = 0;
> INIT_LIST_HEAD(&list);
> - list_for_each_entry(free, &mmu->mappings, mmu_node) {
> + list_for_each_entry(free, &context->mappings, mmu_node) {
> /* If this vram node has not been used, skip this. */
> if (!free->vram_node.mm)
> continue;
> @@ -204,8 +203,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> * this mapping.
> */
> list_for_each_entry_safe(m, n, &list, scan_node) {
> - etnaviv_iommu_remove_mapping(mmu, m);
> - m->mmu = NULL;
> + etnaviv_iommu_remove_mapping(context, m);
> + m->context = NULL;
> list_del_init(&m->mmu_node);
> list_del_init(&m->scan_node);
> }
> @@ -221,7 +220,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> return ret;
> }
>
> -int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
> struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
> struct etnaviv_vram_mapping *mapping)
> {
> @@ -231,17 +230,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
>
> lockdep_assert_held(&etnaviv_obj->lock);
>
> - mutex_lock(&mmu->lock);
> + mutex_lock(&context->lock);
>
> /* v1 MMU can optimize single entry (contiguous) scatterlists */
> - if (mmu->version == ETNAVIV_IOMMU_V1 &&
> + if (context->global->version == ETNAVIV_IOMMU_V1 &&
> sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
> u32 iova;
>
> iova = sg_dma_address(sgt->sgl) - memory_base;
> if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
> mapping->iova = iova;
> - list_add_tail(&mapping->mmu_node, &mmu->mappings);
> + list_add_tail(&mapping->mmu_node, &context->mappings);
> ret = 0;
> goto unlock;
> }
> @@ -249,12 +248,12 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
>
> node = &mapping->vram_node;
>
> - ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
> + ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->base.size);
> if (ret < 0)
> goto unlock;
>
> mapping->iova = node->start;
> - ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
> + ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
> ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
>
> if (ret < 0) {
> @@ -262,84 +261,63 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> goto unlock;
> }
>
> - list_add_tail(&mapping->mmu_node, &mmu->mappings);
> - mmu->flush_seq++;
> + list_add_tail(&mapping->mmu_node, &context->mappings);
> + context->flush_seq++;
> unlock:
> - mutex_unlock(&mmu->lock);
> + mutex_unlock(&context->lock);
>
> return ret;
> }
>
> -void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping)
> {
> WARN_ON(mapping->use);
>
> - mutex_lock(&mmu->lock);
> + mutex_lock(&context->lock);
>
> /* If the vram node is on the mm, unmap and remove the node */
> - if (mapping->vram_node.mm == &mmu->mm)
> - etnaviv_iommu_remove_mapping(mmu, mapping);
> + if (mapping->vram_node.mm == &context->mm)
> + etnaviv_iommu_remove_mapping(context, mapping);
>
> list_del(&mapping->mmu_node);
> - mmu->flush_seq++;
> - mutex_unlock(&mmu->lock);
> + context->flush_seq++;
> + mutex_unlock(&context->lock);
> }
>
> -void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
> +static void etnaviv_iommu_context_free(struct kref *kref)
> {
> - drm_mm_takedown(&mmu->mm);
> - mmu->domain->ops->free(mmu->domain);
> - kfree(mmu);
> -}
> + struct etnaviv_iommu_context *context =
> + container_of(kref, struct etnaviv_iommu_context, refcount);
>
> -struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
> + context->global->ops->free(context);
> +}
> +void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
> {
> - enum etnaviv_iommu_version version;
> - struct etnaviv_iommu *mmu;
> -
> - mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
> - if (!mmu)
> - return ERR_PTR(-ENOMEM);
> -
> - if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
> - mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
> - version = ETNAVIV_IOMMU_V1;
> - } else {
> - mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
> - version = ETNAVIV_IOMMU_V2;
> - }
> -
> - if (!mmu->domain) {
> - dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
> - kfree(mmu);
> - return ERR_PTR(-ENOMEM);
> - }
> -
> - mmu->gpu = gpu;
> - mmu->version = version;
> - mutex_init(&mmu->lock);
> - INIT_LIST_HEAD(&mmu->mappings);
> -
> - drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
> -
> - return mmu;
> + kref_put(&context->refcount, etnaviv_iommu_context_free);
> }
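Context lifetime is now plain kref, with the last put dispatching to the
version-specific free through the release callback. Usage shape
(illustrative):

    etnaviv_iommu_context_get(ctx);  /* kref_get(&ctx->refcount) */
    /* ... */
    etnaviv_iommu_context_put(ctx);  /* kref_put() -> global->ops->free() */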
>
> -void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
> +struct etnaviv_iommu_context *
> +etnaviv_iommu_context_init(struct etnaviv_iommu_global *global)
> {
> - if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
> - etnaviv_iommuv1_restore(gpu);
> + if (global->version == ETNAVIV_IOMMU_V1)
> + return etnaviv_iommuv1_context_alloc(global);
> else
> - etnaviv_iommuv2_restore(gpu);
> + return etnaviv_iommuv2_context_alloc(global);
> +}
> +
> +void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
> + struct etnaviv_iommu_context *context)
> +{
> + context->global->ops->restore(gpu, context);
> }
>
> -int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping,
> u32 memory_base, dma_addr_t paddr,
> size_t size)
> {
> - mutex_lock(&mmu->lock);
> + mutex_lock(&context->lock);
>
> /*
> * For MMUv1 we don't add the suballoc region to the pagetables, as
> @@ -347,40 +325,40 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
> * window. Instead we manufacture a mapping to make it look uniform
> * to the upper layers.
> */
> - if (mmu->version == ETNAVIV_IOMMU_V1) {
> + if (context->global->version == ETNAVIV_IOMMU_V1) {
> mapping->iova = paddr - memory_base;
> } else {
> struct drm_mm_node *node = &mapping->vram_node;
> int ret;
>
> - ret = etnaviv_iommu_find_iova(mmu, node, size);
> + ret = etnaviv_iommu_find_iova(context, node, size);
> if (ret < 0) {
> - mutex_unlock(&mmu->lock);
> + mutex_unlock(&context->lock);
> return ret;
> }
>
> mapping->iova = node->start;
> - ret = etnaviv_domain_map(mmu->domain, node->start, paddr, size,
> - ETNAVIV_PROT_READ);
> + ret = etnaviv_context_map(context, node->start, paddr, size,
> + ETNAVIV_PROT_READ);
>
> if (ret < 0) {
> drm_mm_remove_node(node);
> - mutex_unlock(&mmu->lock);
> + mutex_unlock(&context->lock);
> return ret;
> }
>
> - mmu->flush_seq++;
> + context->flush_seq++;
> }
>
> - list_add_tail(&mapping->mmu_node, &mmu->mappings);
> + list_add_tail(&mapping->mmu_node, &context->mappings);
> mapping->use = 1;
>
> - mutex_unlock(&mmu->lock);
> + mutex_unlock(&context->lock);
>
> return 0;
> }
>
> -void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping)
> {
> struct drm_mm_node *node = &mapping->vram_node;
> @@ -390,21 +368,104 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu,
>
> mapping->use = 0;
>
> - if (mmu->version == ETNAVIV_IOMMU_V1)
> + if (context->global->version == ETNAVIV_IOMMU_V1)
> return;
>
> - mutex_lock(&mmu->lock);
> - etnaviv_domain_unmap(mmu->domain, node->start, node->size);
> + mutex_lock(&context->lock);
> + etnaviv_context_unmap(context, node->start, node->size);
> drm_mm_remove_node(node);
> - mutex_unlock(&mmu->lock);
> + mutex_unlock(&context->lock);
> +}
> +
> +size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
> +{
> + return context->global->ops->dump_size(context);
> +}
> +
> +void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
> +{
> + context->global->ops->dump(context, buf);
> }
>
> -size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
> +int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
> {
> - return iommu->domain->ops->dump_size(iommu->domain);
> + enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
> + struct etnaviv_drm_private *priv = gpu->drm->dev_private;
> + struct etnaviv_iommu_global *global;
> + struct device *dev = gpu->drm->dev;
> +
> + if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
> + version = ETNAVIV_IOMMU_V2;
> +
> + if (priv->mmu_global) {
> + if (priv->mmu_global->version != version) {
> + dev_err(gpu->dev,
> + "MMU version doesn't match global version\n");
> + return -ENXIO;
> + }
> +
> + priv->mmu_global->use++;
> + return 0;
> + }
> +
> + global = kzalloc(sizeof(*global), GFP_KERNEL);
> + if (!global)
> + return -ENOMEM;
> +
> + global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
> + GFP_KERNEL);
> + if (!global->bad_page_cpu)
> + goto free_global;
> +
> + memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
> +
> + if (version == ETNAVIV_IOMMU_V2) {
> + global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
> + &global->v2.pta_dma, GFP_KERNEL);
> + if (!global->v2.pta_cpu)
> + goto free_bad_page;
> + }
> +
> + global->dev = dev;
> + global->version = version;
> + global->use = 1;
> + mutex_init(&global->lock);
> +
> + if (version == ETNAVIV_IOMMU_V1)
> + global->ops = &etnaviv_iommuv1_ops;
> + else
> + global->ops = &etnaviv_iommuv2_ops;
> +
> + priv->mmu_global = global;
> +
> + return 0;
> +
> +free_bad_page:
> + dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
> +free_global:
> + kfree(global);
> +
> + return -ENOMEM;
> }
>
> -void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
> +void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
> {
> - iommu->domain->ops->dump(iommu->domain, buf);
> + struct etnaviv_drm_private *priv = gpu->drm->dev_private;
> + struct etnaviv_iommu_global *global = priv->mmu_global;
> +
> + if (--global->use > 0)
> + return;
> +
> + if (global->v2.pta_cpu)
> + dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
> + global->v2.pta_cpu, global->v2.pta_dma);
> +
> + if (global->bad_page_cpu)
> + dma_free_wc(global->dev, SZ_4K,
> + global->bad_page_cpu, global->bad_page_dma);
> +
> + mutex_destroy(&global->lock);
> + kfree(global);
> +
> + priv->mmu_global = NULL;
> }
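And the global state is shared across all cores of the DRM device with a
simple use count: the first GPU allocates it, later cores just bump
->use, and the last fini frees it. Caller view (illustrative):

    ret = etnaviv_iommu_global_init(gpu); /* first core allocates, others
                                             only do global->use++ */
    /* ... */
    etnaviv_iommu_global_fini(gpu);       /* frees once --use reaches 0 */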
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
> index 34afe25df9ca..4438d66db6ab 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
> @@ -16,33 +16,58 @@ enum etnaviv_iommu_version {
>
> struct etnaviv_gpu;
> struct etnaviv_vram_mapping;
> -struct etnaviv_iommu_domain;
> +struct etnaviv_iommu_global;
> +struct etnaviv_iommu_context;
>
> -struct etnaviv_iommu_domain_ops {
> - void (*free)(struct etnaviv_iommu_domain *);
> - int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
> +struct etnaviv_iommu_ops {
> + struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
> + void (*free)(struct etnaviv_iommu_context *);
> + int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
> phys_addr_t paddr, size_t size, int prot);
> - size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
> + size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
> size_t size);
> - size_t (*dump_size)(struct etnaviv_iommu_domain *);
> - void (*dump)(struct etnaviv_iommu_domain *, void *);
> + size_t (*dump_size)(struct etnaviv_iommu_context *);
> + void (*dump)(struct etnaviv_iommu_context *, void *);
> + void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
> };
>
> -struct etnaviv_iommu_domain {
> +extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
> +extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
> +
> +#define ETNAVIV_PTA_SIZE SZ_4K
> +#define ETNAVIV_PTA_ENTRIES (ETNAVIV_PTA_SIZE / sizeof(u64))
> +
> +struct etnaviv_iommu_global {
> struct device *dev;
> + enum etnaviv_iommu_version version;
> + const struct etnaviv_iommu_ops *ops;
> + unsigned int use;
> + struct mutex lock;
> +
> void *bad_page_cpu;
> dma_addr_t bad_page_dma;
> - u64 base;
> - u64 size;
>
> - const struct etnaviv_iommu_domain_ops *ops;
> + /*
> + * This union holds members needed by either MMUv1 or MMUv2, which
> + * can not exist at the same time.
> + */
> + union {
> + struct {
> + struct etnaviv_iommu_context *shared_context;
> + } v1;
> + struct {
> + /* P(age) T(able) A(rray) */
> + u64 *pta_cpu;
> + dma_addr_t pta_dma;
> + struct spinlock pta_lock;
> + DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
> + } v2;
> + };
> };
>
> -struct etnaviv_iommu {
> - struct etnaviv_gpu *gpu;
> - struct etnaviv_iommu_domain *domain;
> -
> - enum etnaviv_iommu_version version;
> +struct etnaviv_iommu_context {
> + struct kref refcount;
> + struct etnaviv_iommu_global *global;
>
> /* memory manager for GPU address area */
> struct mutex lock;
> @@ -51,26 +76,40 @@ struct etnaviv_iommu {
> unsigned int flush_seq;
> };
>
> +int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
> +void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);
> +
> struct etnaviv_gem_object;
>
> -int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
> struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
> struct etnaviv_vram_mapping *mapping);
> -void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
> struct etnaviv_vram_mapping *mapping);
>
> -int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
> +int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
> struct etnaviv_vram_mapping *mapping,
> u32 memory_base, dma_addr_t paddr,
> size_t size);
> -void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu,
> +void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
> struct etnaviv_vram_mapping *mapping);
>
> -size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
> -void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
> -
> -struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
> -void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
> -void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
> +size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
> +void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
> +
> +struct etnaviv_iommu_context *
> +etnaviv_iommu_context_init(struct etnaviv_iommu_global *global);
> +static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
> +{
> + kref_get(&ctx->refcount);
> +}
> +void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
> +void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
> + struct etnaviv_iommu_context *ctx);
> +
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
> +struct etnaviv_iommu_context *
> +etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
>
> #endif /* __ETNAVIV_MMU_H__ */
> --
> 2.20.1
>
> _______________________________________________
> etnaviv mailing list
> etnaviv at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/etnaviv