[PATCH 6/8] etnaviv: mmu: stop using iommu map/unmap functions

Philipp Zabel p.zabel at pengutronix.de
Mon Sep 18 18:35:53 UTC 2017


On Fri, 2017-09-15 at 19:04 +0200, Lucas Stach wrote:
> This is a preparation to remove the etnaviv dependency on the IOMMU
> subsystem by importing the relevant parts of the iommu map/unmap
> functions into the driver.
> 
> Signed-off-by: Lucas Stach <l.stach at pengutronix.de>
> ---
>  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 66 +++++++++++++++++++++++++++++++----
>  1 file changed, 60 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> index f3ed07db9b2d..0be28467af61 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> @@ -22,6 +22,60 @@
>  #include "etnaviv_iommu.h"
>  #include "etnaviv_mmu.h"
>  
> +size_t etnaviv_domain_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
> +{
> +	size_t unmapped_page, unmapped = 0;
> +	size_t pgsize = SZ_4K;
> +
> +	if (!IS_ALIGNED(iova | size, pgsize)) {
> +		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
> +		       iova, size, pgsize);
> +		return -EINVAL;
> +	}
> +
> +	while (unmapped < size) {
> +		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
> +		if (!unmapped_page)
> +			break;
> +
> +		iova += unmapped_page;
> +		unmapped += unmapped_page;
> +	}
> +
> +	return unmapped;
> +}
> +
> +static int etnaviv_domain_map(struct iommu_domain *domain, unsigned long iova,
> +		     phys_addr_t paddr, size_t size, int prot)
> +{
> +	unsigned long orig_iova = iova;
> +	size_t pgsize = SZ_4K;
> +	size_t orig_size = size;
> +	int ret = 0;
> +
> +	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
> +		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
> +		       iova, &paddr, size, pgsize);
> +		return -EINVAL;
> +	}
> +
> +	while (size) {
> +		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
> +		if (ret)
> +			break;
> +
> +		iova += pgsize;
> +		paddr += pgsize;
> +		size -= pgsize;
> +	}
> +
> +	/* unroll mapping in case something went wrong */
> +	if (ret)
> +		etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
> +
> +	return ret;
> +}
> +
>  int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
>  		struct sg_table *sgt, unsigned len, int prot)
>  {
> @@ -40,7 +94,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
>  
>  		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
>  
> -		ret = iommu_map(domain, da, pa, bytes, prot);
> +		ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
>  		if (ret)
>  			goto fail;
>  
> @@ -55,7 +109,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
>  	for_each_sg(sgt->sgl, sg, i, j) {
>  		size_t bytes = sg_dma_len(sg) + sg->offset;
>  
> -		iommu_unmap(domain, da, bytes);
> +		etnaviv_domain_unmap(domain, da, bytes);
>  		da += bytes;
>  	}
>  	return ret;
> @@ -73,7 +127,7 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,

This could be changed to void now, since the return value isn't checked
anywhere.
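
A minimal sketch of what I have in mind for etnaviv_mmu.c (untested, based
on the loop in the current function, so take the details as assumptions
rather than a tested change; the prototype in etnaviv_mmu.h would need the
same update):

void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
			 struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		/* no caller looks at the result, so just unmap and move on */
		etnaviv_domain_unmap(domain, da, bytes);
		da += bytes;
	}
}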

>  		size_t bytes = sg_dma_len(sg) + sg->offset;
>  		size_t unmapped;
>  
> -		unmapped = iommu_unmap(domain, da, bytes);
> +		unmapped = etnaviv_domain_unmap(domain, da, bytes);
>  		if (unmapped < bytes)
>  			return unmapped;
>  
> @@ -329,8 +383,8 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
>  			mutex_unlock(&mmu->lock);
>  			return ret;
>  		}
> -		ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
> -				IOMMU_READ);
> +		ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
> +					 size, IOMMU_READ);
>  		if (ret < 0) {
>  			drm_mm_remove_node(vram_node);
>  			mutex_unlock(&mmu->lock);
> @@ -353,7 +407,7 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
>  
>  	if (mmu->version == ETNAVIV_IOMMU_V2) {
>  		mutex_lock(&mmu->lock);
> -		iommu_unmap(mmu->domain,iova, size);
> +		etnaviv_domain_unmap(mmu->domain, iova, size);
>  		drm_mm_remove_node(vram_node);
>  		mutex_unlock(&mmu->lock);
>  	}

regards
Philipp

