[PATCH v2 1/2] drm/etnaviv: switch MMU page tables to writecombine memory

Lucas Stach <l.stach@pengutronix.de>
Tue May 8 13:23:15 UTC 2018


We are likely to write multiple page table entries at once and already
ensure proper write buffer flushing before GPU submit, so moving the
tables from coherent (uncached) to writecombine memory improves CPU time
usage in the submit path without any downsides.
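
For illustration, a minimal sketch of the allocation pattern this patch
converts to. dma_alloc_wc()/dma_free_wc() are the DMA API writecombine
helpers the diff below switches to; the device pointer, the function
names and the exact barrier placement here are assumptions made for the
example, not code lifted from a specific etnaviv path:

    #include <asm/barrier.h>

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/sizes.h>
    #include <linux/types.h>

    /*
     * Illustrative sketch: allocate one 4K page table in writecombine
     * memory and fill it. CPU stores to WC memory may sit in the write
     * buffer, so they must be flushed before the GPU walks the tables;
     * per the commit message, etnaviv already does this before submit.
     */
    static u32 *alloc_pgtable_wc(struct device *dev, dma_addr_t *dma)
    {
    	u32 *table = dma_alloc_wc(dev, SZ_4K, dma, GFP_KERNEL);
    	unsigned int i;

    	if (!table)
    		return NULL;

    	/* consecutive entry stores can combine into bursts on WC memory */
    	for (i = 0; i < SZ_4K / sizeof(*table); i++)
    		table[i] = 0;

    	wmb(); /* drain the CPU write buffer before the GPU reads */

    	return table;
    }

    /* teardown mirrors the allocation */
    static void free_pgtable_wc(struct device *dev, u32 *table, dma_addr_t dma)
    {
    	dma_free_wc(dev, SZ_4K, table, dma);
    }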

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
v2: use shorter _wc function names.
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c    | 34 +++++-----
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 74 ++++++++++------------
 2 files changed, 49 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 4b9b11ca6f03..4ada19054443 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -47,11 +47,10 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
 	u32 *p;
 	int i;
 
-	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
-						etnaviv_domain->base.dev,
-						SZ_4K,
-						&etnaviv_domain->base.bad_page_dma,
-						GFP_KERNEL);
+	etnaviv_domain->base.bad_page_cpu =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->base.bad_page_dma,
+				     GFP_KERNEL);
 	if (!etnaviv_domain->base.bad_page_cpu)
 		return -ENOMEM;
 
@@ -59,14 +58,14 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pgtable_cpu =
-			dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
-					   &etnaviv_domain->pgtable_dma,
-					   GFP_KERNEL);
+	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+						   PT_SIZE,
+						   &etnaviv_domain->pgtable_dma,
+						   GFP_KERNEL);
 	if (!etnaviv_domain->pgtable_cpu) {
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->base.bad_page_cpu,
-				  etnaviv_domain->base.bad_page_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->base.bad_page_cpu,
+			    etnaviv_domain->base.bad_page_dma);
 		return -ENOMEM;
 	}
 
@@ -81,13 +80,12 @@ static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
 	struct etnaviv_iommuv1_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 
-	dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
-			  etnaviv_domain->pgtable_cpu,
-			  etnaviv_domain->pgtable_dma);
+	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
+		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->base.bad_page_cpu,
-			  etnaviv_domain->base.bad_page_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->base.bad_page_cpu,
+		    etnaviv_domain->base.bad_page_dma);
 
 	kfree(etnaviv_domain);
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 9752dbd5d28b..47785d61cd95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -104,11 +104,10 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	int ret, i, j;
 
 	/* allocate scratch page */
-	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
-						etnaviv_domain->base.dev,
-						SZ_4K,
-						&etnaviv_domain->base.bad_page_dma,
-						GFP_KERNEL);
+	etnaviv_domain->base.bad_page_cpu =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->base.bad_page_dma,
+				     GFP_KERNEL);
 	if (!etnaviv_domain->base.bad_page_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
@@ -117,19 +116,17 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
-						     SZ_4K,
-						     &etnaviv_domain->pta_dma,
-						     GFP_KERNEL);
+	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+					       SZ_4K, &etnaviv_domain->pta_dma,
+					       GFP_KERNEL);
 	if (!etnaviv_domain->pta_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
 
-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
-						  SZ_4K,
-						  &etnaviv_domain->mtlb_dma,
-						  GFP_KERNEL);
+	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+						SZ_4K, &etnaviv_domain->mtlb_dma,
+						GFP_KERNEL);
 	if (!etnaviv_domain->mtlb_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
@@ -138,10 +135,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	/* pre-populate STLB pages (may want to switch to on-demand later) */
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->base.dev,
-						   SZ_4K,
-						   &etnaviv_domain->stlb_dma[i],
-						   GFP_KERNEL);
+				dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+					     &etnaviv_domain->stlb_dma[i],
+					     GFP_KERNEL);
 		if (!etnaviv_domain->stlb_cpu[i]) {
 			ret = -ENOMEM;
 			goto fail_mem;
@@ -158,25 +154,23 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 
 fail_mem:
 	if (etnaviv_domain->base.bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->base.bad_page_cpu,
-				  etnaviv_domain->base.bad_page_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->base.bad_page_cpu,
+			    etnaviv_domain->base.bad_page_dma);
 
 	if (etnaviv_domain->pta_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->pta_cpu,
-				  etnaviv_domain->pta_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
 
 	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->mtlb_cpu,
-				  etnaviv_domain->mtlb_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
+			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+				    etnaviv_domain->stlb_cpu[i],
+				    etnaviv_domain->stlb_dma[i]);
 	}
 
 	return ret;
@@ -188,23 +182,21 @@ static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
 			to_etnaviv_domain(domain);
 	int i;
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->base.bad_page_cpu,
-			  etnaviv_domain->base.bad_page_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->base.bad_page_cpu,
+		    etnaviv_domain->base.bad_page_dma);
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->pta_cpu,
-			  etnaviv_domain->pta_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->mtlb_cpu,
-			  etnaviv_domain->mtlb_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
+			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+				    etnaviv_domain->stlb_cpu[i],
+				    etnaviv_domain->stlb_dma[i]);
 	}
 
 	vfree(etnaviv_domain);
-- 
2.17.0


