[PATCH RFC 049/111] staging: etnaviv: move MMU setup and teardown code to etnaviv_mmu.c

Lucas Stach l.stach at pengutronix.de
Thu Apr 2 08:29:51 PDT 2015


From: Russell King <rmk+kernel at arm.linux.org.uk>

Move the code which sets up and tears down the MMU mappings (i.e.
allocates a node in the drm_mm and then calls the IOMMU to set up the
actual mapping, and the reverse) into etnaviv_mmu.c.
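The callers then reduce to the new helpers (a minimal usage sketch
mirroring the etnaviv_gem.c hunks below; locking and error handling
elided):

	/* map: allocate a drm_mm node and install the IOMMU mapping */
	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj);

	/* unmap: tear down the IOMMU mapping and free the drm_mm node */
	etnaviv_iommu_unmap_gem(mmu, etnaviv_obj);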

Signed-off-by: Russell King <rmk+kernel at arm.linux.org.uk>
---
 drivers/staging/etnaviv/etnaviv_gem.c | 33 ++++-----------------------
 drivers/staging/etnaviv/etnaviv_mmu.c | 43 +++++++++++++++++++++++++++++++++++
 drivers/staging/etnaviv/etnaviv_mmu.h |  6 +++++
 3 files changed, 53 insertions(+), 29 deletions(-)

diff --git a/drivers/staging/etnaviv/etnaviv_gem.c b/drivers/staging/etnaviv/etnaviv_gem.c
index 647815e4f1ba..ab7c6db4ec10 100644
--- a/drivers/staging/etnaviv/etnaviv_gem.c
+++ b/drivers/staging/etnaviv/etnaviv_gem.c
@@ -278,32 +278,12 @@ int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	int ret = 0;
 
-	if (!etnaviv_obj->iova  && !(etnaviv_obj->flags & ETNA_BO_CMDSTREAM)) {
-		struct etnaviv_drm_private *priv = obj->dev->dev_private;
-		struct etnaviv_iommu *mmu = priv->mmu;
+	if (!etnaviv_obj->iova && !(etnaviv_obj->flags & ETNA_BO_CMDSTREAM)) {
 		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
-		uint32_t offset;
-		struct drm_mm_node *node = NULL;
-
 		if (IS_ERR(pages))
 			return PTR_ERR(pages);
 
-		node = kzalloc(sizeof(*node), GFP_KERNEL);
-		if (!node)
-			return -ENOMEM;
-
-		ret = drm_mm_insert_node(&mmu->mm, node, obj->size, 0,
-				DRM_MM_SEARCH_DEFAULT);
-
-		if (!ret) {
-			offset = node->start;
-			etnaviv_obj->iova = offset;
-			etnaviv_obj->gpu_vram_node = node;
-
-			ret = etnaviv_iommu_map(mmu, offset, etnaviv_obj->sgt,
-					obj->size, IOMMU_READ | IOMMU_WRITE);
-		} else
-			kfree(node);
+		ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj);
 	}
 
 	if (!ret)
@@ -531,13 +511,8 @@ static void etnaviv_free_obj(struct drm_gem_object *obj)
 	struct etnaviv_drm_private *priv = obj->dev->dev_private;
 	struct etnaviv_iommu *mmu = priv->mmu;
 
-	if (mmu && etnaviv_obj->iova) {
-		uint32_t offset = etnaviv_obj->gpu_vram_node->start;
-
-		etnaviv_iommu_unmap(mmu, offset, etnaviv_obj->sgt, obj->size);
-		drm_mm_remove_node(etnaviv_obj->gpu_vram_node);
-		kfree(etnaviv_obj->gpu_vram_node);
-	}
+	if (mmu)
+		etnaviv_iommu_unmap_gem(mmu, etnaviv_obj);
 }
 
 static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
diff --git a/drivers/staging/etnaviv/etnaviv_mmu.c b/drivers/staging/etnaviv/etnaviv_mmu.c
index 51d91e3d30ed..2effe75cb154 100644
--- a/drivers/staging/etnaviv/etnaviv_mmu.c
+++ b/drivers/staging/etnaviv/etnaviv_mmu.c
@@ -16,6 +16,7 @@
  */
 
 #include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 
 static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
@@ -90,6 +91,48 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova,
 	return 0;
 }
 
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj)
+{
+	struct sg_table *sgt = etnaviv_obj->sgt;
+	uint32_t offset;
+	struct drm_mm_node *node = NULL;
+	int ret;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	ret = drm_mm_insert_node(&mmu->mm, node, etnaviv_obj->base.size, 0,
+				 DRM_MM_SEARCH_DEFAULT);
+
+	if (!ret) {
+		offset = node->start;
+		etnaviv_obj->iova = offset;
+		etnaviv_obj->gpu_vram_node = node;
+
+		ret = etnaviv_iommu_map(mmu, offset, sgt,
+					etnaviv_obj->base.size,
+					IOMMU_READ | IOMMU_WRITE);
+	} else
+		kfree(node);
+
+	return ret;
+}
+
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj)
+{
+	if (etnaviv_obj->iova) {
+		uint32_t offset = etnaviv_obj->gpu_vram_node->start;
+
+		etnaviv_iommu_unmap(mmu, offset, etnaviv_obj->sgt,
+				    etnaviv_obj->base.size);
+		drm_mm_remove_node(etnaviv_obj->gpu_vram_node);
+		kfree(etnaviv_obj->gpu_vram_node);
+	}
+}
+
 void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
 {
 	drm_mm_takedown(&mmu->mm);
diff --git a/drivers/staging/etnaviv/etnaviv_mmu.h b/drivers/staging/etnaviv/etnaviv_mmu.h
index 1adcc3ab4ebc..262c4e26e901 100644
--- a/drivers/staging/etnaviv/etnaviv_mmu.h
+++ b/drivers/staging/etnaviv/etnaviv_mmu.h
@@ -29,12 +29,18 @@ struct etnaviv_iommu {
 	bool need_flush;
 };
 
+struct etnaviv_gem_object;
+
 int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
 	int cnt);
 int etnaviv_iommu_map(struct etnaviv_iommu *iommu, uint32_t iova,
 	struct sg_table *sgt, unsigned len, int prot);
 int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, uint32_t iova,
 	struct sg_table *sgt, unsigned len);
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj);
 void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
 
 struct etnaviv_iommu *etnaviv_iommu_new(struct drm_device *dev,
-- 
2.1.4