[PATCH 2/2] drm/i915: support mmap_offset and legacy mmap for TTM smem

Adrian Larumbe adrian.larumbe at collabora.com
Fri Mar 18 03:33:57 UTC 2022


---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 47 +++++++++++++++----
 .../gpu/drm/i915/gem/i915_gem_object_types.h  | 20 ++++----
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c       | 26 +++++++++-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.h       |  3 ++
 4 files changed, 77 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3ea243d414d..020d43d99c6b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -64,7 +64,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_i915_gem_object *obj;
+	struct file *filp;
 	unsigned long addr;
+	int ret;
 
 	/*
 	 * mmap ioctl is disallowed for all discrete platforms,
@@ -83,10 +85,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
-	/* prime objects have no backing filp to GEM mmap
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret) {
+		addr = ret;
+		goto err;
+	}
+
+	filp = gem_ttm_get_filep(obj);
+
+	if (!filp) {
 		addr = -ENXIO;
 		goto err;
 	}
@@ -96,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	addr = vm_mmap(obj->base.filp, 0, args->size,
+	addr = vm_mmap(filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
 	if (IS_ERR_VALUE(addr))
@@ -111,7 +118,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			goto err;
 		}
 		vma = find_vma(mm, addr);
-		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+		if (vma && __vma_matches(vma, filp, addr, args->size))
 			vma->vm_page_prot =
 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 		else
@@ -700,10 +707,11 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
 		return -ENODEV;
 
 	if (obj->ops->mmap_offset)  {
-		if (mmap_type != I915_MMAP_TYPE_FIXED)
+		if (i915_gem_object_is_lmem(obj) &&
+		    mmap_type != I915_MMAP_TYPE_FIXED)
 			return -ENODEV;
 
-		*offset = obj->ops->mmap_offset(obj);
+		*offset = obj->ops->mmap_offset(obj, mmap_type);
 		return 0;
 	}
 
@@ -922,6 +930,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_vma_offset_node *node;
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_object *obj = NULL;
 	struct i915_mmap_offset *mmo = NULL;
 	struct file *anon;
@@ -987,7 +996,27 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	fput(anon);
 
 	if (obj->ops->mmap_ops) {
-		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+		switch (obj->mm.mmap_type) {
+		case I915_MMAP_TYPE_WC:
+			vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+			break;
+		case I915_MMAP_TYPE_FIXED:
+			fallthrough;
+		case I915_MMAP_TYPE_WB:
+			vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+			break;
+		case I915_MMAP_TYPE_UC:
+			vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+			break;
+		case I915_MMAP_TYPE_GTT:
+			vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+			break;
+		default:
+			drm_err(&i915->drm, "Wrong caching mode: %u\n", obj->mm.mmap_type);
+			GEM_BUG_ON(true);
+		}
+		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 		vma->vm_ops = obj->ops->mmap_ops;
 		vma->vm_private_data = node->driver_private;
 		return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2c88bdb8ff7c..7f2dd0c60423 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -21,6 +21,14 @@ struct drm_i915_gem_object;
 struct intel_fronbuffer;
 struct intel_memory_region;
 
+enum i915_mmap_type {
+	I915_MMAP_TYPE_GTT = 0,
+	I915_MMAP_TYPE_WC,
+	I915_MMAP_TYPE_WB,
+	I915_MMAP_TYPE_UC,
+	I915_MMAP_TYPE_FIXED,
+};
+
 /*
  * struct i915_lut_handle tracks the fast lookups from handle to vma used
  * for execbuf. Although we use a radixtree for that mapping, in order to
@@ -83,7 +91,7 @@ struct drm_i915_gem_object_ops {
 		     const struct drm_i915_gem_pread *arg);
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
-	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+	u64 (*mmap_offset)(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type);
 	void (*unmap_virtual)(struct drm_i915_gem_object *obj);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
@@ -203,14 +211,6 @@ enum i915_map_type {
 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
 };
 
-enum i915_mmap_type {
-	I915_MMAP_TYPE_GTT = 0,
-	I915_MMAP_TYPE_WC,
-	I915_MMAP_TYPE_WB,
-	I915_MMAP_TYPE_UC,
-	I915_MMAP_TYPE_FIXED,
-};
-
 struct i915_mmap_offset {
 	struct drm_vma_offset_node vma_node;
 	struct drm_i915_gem_object *obj;
@@ -598,6 +598,8 @@ struct drm_i915_gem_object {
 		 * pages were last acquired.
 		 */
 		bool dirty:1;
+
+		enum i915_mmap_type mmap_type;
 	} mm;
 
 	struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7d32534669bc..e52a87bf7ed8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -20,6 +20,8 @@
 #include "gem/i915_gem_ttm.h"
 #include "gem/i915_gem_ttm_move.h"
 #include "gem/i915_gem_ttm_pm.h"
+#include "gem/i915_gem_object_types.h"
+
 
 #define I915_TTM_PRIO_PURGE     0
 #define I915_TTM_PRIO_NO_PAGES  1
@@ -1076,11 +1078,13 @@ static const struct vm_operations_struct vm_ops_ttm = {
 	.close = ttm_vm_close,
 };
 
-static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
+static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type)
 {
 	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
 	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
 
+	obj->mm.mmap_type = mmap_type;
+
 	return drm_vma_node_offset_addr(&obj->base.vma_node);
 }
 
@@ -1245,6 +1249,26 @@ static const struct intel_memory_region_ops ttm_system_region_ops = {
 	.release = intel_region_ttm_fini,
 };
 
+/**
+ * gem_ttm_get_filep - get the shmem file backing @obj, or NULL if no ttm_tt
+ */
+struct file *
+gem_ttm_get_filep(struct drm_i915_gem_object *obj)
+{
+	struct ttm_buffer_object *bo;
+	struct i915_ttm_tt *i915_tt;
+
+	bo = i915_gem_to_ttm(obj);
+	if (!bo->ttm) {
+		drm_err(obj->base.dev, "ttm has not been allocated for bo\n");
+		return NULL;
+	}
+
+	i915_tt = container_of(bo->ttm, typeof(*i915_tt), ttm);
+
+	return i915_tt->filp;
+}
+
 struct intel_memory_region *
 i915_gem_ttm_system_setup(struct drm_i915_private *i915,
 			  u16 type, u16 instance)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 73e371aa3850..cfff3b77207e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -92,4 +92,7 @@ static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
 	/* Once / if we support GGTT, this is also false for cached ttm_tts */
 	return mem->mem_type != I915_PL_SYSTEM;
 }
+
+struct file *gem_ttm_get_filep(struct drm_i915_gem_object *obj);
+
 #endif
-- 
2.35.1



More information about the Intel-gfx-trybot mailing list