[PATCH 4/4] drm/i915: Optionally manage system memory with TTM and poolalloc

Adrian Larumbe adrian.larumbe@collabora.com
Sat Jul 30 15:12:06 UTC 2022


Add a module parameter that selects the memory region manager used for
system memory: either the legacy shmem-based one, or TTM through its
pool allocator. This should not affect how DGFX platforms with LMEM
work.

Signed-off-by: Adrian Larumbe <adrian.larumbe@collabora.com>
---
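A quick way to exercise the new path on an integrated part is to flip
the knob this patch adds (a usage sketch; assumes i915 is built as a
module):

  # modprobe i915 use_pool_alloc=1

or, for a built-in driver, on the kernel command line:

  i915.use_pool_alloc=1
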
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      |  56 ++++--
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c      | 128 ++++++++-----
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c     |   4 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c       | 168 +++++++++++++++++-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.h       |  14 ++
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |   2 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c    |   2 +-
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c      |   6 +-
 drivers/gpu/drm/i915/i915_params.c            |   6 +
 drivers/gpu/drm/i915/i915_params.h            |   4 +-
 drivers/gpu/drm/i915/intel_memory_region.c    |  10 +-
 12 files changed, 326 insertions(+), 76 deletions(-)
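
For reference, a minimal userspace sketch (not part of this series) of
the legacy mmap ioctl path that this patch reroutes through
i915_gem_ttm_mmap() for TTM-backed smem objects. fd, handle and size
are assumed to come from an already-open i915 DRM fd and a previously
created GEM object:

  #include <stdint.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <xf86drm.h>
  #include <drm/i915_drm.h>

  /* Map a BO through DRM_IOCTL_I915_GEM_MMAP; with use_pool_alloc=1
   * this now lands in i915_gem_ttm_mmap() rather than vm_mmap() on
   * the object's shmem filp.
   */
  static void *map_bo_legacy(int fd, uint32_t handle, uint64_t size)
  {
          struct drm_i915_gem_mmap arg;

          memset(&arg, 0, sizeof(arg));
          arg.handle = handle;
          arg.offset = 0;
          arg.size = size;
          arg.flags = I915_MMAP_WC; /* honoured via pgprot_writecombine() */

          if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
                  return NULL;

          return (void *)(uintptr_t)arg.addr_ptr;
  }

The mapping is torn down with a plain munmap(addr, size).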

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0c5c43852e24..b8ae6a381108 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -83,6 +83,22 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
+	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+		addr = -EINVAL;
+		goto err;
+	}
+
+	if (i915_gem_object_is_ttm(obj)) {
+		GEM_WARN_ON(!i915->params.use_pool_alloc);
+
+		addr = i915_gem_ttm_mmap(obj, args);
+		if (IS_ERR_VALUE(addr))
+			goto err;
+
+		args->addr_ptr = (u64)addr;
+		return 0;
+	}
+
 	/* prime objects have no backing filp to GEM mmap
 	 * pages from.
 	 */
@@ -91,11 +107,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
-		addr = -EINVAL;
-		goto err;
-	}
-
 	addr = vm_mmap(obj->base.filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
@@ -552,9 +563,11 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	struct i915_mmap_offset *mmo, *mn;
 
-	if (obj->ops->unmap_virtual)
+	if (obj->ops->unmap_virtual &&
+	    bo->type == ttm_bo_type_device)
 		obj->ops->unmap_virtual(obj);
 
 	spin_lock(&obj->mmo.lock);
@@ -641,11 +654,13 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 		   enum i915_mmap_type mmap_type,
 		   struct drm_file *file)
 {
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_mmap_offset *mmo;
 	int err;
 
-	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
+	GEM_BUG_ON((obj->ops->mmap_offset || obj->ops->mmap_ops) &&
+		   bo->type == ttm_bo_type_device);
 
 	mmo = lookup_mmo(obj, mmap_type);
 	if (mmo)
@@ -694,12 +709,14 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
 		     enum i915_mmap_type mmap_type,
 		     u64 *offset, struct drm_file *file)
 {
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	struct i915_mmap_offset *mmo;
 
 	if (i915_gem_object_never_mmap(obj))
 		return -ENODEV;
 
-	if (obj->ops->mmap_offset)  {
+	if (obj->ops->mmap_offset &&
+	    bo->type == ttm_bo_type_device)  {
 		if (mmap_type != I915_MMAP_TYPE_FIXED)
 			return -ENODEV;
 
@@ -731,7 +748,6 @@ __assign_mmap_offset_handle(struct drm_file *file,
 {
 	struct drm_i915_gem_object *obj;
 	int err;
-
 	obj = i915_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
@@ -739,6 +755,7 @@ __assign_mmap_offset_handle(struct drm_file *file,
 	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		goto out_put;
+
 	err = __assign_mmap_offset(obj, mmap_type, offset, file);
 	i915_gem_object_unlock(obj);
 out_put:
@@ -922,7 +939,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_vma_offset_node *node;
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_object *obj = NULL;
+	struct ttm_buffer_object *bo = NULL;
 	struct i915_mmap_offset *mmo = NULL;
 	struct file *anon;
 
@@ -944,7 +963,8 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 			mmo = container_of(node, struct i915_mmap_offset, vma_node);
 			obj = i915_gem_object_get_rcu(mmo->obj);
 
-			GEM_BUG_ON(obj && obj->ops->mmap_ops);
+			if (!i915->params.use_pool_alloc)
+				GEM_BUG_ON(obj && obj->ops->mmap_ops);
 		} else {
 			obj = i915_gem_object_get_rcu
 				(container_of(node, struct drm_i915_gem_object,
@@ -958,6 +978,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (!obj)
 		return node ? -EACCES : -EINVAL;
 
+	if (i915_gem_object_is_ttm(obj))
+		bo = i915_gem_to_ttm(obj);
+
 	if (i915_gem_object_is_readonly(obj)) {
 		if (vma->vm_flags & VM_WRITE) {
 			i915_gem_object_put(obj);
@@ -987,10 +1010,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	fput(anon);
 
 	if (obj->ops->mmap_ops) {
-		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
-		vma->vm_ops = obj->ops->mmap_ops;
-		vma->vm_private_data = node->driver_private;
-		return 0;
+		/* there could be an obj backend with mmap_ops that isn't TTM */
+		if (!i915_gem_object_is_ttm(obj) ||
+		    (i915_gem_object_is_ttm(obj) &&
+		     bo->type == ttm_bo_type_device)) {
+			vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+			vma->vm_ops = obj->ops->mmap_ops;
+			vma->vm_private_data = node->driver_private;
+			return 0;
+		}
 	}
 
 	vma->vm_private_data = mmo;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 6f0a3ce35567..c130db4d757f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -74,7 +74,7 @@ __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
 
 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
 
-void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+void __i915_gem_object_release_smem(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages,
 				     bool needs_clflush);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 0d0e46dae559..341d4078d29f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,16 +16,15 @@
 #include "i915_gem_region.h"
 #include "i915_gem_tiling.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_ttm.h"
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-	struct address_space *mapping = obj->base.filp->f_mapping;
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct scatterlist *sg;
 	struct sg_table *st;
 	dma_addr_t dma;
 	void *vaddr;
-	void *dst;
 	int i;
 
 	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
@@ -57,22 +56,40 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	sg_dma_address(sg) = dma;
 	sg_dma_len(sg) = obj->base.size;
 
-	dst = vaddr;
-	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-		struct page *page;
-		void *src;
+	if (i915_gem_object_is_ttm(obj)) {
+		void *objaddr;
 
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			goto err_st;
+		objaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(objaddr))
+			return PTR_ERR(objaddr);
 
-		src = kmap_atomic(page);
-		memcpy(dst, src, PAGE_SIZE);
-		drm_clflush_virt_range(dst, PAGE_SIZE);
-		kunmap_atomic(src);
+		drm_clflush_virt_range(objaddr, obj->base.size);
+		memcpy(vaddr, objaddr, obj->base.size);
 
-		put_page(page);
-		dst += PAGE_SIZE;
+		i915_gem_object_unpin_map(obj);
+
+		drm_clflush_virt_range(vaddr, obj->base.size);
+
+	} else {
+		struct address_space *mapping = obj->base.filp->f_mapping;
+		void *dst = vaddr;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page;
+			void *src;
+
+			page = shmem_read_mapping_page(mapping, i);
+			if (IS_ERR(page))
+				goto err_st;
+
+			src = kmap_atomic(page);
+			memcpy(dst, src, PAGE_SIZE);
+			drm_clflush_virt_range(dst, PAGE_SIZE);
+			kunmap_atomic(src);
+
+			put_page(page);
+			dst += PAGE_SIZE;
+		}
 	}
 
 	intel_gt_chipset_flush(to_gt(i915));
@@ -99,32 +116,48 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	dma_addr_t dma = sg_dma_address(pages->sgl);
 	void *vaddr = sg_page(pages->sgl);
 
-	__i915_gem_object_release_shmem(obj, pages, false);
+	__i915_gem_object_release_smem(obj, pages, false);
 
 	if (obj->mm.dirty) {
-		struct address_space *mapping = obj->base.filp->f_mapping;
-		void *src = vaddr;
-		int i;
-
-		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-			struct page *page;
-			char *dst;
-
-			page = shmem_read_mapping_page(mapping, i);
-			if (IS_ERR(page))
-				continue;
-
-			dst = kmap_atomic(page);
-			drm_clflush_virt_range(src, PAGE_SIZE);
-			memcpy(dst, src, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			set_page_dirty(page);
-			if (obj->mm.madv == I915_MADV_WILLNEED)
-				mark_page_accessed(page);
-			put_page(page);
-
-			src += PAGE_SIZE;
+		if (!i915_gem_object_is_ttm(obj)) {
+			struct address_space *mapping = obj->base.filp->f_mapping;
+			void *src = vaddr;
+			int i;
+
+			for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+				struct page *page;
+				char *dst;
+
+				page = shmem_read_mapping_page(mapping, i);
+				if (IS_ERR(page))
+					continue;
+
+				dst = kmap_atomic(page);
+				drm_clflush_virt_range(src, PAGE_SIZE);
+				memcpy(dst, src, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				if (obj->mm.madv == I915_MADV_WILLNEED)
+					mark_page_accessed(page);
+				put_page(page);
+
+				src += PAGE_SIZE;
+			}
+		} else {
+			void *objaddr;
+
+			objaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+			if (IS_ERR(objaddr)) {
+				drm_dbg(obj->base.dev,
+					"i915_gem_object_pin_map_unlocked failed\n");
+				return;
+			}
+
+			drm_clflush_virt_range(vaddr, obj->base.size);
+			memcpy(objaddr, vaddr, obj->base.size);
+			drm_clflush_virt_range(objaddr, obj->base.size);
+			i915_gem_object_unpin_map(obj);
 		}
 		obj->mm.dirty = false;
 	}
@@ -188,8 +221,9 @@ int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_smem_to_phys(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 	int err;
 
@@ -201,10 +235,12 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
 
 	/* Perma-pin (until release) the physical set of pages */
 	__i915_gem_object_pin_pages(obj);
-
-	if (!IS_ERR_OR_NULL(pages))
-		i915_gem_object_put_pages_shmem(obj, pages);
-
+	if (!IS_ERR_OR_NULL(pages)) {
+		if (!i915->params.use_pool_alloc)
+			i915_gem_object_put_pages_shmem(obj, pages);
+		else
+			i915_gem_object_put_pages_ttm(obj, pages);
+	}
 	i915_gem_object_release_memory_region(obj);
 	return 0;
 
@@ -226,7 +262,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (align > obj->base.size)
 		return -EINVAL;
 
-	if (!i915_gem_object_is_shmem(obj))
+	if (!i915_gem_object_is_smem(obj))
 		return -EINVAL;
 
 	if (!i915_gem_object_has_struct_page(obj))
@@ -251,7 +287,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		return -EFAULT;
 	}
 
-	return i915_gem_object_shmem_to_phys(obj);
+	return i915_gem_object_smem_to_phys(obj);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4eed3dd90ba8..ef1192a6485c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -351,7 +351,7 @@ static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 }
 
 void
-__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+__i915_gem_object_release_smem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
 				bool needs_clflush)
 {
@@ -382,7 +382,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 
 void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
 {
-	__i915_gem_object_release_shmem(obj, pages, true);
+	__i915_gem_object_release_smem(obj, pages, true);
 
 	i915_gem_gtt_finish_pages(obj, pages);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 0332d5214aab..cd00f0694fd9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -4,9 +4,11 @@
  */
 
 #include <linux/shmem_fs.h>
+#include <linux/mman.h>
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo_api.h>
 #include <drm/drm_buddy.h>
 
 #include "i915_drv.h"
@@ -20,6 +22,8 @@
 #include "gem/i915_gem_ttm.h"
 #include "gem/i915_gem_ttm_move.h"
 #include "gem/i915_gem_ttm_pm.h"
+#include "gem/i915_gem_clflush.h"
+#include "gem/i915_gem_tiling.h"
 #include "gt/intel_gpu_commands.h"
 
 #define I915_TTM_PRIO_PURGE     0
@@ -207,6 +211,11 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
 			return PTR_ERR(filp);
 
 		mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+		if (IS_I965GM(i915) || IS_I965G(i915)) {
+			/* 965gm cannot relocate objects above 4GiB. */
+			mask &= ~__GFP_HIGHMEM;
+			mask |= __GFP_DMA32;
+		}
 
 		mapping = filp->f_mapping;
 		mapping_set_gfp_mask(mapping, mask);
@@ -286,7 +295,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!i915_tt)
 		return NULL;
 
-	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
+	if (obj->flags & (I915_BO_ALLOC_CPU_CLEAR | I915_BO_ALLOC_USER) &&
 	    man->use_tt)
 		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 
@@ -294,7 +303,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
 		page_flags |= TTM_TT_FLAG_EXTERNAL |
 			      TTM_TT_FLAG_EXTERNAL_MAPPABLE;
-		i915_tt->is_shmem = true;
+
+		i915_tt->is_shmem = !i915->params.use_pool_alloc;
 	}
 
 	if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
@@ -513,9 +523,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 	if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
 		return 0;
 
-	GEM_BUG_ON(!i915_tt->is_shmem);
-
-	if (!i915_tt->filp)
+	if (!ttm_tt_is_populated(bo->ttm))
 		return 0;
 
 	ret = ttm_bo_wait_ctx(bo, &ctx);
@@ -792,6 +800,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 	}
 
 	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		const size_t size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
+		struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
+		/*
+		 * If there's no chance of allocating enough pages for the whole
+		 * object, bail early.
+		 */
+		if (size > resource_size(&mr->region))
+			return -ENOMEM;
+
 		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 		if (ret)
 			return ret;
@@ -807,6 +825,14 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		if (IS_ERR(rsgt))
 			return PTR_ERR(rsgt);
 
+		if (!HAS_LMEM(to_i915(obj->base.dev)) && bo->ttm) {
+			if (i915_gem_object_needs_bit17_swizzle(obj))
+				i915_gem_object_do_bit_17_swizzle(obj, &rsgt->table);
+
+			if (i915_gem_object_can_bypass_llc(obj))
+				obj->cache_dirty = true;
+		}
+
 		GEM_BUG_ON(obj->mm.rsgt);
 		obj->mm.rsgt = rsgt;
 		__i915_gem_object_set_pages(obj, &rsgt->table,
@@ -883,8 +909,8 @@ static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
 	return __i915_ttm_migrate(obj, mr, obj->flags);
 }
 
-static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
-			       struct sg_table *st)
+void i915_gem_object_put_pages_ttm(struct drm_i915_gem_object *obj,
+				   struct sg_table *st)
 {
 	/*
 	 * We're currently not called from a shrinker, so put_pages()
@@ -894,10 +920,23 @@ static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
 	 * and shrinkers will move it out if needed.
 	 */
 
+	if (!HAS_LMEM(to_i915(obj->base.dev)) &&
+	    i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_save_bit_17_swizzle(obj, st);
+
 	if (obj->mm.rsgt)
 		i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
 }
 
+static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *st)
+{
+	if (likely(i915_gem_object_has_struct_page(obj)))
+		i915_gem_object_put_pages_ttm(obj, st);
+	else
+		i915_gem_object_put_pages_phys(obj, st);
+}
+
 /**
  * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
  * @obj: The object
@@ -1133,6 +1172,27 @@ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
 }
 
+static int
+ttm_pwrite(struct drm_i915_gem_object *obj,
+	     const struct drm_i915_gem_pwrite *arg)
+{
+	/* phys-backed objects have no struct pages; use the phys path */
+	if (!i915_gem_object_has_struct_page(obj))
+		return i915_gem_object_pwrite_phys(obj, arg);
+
+	return -ENODEV;
+}
+
+static int
+ttm_pread(struct drm_i915_gem_object *obj,
+	    const struct drm_i915_gem_pread *arg)
+{
+	if (!i915_gem_object_has_struct_page(obj))
+		return i915_gem_object_pread_phys(obj, arg);
+
+	return -ENODEV;
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.name = "i915_gem_object_ttm",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
@@ -1143,6 +1203,9 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.truncate = i915_ttm_truncate,
 	.shrink = i915_ttm_shrink,
 
+	.pwrite = ttm_pwrite,
+	.pread = ttm_pread,
+
 	.adjust_lru = i915_ttm_adjust_lru,
 	.delayed_free = i915_ttm_delayed_free,
 	.migrate = i915_ttm_migrate,
@@ -1155,8 +1218,15 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+	/*
+	 * This handles the case where a shmem object was turned into a
+	 * phys one and released its memory region early.
+	 */
+	if (likely(IS_DGFX(i915) || i915_gem_object_has_struct_page(obj)))
+		i915_gem_object_release_memory_region(obj);
 
-	i915_gem_object_release_memory_region(obj);
 	mutex_destroy(&obj->ttm.get_io_page.lock);
 
 	if (obj->ttm.created) {
@@ -1222,6 +1292,11 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
 		ttm_bo_type_kernel;
 
+	if (!HAS_LMEM(i915) && i915->params.use_pool_alloc) {
+		GEM_WARN_ON(mem->type != INTEL_MEMORY_SYSTEM);
+		bo_type = ttm_bo_type_kernel;
+	}
+
 	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 
 	/* Forcing the page size is kernel internal only */
@@ -1281,3 +1356,80 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
 	intel_memory_region_set_name(mr, "system-ttm");
 	return mr;
 }
+
+bool i915_gem_object_is_ttm(const struct drm_i915_gem_object *obj)
+{
+	return obj->ops == &i915_gem_ttm_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_ttm_from_data(struct drm_i915_private *dev_priv,
+				       const void *data, resource_size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	void *vaddr;
+
+	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+	if (IS_ERR(obj))
+		return obj;
+
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		i915_gem_object_put(obj);
+		return vaddr;
+	}
+
+	memcpy(vaddr, data, size);
+
+	i915_gem_object_unpin_map(obj);
+
+	return obj;
+}
+
+unsigned long i915_gem_ttm_mmap(struct drm_i915_gem_object *obj,
+				struct drm_i915_gem_mmap *args)
+{
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long addr;
+
+	addr = vm_mmap(NULL, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	if (mmap_write_lock_killable(mm))
+		return -EINTR;
+	vma = find_vma(mm, addr);
+	if (!vma) {
+		mmap_write_unlock(mm);
+		return -ENOMEM;
+	}
+
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	if (args->flags & I915_MMAP_WC)
+		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+	vma->vm_ops = obj->ops->mmap_ops;
+	vma->vm_private_data = bo;
+
+	mmap_write_unlock(mm);
+
+	return addr;
+}
+
+bool i915_gem_object_is_smem(struct drm_i915_gem_object *obj)
+{
+	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+
+#ifdef CONFIG_LOCKDEP
+	if (i915_gem_object_migratable(obj) &&
+	    i915_gem_object_evictable(obj))
+		assert_object_held(obj);
+#endif
+	return mr && (mr->type == INTEL_MEMORY_SYSTEM ||
+		      mr->type == INTEL_MEMORY_STOLEN_SYSTEM);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index e4842b4296fc..c7575b6377cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -95,4 +95,18 @@ static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
 
 bool i915_ttm_resource_mappable(struct ttm_resource *res);
 
+bool i915_gem_object_is_ttm(const struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_ttm_from_data(struct drm_i915_private *dev_priv,
+				     const void *data, resource_size_t size);
+
+unsigned long i915_gem_ttm_mmap(struct drm_i915_gem_object *obj,
+				struct drm_i915_gem_mmap *args);
+
+bool i915_gem_object_is_smem(struct drm_i915_gem_object *obj);
+
+void i915_gem_object_put_pages_ttm(struct drm_i915_gem_object *obj,
+				   struct sg_table *st);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b4ce33..30446770a4de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -192,7 +192,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	if (!pages)
 		return;
 
-	__i915_gem_object_release_shmem(obj, pages, true);
+	__i915_gem_object_release_smem(obj, pages, true);
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3ced9948a331..c1df3c31e341 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -871,7 +871,7 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	bool no_map;
 
-	if (obj->ops->mmap_offset)
+	if (obj->ops->mmap_offset && !i915->params.use_pool_alloc)
 		return type == I915_MMAP_TYPE_FIXED;
 	else if (type == I915_MMAP_TYPE_FIXED)
 		return false;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index d5fca1f68eff..17a81bfccec8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
 #include <drm/drm_print.h>
 
 #include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_ttm.h"
 #include "intel_uc_fw.h"
 #include "intel_uc_fw_abi.h"
 #include "i915_drv.h"
@@ -482,7 +483,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 		if (!IS_ERR(obj))
 			obj->flags |= I915_BO_ALLOC_PM_EARLY;
 	} else {
-		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+		if (unlikely(i915->params.use_pool_alloc))
+			obj = i915_gem_object_create_ttm_from_data(i915, fw->data, fw->size);
+		else
+			obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
 	}
 
 	if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 6fc475a5db61..1af11f030ab1 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -207,6 +207,12 @@ i915_param_named_unsafe(lmem_size, uint, 0400,
 i915_param_named_unsafe(lmem_bar_size, uint, 0400,
 			"Set the lmem bar size(in MiB).");
 
+i915_param_named_unsafe(use_pool_alloc, bool, 0600,
+	"Force the driver to use TTM's pool allocator API for smem objects. "
+	"This will cause TTM to take over BO allocation even in integrated platforms. "
+	"(default: false)");
+
+
 static __always_inline void _print_param(struct drm_printer *p,
 					 const char *name,
 					 const char *type,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 2733cb6cfe09..6aac9c46a7fe 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -84,7 +84,9 @@ struct drm_printer;
 	param(bool, verbose_state_checks, true, 0) \
 	param(bool, nuclear_pageflip, false, 0400) \
 	param(bool, enable_dp_mst, true, 0600) \
-	param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
+	param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0) \
+	param(bool, use_pool_alloc, true, 0600)
+	/* set to 'true' for trybot testing */
 
 #define MEMBER(T, member, ...) T member;
 struct i915_params {
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 9a4a7fb55582..8d72953b2c99 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -310,6 +310,14 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
 {
 	int err, i;
 
+	if (GRAPHICS_VER(i915) <= 5 &&
+	    i915->params.use_pool_alloc) {
+		drm_dbg(&i915->drm,
+			"TTM maybe not be used with GEN <=5 devices,"
+			" falling back on shmem\n");
+		i915->params.use_pool_alloc = false;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
 		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
 		u16 type, instance;
@@ -321,7 +329,7 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
 		instance = intel_region_map[i].instance;
 		switch (type) {
 		case INTEL_MEMORY_SYSTEM:
-			if (IS_DGFX(i915))
+			if (IS_DGFX(i915) || i915->params.use_pool_alloc)
 				mem = i915_gem_ttm_system_setup(i915, type,
 								instance);
 			else
-- 
2.37.0


