[PATCH 1/2] drm/i915/ttm: remove shmem memory region and gem object backend

Adrian Larumbe adrian.larumbe at collabora.com
Fri Mar 11 23:08:06 UTC 2022


The goal is to completely replace the shmem API for the SMEM memory
region with the TTM backend for GEM objects, so that the TTM system
region is used on all platforms rather than only on discrete ones.

i915_gem_object_create_shmem() and i915_gem_object_is_shmem() become
i915_gem_object_create_smem() and i915_gem_object_is_smem(), now
implemented on top of TTM, while i915_gem_object_create_shmem_from_data()
and i915_gem_object_create_lmem_from_data() are folded into a single
placement-aware i915_gem_object_create_fw() helper. The shmem object
ops, shmem region ops and i915_gem_shmem_setup() are removed.
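
For callers the change amounts to switching to the renamed or merged
helpers; a rough sketch (illustrative only, variable names are
placeholders; the real call sites are converted in the diff below):

	/* system memory object, now allocated from the TTM SMEM region */
	obj = i915_gem_object_create_smem(i915, size);

	/* firmware blob: one helper picks lmem or smem based on HAS_LMEM() */
	obj = i915_gem_object_create_fw(i915, fw->data, fw->size);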

Signed-off-by: Adrian Larumbe <adrian.larumbe at collabora.com>
---
 drivers/gpu/drm/i915/display/intel_fbdev.c    |   6 +-
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c      |  26 --
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h      |   4 -
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  39 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  21 +-
 drivers/gpu/drm/i915/gem/i915_gem_phys.c      |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c     | 435 ------------------
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c       |  25 +
 drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c    |   2 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |   6 +-
 .../i915/gem/selftests/i915_gem_client_blt.c  |   2 +-
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  |   6 +-
 .../drm/i915/gem/selftests/i915_gem_object.c  |   2 +-
 .../drm/i915/gem/selftests/i915_gem_phys.c    |   2 +-
 drivers/gpu/drm/i915/gt/intel_lrc.c           |   4 +-
 .../gpu/drm/i915/gt/intel_ring_submission.c   |   2 +-
 drivers/gpu/drm/i915/gt/shmem_utils.c         |   6 -
 drivers/gpu/drm/i915/gt/uc/intel_guc.c        |   2 +-
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c      |   9 +-
 drivers/gpu/drm/i915/gvt/cmd_parser.c         |   4 +-
 drivers/gpu/drm/i915/i915_perf.c              |   4 +-
 drivers/gpu/drm/i915/intel_memory_region.c    |   6 +-
 drivers/gpu/drm/i915/intel_memory_region.h    |   3 -
 24 files changed, 97 insertions(+), 523 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 221336178991..32e0ad1461ee 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -171,7 +171,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 		if (size * 2 < dev_priv->stolen_usable_size)
 			obj = i915_gem_object_create_stolen(dev_priv, size);
 		if (IS_ERR(obj))
-			obj = i915_gem_object_create_shmem(dev_priv, size);
+			obj = i915_gem_object_create_smem(dev_priv, size);
 	}
 
 	if (IS_ERR(obj)) {
@@ -298,7 +298,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	 * If the object is stolen however, it will be full of whatever
 	 * garbage was left in there.
 	 */
-	if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
+	if (!i915_gem_object_is_smem(vma->obj) && !prealloc)
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -655,7 +655,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 	 * full of whatever garbage was left in there.
 	 */
 	if (state == FBINFO_STATE_RUNNING &&
-	    !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
+	    !i915_gem_object_is_smem(intel_fb_obj(&ifbdev->fb->base)))
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index ce91b23385cf..c1c4e5774087 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -18,7 +18,7 @@ struct clflush {
 	struct drm_i915_gem_object *obj;
 };
 
-static void __do_clflush(struct drm_i915_gem_object *obj)
+static void __do_clflush(const struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	drm_clflush_sg(obj->mm.pages);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 444f8268b9c5..b702876909af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -104,32 +104,6 @@ __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
 					     size, page_size, flags);
 }
 
-struct drm_i915_gem_object *
-i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
-				      const void *data, size_t size)
-{
-	struct drm_i915_gem_object *obj;
-	void *map;
-
-	obj = i915_gem_object_create_lmem(i915,
-					  round_up(size, PAGE_SIZE),
-					  I915_BO_ALLOC_CONTIGUOUS);
-	if (IS_ERR(obj))
-		return obj;
-
-	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
-	if (IS_ERR(map)) {
-		i915_gem_object_put(obj);
-		return map;
-	}
-
-	memcpy(map, data, size);
-
-	i915_gem_object_unpin_map(obj);
-
-	return obj;
-}
-
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 1b88ea13435c..4ee81fc66302 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -23,10 +23,6 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
-struct drm_i915_gem_object *
-i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
-				      const void *data, size_t size);
-
 struct drm_i915_gem_object *
 __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
 				      resource_size_t size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c1c3b510b9e2..82b3b445e5bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -394,7 +394,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
 }
 
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+void __i915_gem_object_flush_frontbuffer(const struct drm_i915_gem_object *obj,
 					 enum fb_op_origin origin)
 {
 	struct intel_frontbuffer *front;
@@ -688,7 +688,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
  *
  * Return: True if the object can be placed in @type. False otherwise.
  */
-bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
+bool i915_gem_object_placement_possible(const struct drm_i915_gem_object *obj,
 					enum intel_memory_type type)
 {
 	unsigned int i;
@@ -802,6 +802,41 @@ int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_fw(struct drm_i915_private *dev_priv,
+			  const void *data, resource_size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	void *map;
+
+	if (HAS_LMEM(dev_priv))
+		obj = i915_gem_object_create_lmem(dev_priv,
+						  round_up(size, PAGE_SIZE),
+						  I915_BO_ALLOC_CONTIGUOUS);
+	else
+		obj = i915_gem_object_create_smem(dev_priv,
+						  round_up(size, PAGE_SIZE));
+	if (IS_ERR(obj))
+		return obj;
+
+	if (HAS_LMEM(dev_priv))
+		obj->flags |= I915_BO_ALLOC_PM_EARLY;
+	else
+		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
+
+	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+	if (IS_ERR(map)) {
+		i915_gem_object_put(obj);
+		return map;
+	}
+
+	memcpy(map, data, size);
+
+	i915_gem_object_unpin_map(obj);
+
+	return obj;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
 #include "selftests/huge_pages.c"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 02c37fe4a535..04780c08a1a0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -62,18 +62,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 void __i915_gem_object_fini(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915,
+i915_gem_object_create_smem(struct drm_i915_private *i915,
 			     resource_size_t size);
 struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
-				       const void *data, resource_size_t size);
-struct drm_i915_gem_object *
 __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
 			      struct intel_memory_region **placements,
 			      unsigned int n_placements);
 
-extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
-
 void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages,
 				     bool needs_clflush);
@@ -423,7 +418,7 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
 
 static inline bool
-i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+i915_gem_object_has_pages(const struct drm_i915_gem_object *obj)
 {
 	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
 }
@@ -573,13 +568,13 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
 
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+void __i915_gem_object_flush_frontbuffer(const struct drm_i915_gem_object *obj,
 					 enum fb_op_origin origin);
 void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 					      enum fb_op_origin origin);
 
 static inline void
-i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+i915_gem_object_flush_frontbuffer(const struct drm_i915_gem_object *obj,
 				  enum fb_op_origin origin)
 {
 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
@@ -596,7 +591,7 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 
 int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
 
-bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+bool i915_gem_object_is_smem(const struct drm_i915_gem_object *obj);
 
 void __i915_gem_free_object_rcu(struct rcu_head *head);
 
@@ -618,9 +613,13 @@ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
 				   unsigned int flags);
 
-bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
+bool i915_gem_object_placement_possible(const struct drm_i915_gem_object *obj,
 					enum intel_memory_type type);
 
+struct drm_i915_gem_object *
+i915_gem_object_create_fw(struct drm_i915_private *dev_priv,
+			  const void *data, resource_size_t size);
+
 int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 			 size_t size, struct intel_memory_region *mr,
 			 struct address_space *mapping,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ca6faffcc496..61e676839727 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -225,7 +225,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (align > obj->base.size)
 		return -EINVAL;
 
-	if (!i915_gem_object_is_shmem(obj))
+	if (!i915_gem_object_is_smem(obj))
 		return -EINVAL;
 
 	if (!i915_gem_object_has_struct_page(obj))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 3a1c782ed791..a4c189e84270 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -187,105 +187,6 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 	return ret;
 }
 
-static int shmem_get_pages(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct intel_memory_region *mem = obj->mm.region;
-	struct address_space *mapping = obj->base.filp->f_mapping;
-	const unsigned long page_count = obj->base.size / PAGE_SIZE;
-	unsigned int max_segment = i915_sg_segment_size();
-	struct sg_table *st;
-	struct sgt_iter sgt_iter;
-	struct page *page;
-	int ret;
-
-	/*
-	 * Assert that the object is not currently in any GPU domain. As it
-	 * wasn't in the GTT, there shouldn't be any way it could have been in
-	 * a GPU cache
-	 */
-	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
-	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
-
-rebuild_st:
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		return -ENOMEM;
-
-	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
-				   max_segment);
-	if (ret)
-		goto err_st;
-
-	ret = i915_gem_gtt_prepare_pages(obj, st);
-	if (ret) {
-		/*
-		 * DMA remapping failed? One possible cause is that
-		 * it could not reserve enough large entries, asking
-		 * for PAGE_SIZE chunks instead may be helpful.
-		 */
-		if (max_segment > PAGE_SIZE) {
-			for_each_sgt_page(page, sgt_iter, st)
-				put_page(page);
-			sg_free_table(st);
-			kfree(st);
-
-			max_segment = PAGE_SIZE;
-			goto rebuild_st;
-		} else {
-			dev_warn(i915->drm.dev,
-				 "Failed to DMA remap %lu pages\n",
-				 page_count);
-			goto err_pages;
-		}
-	}
-
-	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_do_bit_17_swizzle(obj, st);
-
-	if (i915_gem_object_can_bypass_llc(obj))
-		obj->cache_dirty = true;
-
-	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
-
-	return 0;
-
-err_pages:
-	shmem_sg_free_table(st, mapping, false, false);
-	/*
-	 * shmemfs first checks if there is enough memory to allocate the page
-	 * and reports ENOSPC should there be insufficient, along with the usual
-	 * ENOMEM for a genuine allocation failure.
-	 *
-	 * We use ENOSPC in our driver to mean that we have run out of aperture
-	 * space and so want to translate the error from shmemfs back to our
-	 * usual understanding of ENOMEM.
-	 */
-err_st:
-	if (ret == -ENOSPC)
-		ret = -ENOMEM;
-
-	kfree(st);
-
-	return ret;
-}
-
-static int
-shmem_truncate(struct drm_i915_gem_object *obj)
-{
-	/*
-	 * Our goal here is to return as much of the memory as
-	 * is possible back to the system as we are called from OOM.
-	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*.
-	 */
-	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
-	obj->mm.madv = __I915_MADV_PURGED;
-	obj->mm.pages = ERR_PTR(-EFAULT);
-
-	return 0;
-}
-
 void __shmem_writeback(size_t size, struct address_space *mapping)
 {
 	struct writeback_control wbc = {
@@ -328,27 +229,6 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
 	}
 }
 
-static void
-shmem_writeback(struct drm_i915_gem_object *obj)
-{
-	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
-}
-
-static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
-{
-	switch (obj->mm.madv) {
-	case I915_MADV_DONTNEED:
-		return i915_gem_object_truncate(obj);
-	case __I915_MADV_PURGED:
-		return 0;
-	}
-
-	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
-		shmem_writeback(obj);
-
-	return 0;
-}
-
 void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
@@ -393,318 +273,3 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
 	kfree(pages);
 	obj->mm.dirty = false;
 }
-
-static void
-shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
-{
-	if (likely(i915_gem_object_has_struct_page(obj)))
-		i915_gem_object_put_pages_shmem(obj, pages);
-	else
-		i915_gem_object_put_pages_phys(obj, pages);
-}
-
-static int
-shmem_pwrite(struct drm_i915_gem_object *obj,
-	     const struct drm_i915_gem_pwrite *arg)
-{
-	struct address_space *mapping = obj->base.filp->f_mapping;
-	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
-	u64 remain, offset;
-	unsigned int pg;
-
-	/* Caller already validated user args */
-	GEM_BUG_ON(!access_ok(user_data, arg->size));
-
-	if (!i915_gem_object_has_struct_page(obj))
-		return i915_gem_object_pwrite_phys(obj, arg);
-
-	/*
-	 * Before we instantiate/pin the backing store for our use, we
-	 * can prepopulate the shmemfs filp efficiently using a write into
-	 * the pagecache. We avoid the penalty of instantiating all the
-	 * pages, important if the user is just writing to a few and never
-	 * uses the object on the GPU, and using a direct write into shmemfs
-	 * allows it to avoid the cost of retrieving a page (either swapin
-	 * or clearing-before-use) before it is overwritten.
-	 */
-	if (i915_gem_object_has_pages(obj))
-		return -ENODEV;
-
-	if (obj->mm.madv != I915_MADV_WILLNEED)
-		return -EFAULT;
-
-	/*
-	 * Before the pages are instantiated the object is treated as being
-	 * in the CPU domain. The pages will be clflushed as required before
-	 * use, and we can freely write into the pages directly. If userspace
-	 * races pwrite with any other operation; corruption will ensue -
-	 * that is userspace's prerogative!
-	 */
-
-	remain = arg->size;
-	offset = arg->offset;
-	pg = offset_in_page(offset);
-
-	do {
-		unsigned int len, unwritten;
-		struct page *page;
-		void *data, *vaddr;
-		int err;
-		char c;
-
-		len = PAGE_SIZE - pg;
-		if (len > remain)
-			len = remain;
-
-		/* Prefault the user page to reduce potential recursion */
-		err = __get_user(c, user_data);
-		if (err)
-			return err;
-
-		err = __get_user(c, user_data + len - 1);
-		if (err)
-			return err;
-
-		err = pagecache_write_begin(obj->base.filp, mapping,
-					    offset, len, 0,
-					    &page, &data);
-		if (err < 0)
-			return err;
-
-		vaddr = kmap_atomic(page);
-		unwritten = __copy_from_user_inatomic(vaddr + pg,
-						      user_data,
-						      len);
-		kunmap_atomic(vaddr);
-
-		err = pagecache_write_end(obj->base.filp, mapping,
-					  offset, len, len - unwritten,
-					  page, data);
-		if (err < 0)
-			return err;
-
-		/* We don't handle -EFAULT, leave it to the caller to check */
-		if (unwritten)
-			return -ENODEV;
-
-		remain -= len;
-		user_data += len;
-		offset += len;
-		pg = 0;
-	} while (remain);
-
-	return 0;
-}
-
-static int
-shmem_pread(struct drm_i915_gem_object *obj,
-	    const struct drm_i915_gem_pread *arg)
-{
-	if (!i915_gem_object_has_struct_page(obj))
-		return i915_gem_object_pread_phys(obj, arg);
-
-	return -ENODEV;
-}
-
-static void shmem_release(struct drm_i915_gem_object *obj)
-{
-	if (i915_gem_object_has_struct_page(obj))
-		i915_gem_object_release_memory_region(obj);
-
-	fput(obj->base.filp);
-}
-
-const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
-	.name = "i915_gem_object_shmem",
-	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
-
-	.get_pages = shmem_get_pages,
-	.put_pages = shmem_put_pages,
-	.truncate = shmem_truncate,
-	.shrink = shmem_shrink,
-
-	.pwrite = shmem_pwrite,
-	.pread = shmem_pread,
-
-	.release = shmem_release,
-};
-
-static int __create_shmem(struct drm_i915_private *i915,
-			  struct drm_gem_object *obj,
-			  resource_size_t size)
-{
-	unsigned long flags = VM_NORESERVE;
-	struct file *filp;
-
-	drm_gem_private_object_init(&i915->drm, obj, size);
-
-	if (i915->mm.gemfs)
-		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
-						 flags);
-	else
-		filp = shmem_file_setup("i915", size, flags);
-	if (IS_ERR(filp))
-		return PTR_ERR(filp);
-
-	obj->filp = filp;
-	return 0;
-}
-
-static int shmem_object_init(struct intel_memory_region *mem,
-			     struct drm_i915_gem_object *obj,
-			     resource_size_t size,
-			     resource_size_t page_size,
-			     unsigned int flags)
-{
-	static struct lock_class_key lock_class;
-	struct drm_i915_private *i915 = mem->i915;
-	struct address_space *mapping;
-	unsigned int cache_level;
-	gfp_t mask;
-	int ret;
-
-	ret = __create_shmem(i915, &obj->base, size);
-	if (ret)
-		return ret;
-
-	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
-	if (IS_I965GM(i915) || IS_I965G(i915)) {
-		/* 965gm cannot relocate objects above 4GiB. */
-		mask &= ~__GFP_HIGHMEM;
-		mask |= __GFP_DMA32;
-	}
-
-	mapping = obj->base.filp->f_mapping;
-	mapping_set_gfp_mask(mapping, mask);
-	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
-
-	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
-	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
-	obj->write_domain = I915_GEM_DOMAIN_CPU;
-	obj->read_domains = I915_GEM_DOMAIN_CPU;
-
-	if (HAS_LLC(i915))
-		/* On some devices, we can have the GPU use the LLC (the CPU
-		 * cache) for about a 10% performance improvement
-		 * compared to uncached.  Graphics requests other than
-		 * display scanout are coherent with the CPU in
-		 * accessing this cache.  This means in this mode we
-		 * don't need to clflush on the CPU side, and on the
-		 * GPU side we only need to flush internal caches to
-		 * get data visible to the CPU.
-		 *
-		 * However, we maintain the display planes as UC, and so
-		 * need to rebind when first used as such.
-		 */
-		cache_level = I915_CACHE_LLC;
-	else
-		cache_level = I915_CACHE_NONE;
-
-	i915_gem_object_set_cache_coherency(obj, cache_level);
-
-	i915_gem_object_init_memory_region(obj, mem);
-
-	return 0;
-}
-
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915,
-			     resource_size_t size)
-{
-	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
-					     size, 0, 0);
-}
-
-/* Allocate a new GEM object and fill it with the supplied data */
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
-				       const void *data, resource_size_t size)
-{
-	struct drm_i915_gem_object *obj;
-	struct file *file;
-	resource_size_t offset;
-	int err;
-
-	GEM_WARN_ON(IS_DGFX(dev_priv));
-	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
-	if (IS_ERR(obj))
-		return obj;
-
-	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
-
-	file = obj->base.filp;
-	offset = 0;
-	do {
-		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
-		struct page *page;
-		void *pgdata, *vaddr;
-
-		err = pagecache_write_begin(file, file->f_mapping,
-					    offset, len, 0,
-					    &page, &pgdata);
-		if (err < 0)
-			goto fail;
-
-		vaddr = kmap(page);
-		memcpy(vaddr, data, len);
-		kunmap(page);
-
-		err = pagecache_write_end(file, file->f_mapping,
-					  offset, len, len,
-					  page, pgdata);
-		if (err < 0)
-			goto fail;
-
-		size -= len;
-		data += len;
-		offset += len;
-	} while (size);
-
-	return obj;
-
-fail:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
-}
-
-static int init_shmem(struct intel_memory_region *mem)
-{
-	int err;
-
-	err = i915_gemfs_init(mem->i915);
-	if (err) {
-		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
-			 err);
-	}
-
-	intel_memory_region_set_name(mem, "system");
-
-	return 0; /* Don't error, we can simply fallback to the kernel mnt */
-}
-
-static int release_shmem(struct intel_memory_region *mem)
-{
-	i915_gemfs_fini(mem->i915);
-	return 0;
-}
-
-static const struct intel_memory_region_ops shmem_region_ops = {
-	.init = init_shmem,
-	.release = release_shmem,
-	.init_object = shmem_object_init,
-};
-
-struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
-						 u16 type, u16 instance)
-{
-	return intel_memory_region_create(i915, 0,
-					  totalram_pages() << PAGE_SHIFT,
-					  PAGE_SIZE, 0, 0,
-					  type, instance,
-					  &shmem_region_ops);
-}
-
-bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
-{
-	return obj->ops == &i915_gem_shmem_ops;
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 45cc5837ce00..980ad5825edd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1204,6 +1204,31 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	return 0;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_smem(struct drm_i915_private *i915,
+			     resource_size_t size)
+{
+	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+					     size, 0, 0);
+}
+
+bool i915_gem_object_is_smem(const struct drm_i915_gem_object *obj)
+{
+	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+
+#ifdef CONFIG_LOCKDEP
+	if (i915_gem_object_migratable(obj) &&
+	    i915_gem_object_evictable(obj))
+		assert_object_held(obj);
+#endif
+
+	/* An object that can also be placed in lmem is not considered smem */
+	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL))
+		return false;
+
+	return mr && (mr->type == INTEL_MEMORY_SYSTEM);
+}
+
 static const struct intel_memory_region_ops ttm_system_region_ops = {
 	.init_object = __i915_gem_ttm_object_init,
 	.release = intel_region_ttm_fini,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 9aad84059d56..5fdbc65d5801 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -65,7 +65,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
 	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
 		return 0;
 
-	backup = i915_gem_object_create_shmem(i915, obj->base.size);
+	backup = i915_gem_object_create_smem(i915, obj->base.size);
 	if (IS_ERR(backup))
 		return PTR_ERR(backup);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index ef15967be51a..3bb041782464 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1260,7 +1260,7 @@ igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
 		return ERR_PTR(-ENODEV);
 	}
 
-	return i915_gem_object_create_shmem(i915, size);
+	return i915_gem_object_create_smem(i915, size);
 }
 
 static struct drm_i915_gem_object *
@@ -1571,7 +1571,7 @@ static int igt_tmpfs_fallback(void *arg)
 
 	i915->mm.gemfs = NULL;
 
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_smem(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto out_restore;
@@ -1647,7 +1647,7 @@ static int igt_shrink_thp(void *arg)
 	 * up.
 	 */
 
-	obj = i915_gem_object_create_shmem(i915, SZ_2M);
+	obj = i915_gem_object_create_smem(i915, SZ_2M);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto out_vm;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ddd0772fd828..76817ff44f0e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -134,7 +134,7 @@ __create_vma(struct tiled_blits *t, size_t size, bool lmem)
 	if (lmem)
 		obj = i915_gem_object_create_lmem(i915, size, 0);
 	else
-		obj = i915_gem_object_create_shmem(i915, size);
+		obj = i915_gem_object_create_smem(i915, size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index b071a58dd6da..fe0082bc0e11 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -16,7 +16,7 @@ static int igt_dmabuf_export(void *arg)
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
 
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_smem(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -40,7 +40,7 @@ static int igt_dmabuf_import_self(void *arg)
 	struct dma_buf *dmabuf;
 	int err;
 
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_smem(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -404,7 +404,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	void *ptr;
 	int err;
 
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_smem(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index fe0a890775e2..ed91a38dcbdf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -18,7 +18,7 @@ static int igt_gem_object(void *arg)
 
 	/* Basic test to ensure we can create an object */
 
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_smem(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		pr_err("i915_gem_object_create failed, err=%d\n", err);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index d43d8dae0f69..60afa537147b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -18,7 +18,7 @@ static int mock_phys_object(void *arg)
 	 * i.e. exercise the i915_gem_object_phys API.
 	 */
 
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	obj = i915_gem_object_create_smem(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		pr_err("i915_gem_object_create failed, err=%d\n", err);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 07bef7128fdb..d9d834cda165 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -954,7 +954,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
 	obj = i915_gem_object_create_lmem(engine->i915, context_size,
 					  I915_BO_ALLOC_PM_VOLATILE);
 	if (IS_ERR(obj))
-		obj = i915_gem_object_create_shmem(engine->i915, context_size);
+		obj = i915_gem_object_create_smem(engine->i915, context_size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
@@ -1590,7 +1590,7 @@ static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
 	struct i915_vma *vma;
 	int err;
 
-	obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
+	obj = i915_gem_object_create_smem(engine->i915, CTX_WA_BB_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6d7ec3bf1f32..6ae0a9ce876c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -527,7 +527,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	struct i915_vma *vma;
 	int err;
 
-	obj = i915_gem_object_create_shmem(i915, engine->context_size);
+	obj = i915_gem_object_create_smem(i915, engine->context_size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 402f085f3a02..ddda4e7089e5 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -35,12 +35,6 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 	struct file *file;
 	void *ptr;
 
-	if (i915_gem_object_is_shmem(obj)) {
-		file = obj->base.filp;
-		atomic_long_inc(&file->f_count);
-		return file;
-	}
-
 	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
 						I915_MAP_WC : I915_MAP_WB);
 	if (IS_ERR(ptr))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 447a976c9f25..716979786fad 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -698,7 +698,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 						  I915_BO_ALLOC_CONTIGUOUS |
 						  I915_BO_ALLOC_PM_EARLY);
 	else
-		obj = i915_gem_object_create_shmem(gt->i915, size);
+		obj = i915_gem_object_create_smem(gt->i915, size);
 
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bb864655c495..b3f156e6798f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -403,14 +403,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
 		uc_fw->private_data_size = css->private_data_size;
 
-	if (HAS_LMEM(i915)) {
-		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
-		if (!IS_ERR(obj))
-			obj->flags |= I915_BO_ALLOC_PM_EARLY;
-	} else {
-		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
-	}
-
+	obj = i915_gem_object_create_fw(i915, fw->data, fw->size);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto fail;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 2459213b6c87..2bd54a2032f5 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1929,7 +1929,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	if (bb->ppgtt)
 		start_offset = gma & ~I915_GTT_PAGE_MASK;
 
-	bb->obj = i915_gem_object_create_shmem(s->engine->i915,
+	bb->obj = i915_gem_object_create_smem(s->engine->i915,
 					       round_up(bb_size + start_offset,
 							PAGE_SIZE));
 	if (IS_ERR(bb->obj)) {
@@ -3003,7 +3003,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	int ret = 0;
 	void *map;
 
-	obj = i915_gem_object_create_shmem(workload->engine->i915,
+	obj = i915_gem_object_create_smem(workload->engine->i915,
 					   roundup(ctx_size + CACHELINE_BYTES,
 						   PAGE_SIZE));
 	if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0a9c3fcc09b1..32811383cedd 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1570,7 +1570,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
 
-	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
+	bo = i915_gem_object_create_smem(stream->perf->i915, OA_BUFFER_SIZE);
 	if (IS_ERR(bo)) {
 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
 		return PTR_ERR(bo);
@@ -1877,7 +1877,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
 	config_length += 3; /* MI_BATCH_BUFFER_START */
 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
 
-	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
+	obj = i915_gem_object_create_smem(stream->perf->i915, config_length);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto err_free;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 1c841f68169a..e593687b1487 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -307,12 +307,8 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
 		instance = intel_region_map[i].instance;
 		switch (type) {
 		case INTEL_MEMORY_SYSTEM:
-			if (IS_DGFX(i915))
-				mem = i915_gem_ttm_system_setup(i915, type,
+			mem = i915_gem_ttm_system_setup(i915, type,
 								instance);
-			else
-				mem = i915_gem_shmem_setup(i915, type,
-							   instance);
 			break;
 		case INTEL_MEMORY_STOLEN_LOCAL:
 			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 21dcbd620758..9b1b9523249f 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -126,8 +126,5 @@ void intel_memory_region_debug(struct intel_memory_region *mr,
 struct intel_memory_region *
 i915_gem_ttm_system_setup(struct drm_i915_private *i915,
 			  u16 type, u16 instance);
-struct intel_memory_region *
-i915_gem_shmem_setup(struct drm_i915_private *i915,
-		     u16 type, u16 instance);
 
 #endif
-- 
2.35.1


From ad222bd9e9e324a1b45a87ccbc3d3e3f7c365ad9 Mon Sep 17 00:00:00 2001
From: Adrian Larumbe <adrian.larumbe at collabora.com>
Date: Fri, 11 Mar 2022 22:56:08 +0000
Subject: [PATCH 2/2] drm/i915: support mmap_offset and legacy mmap for TTM
 smem

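TTM-backed system memory objects no longer have a GEM shmem filp of
their own, so teach the legacy mmap ioctl to pin the object's pages and
map the shmem file owned by the TTM tt instead, looked up through a new
gem_ttm_get_filep() helper. For mmap_offset, only insist on the FIXED
type for lmem objects: the requested mmap type is passed down to the
backend's mmap_offset hook, cached in obj->mm.mmap_type and used by
i915_gem_mmap() to pick the matching vm_page_prot.

With that, the WB/WC/UC offset types keep working for system memory
objects through the existing uAPI; an illustrative userspace sketch
(handle, size and fd are placeholders):

	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, arg.offset);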
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 46 +++++++++++++++----
 .../gpu/drm/i915/gem/i915_gem_object_types.h  | 20 ++++----
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c       | 26 ++++++++++-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.h       |  3 ++
 4 files changed, 76 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3ea243d414d..2cc965df1f9f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -64,7 +64,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_i915_gem_object *obj;
+	struct file *filp;
 	unsigned long addr;
+	int ret;
 
 	/*
 	 * mmap ioctl is disallowed for all discrete platforms,
@@ -83,10 +85,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
-	/* prime objects have no backing filp to GEM mmap
-	 * pages from.
-	 */
-	if (!obj->base.filp) {
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret) {
+		addr = ret;
+		goto err;
+	}
+
+	filp = gem_ttm_get_filep(obj);
+
+	if (!filp) {
 		addr = -ENXIO;
 		goto err;
 	}
@@ -96,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	addr = vm_mmap(obj->base.filp, 0, args->size,
+	addr = vm_mmap(filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
 	if (IS_ERR_VALUE(addr))
@@ -111,7 +118,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			goto err;
 		}
 		vma = find_vma(mm, addr);
-		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+		if (vma && __vma_matches(vma, filp, addr, args->size))
 			vma->vm_page_prot =
 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 		else
@@ -700,10 +707,11 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
 		return -ENODEV;
 
 	if (obj->ops->mmap_offset)  {
-		if (mmap_type != I915_MMAP_TYPE_FIXED)
+		if (i915_gem_object_is_lmem(obj) &&
+		    mmap_type != I915_MMAP_TYPE_FIXED)
 			return -ENODEV;
 
-		*offset = obj->ops->mmap_offset(obj);
+		*offset = obj->ops->mmap_offset(obj, mmap_type);
 		return 0;
 	}
 
@@ -987,7 +995,27 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	fput(anon);
 
 	if (obj->ops->mmap_ops) {
-		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+		switch (obj->mm.mmap_type) {
+		case I915_MMAP_TYPE_WC:
+			vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+			break;
+		case I915_MMAP_TYPE_FIXED:
+			GEM_WARN_ON(1);
+			fallthrough;
+		case I915_MMAP_TYPE_WB:
+			vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+			break;
+		case I915_MMAP_TYPE_UC:
+			vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+			break;
+		case I915_MMAP_TYPE_GTT:
+			vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+			break;
+		default:
+			vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+		}
+		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 		vma->vm_ops = obj->ops->mmap_ops;
 		vma->vm_private_data = node->driver_private;
 		return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index fd54eb8f4826..355086897134 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -21,6 +21,14 @@ struct drm_i915_gem_object;
 struct intel_fronbuffer;
 struct intel_memory_region;
 
+enum i915_mmap_type {
+	I915_MMAP_TYPE_GTT = 0,
+	I915_MMAP_TYPE_WC,
+	I915_MMAP_TYPE_WB,
+	I915_MMAP_TYPE_UC,
+	I915_MMAP_TYPE_FIXED,
+};
+
 /*
  * struct i915_lut_handle tracks the fast lookups from handle to vma used
  * for execbuf. Although we use a radixtree for that mapping, in order to
@@ -83,7 +91,7 @@ struct drm_i915_gem_object_ops {
 		     const struct drm_i915_gem_pread *arg);
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
-	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+	u64 (*mmap_offset)(struct drm_i915_gem_object *obj, enum i915_mmap_type);
 	void (*unmap_virtual)(struct drm_i915_gem_object *obj);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
@@ -203,14 +211,6 @@ enum i915_map_type {
 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
 };
 
-enum i915_mmap_type {
-	I915_MMAP_TYPE_GTT = 0,
-	I915_MMAP_TYPE_WC,
-	I915_MMAP_TYPE_WB,
-	I915_MMAP_TYPE_UC,
-	I915_MMAP_TYPE_FIXED,
-};
-
 struct i915_mmap_offset {
 	struct drm_vma_offset_node vma_node;
 	struct drm_i915_gem_object *obj;
@@ -598,6 +598,8 @@ struct drm_i915_gem_object {
 		 * pages were last acquired.
 		 */
 		bool dirty:1;
+
+		enum i915_mmap_type mmap_type;
 	} mm;
 
 	struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 980ad5825edd..b5483e02c15b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -20,6 +20,8 @@
 #include "gem/i915_gem_ttm.h"
 #include "gem/i915_gem_ttm_move.h"
 #include "gem/i915_gem_ttm_pm.h"
+#include "gem/i915_gem_object_types.h"
+
 
 #define I915_TTM_PRIO_PURGE     0
 #define I915_TTM_PRIO_NO_PAGES  1
@@ -1068,11 +1070,13 @@ static const struct vm_operations_struct vm_ops_ttm = {
 	.close = ttm_vm_close,
 };
 
-static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
+static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type)
 {
 	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
 	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
 
+	obj->mm.mmap_type = mmap_type;
+
 	return drm_vma_node_offset_addr(&obj->base.vma_node);
 }
 
@@ -1234,6 +1238,26 @@ static const struct intel_memory_region_ops ttm_system_region_ops = {
 	.release = intel_region_ttm_fini,
 };
 
+/*
+ * Return the shmem filp backing a TTM object, or NULL if no ttm_tt exists.
+ */
+struct file *
+gem_ttm_get_filep(struct drm_i915_gem_object *obj)
+{
+	struct ttm_buffer_object *bo;
+	struct i915_ttm_tt *i915_tt;
+
+	bo = i915_gem_to_ttm(obj);
+	if (!bo->ttm) {
+		pr_err("ttm has not been allocated for bo\n");
+		return NULL;
+	}
+
+	i915_tt = container_of(bo->ttm, typeof(*i915_tt), ttm);
+
+	return i915_tt->filp;
+}
+
 struct intel_memory_region *
 i915_gem_ttm_system_setup(struct drm_i915_private *i915,
 			  u16 type, u16 instance)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 9d698ad00853..76b90895e529 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -91,4 +91,7 @@ static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
 	/* Once / if we support GGTT, this is also false for cached ttm_tts */
 	return mem->mem_type != I915_PL_SYSTEM;
 }
+
+struct file *gem_ttm_get_filep(struct drm_i915_gem_object *obj);
+
 #endif
-- 
2.35.1


