[Intel-gfx] [PATCH v2 33/37] drm/i915: support basic object migration

Matthew Auld matthew.auld at intel.com
Thu Jun 27 20:56:29 UTC 2019


We are going to want to be able to move objects between different memory
regions, like system memory and local memory. In the future, everything
should just be another region.
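
A rough sketch of the intended flow, following the selftests below (the
blitter context ce and the error handling are illustrative only; both
helpers assert that struct_mutex is held):

    err = i915_gem_object_prepare_move(obj);
    if (err)
        return err;

    /* blit the contents across, then swap the backing store */
    err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_LMEM);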

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Signed-off-by: Abdiel Janulgue <abdiel.janulgue at linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue at linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 129 ++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   8 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |   2 +-
 .../drm/i915/selftests/intel_memory_region.c  | 129 ++++++++++++++++++
 4 files changed, 267 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 343162bc8181..691af388e4e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -28,6 +28,7 @@
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_object.h"
+#include "i915_gem_object_blt.h"
 #include "i915_globals.h"
 
 static struct i915_global_object {
@@ -171,6 +172,134 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 }
 
+/*
+ * i915_gem_object_prepare_move - make an object ready for migration:
+ * check that it is unpinned and not in active use, revoke any mmaps,
+ * wait for idle and unbind it from the GTT.
+ */
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EINVAL;
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		return -EINVAL;
+
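+	/*
+	 * Each bound vma holds a reference on the object's pages, so
+	 * pins in excess of bind_count mean someone else is using the
+	 * pages directly and we cannot move them out from under them.
+	 */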
+	if (atomic_read(&obj->mm.pages_pin_count) >
+	    atomic_read(&obj->bind_count))
+		return -EBUSY;
+
+	if (obj->pin_global)
+		return -EBUSY;
+
+	i915_gem_object_release_mmap(obj);
+
+	GEM_BUG_ON(obj->mm.mapping);
+	GEM_BUG_ON(obj->base.filp && mapping_mapped(obj->base.filp->f_mapping));
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	return i915_gem_object_unbind(obj);
+}
+
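+/*
+ * i915_gem_object_migrate - move the object's backing store to the
+ * memory region @id, by blitting the contents into a donor object
+ * allocated from the new region and then stealing the donor's pages.
+ * Callers normally flush outstanding work and unbind the object first
+ * via i915_gem_object_prepare_move().
+ */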
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			    struct intel_context *ce,
+			    enum intel_region_id id)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_i915_gem_object *donor;
+	struct intel_memory_region *mem;
+	int err = 0;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	GEM_BUG_ON(id >= INTEL_MEMORY_UKNOWN);
+	GEM_BUG_ON(obj->memory_region->id == id);
+	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
+
+	mem = i915->regions[id];
+
+	donor = i915_gem_object_create_region(mem, obj->base.size, 0);
+	if (IS_ERR(donor))
+		return PTR_ERR(donor);
+
+	/* Copy the backing pages across if the object is already populated */
+	if (i915_gem_object_has_pages(obj)) {
+		struct sg_table *pages;
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto err_put_donor;
+
+		err = i915_gem_object_copy_blt(obj, donor, ce);
+		if (err)
+			goto err_put_donor;
+
+		i915_gem_object_lock(donor);
+		err = i915_gem_object_set_to_cpu_domain(donor, false);
+		i915_gem_object_unlock(donor);
+		if (err)
+			goto err_put_donor;
+
+		i915_retire_requests(i915);
+
+		i915_gem_object_unbind(donor);
+		err = i915_gem_object_unbind(obj);
+		if (err)
+			goto err_put_donor;
+
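+		/*
+		 * Swap the backing store: drop our old pages and adopt
+		 * the freshly blitted pages (and their page sizes) from
+		 * the donor.
+		 */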
+		mutex_lock(&obj->mm.lock);
+
+		pages = fetch_and_zero(&obj->mm.pages);
+		obj->ops->put_pages(obj, pages);
+
+		memcpy(&obj->mm.page_sizes, &donor->mm.page_sizes,
+		       sizeof(struct i915_page_sizes));
+		obj->mm.pages = __i915_gem_object_unset_pages(donor);
+
+		obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+		obj->mm.get_page.sg_idx = 0;
+		__i915_gem_object_reset_page_iter(obj);
+
+		mutex_unlock(&obj->mm.lock);
+	}
+
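+	/*
+	 * The object now takes over the donor's identity: release any
+	 * backing-store specific state, then adopt the donor's ops,
+	 * flags and memory blocks, and join the new region's object
+	 * list.
+	 */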
+	if (obj->ops->release)
+		obj->ops->release(obj);
+
+	/* We still need a little special casing for shmem */
+	if (obj->base.filp)
+		fput(fetch_and_zero(&obj->base.filp));
+	else
+		obj->base.filp = fetch_and_zero(&donor->base.filp);
+
+	obj->base.size = donor->base.size;
+	obj->memory_region = mem;
+	obj->flags = donor->flags;
+	obj->ops = donor->ops;
+
+	list_replace_init(&donor->blocks, &obj->blocks);
+
+	mutex_lock(&mem->obj_lock);
+	list_add(&obj->region_link, &mem->objects);
+	mutex_unlock(&mem->obj_lock);
+
+	GEM_BUG_ON(i915_gem_object_has_pages(donor));
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(donor));
+
+err_put_donor:
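+	/* Reached on the success path too; the donor is always released. */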
+	i915_gem_object_put(donor);
+	if (i915_gem_object_has_pinned_pages(obj))
+		i915_gem_object_unpin_pages(obj);
+
+	return err;
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a7bfe79015ee..11afb4dea215 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -40,8 +40,16 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
+enum intel_region_id;
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj);
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			    struct intel_context *ce,
+			    enum intel_region_id id);
+
 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
+void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj);
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 15eaaedffc46..c1bc047d5fc4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -139,7 +139,7 @@ void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 		obj->ops->writeback(obj);
 }
 
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 {
 	struct radix_tree_iter iter;
 	void __rcu **slot;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 23c466a1b800..ccfdc4cbd174 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -491,6 +491,59 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_smem_create_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	/* Create in lmem, then switch the backing store to smem */
+	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_SMEM);
+	if (err)
+		goto out_put;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	/* Create in smem (shmem), then switch the backing store to lmem */
+	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_LMEM);
+	if (err)
+		goto out_put;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 static int igt_lmem_write_gpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -601,6 +654,79 @@ static int igt_lmem_write_cpu(void *arg)
 	return err;
 }
 
+static int igt_lmem_pages_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	IGT_TIMEOUT(end_time);
+	I915_RND_STATE(prng);
+	u32 sz;
+	int err;
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+	obj = i915_gem_object_create_lmem(i915, sz, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_fill_blt(obj, ce, 0);
+	if (err)
+		goto out_put;
+
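+	/*
+	 * Bounce the object between lmem and smem until we time out,
+	 * checking after each migration that the backing store really
+	 * moved, and that the pages can still be written with the blt.
+	 */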
+	do {
+		err = i915_gem_object_prepare_move(obj);
+		if (err)
+			goto out_put;
+
+		if (i915_gem_object_is_lmem(obj)) {
+			err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_SMEM);
+			if (err)
+				goto out_put;
+
+			if (i915_gem_object_is_lmem(obj)) {
+				pr_err("object still backed by lmem\n");
+				err = -EINVAL;
+			}
+
+			if (!list_empty(&obj->blocks)) {
+				pr_err("object leaking memory region\n");
+				err = -EINVAL;
+			}
+
+			if (!i915_gem_object_has_struct_page(obj)) {
+				pr_err("object not backed by struct page\n");
+				err = -EINVAL;
+			}
+
+		} else {
+			err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_LMEM);
+			if (err)
+				goto out_put;
+
+			if (i915_gem_object_has_struct_page(obj)) {
+				pr_err("object still backed by struct page\n");
+				err = -EINVAL;
+			}
+
+			if (!i915_gem_object_is_lmem(obj)) {
+				pr_err("object not backed by lmem\n");
+				err = -EINVAL;
+			}
+		}
+
+		if (!err)
+			err = i915_gem_object_fill_blt(obj, ce, 0xdeadbeef);
+		if (err)
+			break;
+	} while (!__igt_timeout(end_time, NULL));
+
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -644,6 +770,9 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_lmem_create),
 		SUBTEST(igt_lmem_write_cpu),
 		SUBTEST(igt_lmem_write_gpu),
+		SUBTEST(igt_smem_create_migrate),
+		SUBTEST(igt_lmem_create_migrate),
+		SUBTEST(igt_lmem_pages_migrate),
 	};
 	int err;
 
-- 
2.20.1


