[PATCH 45/65] drm/i915: Add mock exercise for i915_gem_gtt_insert

Chris Wilson <chris@chris-wilson.co.uk>
Wed Jan 18 21:04:00 UTC 2017


i915_gem_gtt_insert should allocate a node from the available free space
in the GTT, evicting as necessary to create space.

Exercise it against the mock GGTT: reject a set of obviously invalid
requests, fill the GTT page-by-page, check that unbound nodes are
returned to their previous holes (the only free space), and finally
force evictions by inserting larger objects into the now full GTT.
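
For reference, the insertion interface as exercised below (the parameter
names here are just descriptive labels for the positional arguments):

	err = i915_gem_gtt_insert(vm, &node,
				  size, alignment, colour,
				  start, end, flags);

On success the node is filled in with the offset of a free range inside
[start, end); if no suitable hole exists and none can be made by
eviction, -ENOSPC is returned.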

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 225 ++++++++++++++++++++++++++
 1 file changed, 225 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6f87a0400ad8..b6d07d351f62 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -736,10 +736,235 @@ static int igt_gtt_reserve(void *arg)
 	return err;
 }
 
+static int igt_gtt_insert(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj, *on;
+	struct drm_mm_node tmp = {};
+	const struct invalid_insert {
+		u64 size;
+		u64 alignment;
+		u64 start, end;
+	} invalid_insert[] = {
+		{
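+			/* larger than the entire GTT */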
+			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
+			0, i915->ggtt.base.total,
+		},
+		{
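+			/* range smaller than the requested size */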
+			2*I915_GTT_PAGE_SIZE, 0,
+			0, I915_GTT_PAGE_SIZE,
+		},
+		{
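+			/* impossibly large (wrapping) size */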
+			-(u64)I915_GTT_PAGE_SIZE, 0,
+			0, 4*I915_GTT_PAGE_SIZE,
+		},
+		{
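+			/* impossibly large size with alignment */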
+			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
+			0, 4*I915_GTT_PAGE_SIZE,
+		},
+		{
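+			/* no offset in the range satisfies the alignment */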
+			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
+			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
+		},
+		{}
+	}, *ii;
+	LIST_HEAD(objects);
+	u64 total;
+	int err;
+
+	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
+	 * to the node, evicting if required.
+	 */
+
+	/* Check a couple of obviously invalid requests */
+	for (ii = invalid_insert; ii->size; ii++) {
+		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+					  ii->size, ii->alignment,
+					  I915_COLOR_UNEVICTABLE,
+					  ii->start, ii->end,
+					  0);
+		if (err != -ENOSPC) {
+			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) did not return -ENOSPC (err=%d)\n",
+			       ii->size, ii->alignment, ii->start, ii->end,
+			       err);
+			return -EINVAL;
+		}
+	}
+
+	/* Start by filling the GGTT */
+	for (total = 0;
+	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     total += I915_GTT_PAGE_SIZE) {
+		struct i915_vma *vma;
+
+		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err) {
+			i915_gem_object_put(obj);
+			goto err;
+		}
+
+		list_add(&obj->batch_pool_link, &objects);
+
+		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err;
+		}
+
+		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+					  obj->base.size, 0, obj->cache_level,
+					  0, i915->ggtt.base.total,
+					  0);
+		if (err == -ENOSPC) {
+			/* maxed out the GGTT space */
+			list_del(&obj->batch_pool_link);
+			i915_gem_object_unpin_pages(obj);
+			i915_gem_object_put(obj);
+			break;
+		}
+		if (err) {
+			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
+			       total, i915->ggtt.base.total, err);
+			goto err;
+		}
+		track_vma_bind(vma);
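+		/* Keep the vma pinned so that later insertions in this loop
+		 * cannot evict it while we fill the rest of the GTT.
+		 */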
+		__i915_vma_pin(vma);
+
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	}
+
+	list_for_each_entry(obj, &objects, batch_pool_link) {
+		struct i915_vma *vma;
+
+		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err;
+		}
+
+		if (!drm_mm_node_allocated(&vma->node)) {
+			pr_err("VMA was unexpectedly evicted!\n");
+			err = -EINVAL;
+			goto err;
+		}
+
+		__i915_vma_unpin(vma);
+	}
+
+	/* If we then reinsert, we should find the same hole */
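+	/* Unbinding leaves a single hole in an otherwise full GTT, so the
+	 * allocator has no choice but to reuse exactly that space.
+	 */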
+	list_for_each_entry_safe(obj, on, &objects, batch_pool_link) {
+		struct i915_vma *vma;
+		u64 offset;
+
+		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err;
+		}
+
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+		offset = vma->node.start;
+
+		err = i915_vma_unbind(vma);
+		if (err) {
+			pr_err("i915_vma_unbind failed with err=%d!\n", err);
+			goto err;
+		}
+
+		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+					  obj->base.size, 0, obj->cache_level,
+					  0, i915->ggtt.base.total,
+					  0);
+		if (err) {
+			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
+			       total, i915->ggtt.base.total, err);
+			goto err;
+		}
+		track_vma_bind(vma);
+
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+		if (vma->node.start != offset) {
+			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
+			       offset, vma->node.start);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* And then force evictions */
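+	/* The GGTT is still full, but all nodes are now unpinned, so the
+	 * larger objects below can only be inserted by evicting the old
+	 * single-page nodes.
+	 */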
+	for (total = 0;
+	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+	     total += 2*I915_GTT_PAGE_SIZE) {
+		struct i915_vma *vma;
+
+		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err) {
+			i915_gem_object_put(obj);
+			goto err;
+		}
+
+		list_add(&obj->batch_pool_link, &objects);
+
+		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err;
+		}
+
+		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+					  obj->base.size, 0, obj->cache_level,
+					  0, i915->ggtt.base.total,
+					  0);
+		if (err) {
+			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
+			       total, i915->ggtt.base.total, err);
+			goto err;
+		}
+		track_vma_bind(vma);
+
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	}
+
+err:
+	list_for_each_entry_safe(obj, on, &objects, batch_pool_link) {
+		i915_gem_object_unpin_pages(obj);
+		i915_gem_object_put(obj);
+	}
+	return err;
+}
+
 int i915_gem_gtt_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_gtt_reserve),
+		SUBTEST(igt_gtt_insert),
 	};
 	struct drm_i915_private *i915;
 	int err;
-- 
2.11.0


