[Intel-gfx] [CI 28/47] drm/i915: Exercise filling the top/bottom portions of the ppgtt

Chris Wilson chris at chris-wilson.co.uk
Mon Feb 13 17:15:39 UTC 2017


Allocate objects with a varying number of pages (which should hopefully
consist of a mixture of contiguous page chunks, and hence coalesced sg
lists) and check that the sg walkers in insert_pages cope.

v2: Check both small <-> large

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_object.h        |   3 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 362 ++++++++++++++++++++++++++
 2 files changed, 365 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 4114cc8a0b9b..02365a5f7c80 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,6 +33,8 @@
 
 #include <drm/i915_drm.h>
 
+#include "i915_selftest.h"
+
 struct drm_i915_gem_object_ops {
 	unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
@@ -84,6 +86,7 @@ struct drm_i915_gem_object {
 	struct list_head obj_exec_link;
 
 	struct list_head batch_pool_link;
+	I915_SELFTEST_DECLARE(struct list_head st_link);
 
 	unsigned long flags;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f3359be62515..f97580cbd7f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -22,8 +22,98 @@
  *
  */
 
+#include <linux/prime_numbers.h>
+
 #include "../i915_selftest.h"
 
+#include "mock_drm.h"
+
+static void fake_free_pages(struct drm_i915_gem_object *obj, /* drop the fake backing store */
+			    struct sg_table *pages)
+{
+	sg_free_table(pages); /* no real pages were pinned; only the sg_table itself is freed */
+	kfree(pages);
+}
+
+static struct sg_table *
+fake_get_pages(struct drm_i915_gem_object *obj) /* build an sg_table of fake dma addresses, no real memory */
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) /* fail fast and quietly; caller copes with ENOMEM */
+#define PFN_BIAS 0x1000 /* arbitrary nonzero pfn; NOTE(review): unlike GFP, never #undef'd below */
+	struct sg_table *pages;
+	struct scatterlist *sg;
+	typeof(obj->base.size) rem;
+
+	pages = kmalloc(sizeof(*pages), GFP);
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	rem = round_up(obj->base.size, BIT(31)) >> 31; /* number of sg entries: one per 2GiB chunk */
+	if (sg_alloc_table(pages, rem, GFP)) {
+		kfree(pages);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rem = obj->base.size;
+	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+		unsigned long len = min_t(typeof(rem), rem, BIT(31)); /* cap each entry at 2GiB */
+
+		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0); /* fake page; only dma fields are consumed by insert_pages */
+		sg_dma_address(sg) = page_to_phys(sg_page(sg));
+		sg_dma_len(sg) = len;
+
+		rem -= len;
+	}
+
+	obj->mm.madv = I915_MADV_DONTNEED; /* contents are expendable; shrinker may discard freely */
+	return pages;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj, /* release pages and reset object state */
+			   struct sg_table *pages)
+{
+	fake_free_pages(obj, pages);
+	obj->mm.dirty = false; /* nothing to write back; there is no real storage */
+	obj->mm.madv = I915_MADV_WILLNEED; /* restore default so the object can be reused */
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = { /* ops for objects backed by fake_get_pages */
+	.flags = I915_GEM_OBJECT_IS_SHRINKABLE, /* allow the shrinker to reap these (madv DONTNEED) */
+	.get_pages = fake_get_pages,
+	.put_pages = fake_put_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_dma_object(struct drm_i915_private *i915, u64 size) /* create a GEM object with fake (no-storage) backing */
+{
+	struct drm_i915_gem_object *obj;
+
+	GEM_BUG_ON(!size);
+	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+	if (overflows_type(size, obj->base.size)) /* u64 size must fit obj->base.size */
+		return ERR_PTR(-E2BIG);
+
+	obj = i915_gem_object_alloc(i915);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	drm_gem_private_object_init(&i915->drm, &obj->base, size); /* private: no shmem backing */
+	i915_gem_object_init(obj, &fake_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->cache_level = I915_CACHE_NONE;
+
+	/* Preallocate the "backing storage" */
+	if (i915_gem_object_pin_pages(obj)) /* NOTE(review): obj leaked on this path; needs i915_gem_object_put() */
+		return ERR_PTR(-ENOMEM);
+
+	i915_gem_object_unpin_pages(obj); /* keep the sg_table cached but unpinned */
+	return obj;
+}
+
 static int igt_ppgtt_alloc(void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
@@ -89,10 +179,282 @@ static int igt_ppgtt_alloc(void *arg)
 	return err;
 }
 
+static void close_object_list(struct list_head *objects, /* tear down every object on the selftest list */
+			      struct i915_address_space *vm)
+{
+	struct drm_i915_gem_object *obj, *on;
+
+	list_for_each_entry_safe(obj, on, objects, st_link) {
+		struct i915_vma *vma;
+
+		vma = i915_vma_instance(obj, vm, NULL); /* look up (or create) the vma for this vm */
+		if (!IS_ERR(vma))
+			i915_vma_close(vma); /* close before dropping the object reference */
+
+		list_del(&obj->st_link);
+		i915_gem_object_put(obj);
+	}
+}
+
+static int fill_hole(struct drm_i915_private *i915, /* pack the hole with VMA from both edges and verify placement */
+		     struct i915_address_space *vm,
+		     u64 hole_start, u64 hole_end,
+		     unsigned long end_time)
+{
+	const u64 hole_size = hole_end - hole_start;
+	struct drm_i915_gem_object *obj;
+	const unsigned long max_pages =
+		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT); /* fill at most half the hole */
+	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
+	unsigned long npages, prime, flags;
+	struct i915_vma *vma;
+	LIST_HEAD(objects);
+	int err;
+
+	/* Try binding many VMA working inwards from either edge */
+
+	flags = PIN_OFFSET_FIXED | PIN_USER; /* force exact placement at the computed offset */
+	if (i915_is_ggtt(vm))
+		flags |= PIN_GLOBAL;
+
+	for_each_prime_number_from(prime, 2, max_step) {
+		for (npages = 1; npages <= max_pages; npages *= prime) { /* prime powers give varied, non-overlapping sizes */
+			const u64 full_size = npages << PAGE_SHIFT;
+			const struct {
+				const char *name;
+				u64 offset;
+				int step;
+			} phases[] = {
+				{ "top-down", hole_end, -1, }, /* walk down from the top edge */
+				{ "bottom-up", hole_start, 1, }, /* walk up from the bottom edge */
+				{ }
+			}, *p;
+
+			obj = fake_dma_object(i915, full_size);
+			if (IS_ERR(obj))
+				break; /* out of memory: exercise what we already have */
+
+			list_add(&obj->st_link, &objects);
+
+			/* Align differing sized objects against the edges, and
+			 * check we don't walk off into the void when binding
+			 * them into the GTT.
+			 */
+			for (p = phases; p->name; p++) {
+				u64 offset;
+
+				offset = p->offset; /* pass 1: pin each object at its expected offset (list order) */
+				list_for_each_entry(obj, &objects, st_link) {
+					vma = i915_vma_instance(obj, vm, NULL);
+					if (IS_ERR(vma))
+						continue;
+
+					if (p->step < 0) {
+						if (offset < hole_start + obj->base.size)
+							break; /* next object would underrun the hole */
+						offset -= obj->base.size;
+					}
+
+					err = i915_vma_pin(vma, 0, 0, offset | flags);
+					if (err) {
+						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+						       __func__, p->name, err, npages, prime, offset);
+						goto err;
+					}
+
+					if (!drm_mm_node_allocated(&vma->node) ||
+					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+						       offset);
+						err = -EINVAL;
+						goto err;
+					}
+
+					i915_vma_unpin(vma); /* leave bound but unpinned for the re-check pass */
+
+					if (p->step > 0) {
+						if (offset + obj->base.size > hole_end)
+							break; /* next object would overrun the hole */
+						offset += obj->base.size;
+					}
+				}
+
+				offset = p->offset; /* pass 2: confirm nothing moved, then unbind (list order) */
+				list_for_each_entry(obj, &objects, st_link) {
+					vma = i915_vma_instance(obj, vm, NULL);
+					if (IS_ERR(vma))
+						continue;
+
+					if (p->step < 0) {
+						if (offset < hole_start + obj->base.size)
+							break;
+						offset -= obj->base.size;
+					}
+
+					if (!drm_mm_node_allocated(&vma->node) ||
+					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
+						       __func__, p->name, vma->node.start, vma->node.size,
+						       offset);
+						err = -EINVAL;
+						goto err;
+					}
+
+					err = i915_vma_unbind(vma);
+					if (err) {
+						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+						       __func__, p->name, vma->node.start, vma->node.size,
+						       err);
+						goto err;
+					}
+
+					if (p->step > 0) {
+						if (offset + obj->base.size > hole_end)
+							break;
+						offset += obj->base.size;
+					}
+				}
+
+				offset = p->offset; /* pass 3: repeat the pin walk in reverse list order */
+				list_for_each_entry_reverse(obj, &objects, st_link) {
+					vma = i915_vma_instance(obj, vm, NULL);
+					if (IS_ERR(vma))
+						continue;
+
+					if (p->step < 0) {
+						if (offset < hole_start + obj->base.size)
+							break;
+						offset -= obj->base.size;
+					}
+
+					err = i915_vma_pin(vma, 0, 0, offset | flags);
+					if (err) {
+						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+						       __func__, p->name, err, npages, prime, offset);
+						goto err;
+					}
+
+					if (!drm_mm_node_allocated(&vma->node) ||
+					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+						       offset);
+						err = -EINVAL;
+						goto err;
+					}
+
+					i915_vma_unpin(vma);
+
+					if (p->step > 0) {
+						if (offset + obj->base.size > hole_end)
+							break;
+						offset += obj->base.size;
+					}
+				}
+
+				offset = p->offset; /* pass 4: re-check and unbind in reverse list order */
+				list_for_each_entry_reverse(obj, &objects, st_link) {
+					vma = i915_vma_instance(obj, vm, NULL);
+					if (IS_ERR(vma))
+						continue;
+
+					if (p->step < 0) {
+						if (offset < hole_start + obj->base.size)
+							break;
+						offset -= obj->base.size;
+					}
+
+					if (!drm_mm_node_allocated(&vma->node) ||
+					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+						       offset);
+						err = -EINVAL;
+						goto err;
+					}
+
+					err = i915_vma_unbind(vma);
+					if (err) {
+						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+						       __func__, p->name, vma->node.start, vma->node.size,
+						       err);
+						goto err;
+					}
+
+					if (p->step > 0) {
+						if (offset + obj->base.size > hole_end)
+							break;
+						offset += obj->base.size;
+					}
+				}
+			}
+
+			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
+					__func__, npages, prime)) {
+				err = -EINTR; /* budget exhausted: abort cleanly, not a failure of the vm */
+				goto err;
+			}
+		}
+
+		close_object_list(&objects, vm); /* drop this prime's objects before the next round */
+	}
+
+	return 0;
+
+err:
+	close_object_list(&objects, vm);
+	return err;
+}
+
<!-- -->
+static int exercise_ppgtt(struct drm_i915_private *dev_priv, /* run func over a freshly created full ppgtt */
+			  int (*func)(struct drm_i915_private *i915,
+				      struct i915_address_space *vm,
+				      u64 hole_start, u64 hole_end,
+				      unsigned long end_time))
+{
+	struct drm_file *file;
+	struct i915_hw_ppgtt *ppgtt;
+	IGT_TIMEOUT(end_time);
+	int err;
+
+	if (!USES_FULL_PPGTT(dev_priv)) /* nothing to test without per-process GTT */
+		return 0;
+
+	file = mock_file(dev_priv); /* the ppgtt needs an owning file context */
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+	if (IS_ERR(ppgtt)) {
+		err = PTR_ERR(ppgtt);
+		goto out_unlock;
+	}
+	GEM_BUG_ON(offset_in_page(ppgtt->base.total)); /* total address space must be page-aligned */
+	GEM_BUG_ON(ppgtt->base.closed);
+
+	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time); /* whole vm is the hole */
+
+	i915_ppgtt_close(&ppgtt->base);
+	i915_ppgtt_put(ppgtt);
+out_unlock:
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	mock_file_free(dev_priv, file);
+	return err;
+}
+
+static int igt_ppgtt_fill(void *arg) /* selftest entry: fill_hole over a full ppgtt */
+{
+	return exercise_ppgtt(arg, fill_hole);
+}
+
 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_ppgtt_alloc),
+		SUBTEST(igt_ppgtt_fill),
 	};
 
 	return i915_subtests(tests, i915);
-- 
2.11.0



More information about the Intel-gfx mailing list