[Intel-gfx] [CI 45/47] drm/i915: Exercise manipulation of single pages in the GGTT

Chris Wilson chris at chris-wilson.co.uk
Mon Feb 13 17:15:56 UTC 2017


Move a single page of an object around within the GGTT and check
coherency of writes and reads.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld at intel.com>
---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 95 ++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)
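
(A note for archive readers, not part of the commit message: each loop
iteration points a freshly inserted GGTT PTE at the same physical page,
so after the first pass dword slot n of that one page holds the value n,
each slot having been written while the page sat at a different random
offset in the mappable aperture. The second pass revisits the slots in a
new random order through new PTEs; any mismatch points at insert_page,
the WC io mapping, or clear_range not taking effect. Assuming the live
selftest runner from earlier in this series, the subtest runs as part of
i915_gem_gtt_live_selftests, e.g. via the i915 live selftests module
parameter or the corresponding igt wrapper.)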

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5caa5153a394..36e5cb582af4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -903,6 +903,100 @@ static int igt_ggtt_lowlevel(void *arg)
 	return exercise_ggtt(arg, lowlevel_hole);
 }
 
+static int igt_ggtt_page(void *arg)
+{
+	const unsigned int count = PAGE_SIZE/sizeof(u32);
+	I915_RND_STATE(prng);
+	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node tmp;
+	unsigned int *order, n;
+	int err;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
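+	/* Allocate a single page we can point many different GGTT PTEs at. */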
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_unlock;
+	}
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_free;
+
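+	/* Reserve a count-page scratch window inside the mappable aperture. */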
+	memset(&tmp, 0, sizeof(tmp));
+	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+					  count * PAGE_SIZE, 0,
+					  I915_COLOR_UNEVICTABLE,
+					  0, ggtt->mappable_end,
+					  DRM_MM_INSERT_LOW);
+	if (err)
+		goto out_unpin;
+
+	order = i915_random_order(count, &prng);
+	if (!order) {
+		err = -ENOMEM;
+		goto out_remove;
+	}
+
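+	/* Write each dword through its own freshly inserted, random PTE. */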
+	for (n = 0; n < count; n++) {
+		u64 offset = tmp.start + order[n] * PAGE_SIZE;
+		u32 __iomem *vaddr;
+
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, 0),
+				       offset, I915_CACHE_NONE, 0);
+
+		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+		iowrite32(n, vaddr + n);
+		io_mapping_unmap_atomic(vaddr);
+
+		wmb();
+		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
+	}
+
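+	/* Read every dword back, in a new random order, via new PTEs. */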
+	i915_random_reorder(order, count, &prng);
+	for (n = 0; n < count; n++) {
+		u64 offset = tmp.start + order[n] * PAGE_SIZE;
+		u32 __iomem *vaddr;
+		u32 val;
+
+		ggtt->base.insert_page(&ggtt->base,
+				       i915_gem_object_get_dma_address(obj, 0),
+				       offset, I915_CACHE_NONE, 0);
+
+		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+		val = ioread32(vaddr + n);
+		io_mapping_unmap_atomic(vaddr);
+
+		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
+
+		if (val != n) {
+			pr_err("insert page failed: found %u, expected %u\n",
+			       val, n);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	kfree(order);
+out_remove:
+	drm_mm_remove_node(&tmp);
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out_free:
+	i915_gem_object_put(obj);
+out_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
 static void track_vma_bind(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -1360,6 +1454,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ggtt_drunk),
 		SUBTEST(igt_ggtt_walk),
 		SUBTEST(igt_ggtt_fill),
+		SUBTEST(igt_ggtt_page),
 	};
 
 	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
-- 
2.11.0
