[PATCH 1/2] full_ring

Matthew Auld matthew.auld at intel.com
Wed Oct 12 16:01:30 UTC 2022


---
 drivers/gpu/drm/i915/gt/intel_ring.c       |   4 +
 drivers/gpu/drm/i915/gt/selftest_migrate.c | 133 +++++++++++++++++----
 2 files changed, 111 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 15ec64d881c4..f8681c5026af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -244,6 +244,8 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 	if (unlikely(total_bytes > remain_usable)) {
 		const int remain_actual = ring->size - ring->emit;
 
+		pr_info("total_bytes=%u, usable=%u\n", total_bytes, remain_usable);
+
 		if (bytes > remain_usable) {
 			/*
 			 * Not enough space for the basic request. So need to
@@ -266,6 +268,8 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 	if (unlikely(total_bytes > ring->space)) {
 		int ret;
 
+		pr_info("total_bytes=%u, space=%u\n", total_bytes, ring->space);
+
 		/*
 		 * Space is reserved in the ringbuffer for finalising the
 		 * request, as that cannot be allowed to fail. During request
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 0dc5309c90a4..450585710516 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -527,6 +527,112 @@ static int live_migrate_clear(void *arg)
 	return 0;
 }
 
+static struct drm_i915_gem_object *
+create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
+{
+	struct drm_i915_gem_object *obj = NULL;
+	int err;
+
+	if (try_lmem)
+		obj = i915_gem_object_create_lmem(gt->i915, sz, 0);
+
+	if (IS_ERR_OR_NULL(obj)) {
+		obj = i915_gem_object_create_internal(gt->i915, sz);
+		if (IS_ERR(obj))
+			return obj;
+	}
+
+	i915_gem_object_trylock(obj, NULL);
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		i915_gem_object_unlock(obj);
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	return obj;
+}
+
+static int live_emit_pte_full_ring(void *arg)
+{
+	struct intel_migrate *migrate = arg;
+	struct drm_i915_private *i915 = migrate->context->engine->i915;
+	struct drm_i915_gem_object *obj;
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct sgt_dma it;
+	int len, sz, err;
+	u32 *cs;
+
+	/*
+	 * Simple regression test to check that we don't trample the
+	 * rq->reserved_space when returning from emit_pte().
+	 */
+
+	ce = intel_migrate_create_context(migrate);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	pr_info("pin_ww\n");
+	err = intel_context_pin(ce);
+	if (err)
+		goto out;
+
+	pr_info("create_obj\n");
+	obj = create_init_lmem_internal(to_gt(i915), 2 * PAGE_SIZE, false);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_unpin;
+	}
+
+	pr_info("rq\n");
+	rq = i915_request_create(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_obj;
+	}
+
+	sz = (rq->ring->space - rq->reserved_space) / sizeof(u32) - 6;
+	pr_info("sz=%d emit=%u, space=%u reserved=%u\n",
+		sz, rq->ring->emit, rq->ring->space, rq->reserved_space);
+	cs = intel_ring_begin(rq, sz);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto out_rq;
+	}
+
+	memset32(cs, MI_NOOP, sz);
+	cs += sz;
+	intel_ring_advance(rq, cs);
+
+	pr_info("sz=%d emit=%u, space=%u reserved=%u\n",
+		sz, rq->ring->emit, rq->ring->space, rq->reserved_space);
+
+	it = sg_sgt(obj->mm.pages->sgl);
+	len = emit_pte(rq, &it, obj->cache_level, false, 0, CHUNK_SZ);
+	if (!len) {
+		err = -EINVAL;
+		goto out_rq;
+	}
+	if (len < 0) {
+		err = len;
+		goto out_rq;
+	}
+
+	pr_info("sz=%d emit=%u, space=%u reserved=%u\n",
+		sz, rq->ring->emit, rq->ring->space, rq->reserved_space);
+
+out_rq:
+	i915_request_add(rq); /* GEM_BUG_ON() expected to fire here if emit_pte() trampled reserved_space */
+out_obj:
+	i915_gem_object_put(obj);
+out_unpin:
+	intel_context_unpin(ce);
+out:
+	intel_context_put(ce);
+	return err;
+}
+
 struct threaded_migrate {
 	struct intel_migrate *migrate;
 	struct task_struct *tsk;
@@ -637,6 +743,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_migrate_copy),
 		SUBTEST(live_migrate_clear),
+		SUBTEST(live_emit_pte_full_ring),
 		SUBTEST(thread_migrate_copy),
 		SUBTEST(thread_migrate_clear),
 		SUBTEST(thread_global_copy),
@@ -650,32 +757,6 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
 	return i915_subtests(tests, &gt->migrate);
 }
 
-static struct drm_i915_gem_object *
-create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
-{
-	struct drm_i915_gem_object *obj = NULL;
-	int err;
-
-	if (try_lmem)
-		obj = i915_gem_object_create_lmem(gt->i915, sz, 0);
-
-	if (IS_ERR_OR_NULL(obj)) {
-		obj = i915_gem_object_create_internal(gt->i915, sz);
-		if (IS_ERR(obj))
-			return obj;
-	}
-
-	i915_gem_object_trylock(obj, NULL);
-	err = i915_gem_object_pin_pages(obj);
-	if (err) {
-		i915_gem_object_unlock(obj);
-		i915_gem_object_put(obj);
-		return ERR_PTR(err);
-	}
-
-	return obj;
-}
-
 static int wrap_ktime_compare(const void *A, const void *B)
 {
 	const ktime_t *a = A, *b = B;
-- 
2.37.3



More information about the Intel-gfx-trybot mailing list