[PATCH 25/49] drm/i915: Test simultaneously submitting requests to all engines

Chris Wilson <chris@chris-wilson.co.uk>
Sat Jan 21 14:10:47 UTC 2017


Use a recursive batch to busy-spin on each engine to ensure that they
are all running simultaneously.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/i915_gem_request.c | 178 ++++++++++++++++++++++
 1 file changed, 178 insertions(+)
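
A note for reviewers (below the fold, so it will not end up in the commit
message): the batch built by recursive_batch() consists of nothing but an
MI_BATCH_BUFFER_START that jumps back to its own first dword, so every
engine busy-spins in place once its request is submitted. live_all_engines()
then checks that none of the requests have completed, rewrites that first
dword to MI_BATCH_BUFFER_END through the WC map, and waits for them all.
Releasing every engine with a single CPU store is what lets the test assert
that no request retired early. A rough standalone sketch of the gen8+ dword
layout follows; the opcode encodings and the GPU address are illustrative
assumptions for the sake of a self-contained example, not taken from the
patch:

	/*
	 * Illustrative sketch only, not part of the patch: models the gen8+
	 * recursive batch built by recursive_batch() and how
	 * live_all_engines() later terminates it.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define MI_BATCH_BUFFER_START	(0x31u << 23)	/* assumed encoding */
	#define MI_BATCH_BUFFER_END	(0x0au << 23)	/* assumed encoding */

	int main(void)
	{
		uint64_t gpu_addr = 0x100000;	/* hypothetical vma->node.start */
		uint32_t batch[4] = {};

		/* gen8+: 3-dword MI_BATCH_BUFFER_START jumping to itself */
		batch[0] = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		batch[1] = (uint32_t)gpu_addr;		/* lower_32_bits() */
		batch[2] = (uint32_t)(gpu_addr >> 32);	/* upper_32_bits() */

		/* ...submitted to every engine; each CS now spins here... */

		/* one store (plus wmb()) releases every engine at once */
		batch[0] = MI_BATCH_BUFFER_END;

		printf("terminator dword: 0x%08x\n", batch[0]);
		return 0;
	}

Older gens differ only in the length/flags of the MI_BATCH_BUFFER_START, as
handled by the gen checks in recursive_batch().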

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 19103d87a4c3..fb6f8acc1429 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -249,10 +249,188 @@ static int live_nop_request(void *arg)
 	return err;
 }
 
+static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+{
+	struct i915_gem_context *ctx = i915->kernel_context;
+	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	struct drm_i915_gem_object *obj;
+	const int gen = INTEL_GEN(i915);
+	struct i915_vma *vma;
+	u32 *cmd;
+	int err;
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cmd)) {
+		i915_gem_object_put(obj);
+		return ERR_CAST(cmd);
+	}
+
+	if (gen >= 8) {
+		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+		*cmd++ = lower_32_bits(vma->node.start);
+		*cmd++ = upper_32_bits(vma->node.start);
+	} else if (gen >= 6) {
+		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
+		*cmd++ = lower_32_bits(vma->node.start);
+	} else if (gen >= 4) {
+		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+		*cmd++ = lower_32_bits(vma->node.start);
+	} else {
+		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+		*cmd++ = lower_32_bits(vma->node.start);
+	}
+
+	i915_gem_object_unpin_map(obj);
+
+	return vma;
+}
+
+static int live_all_engines(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
+	struct i915_vma *batch;
+	unsigned int reset_count;
+	unsigned int id;
+	u32 *cmd;
+	int err;
+
+	/* Check we can submit requests to all engines simultaneously. We
+	 * send a recursive batch to each engine - checking that we don't
+	 * block doing so, and that they don't complete too soon.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	if (err) {
+		pr_err("Failed to idle GPU before %s\n", __func__);
+		goto out_unlock;
+	}
+
+	i915->gpu_error.missed_irq_rings = 0;
+	reset_count = i915_reset_count(&i915->gpu_error);
+
+	batch = recursive_batch(i915);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
+		goto out_unlock;
+	}
+
+	for_each_engine(engine, i915, id) {
+		request[id] = i915_gem_request_alloc(engine,
+						     i915->kernel_context);
+		if (IS_ERR(request[id])) {
+			err = PTR_ERR(request[id]);
+			pr_err("%s: Request allocation failed with err=%d\n",
+			       __func__, err);
+			goto out_request;
+		}
+
+		engine->emit_bb_start(request[id],
+				      batch->node.start,
+				      batch->node.size,
+				      0);
+		if (!i915_gem_object_has_active_reference(batch->obj)) {
+			i915_gem_object_get(batch->obj);
+			i915_gem_object_set_active_reference(batch->obj);
+		}
+
+		i915_vma_move_to_active(batch, request[id], 0);
+		i915_gem_request_get(request[id]);
+		i915_add_request(request[id]);
+	}
+
+	for_each_engine(engine, i915, id) {
+		if (i915_gem_request_completed(request[id])) {
+			pr_err("%s(%s): request completed too early!\n",
+			       __func__, engine->name);
+			err = -EINVAL;
+			goto out_request;
+		}
+	}
+
+	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		pr_err("%s: failed to WC map batch, err=%d\n", __func__, err);
+		goto out_request;
+	}
+	*cmd = MI_BATCH_BUFFER_END;
+	wmb();
+	i915_gem_object_unpin_map(batch->obj);
+
+	for_each_engine(engine, i915, id) {
+		long timeout;
+
+		timeout = i915_wait_request(request[id],
+					    I915_WAIT_LOCKED,
+					    MAX_SCHEDULE_TIMEOUT);
+		if (timeout < 0) {
+			err = timeout;
+			pr_err("%s: error waiting for request on %s, err=%d\n",
+			       __func__, engine->name, err);
+			goto out_request;
+		}
+
+		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+		i915_gem_request_put(request[id]);
+		request[id] = NULL;
+	}
+
+	if (reset_count != i915_reset_count(&i915->gpu_error)) {
+		pr_err("%s: GPU was reset %d times!\n", __func__,
+		       i915_reset_count(&i915->gpu_error) - reset_count);
+		err = -EIO;
+		goto out_request;
+	}
+
+	if (i915->gpu_error.missed_irq_rings) {
+		pr_err("%s: Missed interrupts on rings %lx\n", __func__,
+		       i915->gpu_error.missed_irq_rings);
+		err = -EIO;
+		goto out_request;
+	}
+
+out_request:
+	for_each_engine(engine, i915, id)
+		if (request[id])
+			i915_gem_request_put(request[id]);
+	i915_vma_put(batch);
+out_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
 int i915_gem_request_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_nop_request),
+		SUBTEST(live_all_engines),
 	};
 	return i915_subtests(tests, i915);
 }
-- 
2.11.0


