[PATCH 5/5] drm/i915/gt: Pipelined clear

Chris Wilson <chris@chris-wilson.co.uk>
Tue Dec 1 11:51:43 UTC 2020


Update the PTEs and emit a clear within a single unpreemptible packet
such that we can schedule and pipeline clears.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
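Reviewer notes, not for the commit message:

Per request, the clear loop disables arbitration (emit_no_arbitration),
writes the PTEs for up to CHUNK_SZ of the target into the migrate vm's
fixed window (emit_pte), flushes with EMIT_INVALIDATE, then emits a single
XY_COLOR_BLT over the freshly bound pages (emit_clear). Arbitration is
re-enabled between requests, so long clears remain preemptible at chunk
granularity.

A minimal caller sketch follows; gt->migrate and obj are hypothetical
names, and it assumes the object's pages remain pinned until the returned
request signals:

	struct i915_request *rq = NULL;
	int err;

	err = intel_migrate_clear(&gt->migrate, NULL,
				  obj->mm.pages->sgl, obj->cache_level,
				  &rq);
	if (rq) {
		/* The request fences the blit; wait here or chain work. */
		if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
			err = -ETIME;
		i915_request_put(rq);
	}
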
 drivers/gpu/drm/i915/gt/intel_migrate.c    | 139 +++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_migrate.h    |  13 ++
 drivers/gpu/drm/i915/gt/selftest_migrate.c |  64 ++++++++++
 3 files changed, 216 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 1ab9c569448e..53a33636cfe3 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -450,6 +450,120 @@ intel_context_migrate_copy(struct intel_context *ce,
 	return err;
 }
 
+static int emit_clear(struct i915_request *rq, int size)
+{
+	const int gen = INTEL_GEN(rq->engine->i915);
+	u32 *cs;
+
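+	/* One page per blit row; Y2 is a signed 16-bit field, hence S16_MAX */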
+	GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+	cs = intel_ring_begin(rq, gen >= 8 ? 8 : 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
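+	/* gen8+ takes a 64-bit destination offset, adding a dword to the blit */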
+	if (gen >= 8) {
+		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+		*cs++ = 0;
+		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+		*cs++ = 0; /* offset */
+		*cs++ = 0;
+		*cs++ = 0; /* value */
+		*cs++ = MI_NOOP;
+	} else {
+		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+		*cs++ = 0;
+		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+		*cs++ = 0;
+		*cs++ = 0; /* value */
+	}
+
+	intel_ring_advance(rq, cs);
+	return 0;
+}
+
+int
+intel_context_migrate_clear(struct intel_context *ce,
+			    struct dma_fence *await,
+			    struct scatterlist *sg,
+			    unsigned long flags,
+			    struct i915_request **out)
+{
+	struct sgt_dma it = sg_sgt(sg);
+	struct i915_request *rq;
+	int err;
+
+	*out = NULL;
+
+	/* GEM_BUG_ON(ce->vm != migrate_vm); */
+
+	err = intel_context_pin(ce);
+	if (err)
+		return err;
+
+	GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+	do {
+		int len;
+
+		rq = i915_request_create(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_ce;
+		}
+
+		if (await) {
+			err = i915_request_await_dma_fence(rq, await);
+			if (err)
+				goto out_rq;
+
+			if (rq->engine->emit_init_breadcrumb) {
+				err = rq->engine->emit_init_breadcrumb(rq);
+				if (err)
+					goto out_rq;
+			}
+
+			await = NULL;
+		}
+
+		/* The PTE updates + clear must not be interrupted. */
+		err = emit_no_arbitration(rq);
+		if (err)
+			goto out_rq;
+
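+		/* Bind the next chunk of the scatterlist into the migrate window */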
+		len = emit_pte(rq, &it, flags, 0, CHUNK_SZ);
+		if (len <= 0) {
+			err = len;
+			goto out_rq;
+		}
+
+		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+		if (err)
+			goto out_rq;
+
+		err = emit_clear(rq, len);
+
+		/* Arbitration is re-enabled between requests. */
+out_rq:
+		if (*out)
+			i915_request_put(*out);
+		*out = i915_request_get(rq);
+		i915_request_add(rq);
+		if (err || !it.sg)
+			break;
+
+		cond_resched();
+	} while (1);
+
+out_ce:
+	intel_context_unpin(ce);
+	return err;
+}
+
 int
 intel_migrate_copy(struct intel_migrate *m,
 		   struct dma_fence *await,
@@ -477,6 +591,31 @@ intel_migrate_copy(struct intel_migrate *m,
 	return err;
 }
 
+int
+intel_migrate_clear(struct intel_migrate *m,
+		    struct dma_fence *await,
+		    struct scatterlist *sg,
+		    unsigned long flags,
+		    struct i915_request **out)
+{
+	struct intel_context *ce;
+	int err;
+
+	if (!m->ce)
+		return -ENODEV;
+
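+	/* Prefer a fresh context; fall back to a reference on the pinned default */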
+	ce = intel_migrate_create_context(m);
+	if (IS_ERR(ce))
+		ce = intel_context_get(m->ce);
+	GEM_BUG_ON(IS_ERR(ce));
+
+	err = intel_context_migrate_clear(ce, await, sg, flags, out);
+
+	intel_context_put(ce);
+	return err;
+}
+
 void intel_migrate_fini(struct intel_migrate *m)
 {
 	if (!m->ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h
index ce0168c89f2b..540c3f20917f 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.h
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.h
@@ -31,6 +31,19 @@ int intel_context_migrate_copy(struct intel_context *ce,
 			       struct scatterlist *dst, unsigned long dst_flags,
 			       struct i915_request **out);
 
+int
+intel_migrate_clear(struct intel_migrate *m,
+		    struct dma_fence *await,
+		    struct scatterlist *sg,
+		    unsigned long flags,
+		    struct i915_request **out);
+int
+intel_context_migrate_clear(struct intel_context *ce,
+			    struct dma_fence *await,
+			    struct scatterlist *sg,
+			    unsigned long flags,
+			    struct i915_request **out);
+
 void intel_migrate_fini(struct intel_migrate *m);
 
 #endif /* __INTEL_MIGRATE__ */
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 179ab19ac659..5ba313f2ae82 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -95,10 +95,74 @@ static int live_migrate_copy(void *arg)
 	return err;
 }
 
+static int live_migrate_clear(void *arg)
+{
+	struct intel_migrate *m = arg;
+	struct drm_i915_private *i915 = m->ce->engine->i915;
+	I915_RND_STATE(prng);
+	int i, j, k;
+	int err = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+		struct drm_i915_gem_object *obj;
+		struct i915_request *rq;
+		u32 *vaddr;
+
+		obj = i915_gem_object_create_internal(i915, sizes[i]);
+		if (IS_ERR(obj))
+			break;
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+		if (IS_ERR(vaddr)) {
+			i915_gem_object_put(obj);
+			break;
+		}
+
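+		/* Fill with a non-zero pattern so a missed clear is detectable */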
+		for (j = 0; j < sizes[i] / sizeof(u32); j++)
+			vaddr[j] = ~j;
+		i915_gem_object_flush_map(obj);
+
+		err = intel_migrate_clear(m, NULL,
+					  obj->mm.pages->sgl,
+					  obj->cache_level,
+					  &rq);
+		if (err)
+			pr_err("Clear failed, size: %u\n", sizes[i]);
+
+		if (rq) {
+			if (i915_request_wait(rq, 0, HZ) < 0) {
+				pr_err("Clear timed out, size: %u\n", sizes[i]);
+				err = -ETIME;
+			}
+			i915_request_put(rq);
+		}
+
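+		/* Sample one random dword per page to verify it reads back as zero */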
+		for (j = 0; !err && j < sizes[i] / PAGE_SIZE; j++) {
+			k = i915_prandom_u32_max_state(1024, &prng);
+			if (vaddr[j * 1024 + k] != 0) {
+				pr_err("Clear failed, size: %u, offset: %zu\n",
+				       sizes[i], (j * 1024 + k) * sizeof(u32));
+				igt_hexdump(vaddr + j * 1024, 4096);
+				err = -EINVAL;
+			}
+		}
+
+		i915_gem_object_put(obj);
+		i915_gem_drain_freed_objects(i915);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 int intel_migrate_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_migrate_copy),
+		SUBTEST(live_migrate_clear),
 	};
 	struct intel_migrate m;
 	int err;
-- 
2.20.1


