[Intel-gfx] [PATCH v5 18/63] drm/i915: Populate logical context during first pin.
Maarten Lankhorst
maarten.lankhorst at linux.intel.com
Wed Nov 25 10:39:26 UTC 2020
This allows us to remove the pin_map call from state allocation, which saves
us a few retry loops. We won't need the mapping until first pin anyway.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  13 ++-
 drivers/gpu/drm/i915/gt/intel_lrc.c           | 107 +++++++++---------
 2 files changed, 62 insertions(+), 58 deletions(-)
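
For readers less familiar with the execlists pin flow, below is a minimal,
self-contained userspace sketch (not part of the patch, and not i915 code) of
the "populate on first pin" idea: allocation only reserves the state object,
and the first pre_pin maps and populates it exactly once. The names struct
ctx, ctx_pre_pin and populate_state are made up here and stand in for
intel_context, __execlists_context_pre_pin and populate_lr_context; a plain
bool stands in for CONTEXT_INIT_BIT / __test_and_set_bit, since this sketch
ignores the locking the real code relies on.

/* Illustrative only -- build with: cc -std=c99 sketch.c */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx {
	void *state;      /* backing storage, contents set up lazily */
	size_t size;
	bool initialized; /* stands in for CONTEXT_INIT_BIT */
};

static int ctx_alloc(struct ctx *ce, size_t size)
{
	/* Allocation only reserves storage; no map/populate here anymore. */
	ce->state = malloc(size);
	if (!ce->state)
		return -1;
	ce->size = size;
	ce->initialized = false;
	return 0;
}

static void populate_state(struct ctx *ce)
{
	/* One-time setup, deferred from allocation to the first pin. */
	memset(ce->state, 0, ce->size);
}

static void *ctx_pre_pin(struct ctx *ce)
{
	/* First pin populates; later pins just return the mapping. */
	if (!ce->initialized) {
		populate_state(ce);
		ce->initialized = true;
	}
	return ce->state;
}

int main(void)
{
	struct ctx ce;

	if (ctx_alloc(&ce, 4096))
		return 1;
	printf("first pin:  %p\n", ctx_pre_pin(&ce));
	printf("second pin: %p\n", ctx_pre_pin(&ce));
	free(ce.state);
	return 0;
}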
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 552cb57a2e8c..bebf52868563 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -64,12 +64,13 @@ struct intel_context {
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
-#define CONTEXT_VALID_BIT 2
-#define CONTEXT_CLOSED_BIT 3
-#define CONTEXT_USE_SEMAPHORES 4
-#define CONTEXT_BANNED 5
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
-#define CONTEXT_NOPREEMPT 7
+#define CONTEXT_INIT_BIT 2
+#define CONTEXT_VALID_BIT 3
+#define CONTEXT_CLOSED_BIT 4
+#define CONTEXT_USE_SEMAPHORES 5
+#define CONTEXT_BANNED 6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
+#define CONTEXT_NOPREEMPT 8
u32 *lrc_reg_state;
union {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index c5c50ee835dd..821f419d3108 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3524,9 +3524,39 @@ __execlists_update_reg_state(const struct intel_context *ce,
}
}
+static void populate_lr_context(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
+{
+ bool inhibit = true;
+ struct drm_i915_gem_object *ctx_obj = ce->state->obj;
+
+ set_redzone(vaddr, engine);
+
+ if (engine->default_state) {
+ shmem_read(engine->default_state, 0,
+ vaddr, engine->context_size);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
+ }
+
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /*
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
+ */
+ execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
+ ce, engine, ce->ring, inhibit);
+
+ __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
+}
+
static int
-execlists_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww, void **vaddr)
+__execlists_context_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
{
GEM_BUG_ON(!ce->state);
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
@@ -3534,8 +3564,20 @@ execlists_context_pre_pin(struct intel_context *ce,
*vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(ce->engine->i915) |
I915_MAP_OVERRIDE);
+ if (IS_ERR(*vaddr))
+ return PTR_ERR(*vaddr);
+
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+ populate_lr_context(ce, engine, *vaddr);
+
+ return 0;
+}
- return PTR_ERR_OR_ZERO(*vaddr);
+static int
+execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
+{
+ return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
}
static int
@@ -5343,45 +5385,6 @@ static void execlists_init_reg_state(u32 *regs,
__reset_stop_ring(regs, engine);
}
-static int
-populate_lr_context(struct intel_context *ce,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- bool inhibit = true;
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
- return PTR_ERR(vaddr);
- }
-
- set_redzone(vaddr, engine);
-
- if (engine->default_state) {
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
- inhibit = false;
- }
-
- /* Clear the ppHWSP (inc. per-context counters) */
- memset(vaddr, 0, PAGE_SIZE);
-
- /*
- * The second page of the context object contains some registers which
- * must be set up prior to the first execution.
- */
- execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
- ce, engine, ring, inhibit);
-
- __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
- i915_gem_object_unpin_map(ctx_obj);
- return 0;
-}
-
static struct intel_timeline *pinned_timeline(struct intel_context *ce)
{
struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
@@ -5445,20 +5448,11 @@ static int __execlists_context_alloc(struct intel_context *ce,
goto error_deref_obj;
}
- ret = populate_lr_context(ce, ctx_obj, engine, ring);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to populate LRC: %d\n", ret);
- goto error_ring_free;
- }
-
ce->ring = ring;
ce->state = vma;
return 0;
-error_ring_free:
- intel_ring_put(ring);
error_deref_obj:
i915_gem_object_put(ctx_obj);
return ret;
@@ -5582,6 +5576,15 @@ static int virtual_context_alloc(struct intel_context *ce)
return __execlists_context_alloc(ce, ve->siblings[0]);
}
+static int
+virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
static int virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
@@ -5615,7 +5618,7 @@ static void virtual_context_exit(struct intel_context *ce)
static const struct intel_context_ops virtual_context_ops = {
.alloc = virtual_context_alloc,
- .pre_pin = execlists_context_pre_pin,
+ .pre_pin = virtual_context_pre_pin,
.pin = virtual_context_pin,
.unpin = execlists_context_unpin,
.post_unpin = execlists_context_post_unpin,
--
2.29.2.222.g5d2a92d10f8