[Intel-gfx] [PATCH 37/49] drm/i915/bdw: Implement context switching (somewhat)

oscar.mateo at intel.com oscar.mateo at intel.com
Thu Mar 27 19:00:06 CET 2014


From: Ben Widawsky <benjamin.widawsky at intel.com>

A context switch occurs by submitting a context descriptor to the
ExecList Submission Port. Given that we can now initialize a context,
it's possible to begin implementing the context switch by creating the
descriptor and submitting it to the ELSP (actually two descriptors, since
the ELSP has two ports).

The context object must be mapped in the GGTT, which means it must exist
in the 0-4GB graphics VA range.

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>

v2: This code has changed quite a lot in various rebases. Of particular
importance is that now we use the globally unique Submission ID to send
to the hardware. Also, context pages are now pinned unconditionally to
GGTT, so there is no need to bind them.

Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
 drivers/gpu/drm/i915/i915_lrc.c | 84 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_lrc.c b/drivers/gpu/drm/i915/i915_lrc.c
index 91e7ea6..aa190a2 100644
--- a/drivers/gpu/drm/i915/i915_lrc.c
+++ b/drivers/gpu/drm/i915/i915_lrc.c
@@ -77,6 +77,28 @@
 #define CTX_R_PWR_CLK_STATE		0x42
 #define CTX_GPGPU_CSR_BASE_ADDRESS	0x44
 
+/*
+ * GEN8 context descriptor bit definitions. These are OR'ed together in
+ * get_descriptor() below to build the 64-bit value that gets submitted
+ * to the ExecList Submission Port (ELSP).
+ */
+#define GEN8_CTX_VALID (1<<0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+#define GEN8_CTX_FORCE_RESTORE (1<<2)
+#define GEN8_CTX_L3LLC_COHERENT (1<<5)
+#define GEN8_CTX_PRIVILEGE (1<<8)
+/* Context addressing modes, placed at GEN8_CTX_MODE_SHIFT */
+enum {
+	ADVANCED_CONTEXT=0,
+	LEGACY_CONTEXT,
+	ADVANCED_AD_CONTEXT,
+	LEGACY_64B_CONTEXT
+};
+#define GEN8_CTX_MODE_SHIFT 3
+/* Fault handling modes, placed at GEN8_CTX_FAULT_SHIFT */
+enum {
+	FAULT_AND_HANG=0,
+	FAULT_AND_HALT, /* Debug only */
+	FAULT_AND_STREAM,
+	FAULT_AND_CONTINUE /* Unsupported */
+};
+#define GEN8_CTX_FAULT_SHIFT 6
+#define GEN8_CTX_LRCA_SHIFT 12 /* NOTE(review): defined but not yet used below */
+#define GEN8_CTX_UNUSED_SHIFT 32 /* high dword: carries the submission ID */
+
 static inline u32 get_submission_id(struct i915_hw_context *ctx)
 {
 	struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -95,6 +117,68 @@ static inline u32 get_submission_id(struct i915_hw_context *ctx)
 	return submission_id;
 }
 
+/*
+ * Build the 64-bit context descriptor submitted to the ELSP to trigger a
+ * context switch.
+ *
+ * Low dword: valid bit, context mode, L3LLC coherency, privilege bit and
+ * the context object's GGTT address. High dword: the globally unique
+ * submission ID for this context.
+ */
+static inline uint64_t get_descriptor(struct i915_hw_context *ctx)
+{
+	uint64_t desc;
+	u32 submission_id = get_submission_id(ctx);
+
+	/* The context address must fit in 32 bits: the context object has
+	 * to be mapped in the 0-4GB range of the GGTT. */
+	BUG_ON(i915_gem_obj_ggtt_offset(ctx->obj) & 0xFFFFFFFF00000000ULL);
+
+	desc = GEN8_CTX_VALID;
+	desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+	/* NOTE(review): the offset is OR'ed in unmasked, so it is assumed
+	 * to be at least page-aligned and not to clobber the low descriptor
+	 * bits — TODO confirm. */
+	desc |= i915_gem_obj_ggtt_offset(ctx->obj);
+	desc |= GEN8_CTX_L3LLC_COHERENT;
+	/* Cast to u64 first: shifting a u32 by 32 would be undefined. */
+	desc |= (u64)submission_id << GEN8_CTX_UNUSED_SHIFT;
+	desc |= GEN8_CTX_PRIVILEGE;
+
+	/* TODO: WaDisableLiteRestore when we start using semaphore
+	 * signalling between Command Streamers */
+	/* desc |= GEN8_CTX_FORCE_RESTORE; */
+
+	return desc;
+}
+
+/*
+ * Write a pair of context descriptors to the ExecList Submission Port.
+ *
+ * @ring: engine whose ELSP register is written
+ * @ctx0: context for the first ELSP port (required)
+ * @ctx1: context for the second ELSP port, or NULL — an all-zeroes
+ *        (invalid) descriptor is submitted in that case
+ */
+static void submit_execlist(struct intel_engine *ring,
+			    struct i915_hw_context *ctx0,
+			    struct i915_hw_context *ctx1)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	uint64_t temp = 0;
+	uint32_t desc[4];
+
+	/* XXX: You must always write both descriptors in the order below. */
+	if (ctx1)
+		temp = get_descriptor(ctx1);
+	else
+		temp = 0;
+	desc[1] = (u32)(temp >> 32);
+	desc[0] = (u32)temp;
+
+	temp = get_descriptor(ctx0);
+	desc[3] = (u32)(temp >> 32);
+	desc[2] = (u32)temp;
+
+	/* Four dwords, second port's descriptor first, high dword before
+	 * low dword in each pair. */
+	I915_WRITE(RING_ELSP(ring), desc[1]);
+	I915_WRITE(RING_ELSP(ring), desc[0]);
+	I915_WRITE(RING_ELSP(ring), desc[3]);
+	/* The context is automatically loaded after the following */
+	I915_WRITE(RING_ELSP(ring), desc[2]);
+}
+
+/*
+ * Switch to up to two contexts by submitting them to the engine's ELSP.
+ *
+ * Both context objects must already be pinned (checked with BUG_ON);
+ * per the commit message, context pages are pinned unconditionally to
+ * the GGTT, so no binding is done here.
+ *
+ * NOTE(review): tail0/tail1 are accepted but not used yet — presumably
+ * reserved for updating the ringbuffer tail in a later patch; verify.
+ *
+ * Returns 0 (cannot currently fail).
+ */
+static int gen8_switch_context(struct intel_engine *ring,
+		struct i915_hw_context *to0, u32 tail0,
+		struct i915_hw_context *to1, u32 tail1)
+{
+	BUG_ON(!i915_gem_obj_is_pinned(to0->obj));
+
+	if (to1)
+		BUG_ON(!i915_gem_obj_is_pinned(to1->obj));
+
+	submit_execlist(ring, to0, to1);
+
+	return 0;
+}
+
 void gen8_gem_context_free(struct i915_hw_context *ctx)
 {
 	/* Global default contexts ringbuffers are take care of
-- 
1.9.0




More information about the Intel-gfx mailing list