[Intel-gfx] [RFCv2 10/14] drm/i915: update PDPs by condition when submitting the LRC context

Zhi Wang zhi.a.wang at intel.com
Thu Feb 18 11:42:17 UTC 2016


Previously, the PDPs inside the ring context were updated at two points:

- When populating an LRC context
- Before submitting an LRC context (only for 32b PPGTT, as the number of
  PDPs in use may change between submissions)

This patch postpones the PDP update to submission time and, for 48b PPGTT,
performs it only when the root pointer has changed. Under GVT-g, one GVT
context will be used by different guests, so the PPGTT instance attached to
the context may change before submission time. This patch gives the GVT
context a chance to load the new PPGTT instance into an already initialized
context.
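
In short (a condensed restatement of the execlists_update_context() hunk
below, shown here only for readability; all names come from this series),
the root pointers are reloaded at submission time whenever

	rq->ctx->root_pointer_dirty || !IS_48BIT_PPGTT(ppgtt)

is true: a 32b context is refreshed on every submission, since PDPs may
have been allocated since the last one, while a 48b context only reloads
its PML4 pointer when something has marked it dirty.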

[ Behavior Change ]

Before applying this patch:
- PDPs are populated when an LRC context is populated
- PDPs are updated at submission time only if the PPGTT is 32b

After applying this patch:
- PDPs are updated at submission time if the PPGTT is 32b or the
  root_pointer_dirty flag is set
- root_pointer_dirty is set when an LRC context is populated

GVT-g benefit:
- root_pointer_dirty is also set when GVT reloads a new PPGTT instance
  into an existing GVT context (see the sketch below)
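
A rough illustration of that GVT-g usage (not part of this patch: only
struct intel_context, struct i915_hw_ppgtt, the ctx->ppgtt pointer and the
root_pointer_dirty flag exist in i915; the helper name is made up):

	/* Sketch: swap a freshly shadowed guest PPGTT into an existing
	 * GVT context and force the root pointers (PML4 or PDP0-3) to be
	 * rewritten on the next submission of that context.
	 */
	static void gvt_switch_ppgtt(struct intel_context *ctx,
				     struct i915_hw_ppgtt *shadow_ppgtt)
	{
		ctx->ppgtt = shadow_ppgtt;
		ctx->root_pointer_dirty = true;
	}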

Signed-off-by: Zhi Wang <zhi.a.wang at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 +
 drivers/gpu/drm/i915/intel_lrc.c | 53 +++++++++++++++++++---------------------
 2 files changed, 26 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1fd5575..86a5646 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -888,6 +888,7 @@ struct intel_context {
 		u64 lrc_desc;
 		uint32_t *lrc_reg_state;
 	} engine[I915_NUM_RINGS];
+	bool root_pointer_dirty;
 
 	struct list_head link;
 };
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 01ea99c2..e0ab5b3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -391,22 +391,39 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
 	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 
-	if (ppgtt && !IS_48BIT_PPGTT(ppgtt)) {
-		/* True 32b PPGTT with dynamic page allocation: update PDP
-		 * registers and point the unallocated PDPs to scratch page.
-		 * PML4 is allocated during ppgtt init, so this is not needed
-		 * in 48-bit mode.
+	if (!ppgtt)
+		ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	/* Update root pointers for 32b PPGTT in every submission */
+	if (!rq->ctx->root_pointer_dirty && IS_48BIT_PPGTT(ppgtt))
+		return 0;
+
+	if (!IS_48BIT_PPGTT(ppgtt)) {
+		/* 32b PPGTT
+		 * PDP*_DESCRIPTOR contains the base address of space
+		 * supported. With dynamic page allocation, PDPs may
+		 * not be allocated at this point. Point the unallocated
+		 * PDPs to the scratch page
 		 */
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+	} else {
+		/* 64b PPGTT (48bit canonical)
+		 * PDP0_DESCRIPTOR contains the base address to PML4
+		 * and other PDP Descriptors are ignored.
+		 */
+		ASSIGN_CTX_PML4(ppgtt, reg_state);
 	}
 
+	rq->ctx->root_pointer_dirty = false;
 	return 0;
 }
 
@@ -2322,15 +2339,10 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
 {
 	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct page *page;
 	uint32_t *reg_state;
 	int ret;
 
-	if (!ppgtt)
-		ppgtt = dev_priv->mm.aliasing_ppgtt;
-
 	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
@@ -2408,24 +2420,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
 
-	if (IS_48BIT_PPGTT(ppgtt)) {
-		/* 64b PPGTT (48bit canonical)
-		 * PDP0_DESCRIPTOR contains the base address to PML4 and
-		 * other PDP Descriptors are ignored.
-		 */
-		ASSIGN_CTX_PML4(ppgtt, reg_state);
-	} else {
-		/* 32b PPGTT
-		 * PDP*_DESCRIPTOR contains the base address of space supported.
-		 * With dynamic page allocation, PDPs may not be allocated at
-		 * this point. Point the unallocated PDPs to the scratch page
-		 */
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-	}
-
 	if (ring->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
@@ -2435,6 +2429,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	kunmap_atomic(reg_state);
 	i915_gem_object_unpin_pages(ctx_obj);
 
+	/* PDPs inside the ring context need to be updated */
+	ctx->root_pointer_dirty = true;
+
 	return 0;
 }
 
-- 
1.9.1