[Intel-gfx] [PATCH 5/9] drm/i915: Rearrange switch_context to load the aliasing ppgtt on first use

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Apr 19 09:52:21 UTC 2016


On 19/04/16 07:49, Chris Wilson wrote:
> The code to switch_mm() is already handled by i915_switch_context(), the
> only difference required to set up the aliasing ppgtt is that we need to
> emit the switch_mm() on the first context, i.e. when transitioning from
> engine->last_context == NULL.

The explanation feels a bit short for the amount of change.
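
For instance, it would help to spell out that the key functional change is
in needs_pd_load_pre(): with no full ppgtt, the aliasing PD is now loaded
only on the very first switch on an engine, roughly (my paraphrase of the
hunk below, not a tested snippet):

	if (!to->ppgtt)
		return engine->last_context == NULL;

and that everything which used to be done explicitly from
i915_gem_init_hw() is now expected to happen lazily through that path.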

> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_drv.h         |  1 -
>   drivers/gpu/drm/i915/i915_gem.c         | 28 ---------------------
>   drivers/gpu/drm/i915/i915_gem_context.c | 43 +++++++++------------------------
>   drivers/gpu/drm/i915/i915_gem_gtt.c     | 13 ----------
>   drivers/gpu/drm/i915/i915_gem_gtt.h     |  1 -
>   5 files changed, 12 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 4c55f480f60b..77becfd5a09d 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3125,7 +3125,6 @@ int __must_check i915_gem_context_init(struct drm_device *dev);
>   void i915_gem_context_fini(struct drm_device *dev);
>   void i915_gem_context_reset(struct drm_device *dev);
>   int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
> -int i915_gem_context_enable(struct drm_i915_gem_request *req);
>   void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
>   int i915_switch_context(struct drm_i915_gem_request *req);
>   struct intel_context *
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index fd9a36badb45..4057c0febccd 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4245,34 +4245,6 @@ i915_gem_init_hw(struct drm_device *dev)
>   		}
>   	}
>
> -	/* Now it is safe to go back round and do everything else: */
> -	for_each_engine(engine, dev_priv) {
> -		struct drm_i915_gem_request *req;
> -
> -		req = i915_gem_request_alloc(engine, NULL);
> -		if (IS_ERR(req)) {
> -			ret = PTR_ERR(req);
> -			break;
> -		}
> -
> -		ret = i915_ppgtt_init_ring(req);
> -		if (ret)
> -			goto err_request;
> -
> -		ret = i915_gem_context_enable(req);
> -		if (ret)
> -			goto err_request;
> -
> -err_request:
> -		i915_add_request_no_flush(req);
> -		if (ret) {
> -			DRM_ERROR("Failed to enable %s, error=%d\n",
> -				  engine->name, ret);
> -			i915_gem_cleanup_engines(dev);
> -			break;
> -		}
> -	}
> -
>   out:
>   	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>   	return ret;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 6870556a180b..cf84138de4ec 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -471,27 +471,6 @@ void i915_gem_context_fini(struct drm_device *dev)
>   	dev_priv->kernel_context = NULL;
>   }
>
> -int i915_gem_context_enable(struct drm_i915_gem_request *req)
> -{
> -	struct intel_engine_cs *engine = req->engine;
> -	int ret;
> -
> -	if (i915.enable_execlists) {
> -		if (engine->init_context == NULL)
> -			return 0;
> -
> -		ret = engine->init_context(req);
> -	} else
> -		ret = i915_switch_context(req);
> -
> -	if (ret) {
> -		DRM_ERROR("ring init context: %d\n", ret);
> -		return ret;
> -	}
> -
> -	return 0;
> -}
> -

So the default context is no longer switched to at driver load, or even 
before the first non-default context? And that is fine?
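
If the intent is that the first real request does it lazily, i.e. the first
submission on an engine goes through something like (my reading of
i915_switch_context() after this patch, untested):

	/* engine->last_context == NULL on the first request, so
	 * needs_pd_load_pre() now returns true even without a full
	 * ppgtt and the aliasing PD gets loaded here, on first use. */
	ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	ret = ppgtt->switch_mm(ppgtt, req);

then the commit message should say so, and also explain why dropping the
explicit switch to the default context at load time is safe.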

>   static int context_idr_cleanup(int id, void *p, void *data)
>   {
>   	struct intel_context *ctx = p;
> @@ -661,7 +640,7 @@ static bool
>   needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
>   {
>   	if (!to->ppgtt)
> -		return false;
> +		return engine->last_context == NULL;
>
>   	if (engine->last_context == to &&
>   	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
> @@ -724,6 +703,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   {
>   	struct intel_context *to = req->ctx;
>   	struct intel_engine_cs *engine = req->engine;
> +	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
>   	struct intel_context *from;
>   	u32 hw_flags;
>   	int ret, i;
> @@ -765,7 +745,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   		 * Register Immediate commands in Ring Buffer before submitting
>   		 * a context."*/
>   		trace_switch_mm(engine, to);
> -		ret = to->ppgtt->switch_mm(to->ppgtt, req);
> +		ret = ppgtt->switch_mm(ppgtt, req);
>   		if (ret)
>   			goto unpin_out;
>   	}
> @@ -776,8 +756,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   		 * space. This means we must enforce that a page table load
>   		 * occur when this occurs. */
>   		hw_flags = MI_RESTORE_INHIBIT;
> -	else if (to->ppgtt &&
> -		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
> +	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
>   		hw_flags = MI_FORCE_RESTORE;
>   	else
>   		hw_flags = 0;
> @@ -822,7 +801,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   	 */
>   	if (needs_pd_load_post(to, hw_flags)) {
>   		trace_switch_mm(engine, to);
> -		ret = to->ppgtt->switch_mm(to->ppgtt, req);
> +		ret = ppgtt->switch_mm(to->ppgtt, req);

to->ppgtt should be ppgtt here, I think.
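
I.e. presumably:

	ret = ppgtt->switch_mm(ppgtt, req);

so the post-load uses the same ppgtt as the pre-load above.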

>   		/* The hardware context switch is emitted, but we haven't
>   		 * actually changed the state - so it's probably safe to bail
>   		 * here. Still, let the user know something dangerous has
> @@ -832,8 +811,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   			return ret;
>   	}
>
> -	if (to->ppgtt)
> -		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
> +	if (ppgtt)
> +		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
>
>   	for (i = 0; i < MAX_L3_SLICES; i++) {
>   		if (!(to->remap_slice & (1<<i)))
> @@ -887,15 +866,17 @@ int i915_switch_context(struct drm_i915_gem_request *req)
>   		struct intel_context *to = req->ctx;
>
>   		if (needs_pd_load_pre(engine, to)) {
> +			struct i915_hw_ppgtt *ppgtt;
>   			int ret;
>
> +			ppgtt = to->ppgtt ?: to_i915(req)->mm.aliasing_ppgtt;

Wrong base again.
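
Presumably this wants the same expression as in do_rcs_switch(), i.e.:

	ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;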

> +
>   			trace_switch_mm(engine, to);
> -			ret = to->ppgtt->switch_mm(to->ppgtt, req);
> +			ret = ppgtt->switch_mm(ppgtt, req);
>   			if (ret)
>   				return ret;
>
> -			/* Doing a PD load always reloads the page dirs */
> -			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
> +			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
>   		}
>
>   		if (to != engine->last_context) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 780e3ad3ca10..50bdba5cb6d2 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2180,19 +2180,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
>   	return 0;
>   }
>
> -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
> -{
> -	struct i915_hw_ppgtt *ppgtt = to_i915(req)->mm.aliasing_ppgtt;
> -
> -	if (i915.enable_execlists)
> -		return 0;
> -
> -	if (!ppgtt)
> -		return 0;
> -
> -	return ppgtt->switch_mm(ppgtt, req);
> -}
> -
>   struct i915_hw_ppgtt *
>   i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
>   {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index d7dd3d8a8758..333a2fc62b43 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -519,7 +519,6 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev);
>
>   int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
>   int i915_ppgtt_init_hw(struct drm_device *dev);
> -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
>   void i915_ppgtt_release(struct kref *kref);
>   struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
>   					struct drm_i915_file_private *fpriv);
>

Regards,

Tvrtko

