[Intel-gfx] [PATCH] drm/i915: Clean up lrc context init

Daniel Vetter daniel at ffwll.ch
Tue Aug 11 05:53:36 PDT 2015


On Fri, Aug 07, 2015 at 11:05:24AM +0100, Nick Hoath wrote:
> Clean up lrc context init by:
>    - Move context initialisation in to i915_gem_init_hw
>    - Move one off initialisation for render ring to
>         i915_gem_validate_context
>    - Move default context initialisation to logical_ring_init
> 
> Rename intel_lr_context_deferred_create to
> intel_lr_context_deferred_alloc, to reflect reduced functionality.
> 
> Issue: VIZ-4798
> Signed-off-by: Nick Hoath <nicholas.hoath at intel.com>

The commit message is a bit thin since it only describes what the patch does
and not why. The goal here is to put the init/init_hw split we have in all
other places into place for lrc context init, i.e. allocate resources
in one function (for driver load) and set up all the hw state in another
function (used for driver load, resume, and after gpu reset).

I think this patch achieves this, with the few minor comments below
addressed. So looks like a step in the right direction.

We still have a bit of a mess in init ordering and a lot of if (execlist)
checks all over the place, but imo that cleanup should be done separately.
-Daniel

> ---
>  drivers/gpu/drm/i915/i915_drv.h            |   1 -
>  drivers/gpu/drm/i915/i915_gem.c            |  23 ++---
>  drivers/gpu/drm/i915/i915_gem_context.c    |  21 -----
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  36 +++++++-
>  drivers/gpu/drm/i915/intel_lrc.c           | 133 +++++++++++------------------
>  drivers/gpu/drm/i915/intel_lrc.h           |   4 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |   2 +
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |   1 +
>  8 files changed, 105 insertions(+), 116 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 92ea9ad..aa6c6dc 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3065,7 +3065,6 @@ int __must_check i915_gem_context_init(struct drm_device *dev);
>  void i915_gem_context_fini(struct drm_device *dev);
>  void i915_gem_context_reset(struct drm_device *dev);
>  int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
> -int i915_gem_context_enable(struct drm_i915_gem_request *req);
>  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
>  int i915_switch_context(struct drm_i915_gem_request *req);
>  struct intel_context *
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index d9f2701..7ebc6e3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4996,14 +4996,8 @@ int i915_gem_init_rings(struct drm_device *dev)
>  			goto cleanup_vebox_ring;
>  	}
>  
> -	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
> -	if (ret)
> -		goto cleanup_bsd2_ring;
> -
>  	return 0;
>  
> -cleanup_bsd2_ring:
> -	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
>  cleanup_vebox_ring:
>  	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
>  cleanup_blt_ring:
> @@ -5022,6 +5016,7 @@ i915_gem_init_hw(struct drm_device *dev)
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct intel_engine_cs *ring;
>  	int ret, i, j;
> +	struct drm_i915_gem_request *req;
>  
>  	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
>  		return -EIO;
> @@ -5073,9 +5068,12 @@ i915_gem_init_hw(struct drm_device *dev)
>  			goto out;
>  	}
>  
> +	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
> +	if (ret)
> +		goto out;
> +
>  	/* Now it is safe to go back round and do everything else: */
>  	for_each_ring(ring, dev_priv, i) {
> -		struct drm_i915_gem_request *req;
>  
>  		WARN_ON(!ring->default_context);
>  
> @@ -5098,9 +5096,14 @@ i915_gem_init_hw(struct drm_device *dev)
>  			goto out;
>  		}
>  
> -		ret = i915_gem_context_enable(req);
> -		if (ret && ret != -EIO) {
> -			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
> +		ret = 0;
> +		if (ring->switch_context != NULL) {
> +			ret = ring->switch_context(req);
> +		} else if (ring->init_context != NULL) {
> +			ret = ring->init_context(req);
> +		}

Adding switch_context as a replacement for the if (execlist) check in
gem_context_enable doesn't seem to improve clarity, but does add another
layer of indirection. I'd just drop it.

> +		if (ret) {
> +			DRM_ERROR("ring init context: %d\n", ret);
>  			i915_gem_request_cancel(req);
>  			i915_gem_cleanup_ringbuffer(dev);
>  			goto out;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index b77a8f7..18bd5f5 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -407,27 +407,6 @@ void i915_gem_context_fini(struct drm_device *dev)
>  	i915_gem_context_unreference(dctx);
>  }
>  
> -int i915_gem_context_enable(struct drm_i915_gem_request *req)
> -{
> -	struct intel_engine_cs *ring = req->ring;
> -	int ret;
> -
> -	if (i915.enable_execlists) {
> -		if (ring->init_context == NULL)
> -			return 0;
> -
> -		ret = ring->init_context(req);
> -	} else
> -		ret = i915_switch_context(req);
> -
> -	if (ret) {
> -		DRM_ERROR("ring init context: %d\n", ret);
> -		return ret;
> -	}
> -
> -	return 0;
> -}
> -
>  static int context_idr_cleanup(int id, void *p, void *data)
>  {
>  	struct intel_context *ctx = p;
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 923a3c4..37b440a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -994,6 +994,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
>  {
>  	struct intel_context *ctx = NULL;
>  	struct i915_ctx_hang_stats *hs;
> +	int ret;
>  
>  	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
>  		return ERR_PTR(-EINVAL);
> @@ -1009,14 +1010,47 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
>  	}
>  
>  	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
> -		int ret = intel_lr_context_deferred_create(ctx, ring);
> +		ret = intel_lr_context_deferred_alloc(ctx, ring);
>  		if (ret) {
>  			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
>  			return ERR_PTR(ret);
>  		}
> +
> +		if (ring->id == RCS && !ctx->rcs_initialized) {
> +			if (ring->init_context) {
> +				struct drm_i915_gem_request *req;
> +
> +				ret = i915_gem_request_alloc(ring,
> +					ring->default_context, &req);
> +				if (ret) {
> +					DRM_ERROR("ring create req: %d\n",
> +						  ret);
> +					i915_gem_request_cancel(req);
> +					goto validate_error;
> +				}
> +
> +				ret = ring->init_context(req);
> +				if (ret) {
> +					DRM_ERROR("ring init context: %d\n",
> +						  ret);
> +					i915_gem_request_cancel(req);
> +					goto validate_error;
> +				}
> +				i915_add_request_no_flush(req);
> +			}
> +
> +			ctx->rcs_initialized = true;

I think calling ->init_context should be done in the deferred lrc alloc.
And we can simplify the condition to a simple if (ring->init_context) and
also drop rcs_initialized entirely - if we always init it when allocating
the per-ring ctx data (and clean up in case it fails) then there's no need
to track the init status at all. Removing rcs_initialized might be a
separate step if it's a bit more involved.

Oh, random bikeshed of the day: it might be pretty to wrap the execlist
stuff in our ctx structure with an lrc_ prefix, to better distinguish it
from legacy and generic ctx data. But really only if you're bored.

> +		}
>  	}
>  
>  	return ctx;
> +
> +validate_error:
> +	intel_destroy_ringbuffer_obj(ctx->engine[ring->id].ringbuf);
> +	drm_gem_object_unreference(&ctx->engine[ring->id].state->base);
> +	ctx->engine[ring->id].ringbuf = NULL;
> +	ctx->engine[ring->id].state = NULL;
> +	return ERR_PTR(ret);
>  }
>  
>  void
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 0007d45..708f03e 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -1334,11 +1334,31 @@ out:
>  	return ret;
>  }
>  
> +static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
> +		struct drm_i915_gem_object *default_ctx_obj)
> +{
> +	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> +
> +	/* The status page is offset 0 from the default context object
> +	 * in LRC mode. */
> +	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
> +	ring->status_page.page_addr =
> +			kmap(sg_page(default_ctx_obj->pages->sgl));
> +	ring->status_page.obj = default_ctx_obj;
> +
> +	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
> +			(u32)ring->status_page.gfx_addr);
> +	POSTING_READ(RING_HWS_PGA(ring->mmio_base));
> +}
> +
>  static int gen8_init_common_ring(struct intel_engine_cs *ring)
>  {
>  	struct drm_device *dev = ring->dev;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  
> +	lrc_setup_hardware_status_page(ring,
> +				ring->default_context->engine[ring->id].state);
> +
>  	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
>  	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
>  
> @@ -1732,9 +1752,34 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
>  	if (ret)
>  		return ret;
>  
> -	ret = intel_lr_context_deferred_create(ring->default_context, ring);
> +	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
> +	if (ret)
> +		return ret;
> +
> +	ret = i915_gem_obj_ggtt_pin(
> +		ring->default_context->engine[ring->id].state,
> +		GEN8_LR_CONTEXT_ALIGN, 0);
> +	if (ret) {
> +		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
> +				ret);
> +		return ret;
> +	}
> +
> +	ret = intel_pin_and_map_ringbuffer_obj(dev,
> +		ring->default_context->engine[ring->id].ringbuf);
> +	if (ret) {
> +		DRM_ERROR(
> +			"Failed to pin and map ringbuffer %s: %d\n",
> +			ring->name, ret);
> +		goto error_unpin_ggtt;
> +	}
>  
>  	return ret;
> +
> +error_unpin_ggtt:
> +	i915_gem_object_ggtt_unpin(
> +		ring->default_context->engine[ring->id].state);
> +	return ret;
>  }
>  
>  static int logical_render_ring_init(struct drm_device *dev)
> @@ -1935,14 +1980,8 @@ int intel_logical_rings_init(struct drm_device *dev)
>  			goto cleanup_vebox_ring;
>  	}
>  
> -	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
> -	if (ret)
> -		goto cleanup_bsd2_ring;
> -
>  	return 0;
>  
> -cleanup_bsd2_ring:
> -	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
>  cleanup_vebox_ring:
>  	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
>  cleanup_blt_ring:
> @@ -2183,25 +2222,8 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
>  	return ret;
>  }
>  
> -static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
> -		struct drm_i915_gem_object *default_ctx_obj)
> -{
> -	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> -
> -	/* The status page is offset 0 from the default context object
> -	 * in LRC mode. */
> -	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
> -	ring->status_page.page_addr =
> -			kmap(sg_page(default_ctx_obj->pages->sgl));
> -	ring->status_page.obj = default_ctx_obj;
> -
> -	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
> -			(u32)ring->status_page.gfx_addr);
> -	POSTING_READ(RING_HWS_PGA(ring->mmio_base));
> -}
> -
>  /**
> - * intel_lr_context_deferred_create() - create the LRC specific bits of a context
> + * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
>   * @ctx: LR context to create.
>   * @ring: engine to be used with the context.
>   *
> @@ -2213,10 +2235,10 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
>   *
>   * Return: non-zero on error.
>   */
> -int intel_lr_context_deferred_create(struct intel_context *ctx,
> +
> +int intel_lr_context_deferred_alloc(struct intel_context *ctx,
>  				     struct intel_engine_cs *ring)
>  {
> -	const bool is_global_default_ctx = (ctx == ring->default_context);
>  	struct drm_device *dev = ring->dev;
>  	struct drm_i915_gem_object *ctx_obj;
>  	uint32_t context_size;
> @@ -2234,22 +2256,12 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
>  		return -ENOMEM;
>  	}
>  
> -	if (is_global_default_ctx) {
> -		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
> -		if (ret) {
> -			DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
> -					ret);
> -			drm_gem_object_unreference(&ctx_obj->base);
> -			return ret;
> -		}
> -	}
> -
>  	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
>  	if (!ringbuf) {
>  		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
>  				ring->name);
>  		ret = -ENOMEM;
> -		goto error_unpin_ctx;
> +		goto error_deref_obj;
>  	}
>  
>  	ringbuf->ring = ring;
> @@ -2269,65 +2281,24 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
>  				ring->name, ret);
>  			goto error_free_rbuf;
>  		}
> -
> -		if (is_global_default_ctx) {
> -			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
> -			if (ret) {
> -				DRM_ERROR(
> -					"Failed to pin and map ringbuffer %s: %d\n",
> -					ring->name, ret);
> -				goto error_destroy_rbuf;
> -			}
> -		}
> -
>  	}
>  
>  	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
>  	if (ret) {
>  		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
> -		goto error;
> +		goto error_destroy_rbuf;
>  	}
>  
>  	ctx->engine[ring->id].ringbuf = ringbuf;
>  	ctx->engine[ring->id].state = ctx_obj;
>  
> -	if (ctx == ring->default_context)
> -		lrc_setup_hardware_status_page(ring, ctx_obj);
> -	else if (ring->id == RCS && !ctx->rcs_initialized) {
> -		if (ring->init_context) {
> -			struct drm_i915_gem_request *req;
> -
> -			ret = i915_gem_request_alloc(ring, ctx, &req);
> -			if (ret)
> -				return ret;
> -
> -			ret = ring->init_context(req);
> -			if (ret) {
> -				DRM_ERROR("ring init context: %d\n", ret);
> -				i915_gem_request_cancel(req);
> -				ctx->engine[ring->id].ringbuf = NULL;
> -				ctx->engine[ring->id].state = NULL;
> -				goto error;
> -			}
> -
> -			i915_add_request_no_flush(req);
> -		}
> -
> -		ctx->rcs_initialized = true;
> -	}
> -
>  	return 0;
>  
> -error:
> -	if (is_global_default_ctx)
> -		intel_unpin_ringbuffer_obj(ringbuf);
>  error_destroy_rbuf:
>  	intel_destroy_ringbuffer_obj(ringbuf);
>  error_free_rbuf:
>  	kfree(ringbuf);
> -error_unpin_ctx:
> -	if (is_global_default_ctx)
> -		i915_gem_object_ggtt_unpin(ctx_obj);
> +error_deref_obj:
>  	drm_gem_object_unreference(&ctx_obj->base);
>  	return ret;
>  }
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index e0299fb..959c68e 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -68,8 +68,8 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
>  
>  /* Logical Ring Contexts */
>  void intel_lr_context_free(struct intel_context *ctx);
> -int intel_lr_context_deferred_create(struct intel_context *ctx,
> -				     struct intel_engine_cs *ring);
> +int intel_lr_context_deferred_alloc(struct intel_context *ctx,
> +				    struct intel_engine_cs *ring);
>  void intel_lr_context_unpin(struct drm_i915_gem_request *req);
>  void intel_lr_context_reset(struct drm_device *dev,
>  			struct intel_context *ctx);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 385859e..f0d029f 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2676,6 +2676,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
>  	else
>  		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
>  	ring->init_hw = init_render_ring;
> +	if (!i915.enable_execlists)
> +		ring->switch_context = i915_switch_context;
>  	ring->cleanup = render_ring_cleanup;
>  
>  	/* Workaround batchbuffer to combat CS tlb bug. */
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 2e85fda..822ffbe 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -177,6 +177,7 @@ struct  intel_engine_cs {
>  	int		(*init_hw)(struct intel_engine_cs *ring);
>  
>  	int		(*init_context)(struct drm_i915_gem_request *req);
> +	int		(*switch_context)(struct drm_i915_gem_request *req);
>  
>  	void		(*write_tail)(struct intel_engine_cs *ring,
>  				      u32 value);
> -- 
> 2.1.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


More information about the Intel-gfx mailing list