[Intel-gfx] [PATCH 04/23] drm/i915: Push the ring creation flags to the backend

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Wed Jul 24 11:11:54 UTC 2019


On 23/07/2019 19:38, Chris Wilson wrote:
> Push the ring creation flags from the outer GEM context to the inner
> intel_context to avoid an unsightly back-reference from inside the
> backend.

Sorry, I find this quite ugly: passing integers in a pointer field and 
casting back and forth.
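
To spell out what I mean (just a sketch of the pattern as I read this 
patch, not new code): until the real ring is allocated, ce->ring does 
not point to an intel_ring at all, it carries the requested size 
smuggled through the pointer:

	/* intel_context.h: the "constructor" just casts a size into a pointer */
	static inline struct intel_ring *__intel_context_ring_size(u64 sz)
	{
		return u64_to_ptr(struct intel_ring, sz);
	}

	/* context setup: ce->ring temporarily holds a size, not a ring */
	ce->ring = __intel_context_ring_size(SZ_16K);

	/* intel_lrc.c: the backend casts it back to an integer to allocate */
	ring = intel_engine_create_ring(engine, timeline,
					(unsigned long)ce->ring);

So the type of ce->ring depends on whether the context has been pinned 
yet, which is the part I find hard to like.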

> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_context.c   | 25 +++++++++++++------
>   .../gpu/drm/i915/gem/i915_gem_context_types.h |  3 ---
>   drivers/gpu/drm/i915/gt/intel_context.c       |  1 +
>   drivers/gpu/drm/i915/gt/intel_context.h       |  5 ++++
>   drivers/gpu/drm/i915/gt/intel_lrc.c           |  5 ++--
>   drivers/gpu/drm/i915/i915_debugfs.c           | 23 +++++++++++------
>   6 files changed, 41 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 1b3dc7258ef2..18b226bc5e3a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -296,6 +296,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
>   			return ERR_CAST(ce);
>   		}
>   
> +		ce->ring = __intel_context_ring_size(SZ_16K);
> +
>   		e->engines[id] = ce;
>   	}
>   	e->num_engines = id;
> @@ -434,8 +436,6 @@ __create_context(struct drm_i915_private *i915)
>   	i915_gem_context_set_bannable(ctx);
>   	i915_gem_context_set_recoverable(ctx);
>   
> -	ctx->ring_size = 4 * PAGE_SIZE;
> -
>   	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
>   		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
>   
> @@ -565,8 +565,15 @@ i915_gem_context_create_gvt(struct drm_device *dev)
>   	i915_gem_context_set_closed(ctx); /* not user accessible */
>   	i915_gem_context_clear_bannable(ctx);
>   	i915_gem_context_set_force_single_submission(ctx);
> -	if (!USES_GUC_SUBMISSION(to_i915(dev)))
> -		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
> +	if (!USES_GUC_SUBMISSION(to_i915(dev))) {
> +		const unsigned long ring_size = 512 * SZ_4K; /* max */
> +		struct i915_gem_engines_iter it;
> +		struct intel_context *ce;
> +
> +		for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
> +			ce->ring = __intel_context_ring_size(ring_size);
> +		i915_gem_context_unlock_engines(ctx);
> +	}
>   
>   	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
>   out:
> @@ -605,7 +612,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
>   
>   	i915_gem_context_clear_bannable(ctx);
>   	ctx->sched.priority = I915_USER_PRIORITY(prio);
> -	ctx->ring_size = PAGE_SIZE;
>   
>   	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
>   
> @@ -1589,6 +1595,7 @@ set_engines(struct i915_gem_context *ctx,
>   	for (n = 0; n < num_engines; n++) {
>   		struct i915_engine_class_instance ci;
>   		struct intel_engine_cs *engine;
> +		struct intel_context *ce;
>   
>   		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
>   			__free_engines(set.engines, n);
> @@ -1611,11 +1618,15 @@ set_engines(struct i915_gem_context *ctx,
>   			return -ENOENT;
>   		}
>   
> -		set.engines->engines[n] = intel_context_create(ctx, engine);
> -		if (!set.engines->engines[n]) {
> +		ce = intel_context_create(ctx, engine);
> +		if (!ce) {
>   			__free_engines(set.engines, n);
>   			return -ENOMEM;
>   		}
> +
> +		ce->ring = __intel_context_ring_size(SZ_16K);
> +
> +		set.engines->engines[n] = ce;
>   	}
>   	set.engines->num_engines = num_engines;
>   
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> index a02d98494078..260d59cc3de8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> @@ -169,9 +169,6 @@ struct i915_gem_context {
>   
>   	struct i915_sched_attr sched;
>   
> -	/** ring_size: size for allocating the per-engine ring buffer */
> -	u32 ring_size;
> -
>   	/** guilty_count: How many times this context has caused a GPU hang. */
>   	atomic_t guilty_count;
>   	/**
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index 9e4f51ce52ff..295fa0ddbcac 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -196,6 +196,7 @@ intel_context_init(struct intel_context *ce,
>   	ce->engine = engine;
>   	ce->ops = engine->cops;
>   	ce->sseu = engine->sseu;
> +	ce->ring = __intel_context_ring_size(SZ_4K);
>   
>   	INIT_LIST_HEAD(&ce->signal_link);
>   	INIT_LIST_HEAD(&ce->signals);
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 23c7e4c0ce7c..3f54eb3d10ab 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -145,4 +145,9 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
>   
>   struct i915_request *intel_context_create_request(struct intel_context *ce);
>   
> +static inline struct intel_ring *__intel_context_ring_size(u64 sz)
> +{
> +	return u64_to_ptr(struct intel_ring, sz);
> +}
> +
>   #endif /* __INTEL_CONTEXT_H__ */
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 5fdac40015cf..3f1b20cc50c2 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -3140,9 +3140,8 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
>   		goto error_deref_obj;
>   	}
>   
> -	ring = intel_engine_create_ring(engine,
> -					timeline,
> -					ce->gem_context->ring_size);
> +	ring = intel_engine_create_ring(engine, timeline,
> +					(unsigned long)ce->ring);
>   	intel_timeline_put(timeline);
>   	if (IS_ERR(ring)) {
>   		ret = PTR_ERR(ring);
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 6d3911469801..e237bcecfa1f 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -328,10 +328,14 @@ static void print_context_stats(struct seq_file *m,
>   
>   		for_each_gem_engine(ce,
>   				    i915_gem_context_lock_engines(ctx), it) {
> -			if (ce->state)
> -				per_file_stats(0, ce->state->obj, &kstats);
> -			if (ce->ring)
> +			intel_context_lock_pinned(ce);
> +			if (intel_context_is_pinned(ce)) {

And these hunks do not seem to belong to this patch.

> +				if (ce->state)
> +					per_file_stats(0,
> +						       ce->state->obj, &kstats);
>   				per_file_stats(0, ce->ring->vma->obj, &kstats);
> +			}
> +			intel_context_unlock_pinned(ce);
>   		}
>   		i915_gem_context_unlock_engines(ctx);
>   
> @@ -1677,12 +1681,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
>   
>   		for_each_gem_engine(ce,
>   				    i915_gem_context_lock_engines(ctx), it) {
> -			seq_printf(m, "%s: ", ce->engine->name);
> -			if (ce->state)
> -				describe_obj(m, ce->state->obj);
> -			if (ce->ring)
> +			intel_context_lock_pinned(ce);
> +			if (intel_context_is_pinned(ce)) {
> +				seq_printf(m, "%s: ", ce->engine->name);
> +				if (ce->state)
> +					describe_obj(m, ce->state->obj);
>   				describe_ctx_ring(m, ce->ring);
> -			seq_putc(m, '\n');
> +				seq_putc(m, '\n');
> +			}
> +			intel_context_unlock_pinned(ce);
>   		}
>   		i915_gem_context_unlock_engines(ctx);
>   
> 

Regards,

Tvrtko

