[Intel-gfx] [PATCH 03/23] drm/i915: Remove lrc default desc from GEM context
Chris Wilson
chris at chris-wilson.co.uk
Thu Aug 1 10:57:06 UTC 2019
Quoting Tvrtko Ursulin (2019-08-01 09:53:15)
>
> On 01/08/2019 09:41, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-08-01 09:37:38)
> >>
> >> On 24/07/2019 10:20, Tvrtko Ursulin wrote:
> >>>
> >>> On 23/07/2019 19:38, Chris Wilson wrote:
> >>>> We only compute the lrc_descriptor() on pinning the context, i.e.
> >>>> infrequently, so we do not benefit from storing the template as the
> >>>> addressing mode is also fixed for the lifetime of the intel_context.
> >>>>
> >>>> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> >>>> ---
> >>>> drivers/gpu/drm/i915/gem/i915_gem_context.c | 28 ++-----------------
> >>>> .../gpu/drm/i915/gem/i915_gem_context_types.h | 2 --
> >>>> drivers/gpu/drm/i915/gt/intel_lrc.c | 12 +++++---
> >>>> drivers/gpu/drm/i915/gvt/scheduler.c | 3 --
> >>>> 4 files changed, 10 insertions(+), 35 deletions(-)
> >>>>
> >>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> >>>> index b28c7ca681a8..1b3dc7258ef2 100644
> >>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> >>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> >>>> @@ -397,30 +397,6 @@ static void context_close(struct i915_gem_context *ctx)
> >>>> i915_gem_context_put(ctx);
> >>>> }
> >>>> -static u32 default_desc_template(const struct drm_i915_private *i915,
> >>>> - const struct i915_address_space *vm)
> >>>> -{
> >>>> - u32 address_mode;
> >>>> - u32 desc;
> >>>> -
> >>>> - desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
> >>>> -
> >>>> - address_mode = INTEL_LEGACY_32B_CONTEXT;
> >>>> - if (vm && i915_vm_is_4lvl(vm))
> >>>> - address_mode = INTEL_LEGACY_64B_CONTEXT;
> >>>> - desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
> >>>> -
> >>>> - if (IS_GEN(i915, 8))
> >>>> - desc |= GEN8_CTX_L3LLC_COHERENT;
> >>>> -
> >>>> - /* TODO: WaDisableLiteRestore when we start using semaphore
> >>>> - * signalling between Command Streamers
> >>>> - * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
> >>>> - */
> >>>> -
> >>>> - return desc;
> >>>> -}
> >>>> -
> >>>> static struct i915_gem_context *
> >>>> __create_context(struct drm_i915_private *i915)
> >>>> {
> >>>> @@ -459,7 +435,6 @@ __create_context(struct drm_i915_private *i915)
> >>>> i915_gem_context_set_recoverable(ctx);
> >>>> ctx->ring_size = 4 * PAGE_SIZE;
> >>>> - ctx->desc_template = default_desc_template(i915, NULL);
> >>>> for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
> >>>> ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
> >>>> @@ -478,8 +453,9 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
> >>>> struct i915_gem_engines_iter it;
> >>>> struct intel_context *ce;
> >>>> + GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
> >>>> +
> >>>> ctx->vm = i915_vm_get(vm);
> >>>> - ctx->desc_template = default_desc_template(ctx->i915, vm);
> >>>> for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
> >>>> i915_vm_put(ce->vm);
> >>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> >>>> index 0ee61482ef94..a02d98494078 100644
> >>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> >>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> >>>> @@ -171,8 +171,6 @@ struct i915_gem_context {
> >>>> /** ring_size: size for allocating the per-engine ring buffer */
> >>>> u32 ring_size;
> >>>> - /** desc_template: invariant fields for the HW context descriptor */
> >>>> - u32 desc_template;
> >>>> /** guilty_count: How many times this context has caused a GPU hang. */
> >>>> atomic_t guilty_count;
> >>>> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> >>>> index 632344c163a8..5fdac40015cf 100644
> >>>> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> >>>> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> >>>> @@ -418,13 +418,17 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
> >>>> BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
> >>>> BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
> >>>> - desc = ctx->desc_template; /* bits 0-11 */
> >>>> - GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
> >>>> + desc = INTEL_LEGACY_32B_CONTEXT;
> >>>> + if (i915_vm_is_4lvl(ce->vm))
> >>>> + desc = INTEL_LEGACY_64B_CONTEXT;
> >>>
> >>> if-else now that the vm null check is gone.
> >>>
> >>>> + desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
> >>>> +
> >>>> + desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
> >>>> + if (IS_GEN(engine->i915, 8))
> >>>> + desc |= GEN8_CTX_L3LLC_COHERENT;
> >>>
> >>> Don't know.. it's nicer to keep it stored for both the Gen and context
> >>> state. What's the problem with it?
> >>
> >> Ping.
> >
> > There's no gem_context.
>
> We could store it in ce then. We already have well-defined control
> points for when the vm changes, at which all the contexts are updated.
We are storing it in ce; it's not like we recompute it all that often,
and when we do it's because we have rebound the vma.
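For reference, all the "recompute" amounts to is the handful of bit ops
now open-coded in lrc_descriptor(); with the if-else you asked for folded
in, the shape would be roughly this (sketch only, not the exact hunk):

	u64 desc;

	/* the addressing mode comes straight from the context's vm */
	if (i915_vm_is_4lvl(ce->vm))
		desc = INTEL_LEGACY_64B_CONTEXT;
	else
		desc = INTEL_LEGACY_32B_CONTEXT;
	desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;

	desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
	if (IS_GEN(engine->i915, 8)) /* gen8 additionally wants L3LLC coherency */
		desc |= GEN8_CTX_L3LLC_COHERENT;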
> If done like this, then it looks like assigning ctx->hw_id could also do
> the default_desc update, so that we can avoid even more work at pin
> time.
What ctx->hw_id? You are imagining things again :-p
Remember that we only do this on first pin from idle, not every pin.
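And to be explicit about where it ends up: the pin path caches the whole
descriptor in the intel_context, roughly (quoting from memory, so the
exact names may differ):

	/* in __execlists_context_pin(), computed once per pin, kept until unpin */
	ce->lrc_desc = lrc_descriptor(ce, engine);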
-Chris