[Intel-gfx] [PATCH v2 08/12] drm/i915: Refactor execlists default context pinning
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Tue Apr 19 12:25:34 UTC 2016
On 19/04/16 12:40, Chris Wilson wrote:
> Refactor pinning and unpinning of contexts, such that the default
> context for an engine is pinned during initialisation and unpinned
> during teardown (pinning of the context handles the reference counting).
> Thus we can eliminate the special case handling of the default context
> that was required to mask that it was not being pinned normally.
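For anyone else reading along, the gist is that pin/unpin now carry the
reference counting for every context, the default/kernel context included.
A minimal sketch of the pattern, simplified from the hunks below (error
handling and the actual mapping work elided, so not the literal code):

    static int intel_lr_context_pin(struct intel_context *ctx,
                                    struct intel_engine_cs *engine)
    {
            /* Only the first pin maps the state object and ringbuffer
             * and takes the context reference. */
            if (ctx->engine[engine->id].pin_count++)
                    return 0;
            /* ... pin and map ctx object and ringbuffer,
             *     i915_gem_context_reference(ctx) ... */
            return 0;
    }

    void intel_lr_context_unpin(struct intel_context *ctx,
                                struct intel_engine_cs *engine)
    {
            /* Only the last unpin tears the mappings down and drops
             * the reference. */
            if (--ctx->engine[engine->id].pin_count)
                    return;
            /* ... unpin ringbuffer, unmap/unpin ctx object,
             *     i915_gem_context_unreference(ctx) ... */
    }

With the default context pinned once at engine init and unpinned at
cleanup, the ctx != kernel_context special cases in the callers can all
go, which is what the rest of the diff does.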
>
> v2: Rebalance context_queue after rebasing.
> v3: Rebase to -nightly (not 40 patches in)
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at linux.intel.com>
> ---
> drivers/gpu/drm/i915/i915_debugfs.c | 5 +-
> drivers/gpu/drm/i915/i915_gem.c | 2 +-
> drivers/gpu/drm/i915/intel_lrc.c | 107 ++++++++++++++----------------------
> 3 files changed, 43 insertions(+), 71 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index f775451bd0b6..e81a7504656e 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2095,9 +2095,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
> return ret;
>
> list_for_each_entry(ctx, &dev_priv->context_list, link)
> - if (ctx != dev_priv->kernel_context)
> - for_each_engine(engine, dev_priv)
> - i915_dump_lrc_obj(m, ctx, engine);
> + for_each_engine(engine, dev_priv)
> + i915_dump_lrc_obj(m, ctx, engine);
>
> mutex_unlock(&dev->struct_mutex);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index b95d5f83d3b0..261185281e78 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2719,7 +2719,7 @@ void i915_gem_request_free(struct kref *req_ref)
> i915_gem_request_remove_from_client(req);
>
> if (ctx) {
> - if (i915.enable_execlists && ctx != req->i915->kernel_context)
> + if (i915.enable_execlists)
> intel_lr_context_unpin(ctx, req->engine);
>
> i915_gem_context_unreference(ctx);
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index dedd82aea386..e064a6ae2d97 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -588,9 +588,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
> struct drm_i915_gem_request *cursor;
> int num_elements = 0;
>
> - if (request->ctx != request->i915->kernel_context)
> - intel_lr_context_pin(request->ctx, engine);
> -
> + intel_lr_context_pin(request->ctx, request->engine);
> i915_gem_request_reference(request);
>
> spin_lock_bh(&engine->execlist_lock);
> @@ -691,10 +689,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
> return ret;
> }
>
> - if (request->ctx != request->i915->kernel_context)
> - ret = intel_lr_context_pin(request->ctx, request->engine);
> -
> - return ret;
> + return intel_lr_context_pin(request->ctx, request->engine);
> }
>
> static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
> @@ -774,12 +769,8 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
> if (engine->last_context != request->ctx) {
> if (engine->last_context)
> intel_lr_context_unpin(engine->last_context, engine);
> - if (request->ctx != request->i915->kernel_context) {
> - intel_lr_context_pin(request->ctx, engine);
> - engine->last_context = request->ctx;
> - } else {
> - engine->last_context = NULL;
> - }
> + intel_lr_context_pin(request->ctx, engine);
> + engine->last_context = request->ctx;
> }
>
> if (dev_priv->guc.execbuf_client)
> @@ -1002,12 +993,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
> spin_unlock_bh(&engine->execlist_lock);
>
> list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
> - struct intel_context *ctx = req->ctx;
> - struct drm_i915_gem_object *ctx_obj =
> - ctx->engine[engine->id].state;
> -
> - if (ctx_obj && (ctx != req->i915->kernel_context))
> - intel_lr_context_unpin(ctx, engine);
> + intel_lr_context_unpin(req->ctx, engine);
>
> list_del(&req->execlist_link);
> i915_gem_request_unreference(req);
> @@ -1052,23 +1038,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
> return 0;
> }
>
> -static int intel_lr_context_do_pin(struct intel_context *ctx,
> - struct intel_engine_cs *engine)
> +static int intel_lr_context_pin(struct intel_context *ctx,
> + struct intel_engine_cs *engine)
> {
> - struct drm_device *dev = engine->dev;
> - struct drm_i915_private *dev_priv = dev->dev_private;
> - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
> - struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
> + struct drm_i915_private *dev_priv = ctx->i915;
> + struct drm_i915_gem_object *ctx_obj;
> + struct intel_ringbuffer *ringbuf;
> void *vaddr;
> u32 *lrc_reg_state;
> int ret;
>
> - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
> + lockdep_assert_held(&ctx->i915->dev->struct_mutex);
>
> + if (ctx->engine[engine->id].pin_count++)
> + return 0;
> +
> + ctx_obj = ctx->engine[engine->id].state;
> ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
> PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
> if (ret)
> - return ret;
> + goto err;
>
> vaddr = i915_gem_object_pin_map(ctx_obj);
> if (IS_ERR(vaddr)) {
> @@ -1078,10 +1067,12 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
>
> lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>
> + ringbuf = ctx->engine[engine->id].ringbuf;
> ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
> if (ret)
> goto unpin_map;
>
> + i915_gem_context_reference(ctx);
> ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
> intel_lr_context_descriptor_update(ctx, engine);
> lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
> @@ -1092,51 +1083,39 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
> if (i915.enable_guc_submission)
> I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>
> - return ret;
> + return 0;
>
> unpin_map:
> i915_gem_object_unpin_map(ctx_obj);
> unpin_ctx_obj:
> i915_gem_object_ggtt_unpin(ctx_obj);
> -
> +err:
> + ctx->engine[engine->id].pin_count = 0;
> return ret;
> }
>
> -static int intel_lr_context_pin(struct intel_context *ctx,
> - struct intel_engine_cs *engine)
> +void intel_lr_context_unpin(struct intel_context *ctx,
> + struct intel_engine_cs *engine)
> {
> - int ret = 0;
> + struct drm_i915_gem_object *ctx_obj;
>
> - if (ctx->engine[engine->id].pin_count++ == 0) {
> - ret = intel_lr_context_do_pin(ctx, engine);
> - if (ret)
> - goto reset_pin_count;
> + lockdep_assert_held(&ctx->i915->dev->struct_mutex);
> + GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0);
>
> - i915_gem_context_reference(ctx);
> - }
> - return ret;
> + if (--ctx->engine[engine->id].pin_count)
> + return;
>
> -reset_pin_count:
> - ctx->engine[engine->id].pin_count = 0;
> - return ret;
> -}
> + intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
>
> -void intel_lr_context_unpin(struct intel_context *ctx,
> - struct intel_engine_cs *engine)
> -{
> - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
> + ctx_obj = ctx->engine[engine->id].state;
> + i915_gem_object_unpin_map(ctx_obj);
> + i915_gem_object_ggtt_unpin(ctx_obj);
>
> - WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
> - if (--ctx->engine[engine->id].pin_count == 0) {
> - i915_gem_object_unpin_map(ctx_obj);
> - intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
> - i915_gem_object_ggtt_unpin(ctx_obj);
> - ctx->engine[engine->id].lrc_vma = NULL;
> - ctx->engine[engine->id].lrc_desc = 0;
> - ctx->engine[engine->id].lrc_reg_state = NULL;
> + ctx->engine[engine->id].lrc_vma = NULL;
> + ctx->engine[engine->id].lrc_desc = 0;
> + ctx->engine[engine->id].lrc_reg_state = NULL;
>
> - i915_gem_context_unreference(ctx);
> - }
> + i915_gem_context_unreference(ctx);
> }
>
> static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
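A small but important detail in the new pin path above: when the very
first pin fails, the err label winds pin_count back to zero. In generic
form (illustrative only, made-up names, not driver code):

    static int obj_pin(struct pinned_obj *obj)
    {
            int ret;

            if (obj->pin_count++)   /* already resident, just count it */
                    return 0;

            ret = do_the_real_pinning(obj);
            if (ret) {
                    /* Unwind, otherwise the next pin attempt would see a
                     * non-zero count and skip the actual pinning. */
                    obj->pin_count = 0;
                    return ret;
            }
            return 0;
    }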
> @@ -2034,6 +2013,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
> i915_gem_object_unpin_map(engine->status_page.obj);
> engine->status_page.obj = NULL;
> }
> + intel_lr_context_unpin(dev_priv->kernel_context, engine);
>
> engine->idle_lite_restore_wa = 0;
> engine->disable_lite_restore_wa = false;
> @@ -2137,11 +2117,10 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
> goto error;
>
> /* As this is the default context, always pin it */
> - ret = intel_lr_context_do_pin(dctx, engine);
> + ret = intel_lr_context_pin(dctx, engine);
> if (ret) {
> - DRM_ERROR(
> - "Failed to pin and map ringbuffer %s: %d\n",
> - engine->name, ret);
> + DRM_ERROR("Failed to pin context for %s: %d\n",
> + engine->name, ret);
> goto error;
> }
>
> @@ -2562,12 +2541,6 @@ void intel_lr_context_free(struct intel_context *ctx)
> if (!ctx_obj)
> continue;
>
> - if (ctx == ctx->i915->kernel_context) {
> - intel_unpin_ringbuffer_obj(ringbuf);
> - i915_gem_object_ggtt_unpin(ctx_obj);
> - i915_gem_object_unpin_map(ctx_obj);
> - }
> -
> WARN_ON(ctx->engine[i].pin_count);
> intel_ringbuffer_free(ringbuf);
> drm_gem_object_unreference(&ctx_obj->base);
>
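For completeness, the pin/unpin pairing as I read it after this patch
(call sites taken from the hunks above):

    /*
     * logical_ring_init()                        pin(kernel_context)
     * intel_logical_ring_cleanup()               unpin(kernel_context)
     *
     * intel_logical_ring_alloc_request_extras()  pin(request->ctx)
     * i915_gem_request_free()                    unpin(ctx)
     *
     * execlists_context_queue()                  pin(request->ctx)
     * intel_execlists_retire_requests()          unpin(req->ctx)
     *
     * intel_logical_ring_advance_and_submit()    pin(request->ctx) as the
     *                                            new last_context, unpin
     *                                            of the previous one
     */

Every path that takes a pin has a matching unpin, with the kernel context
now going through exactly the same route, so: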
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko