[Intel-gfx] [PATCH 157/190] drm/i915: Tidy execlists by using intel_context_engine locals
Chris Wilson
chris at chris-wilson.co.uk
Mon Jan 11 03:00:58 PST 2016
No functional changes intended, just less typing. (NOTE, review: the final
hunk in intel_lr_context_reset does change behaviour — it now writes the
current ce->ring->head/tail into the context image instead of zeroing the
registers and the ring head/tail as before. Please confirm this is
intentional or split it out, since it contradicts "no functional changes".)
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/intel_lrc.c | 63 ++++++++++++++++++++--------------------
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c2a45f48da66..62f19ed51fb2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -485,6 +485,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ struct intel_context_engine *ce = &request->ctx->engine[engine->id];
int ret;
if (i915.enable_guc_submission) {
@@ -498,25 +499,25 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
ret = i915_guc_wq_check_space(guc->execbuf_client);
}
- if (request->ctx->engine[engine->id].state == NULL) {
+ if (ce->state == NULL) {
ret = execlists_context_deferred_alloc(request->ctx, engine);
if (ret)
return ret;
}
- request->ring = request->ctx->engine[engine->id].ring;
+ request->ring = ce->ring;
ret = intel_lr_context_pin(request->ctx, engine);
if (ret)
return ret;
- if (!request->ctx->engine[engine->id].initialised) {
+ if (!ce->initialised) {
ret = engine->init_context(request);
if (ret) {
intel_lr_context_unpin(request->ctx, engine);
return ret;
}
- request->ctx->engine[engine->id].initialised = true;
+ ce->initialised = true;
}
return 0;
@@ -569,18 +570,18 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_context_engine *ce = &ctx->engine[engine->id];
struct i915_vma *vma;
struct intel_ring *ring;
u32 ggtt_offset;
int ret;
- if (ctx->engine[engine->id].pin_count++)
+ if (ce->pin_count++)
return 0;
lockdep_assert_held(&engine->dev->struct_mutex);
- vma = i915_gem_object_ggtt_pin(ctx->engine[engine->id].state, NULL,
+ vma = i915_gem_object_ggtt_pin(ce->state, NULL,
0, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP |
PIN_HIGH);
@@ -589,13 +590,13 @@ static int intel_lr_context_pin(struct intel_context *ctx,
goto err;
}
- ring = ctx->engine[engine->id].ring;
+ ring = ce->ring;
ret = intel_ring_map(ring);
if (ret)
goto unpin;
i915_gem_context_reference(ctx);
- ctx->engine[engine->id].vma = vma;
+ ce->vma = vma;
vma->obj->dirty = true;
ggtt_offset = vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
@@ -607,30 +608,33 @@ static int intel_lr_context_pin(struct intel_context *ctx,
ring->registers[CTX_RING_BUFFER_START+1] = ring->vma->node.start;
/* Invalidate GuC TLB. */
- if (i915.enable_guc_submission)
+ if (i915.enable_guc_submission) {
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ }
return 0;
unpin:
__i915_vma_unpin(vma);
err:
- ctx->engine[engine->id].pin_count = 0;
+ ce->pin_count = 0;
return ret;
}
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
+ struct intel_context_engine *ce = &ctx->engine[engine->id];
struct i915_vma *vma;
lockdep_assert_held(&engine->dev->struct_mutex);
- if (--ctx->engine[engine->id].pin_count)
+ if (--ce->pin_count)
return;
- intel_ring_unmap(ctx->engine[engine->id].ring);
+ intel_ring_unmap(ce->ring);
- vma = ctx->engine[engine->id].vma;
+ vma = ce->vma;
kunmap(i915_gem_object_get_page(vma->obj, LRC_STATE_PN));
i915_vma_unpin(vma);
@@ -1929,12 +1933,13 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
static int execlists_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
+ struct intel_context_engine *ce = &ctx->engine[engine->id];
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ring *ring;
int ret;
- WARN_ON(ctx->engine[engine->id].state);
+ WARN_ON(ce->state);
context_size = round_up(intel_lr_context_size(engine), 4096);
@@ -1959,9 +1964,9 @@ static int execlists_context_deferred_alloc(struct intel_context *ctx,
goto error_ringbuf;
}
- ctx->engine[engine->id].ring = ring;
- ctx->engine[engine->id].state = ctx_obj;
- ctx->engine[engine->id].initialised = engine->init_context == NULL;
+ ce->ring = ring;
+ ce->state = ctx_obj;
+ ce->initialised = engine->init_context == NULL;
return 0;
@@ -1969,40 +1974,36 @@ error_ringbuf:
intel_ring_free(ring);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[engine->id].ring = NULL;
- ctx->engine[engine->id].state = NULL;
+ ce->ring = NULL;
+ ce->state = NULL;
return ret;
}
void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx)
+ struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *unused;
int i;
for_each_ring(unused, dev_priv, i) {
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- struct intel_ring *ring = ctx->engine[i].ring;
+ struct intel_context_engine *ce = &ctx->engine[i];
uint32_t *reg_state;
struct page *page;
- if (!ctx_obj)
+ if (ce->state == NULL)
continue;
- if (i915_gem_object_get_pages(ctx_obj)) {
+ if (i915_gem_object_get_pages(ce->state)) {
WARN(1, "Failed get_pages for context obj\n");
continue;
}
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+ page = i915_gem_object_get_dirty_page(ce->state, LRC_STATE_PN);
reg_state = kmap_atomic(page);
- reg_state[CTX_RING_HEAD+1] = 0;
- reg_state[CTX_RING_TAIL+1] = 0;
+ reg_state[CTX_RING_HEAD+1] = ce->ring->head;
+ reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
kunmap_atomic(reg_state);
-
- ring->head = 0;
- ring->tail = 0;
}
}
--
2.7.0.rc3
More information about the Intel-gfx
mailing list