[PATCH 39/46] drm/i915: Rename intel_context.active to .bound
Chris Wilson
chris at chris-wilson.co.uk
Fri Mar 1 15:06:49 UTC 2019
Rename the field that tracks the engine this HW context is currently
bound to, so as to disambiguate it from the mixture of other "active"
terms.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/intel_context_types.h | 2 +-
drivers/gpu/drm/i915/intel_lrc.c | 22 +++++++++++-----------
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 6dc9b4b9067b..3918b9a4ae9d 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -38,7 +38,7 @@ struct intel_sseu {
struct intel_context {
struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
- struct intel_engine_cs *active;
+ struct intel_engine_cs *bound;
struct list_head active_link;
struct list_head signal_link;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7fef95620e62..28d9f8f6f0ba 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -441,7 +441,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->hw_context->active);
+ GEM_BUG_ON(rq->hw_context->bound);
owner = rq->hw_context->engine;
if (likely(owner == engine)) {
@@ -524,17 +524,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
- GEM_BUG_ON(rq->hw_context->active);
+ GEM_BUG_ON(rq->hw_context->bound);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
- rq->hw_context->active = rq->engine;
+ rq->hw_context->bound = rq->engine;
}
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
- rq->hw_context->active = NULL;
+ rq->hw_context->bound = NULL;
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
@@ -795,7 +795,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
struct i915_request *rq = READ_ONCE(ve->request);
- struct intel_engine_cs *active;
+ struct intel_engine_cs *bound;
if (!rq) {
rb_erase_cached(rb, &execlists->virtual);
@@ -804,8 +804,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
continue;
}
- active = READ_ONCE(ve->context.active);
- if (active && active != engine) {
+ bound = READ_ONCE(ve->context.bound);
+ if (bound && bound != engine) {
rb = rb_next(rb);
continue;
}
@@ -909,7 +909,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
u32 *regs = ve->context.lrc_reg_state;
unsigned int n;
- GEM_BUG_ON(READ_ONCE(ve->context.active));
+ GEM_BUG_ON(READ_ONCE(ve->context.bound));
virtual_update_register_offsets(regs, engine);
/*
@@ -1461,7 +1461,7 @@ static void execlists_context_unpin(struct intel_context *ce)
* had the chance to run yet; let it run before we teardown the
* reference it may use.
*/
- engine = READ_ONCE(ce->active);
+ engine = READ_ONCE(ce->bound);
if (unlikely(engine)) {
unsigned long flags;
@@ -1469,7 +1469,7 @@ static void execlists_context_unpin(struct intel_context *ce)
process_csb(engine);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
- GEM_BUG_ON(READ_ONCE(ce->active));
+ GEM_BUG_ON(READ_ONCE(ce->bound));
}
i915_gem_context_unpin_hw_id(ce->gem_context);
@@ -3114,7 +3114,7 @@ static void virtual_engine_free(struct kref *kref)
unsigned int n;
GEM_BUG_ON(ve->request);
- GEM_BUG_ON(ve->context.active);
+ GEM_BUG_ON(ve->context.bound);
for (n = 0; n < ve->count; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
--
2.20.1
More information about the Intel-gfx-trybot
mailing list