[Intel-gfx] [PATCH 9/9] drm/i915/execlists: Report GPU rendering as IO activity to cpufreq.
Chris Wilson
chris at chris-wilson.co.uk
Wed Mar 28 23:19:40 UTC 2018
Quoting Chris Wilson (2018-03-28 20:20:19)
> Quoting Francisco Jerez (2018-03-28 19:55:12)
> > Hi Chris,
> >
> [snip]
> > That said, it wouldn't hurt to call each of them once per sequence of
> > overlapping requests. Do you want me to call them from
> > execlists_set/clear_active() themselves when bit == EXECLISTS_ACTIVE_USER,
> > or at each callsite of execlists_set/clear_active()? [possibly protected
> > with a check that execlists_is_active(EXECLISTS_ACTIVE_USER) wasn't
> > already the expected value]
>
> No, I was thinking of adding an execlists_start()/execlists_stop()
> [naming suggestions welcome, begin/end?] where we could hook additional
> bookkeeping into.
Trying to call execlist_begin() once didn't pan out. It's easier to
reuse for similar bookkeeping in future patches if execlist_begin()
(or whatever name suits best) is called at the start of each context.
Something along the lines of:
@@ -374,6 +374,19 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
+static inline void
+execlists_begin(struct intel_engine_execlists *execlists,
+ struct execlist_port *port)
+{
+ execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
+}
+
+static inline void
+execlists_end(struct intel_engine_execlists *execlists)
+{
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+}
+
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
@@ -710,7 +723,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock);
if (submit) {
- execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+ execlists_begin(execlists, execlists->port);
execlists_submit_ports(engine);
}
@@ -741,7 +754,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
port++;
}
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+ execlists_end(execlists);
}
static void clear_gtiir(struct intel_engine_cs *engine)
@@ -872,7 +885,7 @@ static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port * const port = execlists->port;
+ struct execlist_port *port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
bool fw = false;
@@ -1010,9 +1023,19 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(count == 0);
if (--count == 0) {
+ /*
+ * On the final event corresponding to the
+ * submission of this context, we expect either
+ * an element-switch event or a completion
+ * event (and on completion, the active-idle
+ * marker). No more preemptions, lite-restore
+ * or otherwise
+ */
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
GEM_BUG_ON(port_isset(&port[1]) &&
!(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+ GEM_BUG_ON(!port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
GEM_BUG_ON(!i915_request_completed(rq));
execlists_context_schedule_out(rq);
trace_i915_request_out(rq);
@@ -1021,17 +1044,14 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s completed ctx=%d\n",
engine->name, port->context_id);
- execlists_port_complete(execlists, port);
+ port = execlists_port_complete(execlists, port);
+ if (port_isset(port))
+ execlists_begin(execlists, port);
+ else
+ execlists_end(execlists);
} else {
port_set(port, port_pack(rq, count));
}
-Chris
More information about the Intel-gfx
mailing list