[Intel-gfx] [PATCH 6/8] drm/i915: Engine busy time tracking
Chris Wilson
chris at chris-wilson.co.uk
Mon Sep 25 17:43:12 UTC 2017
Quoting Tvrtko Ursulin (2017-09-25 16:15:41)
> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>
> Track the total time requests have been executing on the hardware.
>
> We add a new kernel API to allow software tracking of the time GPU
> engines spend executing requests.
>
> Both a per-engine and a global API are added, with the latter also
> exported for use by external users.
>
> v2:
> * Squashed with the internal API.
> * Dropped static key.
> * Made per-engine.
> * Store time in monotonic ktime.
>
> v3: Moved stats clearing to disable.
>
> v4:
> * Comments.
> * Don't export the API just yet.
>
> v5: Whitespace cleanup.
>
> v6:
> * Rename ref to active.
> * Drop engine aggregate stats for now.
> * Account initial busy period after enabling stats.
>
> v7:
> * Rebase.
>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
> drivers/gpu/drm/i915/intel_engine_cs.c  | 84 ++++++++++++++++++++++++++++++
> drivers/gpu/drm/i915/intel_lrc.c        |  2 +
> drivers/gpu/drm/i915/intel_ringbuffer.h | 92 +++++++++++++++++++++++++++++++++
> 3 files changed, 178 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 35c117c3fa0d..8db83f504d70 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -234,6 +234,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
> /* Nothing to do here, execute in order of dependencies */
> engine->schedule = NULL;
>
> + spin_lock_init(&engine->stats.lock);
> +
> ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
>
> dev_priv->engine_class[info->class][info->instance] = engine;
> @@ -1613,6 +1615,88 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
> return i915->engine_class[class][instance];
> }
>
> +/**
> + * intel_enable_engine_stats() - Enable engine busy tracking on engine
> + * @engine: engine to enable stats collection
> + *
> + * Start collecting the engine busyness data for @engine.
> + *
> + * Returns 0 on success or a negative error code.
> + */
> +int intel_enable_engine_stats(struct intel_engine_cs *engine)
> +{
> + unsigned long flags;
> +
> + if (!i915_modparams.enable_execlists)
> + return -ENODEV;
> +
> + spin_lock_irqsave(&engine->stats.lock, flags);
> + if (engine->stats.enabled == ~0)
> + goto busy;
This still makes me go wut? Why not just refcount_t for this? (Rough sketch of what I mean below the function.)
> + if (engine->stats.enabled++ == 0)
> + engine->stats.enabled_at = ktime_get();
> + spin_unlock_irqrestore(&engine->stats.lock, flags);
> +
> + return 0;
> +
> +busy:
> + spin_unlock_irqrestore(&engine->stats.lock, flags);
> +
> + return -EBUSY;
> +}
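A refcount_t variant of the enable path might look roughly like the sketch
below (hypothetical, assumes stats.enabled becomes a refcount_t; note that
refcount_t saturates and WARNs rather than returning -EBUSY, and the 0 -> 1
transition needs special-casing since refcount_inc() complains about
incrementing from zero):

int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!i915_modparams.enable_execlists)
		return -ENODEV;

	spin_lock_irqsave(&engine->stats.lock, flags);
	/* refcount_inc_not_zero() only fails for the first listener. */
	if (!refcount_inc_not_zero(&engine->stats.enabled)) {
		refcount_set(&engine->stats.enabled, 1);
		engine->stats.enabled_at = ktime_get();
	}
	spin_unlock_irqrestore(&engine->stats.lock, flags);

	return 0;
}

The context_in/out helpers would then test
refcount_read(&engine->stats.enabled) instead of enabled > 0.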
> +
> +/**
> + * intel_disable_engine_stats() - Disable engine busy tracking on engine
> + * @engine: engine to disable stats collection
> + *
> + * Stops collecting the engine busyness data for @engine.
> + */
> +void intel_disable_engine_stats(struct intel_engine_cs *engine)
> +{
> + unsigned long flags;
> +
> + if (!i915_modparams.enable_execlists)
> + return;
> +
> + spin_lock_irqsave(&engine->stats.lock, flags);
> + WARN_ON_ONCE(engine->stats.enabled == 0);
> + if (--engine->stats.enabled == 0) {
> + engine->stats.enabled_at = 0;
> + engine->stats.active = 0;
> + engine->stats.start = 0;
> + engine->stats.total = 0;
> + }
> + spin_unlock_irqrestore(&engine->stats.lock, flags);
> +}
> +
> +/**
> + * intel_engine_get_busy_time() - Return current accumulated engine busyness
> + * @engine: engine to report on
> + *
> + * Returns accumulated time @engine was busy since engine stats were enabled.
> + */
> +ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
> +{
> + ktime_t total;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&engine->stats.lock, flags);
> +
> + total = engine->stats.total;
> +
> + /*
> + * If the engine is executing something at the moment,
> + * add the time elapsed since it went busy to the total.
> + */
> + if (engine->stats.active)
> + total = ktime_add(total,
> + ktime_sub(ktime_get(), engine->stats.start));
> +
> + spin_unlock_irqrestore(&engine->stats.lock, flags);
> +
> + return total;
> +}
> +
> #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> #include "selftests/mock_engine.c"
> #endif
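As a usage illustration (hypothetical consumer, not part of this patch),
sampling engine busyness over an interval with the new API would look
roughly like:

static u64 sample_engine_busy_ns(struct intel_engine_cs *engine,
				 unsigned int period_ms)
{
	ktime_t before, after;

	/* -ENODEV without execlists, -EBUSY if the enable count is full. */
	if (intel_enable_engine_stats(engine))
		return 0;

	before = intel_engine_get_busy_time(engine);
	msleep(period_ms);
	after = intel_engine_get_busy_time(engine);

	intel_disable_engine_stats(engine);

	/* Nanoseconds the engine was busy during the sampling window. */
	return ktime_to_ns(ktime_sub(after, before));
}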
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 7cd14b701ed7..1e743d3d16cb 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -366,12 +366,14 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
> static inline void
> execlists_context_schedule_in(struct drm_i915_gem_request *rq)
> {
> + intel_engine_context_in(rq->engine);
> execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
Symmetry: start accumulating after the notifier, so the busy window nests inside the SCHEDULE_IN/SCHEDULE_OUT notifications (sketch below).
> }
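i.e. something along these lines (sketch of the suggested ordering):

static inline void
execlists_context_schedule_in(struct drm_i915_gem_request *rq)
{
	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
	intel_engine_context_in(rq->engine);
}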
>
> static inline void
> execlists_context_schedule_out(struct drm_i915_gem_request *rq)
> {
> + intel_engine_context_out(rq->engine);
> execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
> }
>
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 7a901e766d03..8db228ebdb28 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -538,6 +538,38 @@ struct intel_engine_cs {
> * certain bits to encode the command length in the header).
> */
> u32 (*get_cmd_length_mask)(u32 cmd_header);
> +
> + struct {
> + /**
> + * @lock: Lock protecting the below fields.
> + */
> + spinlock_t lock;
> + /**
> + * @enabled: Reference count indicating number of listeners.
> + */
> + unsigned int enabled;
> + /**
> + * @active: Number of contexts currently scheduled in.
> + */
> + unsigned int active;
> + /**
> + * @enabled_at: Timestamp when busy stats were enabled.
> + */
> + ktime_t enabled_at;
> + /**
> + * @start: Timestamp of the last idle to active transition.
> + *
> + * Idle is defined as active == 0, busy as active > 0.
> + */
> + ktime_t start;
> + /**
> + * @total: Total time this engine was busy.
> + *
> + * Accumulated busy time, not including the most recent busy
> + * period when the engine is currently busy (active > 0).
> + */
> + ktime_t total;
> + } stats;
> };
>
> static inline unsigned int
> @@ -859,4 +891,64 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
> struct intel_engine_cs *
> intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
>
> +static inline void intel_engine_context_in(struct intel_engine_cs *engine)
> +{
> + unsigned long flags;
> +
> + if (READ_ONCE(engine->stats.enabled) == 0)
> + return;
> +
> + spin_lock_irqsave(&engine->stats.lock, flags);
> +
> + if (engine->stats.enabled > 0) {
> + if (engine->stats.active++ == 0)
> + engine->stats.start = ktime_get();
> + GEM_BUG_ON(engine->stats.active == 0);
> + }
> +
> + spin_unlock_irqrestore(&engine->stats.lock, flags);
> +}
> +
> +static inline void intel_engine_context_out(struct intel_engine_cs *engine)
> +{
> + unsigned long flags;
> +
> + if (READ_ONCE(engine->stats.enabled) == 0)
> + return;
> +
> + spin_lock_irqsave(&engine->stats.lock, flags);
> +
> + if (engine->stats.enabled > 0) {
> + ktime_t last, now = ktime_get();
> +
> + if (engine->stats.active && --engine->stats.active == 0) {
> + /*
> + * The last active context has been switched out, so the
> + * engine is now idle; add the elapsed busy period to the
> + * running total.
> + */
> + last = ktime_sub(now, engine->stats.start);
> +
> + engine->stats.total = ktime_add(engine->stats.total,
> + last);
> + } else if (engine->stats.active == 0) {
> + /*
> + * After turning on engine stats, a context out might be
> + * the first event we see, in which case we account the
> + * busy time from when stats gathering was turned on.
> + */
> + last = ktime_sub(now, engine->stats.enabled_at);
> +
> + engine->stats.total = ktime_add(engine->stats.total,
> + last);
> + }
> + }
> +
> + spin_unlock_irqrestore(&engine->stats.lock, flags);
> +}
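To illustrate the enabled_at fallback (a hypothetical timeline, single
context, invented times):

  t=0ms  stats enabled while a request is already executing; active == 0
         because its context-in predates enabling, enabled_at = 0ms
  t=4ms  context out: active == 0, so total += 4ms - enabled_at -> total = 4ms
  t=6ms  context in:  active 0 -> 1, start = 6ms
  t=9ms  context out: active 1 -> 0, so total += 9ms - 6ms      -> total = 7ms

A caller of intel_engine_get_busy_time() after t=9ms then reads 7ms.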
> +
> +int intel_enable_engine_stats(struct intel_engine_cs *engine);
> +void intel_disable_engine_stats(struct intel_engine_cs *engine);
> +
> +ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
> +
> #endif /* _INTEL_RINGBUFFER_H_ */
> --
> 2.9.5
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx