[Intel-gfx] [RFC 04/11] drm/i915/pmu: Expose a PMU interface for perf queries

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Sep 12 14:59:22 UTC 2017


On 12/09/2017 03:06, Rogozhkin, Dmitry V wrote:
> On Mon, 2017-09-11 at 16:25 +0100, Tvrtko Ursulin wrote:
>> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>>
>> From: Chris Wilson <chris at chris-wilson.co.uk>
>> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>> From: Dmitry Rogozhkin <dmitry.v.rogozhkin at intel.com>
>>
>> The first goal is to be able to measure GPU (and individual ring) busyness
>> without having to poll registers from userspace. (Which not only incurs
>> holding the forcewake lock indefinitely, perturbing the system, but also
>> runs the risk of hanging the machine.) As an alternative we can use the
>> perf event counter interface to sample the ring registers periodically
>> and send those results to userspace.
>>
>> To be able to do so, we need to export the two symbols from
>> kernel/events/core.c to register and unregister a PMU device.
>>
>> v1-v2 (Chris Wilson):
>>
>> v2: Use a common timer for the ring sampling.
>>
>> v3: (Tvrtko Ursulin)
>>   * Decouple uAPI from i915 engine ids.
>>   * Complete uAPI defines.
>>   * Refactor some code to helpers for clarity.
>>   * Skip sampling disabled engines.
>>   * Expose counters in sysfs.
>>   * Pass in fake regs to avoid null ptr deref in perf core.
>>   * Convert to class/instance uAPI.
>>   * Use shared driver code for rc6 residency, power and frequency.
>>
>> v4: (Dmitry Rogozhkin)
>>   * Register PMU with .task_ctx_nr=perf_invalid_context
>>   * Expose cpumask for the PMU with the single CPU in the mask
>>   * Properly support pmu->stop(): it should call pmu->read()
>>   * Properly support pmu->del(): it should call stop(event, PERF_EF_UPDATE)
>>   * Make pmu.busy_stats a refcounter to avoid busy stats going away
>>     with some deleted event.
> 
> busy_stats appear later in the patch series. And in your final version
> busy_stats remain bool while we rely on events refcounting. So, this
> item is misleading. Could you, please, change it, giving credit to the
> general ref-counting of events which you rewrote for v5 of this
> patch?

Oh so I dropped the reference to "drm/i915/pmu: introduce refcounting of 
event subscriptions". My apologies. I will restore it in the next 
version. It wasn't deliberate, just an omission while squashing and 
re-ordering things.

Regards,

Tvrtko

>>   * Expose cpumask for i915 PMU to avoid multiple events creation of
>>     the same type followed by counter aggregation by perf-stat.
>>   * Track CPUs getting online/offline to migrate perf context. If (likely)
>>     cpumask will initially set CPU0, CONFIG_BOOTPARAM_HOTPLUG_CPU0 will be
>>     needed to see effect of CPU status tracking.
>>   * End result is that only global events are supported and perf stat
>>     works correctly.
>>   * Deny perf driver level sampling - it is prohibited for uncore PMU.
>>
>> v5: (Tvrtko Ursulin)
>>
>>   * Don't hardcode number of engine samplers.
>>   * Rewrite event ref-counting for correctness and simplicity.
>>   * Store initial counter value when starting already enabled events
>>     to correctly report values to all listeners.
>>   * Fix RC6 residency readout.
>>   * Comments, GPL header.
>>
>> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>> Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin at intel.com>
>> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>> Cc: Chris Wilson <chris at chris-wilson.co.uk>
>> Cc: Dmitry Rogozhkin <dmitry.v.rogozhkin at intel.com>
>> Cc: Peter Zijlstra <peterz at infradead.org>
>> ---
>>   drivers/gpu/drm/i915/Makefile           |   1 +
>>   drivers/gpu/drm/i915/i915_drv.c         |   2 +
>>   drivers/gpu/drm/i915/i915_drv.h         |  76 ++++
>>   drivers/gpu/drm/i915/i915_pmu.c         | 686 ++++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/i915/i915_reg.h         |   3 +
>>   drivers/gpu/drm/i915/intel_engine_cs.c  |  10 +
>>   drivers/gpu/drm/i915/intel_ringbuffer.c |  25 ++
>>   drivers/gpu/drm/i915/intel_ringbuffer.h |  25 ++
>>   include/uapi/drm/i915_drm.h             |  58 +++
>>   9 files changed, 886 insertions(+)
>>   create mode 100644 drivers/gpu/drm/i915/i915_pmu.c
>>
>> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
>> index 1cb8059a3a16..7b3a0eca62b6 100644
>> --- a/drivers/gpu/drm/i915/Makefile
>> +++ b/drivers/gpu/drm/i915/Makefile
>> @@ -26,6 +26,7 @@ i915-y := i915_drv.o \
>>   
>>   i915-$(CONFIG_COMPAT)   += i915_ioc32.o
>>   i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
>> +i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
>>   
>>   # GEM code
>>   i915-y += i915_cmd_parser.o \
>> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
>> index 5c111ea96e80..b1f96eb1be16 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.c
>> +++ b/drivers/gpu/drm/i915/i915_drv.c
>> @@ -1196,6 +1196,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
>>   	struct drm_device *dev = &dev_priv->drm;
>>   
>>   	i915_gem_shrinker_init(dev_priv);
>> +	i915_pmu_register(dev_priv);
>>   
>>   	/*
>>   	 * Notify a valid surface after modesetting,
>> @@ -1250,6 +1251,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
>>   	intel_opregion_unregister(dev_priv);
>>   
>>   	i915_perf_unregister(dev_priv);
>> +	i915_pmu_unregister(dev_priv);
>>   
>>   	i915_teardown_sysfs(dev_priv);
>>   	i915_guc_log_unregister(dev_priv);
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index 48daf9552163..62646b8dfb7a 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -40,6 +40,7 @@
>>   #include <linux/hash.h>
>>   #include <linux/intel-iommu.h>
>>   #include <linux/kref.h>
>> +#include <linux/perf_event.h>
>>   #include <linux/pm_qos.h>
>>   #include <linux/reservation.h>
>>   #include <linux/shmem_fs.h>
>> @@ -2190,6 +2191,69 @@ struct intel_cdclk_state {
>>   	unsigned int cdclk, vco, ref;
>>   };
>>   
>> +enum {
>> +	__I915_SAMPLE_FREQ_ACT = 0,
>> +	__I915_SAMPLE_FREQ_REQ,
>> +	__I915_NUM_PMU_SAMPLERS
>> +};
>> +
>> +/**
>> + * How many different events we track in the global PMU mask.
>> + *
>> + * It is also used to know the needed number of event reference counters.
>> + */
>> +#define I915_PMU_MASK_BITS \
>> +	(1 << I915_PMU_SAMPLE_BITS) + (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))
>> +
>> +struct i915_pmu {
>> +	/**
>> +	 * @node: List node for CPU hotplug handling.
>> +	 */
>> +	struct hlist_node node;
>> +	/**
>> +	 * @base: PMU base.
>> +	 */
>> +	struct pmu base;
>> +	/**
>> +	 * @lock: Lock protecting enable mask and ref count handling.
>> +	 */
>> +	spinlock_t lock;
>> +	/**
>> +	 * @timer: Timer for internal i915 PMU sampling.
>> +	 */
>> +	struct hrtimer timer;
>> +	/**
>> +	 * @enable: Bitmask of all currently enabled events.
>> +	 *
>> +	 * Bits are derived from uAPI event numbers in a way that low 16 bits
>> +	 * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
>> +	 * bit 0), and higher bits correspond to other events (for instance
>> +	 * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
>> +	 *
>> +	 * In other words, low 16 bits are not per engine but per engine
>> +	 * sampler type, while the upper bits are directly mapped to other
>> +	 * event types.
>> +	 */
>> +	u64 enable;
>> +	/**
>> +	 * @enable_count: Reference counts for the enabled events.
>> +	 *
>> +	 * Array indices are mapped in the same way as bits in the @enable field
>> +	 * and they are used to control sampling on/off when multiple clients
>> +	 * are using the PMU API.
>> +	 */
>> +	unsigned int enable_count[I915_PMU_MASK_BITS];
>> +	/**
>> +	 * @sample: Current counter value for i915 events which need sampling.
>> +	 *
>> +	 * These counters are updated from the i915 PMU sampling timer.
>> +	 *
>> +	 * Only global counters are held here, while the per-engine ones are in
>> +	 * struct intel_engine_cs.
>> +	 */
>> +	u64 sample[__I915_NUM_PMU_SAMPLERS];
>> +};
>> +
>>   struct drm_i915_private {
>>   	struct drm_device drm;
>>   
>> @@ -2238,6 +2302,7 @@ struct drm_i915_private {
>>   	struct pci_dev *bridge_dev;
>>   	struct i915_gem_context *kernel_context;
>>   	struct intel_engine_cs *engine[I915_NUM_ENGINES];
>> +	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1][MAX_ENGINE_INSTANCE + 1];
>>   	struct i915_vma *semaphore;
>>   
>>   	struct drm_dma_handle *status_page_dmah;
>> @@ -2698,6 +2763,8 @@ struct drm_i915_private {
>>   		int	irq;
>>   	} lpe_audio;
>>   
>> +	struct i915_pmu pmu;
>> +
>>   	/*
>>   	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
>>   	 * will be rejected. Instead look for a better place.
>> @@ -3918,6 +3985,15 @@ extern void i915_perf_fini(struct drm_i915_private *dev_priv);
>>   extern void i915_perf_register(struct drm_i915_private *dev_priv);
>>   extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
>>   
>> +/* i915_pmu.c */
>> +#ifdef CONFIG_PERF_EVENTS
>> +extern void i915_pmu_register(struct drm_i915_private *i915);
>> +extern void i915_pmu_unregister(struct drm_i915_private *i915);
>> +#else
>> +static inline void i915_pmu_register(struct drm_i915_private *i915) {}
>> +static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
>> +#endif
>> +
>>   /* i915_suspend.c */
>>   extern int i915_save_state(struct drm_i915_private *dev_priv);
>>   extern int i915_restore_state(struct drm_i915_private *dev_priv);
>> diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
>> new file mode 100644
>> index 000000000000..2ec892e57143
>> --- /dev/null
>> +++ b/drivers/gpu/drm/i915/i915_pmu.c
>> @@ -0,0 +1,686 @@
>> +/*
>> + * Copyright © 2017 Intel Corporation
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a
>> + * copy of this software and associated documentation files (the "Software"),
>> + * to deal in the Software without restriction, including without limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice (including the next
>> + * paragraph) shall be included in all copies or substantial portions of the
>> + * Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
>> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
>> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
>> + * IN THE SOFTWARE.
>> + *
>> + */
>> +
>> +#include <linux/perf_event.h>
>> +#include <linux/pm_runtime.h>
>> +
>> +#include "i915_drv.h"
>> +#include "intel_ringbuffer.h"
>> +
>> +/* Frequency for the sampling timer for events which need it. */
>> +#define FREQUENCY 200
>> +#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
>> +
>> +#define ENGINE_SAMPLE_MASK \
>> +	(BIT(I915_SAMPLE_QUEUED) | \
>> +	 BIT(I915_SAMPLE_BUSY) | \
>> +	 BIT(I915_SAMPLE_WAIT) | \
>> +	 BIT(I915_SAMPLE_SEMA))
>> +
>> +#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
>> +
>> +static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;
>> +
>> +static u8 engine_config_sample(u64 config)
>> +{
>> +	return config & I915_PMU_SAMPLE_MASK;
>> +}
>> +
>> +static u8 engine_event_sample(struct perf_event *event)
>> +{
>> +	return engine_config_sample(event->attr.config);
>> +}
>> +
>> +static u8 engine_event_class(struct perf_event *event)
>> +{
>> +	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
>> +}
>> +
>> +static u8 engine_event_instance(struct perf_event *event)
>> +{
>> +	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
>> +}
>> +
>> +static bool is_engine_config(u64 config)
>> +{
>> +	return config < __I915_PMU_OTHER(0);
>> +}
>> +
>> +static unsigned int config_enabled_bit(u64 config)
>> +{
>> +	if (is_engine_config(config))
>> +		return engine_config_sample(config);
>> +	else
>> +		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
>> +}
>> +
>> +static u64 config_enabled_mask(u64 config)
>> +{
>> +	return BIT_ULL(config_enabled_bit(config));
>> +}
>> +
>> +static bool is_engine_event(struct perf_event *event)
>> +{
>> +	return is_engine_config(event->attr.config);
>> +}
>> +
>> +static unsigned int event_enabled_bit(struct perf_event *event)
>> +{
>> +	return config_enabled_bit(event->attr.config);
>> +}
>> +
>> +static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
>> +{
>> +	if (!fw)
>> +		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
>> +
>> +	return true;
>> +}
>> +
>> +static void engines_sample(struct drm_i915_private *dev_priv)
>> +{
>> +	struct intel_engine_cs *engine;
>> +	enum intel_engine_id id;
>> +	bool fw = false;
>> +
>> +	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
>> +		return;
>> +
>> +	if (!dev_priv->gt.awake)
>> +		return;
>> +
>> +	if (!intel_runtime_pm_get_if_in_use(dev_priv))
>> +		return;
>> +
>> +	for_each_engine(engine, dev_priv, id) {
>> +		u32 enable = engine->pmu.enable;
>> +
>> +		if (i915_seqno_passed(intel_engine_get_seqno(engine),
>> +				      intel_engine_last_submit(engine)))
>> +			continue;
>> +
>> +		if (enable & BIT(I915_SAMPLE_QUEUED))
>> +			engine->pmu.sample[I915_SAMPLE_QUEUED] += PERIOD;
>> +
>> +		if (enable & BIT(I915_SAMPLE_BUSY)) {
>> +			u32 val;
>> +
>> +			fw = grab_forcewake(dev_priv, fw);
>> +			val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
>> +			if (!(val & MODE_IDLE))
>> +				engine->pmu.sample[I915_SAMPLE_BUSY] += PERIOD;
>> +		}
>> +
>> +		if (enable & (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA))) {
>> +			u32 val;
>> +
>> +			fw = grab_forcewake(dev_priv, fw);
>> +			val = I915_READ_FW(RING_CTL(engine->mmio_base));
>> +			if ((enable & BIT(I915_SAMPLE_WAIT)) &&
>> +			    (val & RING_WAIT))
>> +				engine->pmu.sample[I915_SAMPLE_WAIT] += PERIOD;
>> +			if ((enable & BIT(I915_SAMPLE_SEMA)) &&
>> +			    (val & RING_WAIT_SEMAPHORE))
>> +				engine->pmu.sample[I915_SAMPLE_SEMA] += PERIOD;
>> +		}
>> +	}
>> +
>> +	if (fw)
>> +		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>> +	intel_runtime_pm_put(dev_priv);
>> +}
>> +
>> +static void frequency_sample(struct drm_i915_private *dev_priv)
>> +{
>> +	if (dev_priv->pmu.enable &
>> +	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
>> +		u64 val;
>> +
>> +		val = dev_priv->rps.cur_freq;
>> +		if (dev_priv->gt.awake &&
>> +		    intel_runtime_pm_get_if_in_use(dev_priv)) {
>> +			val = intel_get_cagf(dev_priv,
>> +					     I915_READ_NOTRACE(GEN6_RPSTAT1));
>> +			intel_runtime_pm_put(dev_priv);
>> +		}
>> +		val = intel_gpu_freq(dev_priv, val);
>> +		dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT] += val * PERIOD;
>> +	}
>> +
>> +	if (dev_priv->pmu.enable &
>> +	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
>> +		u64 val = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
>> +		dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ] += val * PERIOD;
>> +	}
>> +}
>> +
>> +static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
>> +{
>> +	struct drm_i915_private *i915 =
>> +		container_of(hrtimer, struct drm_i915_private, pmu.timer);
>> +
>> +	if (i915->pmu.enable == 0)
>> +		return HRTIMER_NORESTART;
>> +
>> +	engines_sample(i915);
>> +	frequency_sample(i915);
>> +
>> +	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
>> +	return HRTIMER_RESTART;
>> +}
>> +
>> +static u64 count_interrupts(struct drm_i915_private *i915)
>> +{
>> +	/* open-coded kstat_irqs() */
>> +	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
>> +	u64 sum = 0;
>> +	int cpu;
>> +
>> +	if (!desc || !desc->kstat_irqs)
>> +		return 0;
>> +
>> +	for_each_possible_cpu(cpu)
>> +		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
>> +
>> +	return sum;
>> +}
>> +
>> +static void i915_pmu_event_destroy(struct perf_event *event)
>> +{
>> +	WARN_ON(event->parent);
>> +}
>> +
>> +static int engine_event_init(struct perf_event *event)
>> +{
>> +	struct drm_i915_private *i915 =
>> +		container_of(event->pmu, typeof(*i915), pmu.base);
>> +
>> +	if (!intel_engine_lookup_user(i915, engine_event_class(event),
>> +				      engine_event_instance(event)))
>> +		return -ENODEV;
>> +
>> +	switch (engine_event_sample(event)) {
>> +	case I915_SAMPLE_QUEUED:
>> +	case I915_SAMPLE_BUSY:
>> +	case I915_SAMPLE_WAIT:
>> +		break;
>> +	case I915_SAMPLE_SEMA:
>> +		if (INTEL_GEN(i915) < 6)
>> +			return -ENODEV;
>> +		break;
>> +	default:
>> +		return -ENOENT;
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static int i915_pmu_event_init(struct perf_event *event)
>> +{
>> +	struct drm_i915_private *i915 =
>> +		container_of(event->pmu, typeof(*i915), pmu.base);
>> +	int cpu, ret;
>> +
>> +	if (event->attr.type != event->pmu->type)
>> +		return -ENOENT;
>> +
>> +	/* unsupported modes and filters */
>> +	if (event->attr.sample_period) /* no sampling */
>> +		return -EINVAL;
>> +
>> +	if (has_branch_stack(event))
>> +		return -EOPNOTSUPP;
>> +
>> +	if (event->cpu < 0)
>> +		return -EINVAL;
>> +
>> +	cpu = cpumask_any_and(&i915_pmu_cpumask,
>> +			      topology_sibling_cpumask(event->cpu));
>> +	if (cpu >= nr_cpu_ids)
>> +		return -ENODEV;
>> +
>> +	ret = 0;
>> +	if (is_engine_event(event)) {
>> +		ret = engine_event_init(event);
>> +	} else switch (event->attr.config) {
>> +	case I915_PMU_ACTUAL_FREQUENCY:
>> +		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
>> +			ret = -ENODEV; /* requires a mutex for sampling! */
>> +	case I915_PMU_REQUESTED_FREQUENCY:
>> +	case I915_PMU_ENERGY:
>> +	case I915_PMU_RC6_RESIDENCY:
>> +	case I915_PMU_RC6p_RESIDENCY:
>> +	case I915_PMU_RC6pp_RESIDENCY:
>> +		if (INTEL_GEN(i915) < 6)
>> +			ret = -ENODEV;
>> +		break;
>> +	}
>> +	if (ret)
>> +		return ret;
>> +
>> +	event->cpu = cpu;
>> +	if (!event->parent)
>> +		event->destroy = i915_pmu_event_destroy;
>> +
>> +	return 0;
>> +}
>> +
>> +static u64 __i915_pmu_event_read(struct perf_event *event)
>> +{
>> +	struct drm_i915_private *i915 =
>> +		container_of(event->pmu, typeof(*i915), pmu.base);
>> +	u64 val = 0;
>> +
>> +	if (is_engine_event(event)) {
>> +		u8 sample = engine_event_sample(event);
>> +		struct intel_engine_cs *engine;
>> +
>> +		engine = intel_engine_lookup_user(i915,
>> +						  engine_event_class(event),
>> +						  engine_event_instance(event));
>> +
>> +		if (WARN_ON_ONCE(!engine)) {
>> +			/* Do nothing */
>> +		} else {
>> +			val = engine->pmu.sample[sample];
>> +		}
>> +	} else switch (event->attr.config) {
>> +	case I915_PMU_ACTUAL_FREQUENCY:
>> +		val = i915->pmu.sample[__I915_SAMPLE_FREQ_ACT];
>> +		break;
>> +	case I915_PMU_REQUESTED_FREQUENCY:
>> +		val = i915->pmu.sample[__I915_SAMPLE_FREQ_REQ];
>> +		break;
>> +	case I915_PMU_ENERGY:
>> +		val = intel_energy_uJ(i915);
>> +		break;
>> +	case I915_PMU_INTERRUPTS:
>> +		val = count_interrupts(i915);
>> +		break;
>> +	case I915_PMU_RC6_RESIDENCY:
>> +		val = intel_rc6_residency_ns(i915,
>> +					     IS_VALLEYVIEW(i915) ?
>> +					     VLV_GT_RENDER_RC6 :
>> +					     GEN6_GT_GFX_RC6);
>> +		break;
>> +	case I915_PMU_RC6p_RESIDENCY:
>> +		if (!IS_VALLEYVIEW(i915))
>> +			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
>> +		break;
>> +	case I915_PMU_RC6pp_RESIDENCY:
>> +		if (!IS_VALLEYVIEW(i915))
>> +			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
>> +		break;
>> +	}
>> +
>> +	return val;
>> +}
>> +
>> +static void i915_pmu_event_read(struct perf_event *event)
>> +{
>> +
>> +	local64_set(&event->count,
>> +		    __i915_pmu_event_read(event) -
>> +		    local64_read(&event->hw.prev_count));
>> +}
>> +
>> +static void i915_pmu_enable(struct perf_event *event)
>> +{
>> +	struct drm_i915_private *i915 =
>> +		container_of(event->pmu, typeof(*i915), pmu.base);
>> +	unsigned int bit = event_enabled_bit(event);
>> +	unsigned long flags;
>> +
>> +	spin_lock_irqsave(&i915->pmu.lock, flags);
>> +
>> +	/*
>> +	 * Start the sampling timer when enabling the first event.
>> +	 */
>> +	if (i915->pmu.enable == 0)
>> +		hrtimer_start_range_ns(&i915->pmu.timer,
>> +				       ns_to_ktime(PERIOD), 0,
>> +				       HRTIMER_MODE_REL_PINNED);
>> +
>> +	/*
>> +	 * Update the bitmask of enabled events and increment
>> +	 * the event reference counter.
>> +	 */
>> +	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
>> +	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
>> +	i915->pmu.enable |= BIT_ULL(bit);
>> +	i915->pmu.enable_count[bit]++;
>> +
>> +	/*
>> +	 * For per-engine events the bitmask and reference counting
>> +	 * is stored per engine.
>> +	 */
>> +	if (is_engine_event(event)) {
>> +		u8 sample = engine_event_sample(event);
>> +		struct intel_engine_cs *engine;
>> +
>> +		engine = intel_engine_lookup_user(i915,
>> +						  engine_event_class(event),
>> +						  engine_event_instance(event));
>> +		GEM_BUG_ON(!engine);
>> +		engine->pmu.enable |= BIT(sample);
>> +
>> +		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
>> +		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
>> +		engine->pmu.enable_count[sample]++;
>> +	}
>> +
>> +	/*
>> +	 * Store the current counter value so we can report the correct delta
>> +	 * for all listeners. Even when the event was already enabled and has
>> +	 * an existing non-zero value.
>> +	 */
>> +	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
>> +
>> +	spin_unlock_irqrestore(&i915->pmu.lock, flags);
>> +}
>> +
>> +static void i915_pmu_disable(struct perf_event *event)
>> +{
>> +	struct drm_i915_private *i915 =
>> +		container_of(event->pmu, typeof(*i915), pmu.base);
>> +	unsigned int bit = event_enabled_bit(event);
>> +	unsigned long flags;
>> +
>> +	spin_lock_irqsave(&i915->pmu.lock, flags);
>> +
>> +	if (is_engine_event(event)) {
>> +		u8 sample = engine_event_sample(event);
>> +		struct intel_engine_cs *engine;
>> +
>> +		engine = intel_engine_lookup_user(i915,
>> +						  engine_event_class(event),
>> +						  engine_event_instance(event));
>> +		GEM_BUG_ON(!engine);
>> +		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
>> +		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
>> +		/*
>> +		 * Decrement the reference count and clear the enabled
>> +		 * bitmask when the last listener on an event goes away.
>> +		 */
>> +		if (--engine->pmu.enable_count[sample] == 0)
>> +			engine->pmu.enable &= ~BIT(sample);
>> +	}
>> +
>> +	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
>> +	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
>> +	/*
>> +	 * Decrement the reference count and clear the enabled
>> +	 * bitmask when the last listener on an event goes away.
>> +	 */
>> +	if (--i915->pmu.enable_count[bit] == 0)
>> +		i915->pmu.enable &= ~BIT_ULL(bit);
>> +
>> +	spin_unlock_irqrestore(&i915->pmu.lock, flags);
>> +}
>> +
>> +static void i915_pmu_event_start(struct perf_event *event, int flags)
>> +{
>> +	i915_pmu_enable(event);
>> +	event->hw.state = 0;
>> +}
>> +
>> +static void i915_pmu_event_stop(struct perf_event *event, int flags)
>> +{
>> +	if (flags & PERF_EF_UPDATE)
>> +		i915_pmu_event_read(event);
>> +	i915_pmu_disable(event);
>> +	event->hw.state = PERF_HES_STOPPED;
>> +}
>> +
>> +static int i915_pmu_event_add(struct perf_event *event, int flags)
>> +{
>> +	if (flags & PERF_EF_START)
>> +		i915_pmu_event_start(event, flags);
>> +
>> +	return 0;
>> +}
>> +
>> +static void i915_pmu_event_del(struct perf_event *event, int flags)
>> +{
>> +	i915_pmu_event_stop(event, PERF_EF_UPDATE);
>> +}
>> +
>> +static int i915_pmu_event_event_idx(struct perf_event *event)
>> +{
>> +	return 0;
>> +}
>> +
>> +static ssize_t i915_pmu_format_show(struct device *dev,
>> +				    struct device_attribute *attr, char *buf)
>> +{
>> +        struct dev_ext_attribute *eattr;
>> +
>> +        eattr = container_of(attr, struct dev_ext_attribute, attr);
>> +        return sprintf(buf, "%s\n", (char *) eattr->var);
>> +}
>> +
>> +#define I915_PMU_FORMAT_ATTR(_name, _config)           \
>> +        (&((struct dev_ext_attribute[]) {               \
>> +                { .attr = __ATTR(_name, S_IRUGO, i915_pmu_format_show, NULL), \
>> +                  .var = (void *) _config, }            \
>> +        })[0].attr.attr)
>> +
>> +static struct attribute *i915_pmu_format_attrs[] = {
>> +        I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-42"),
>> +        NULL,
>> +};
>> +
>> +static const struct attribute_group i915_pmu_format_attr_group = {
>> +        .name = "format",
>> +        .attrs = i915_pmu_format_attrs,
>> +};
>> +
>> +static ssize_t i915_pmu_event_show(struct device *dev,
>> +				   struct device_attribute *attr, char *buf)
>> +{
>> +        struct dev_ext_attribute *eattr;
>> +
>> +        eattr = container_of(attr, struct dev_ext_attribute, attr);
>> +        return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
>> +}
>> +
>> +#define I915_PMU_EVENT_ATTR(_name, _config)            \
>> +        (&((struct dev_ext_attribute[]) {               \
>> +                { .attr = __ATTR(_name, S_IRUGO, i915_pmu_event_show, NULL), \
>> +                  .var = (void *) _config, }            \
>> +         })[0].attr.attr)
>> +
>> +static struct attribute *i915_pmu_events_attrs[] = {
>> +	I915_PMU_EVENT_ATTR(rcs0-queued,
>> +			    I915_PMU_ENGINE_QUEUED(I915_ENGINE_CLASS_RENDER, 0)),
>> +	I915_PMU_EVENT_ATTR(rcs0-busy,
>> +			    I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0)),
>> +	I915_PMU_EVENT_ATTR(rcs0-wait,
>> +			    I915_PMU_ENGINE_WAIT(I915_ENGINE_CLASS_RENDER, 0)),
>> +	I915_PMU_EVENT_ATTR(rcs0-sema,
>> +			    I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_RENDER, 0)),
>> +
>> +	I915_PMU_EVENT_ATTR(bcs0-queued,
>> +			    I915_PMU_ENGINE_QUEUED(I915_ENGINE_CLASS_COPY, 0)),
>> +	I915_PMU_EVENT_ATTR(bcs0-busy,
>> +			    I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_COPY, 0)),
>> +	I915_PMU_EVENT_ATTR(bcs0-wait,
>> +			    I915_PMU_ENGINE_WAIT(I915_ENGINE_CLASS_COPY, 0)),
>> +	I915_PMU_EVENT_ATTR(bcs0-sema,
>> +			    I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_COPY, 0)),
>> +
>> +	I915_PMU_EVENT_ATTR(vcs0-queued,
>> +			    I915_PMU_ENGINE_QUEUED(I915_ENGINE_CLASS_VIDEO, 0)),
>> +	I915_PMU_EVENT_ATTR(vcs0-busy,
>> +			    I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_VIDEO, 0)),
>> +	I915_PMU_EVENT_ATTR(vcs0-wait,
>> +			    I915_PMU_ENGINE_WAIT(I915_ENGINE_CLASS_VIDEO, 0)),
>> +	I915_PMU_EVENT_ATTR(vcs0-sema,
>> +			    I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_VIDEO, 0)),
>> +
>> +	I915_PMU_EVENT_ATTR(vcs1-queued,
>> +			    I915_PMU_ENGINE_QUEUED(I915_ENGINE_CLASS_VIDEO, 1)),
>> +	I915_PMU_EVENT_ATTR(vcs1-busy,
>> +			    I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_VIDEO, 1)),
>> +	I915_PMU_EVENT_ATTR(vcs1-wait,
>> +			    I915_PMU_ENGINE_WAIT(I915_ENGINE_CLASS_VIDEO, 1)),
>> +	I915_PMU_EVENT_ATTR(vcs1-sema,
>> +			    I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_VIDEO, 1)),
>> +
>> +	I915_PMU_EVENT_ATTR(vecs0-queued,
>> +			    I915_PMU_ENGINE_QUEUED(I915_ENGINE_CLASS_VIDEO_ENHANCE, 0)),
>> +	I915_PMU_EVENT_ATTR(vecs0-busy,
>> +			    I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_VIDEO_ENHANCE, 0)),
>> +	I915_PMU_EVENT_ATTR(vecs0-wait,
>> +			    I915_PMU_ENGINE_WAIT(I915_ENGINE_CLASS_VIDEO_ENHANCE, 0)),
>> +	I915_PMU_EVENT_ATTR(vecs0-sema,
>> +			    I915_PMU_ENGINE_SEMA(I915_ENGINE_CLASS_VIDEO_ENHANCE, 0)),
>> +
>> +        I915_PMU_EVENT_ATTR(actual-frequency,	 I915_PMU_ACTUAL_FREQUENCY),
>> +        I915_PMU_EVENT_ATTR(requested-frequency, I915_PMU_REQUESTED_FREQUENCY),
>> +        I915_PMU_EVENT_ATTR(energy,		 I915_PMU_ENERGY),
>> +        I915_PMU_EVENT_ATTR(interrupts,		 I915_PMU_INTERRUPTS),
>> +        I915_PMU_EVENT_ATTR(rc6-residency,	 I915_PMU_RC6_RESIDENCY),
>> +        I915_PMU_EVENT_ATTR(rc6p-residency,	 I915_PMU_RC6p_RESIDENCY),
>> +        I915_PMU_EVENT_ATTR(rc6pp-residency,	 I915_PMU_RC6pp_RESIDENCY),
>> +
>> +        NULL,
>> +};
>> +
>> +static const struct attribute_group i915_pmu_events_attr_group = {
>> +        .name = "events",
>> +        .attrs = i915_pmu_events_attrs,
>> +};
>> +
>> +static ssize_t
>> +i915_pmu_get_attr_cpumask(struct device *dev,
>> +			  struct device_attribute *attr,
>> +			  char *buf)
>> +{
>> +	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
>> +}
>> +
>> +static DEVICE_ATTR(cpumask, S_IRUGO, i915_pmu_get_attr_cpumask, NULL);
>> +
>> +static struct attribute *i915_cpumask_attrs[] = {
>> +	&dev_attr_cpumask.attr,
>> +	NULL,
>> +};
>> +
>> +static struct attribute_group i915_pmu_cpumask_attr_group = {
>> +	.attrs = i915_cpumask_attrs,
>> +};
>> +
>> +static const struct attribute_group *i915_pmu_attr_groups[] = {
>> +        &i915_pmu_format_attr_group,
>> +        &i915_pmu_events_attr_group,
>> +	&i915_pmu_cpumask_attr_group,
>> +        NULL
>> +};
>> +
>> +static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
>> +{
>> +	unsigned int target;
>> +
>> +	target = cpumask_any_and(&i915_pmu_cpumask, &i915_pmu_cpumask);
>> +	/* Select the first online CPU as a designated reader. */
>> +	if (target >= nr_cpu_ids)
>> +		cpumask_set_cpu(cpu, &i915_pmu_cpumask);
>> +
>> +	return 0;
>> +}
>> +
>> +static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
>> +{
>> +	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
>> +	unsigned int target;
>> +
>> +	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
>> +		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
>> +		/* Migrate events if there is a valid target */
>> +		if (target < nr_cpu_ids) {
>> +			cpumask_set_cpu(target, &i915_pmu_cpumask);
>> +			perf_pmu_migrate_context(&pmu->base, cpu, target);
>> +		}
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +void i915_pmu_register(struct drm_i915_private *i915)
>> +{
>> +	int ret = ENOTSUPP;
>> +
>> +	if (INTEL_GEN(i915) <= 2)
>> +		goto err;
>> +
>> +	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
>> +				      "perf/x86/intel/i915:online",
>> +				      i915_pmu_cpu_online,
>> +			              i915_pmu_cpu_offline);
>> +	if (ret)
>> +		goto err;
>> +
>> +	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
>> +				       &i915->pmu.node);
>> +	if (ret)
>> +		goto err;
>> +
>> +	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
>> +	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
>> +	i915->pmu.base.event_init	= i915_pmu_event_init;
>> +	i915->pmu.base.add		= i915_pmu_event_add;
>> +	i915->pmu.base.del		= i915_pmu_event_del;
>> +	i915->pmu.base.start		= i915_pmu_event_start;
>> +	i915->pmu.base.stop		= i915_pmu_event_stop;
>> +	i915->pmu.base.read		= i915_pmu_event_read;
>> +	i915->pmu.base.event_idx	= i915_pmu_event_event_idx;
>> +
>> +	spin_lock_init(&i915->pmu.lock);
>> +	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
>> +	i915->pmu.timer.function = i915_sample;
>> +	i915->pmu.enable = 0;
>> +
>> +	if (perf_pmu_register(&i915->pmu.base, "i915", -1))
>> +		i915->pmu.base.event_init = NULL;
>> +
>> +err:
>> +	DRM_INFO("Failed to register PMU (err=%d)\n", ret);
>> +}
>> +
>> +void i915_pmu_unregister(struct drm_i915_private *i915)
>> +{
>> +	if (!i915->pmu.base.event_init)
>> +		return;
>> +
>> +	i915->pmu.enable = 0;
>> +
>> +	perf_pmu_unregister(&i915->pmu.base);
>> +	i915->pmu.base.event_init = NULL;
>> +
>> +	hrtimer_cancel(&i915->pmu.timer);
>> +
>> +	cpuhp_state_remove_instance(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
>> +				    &i915->pmu.node);
>> +}
>> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
>> index 0b03260a3967..8c362e0451c1 100644
>> --- a/drivers/gpu/drm/i915/i915_reg.h
>> +++ b/drivers/gpu/drm/i915/i915_reg.h
>> @@ -186,6 +186,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
>>   #define VIDEO_ENHANCEMENT_CLASS	2
>>   #define COPY_ENGINE_CLASS	3
>>   #define OTHER_CLASS		4
>> +#define MAX_ENGINE_CLASS	4
>> +
>> +#define MAX_ENGINE_INSTANCE    1
>>   
>>   /* PCI config space */
>>   
>> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
>> index 3ae89a9d6241..dbc7abd65f33 100644
>> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
>> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
>> @@ -198,6 +198,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
>>   	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
>>   	class_info = &intel_engine_classes[info->class];
>>   
>> +	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
>> +		return -EINVAL;
>> +
>> +	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
>> +		return -EINVAL;
>> +
>> +	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
>> +		return -EINVAL;
>> +
>>   	GEM_BUG_ON(dev_priv->engine[id]);
>>   	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
>>   	if (!engine)
>> @@ -225,6 +234,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
>>   
>>   	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
>>   
>> +	dev_priv->engine_class[info->class][info->instance] = engine;
>>   	dev_priv->engine[id] = engine;
>>   	return 0;
>>   }
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> index 268342433a8e..7db4c572ef76 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> @@ -2283,3 +2283,28 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
>>   
>>   	return intel_init_ring_buffer(engine);
>>   }
>> +
>> +static u8 user_class_map[I915_ENGINE_CLASS_MAX] = {
>> +	[I915_ENGINE_CLASS_OTHER] = OTHER_CLASS,
>> +	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
>> +	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
>> +	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
>> +	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
>> +};
>> +
>> +struct intel_engine_cs *
>> +intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
>> +{
>> +	if (class >= ARRAY_SIZE(user_class_map))
>> +		return NULL;
>> +
>> +	class = user_class_map[class];
>> +
>> +	if (WARN_ON_ONCE(class > MAX_ENGINE_CLASS))
>> +		return NULL;
>> +
>> +	if (instance > MAX_ENGINE_INSTANCE)
>> +		return NULL;
>> +
>> +	return i915->engine_class[class][instance];
>> +}
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> index 79c0021f3700..cf095b9386f4 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> @@ -245,6 +245,28 @@ struct intel_engine_cs {
>>   		I915_SELFTEST_DECLARE(bool mock : 1);
>>   	} breadcrumbs;
>>   
>> +	struct {
>> +		/**
>> +		 * @enable: Bitmask of enable sample events on this engine.
>> +		 *
>> +		 * Bits correspond to sample event types, for instance
>> +		 * I915_SAMPLE_QUEUED is bit 0 etc.
>> +		 */
>> +		u32 enable;
>> +		/**
>> +		 * @enable_count: Reference count for the enabled samplers.
>> +		 *
>> +		 * Index number corresponds to the bit number from @enable.
>> +		 */
>> +		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
>> +		/**
>> +		 * @sample: Counter value for sampling events.
>> +		 *
>> +		 * Our internal timer stores the current counter in this field.
>> +		 */
>> +		u64 sample[I915_ENGINE_SAMPLE_MAX];
>> +	} pmu;
>> +
>>   	/*
>>   	 * A pool of objects to use as shadow copies of client batch buffers
>>   	 * when the command parser is enabled. Prevents the client from
>> @@ -737,4 +759,7 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915);
>>   
>>   bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
>>   
>> +struct intel_engine_cs *
>> +intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
>> +
>>   #endif /* _INTEL_RINGBUFFER_H_ */
>> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
>> index d8d10d932759..6dc0d6fd4e4c 100644
>> --- a/include/uapi/drm/i915_drm.h
>> +++ b/include/uapi/drm/i915_drm.h
>> @@ -86,6 +86,64 @@ enum i915_mocs_table_index {
>>   	I915_MOCS_CACHED,
>>   };
>>   
>> +enum drm_i915_gem_engine_class {
>> +	I915_ENGINE_CLASS_OTHER = 0,
>> +	I915_ENGINE_CLASS_RENDER = 1,
>> +	I915_ENGINE_CLASS_COPY = 2,
>> +	I915_ENGINE_CLASS_VIDEO = 3,
>> +	I915_ENGINE_CLASS_VIDEO_ENHANCE = 4,
>> +	I915_ENGINE_CLASS_MAX /* non-ABI */
>> +};
>> +
>> +/**
>> + * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
>> + *
>> + */
>> +
>> +enum drm_i915_pmu_engine_sample {
>> +	I915_SAMPLE_QUEUED = 0,
>> +	I915_SAMPLE_BUSY = 1,
>> +	I915_SAMPLE_WAIT = 2,
>> +	I915_SAMPLE_SEMA = 3,
>> +	I915_ENGINE_SAMPLE_MAX /* non-ABI */
>> +};
>> +
>> +#define I915_PMU_SAMPLE_BITS (4)
>> +#define I915_PMU_SAMPLE_MASK (0xf)
>> +#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
>> +#define I915_PMU_CLASS_SHIFT \
>> +	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
>> +
>> +#define __I915_PMU_ENGINE(class, instance, sample) \
>> +	((class) << I915_PMU_CLASS_SHIFT | \
>> +	(instance) << I915_PMU_SAMPLE_BITS | \
>> +	(sample))
>> +
>> +#define I915_PMU_ENGINE_QUEUED(class, instance) \
>> +	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_QUEUED)
>> +
>> +#define I915_PMU_ENGINE_BUSY(class, instance) \
>> +	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
>> +
>> +#define I915_PMU_ENGINE_WAIT(class, instance) \
>> +	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
>> +
>> +#define I915_PMU_ENGINE_SEMA(class, instance) \
>> +	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
>> +
>> +#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
>> +
>> +#define I915_PMU_ACTUAL_FREQUENCY 	__I915_PMU_OTHER(0)
>> +#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
>> +#define I915_PMU_ENERGY			__I915_PMU_OTHER(2)
>> +#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(3)
>> +
>> +#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(4)
>> +#define I915_PMU_RC6p_RESIDENCY		__I915_PMU_OTHER(5)
>> +#define I915_PMU_RC6pp_RESIDENCY	__I915_PMU_OTHER(6)
>> +
>> +#define I915_PMU_LAST I915_PMU_RC6pp_RESIDENCY
>> +
>>   /* Each region is a minimum of 16k, and there are at most 255 of them.
>>    */
>>   #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 


More information about the Intel-gfx mailing list