[Intel-gfx] [PATCH] RFC drm/i915: Expose a PMU interface for perf queries

Chris Wilson chris at chris-wilson.co.uk
Wed Sep 11 16:51:31 CEST 2013


The first goal is to be able to measure GPU (and individual ring) busyness
without having to poll registers from userspace. (Doing so not only requires
holding forcewake indefinitely, perturbing the system, but also risks hanging
the machine.) As an alternative, we can use the perf event counter interface
to sample the ring registers periodically and report those results to
userspace.

To be able to do so, we need to export the two symbols from
kernel/events/core.c to register and unregister a PMU device.

v2: Use a common timer for the ring sampling.
v3: Sample statistics and instdone - now a complete replacement for all
unsafe register access by intel_gpu_top.
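
For illustration, a rough sketch of how a tool like intel_gpu_top might
consume one of these counters through the ordinary perf_event_open()
interface (the sysfs path follows the usual dynamic-PMU convention, config 0
corresponds to I915_PERF_COUNT_RCS_BUSY from the uapi additions below, and
error handling plus the usual perf privileges are glossed over):

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <sys/types.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                             int cpu, int group_fd, unsigned long flags)
  {
          return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr = { 0 };
          uint64_t before, after;
          FILE *file;
          int fd, type;

          /* The perf core publishes the dynamic PMU type id in sysfs. */
          file = fopen("/sys/bus/event_source/devices/i915/type", "r");
          if (file == NULL || fscanf(file, "%d", &type) != 1)
                  return 1;
          fclose(file);

          attr.type = type;
          attr.size = sizeof(attr);
          attr.config = 0; /* I915_PERF_COUNT_RCS_BUSY */

          /* System-wide counter: pid == -1 and a nominated cpu. */
          fd = perf_event_open(&attr, -1, 0, -1, 0);
          if (fd < 0)
                  return 1;

          read(fd, &before, sizeof(before));
          sleep(1);
          read(fd, &after, sizeof(after));

          /* The counter accumulates busy time in nanoseconds. */
          printf("render ring busy: %.1f%%\n", 100.0 * (after - before) / 1e9);
          return 0;
  }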

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile           |   2 +-
 drivers/gpu/drm/i915/i915_dma.c         |   4 +
 drivers/gpu/drm/i915/i915_drv.h         |  26 ++
 drivers/gpu/drm/i915/i915_perf.c        | 407 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h |   2 +
 include/uapi/drm/i915_drm.h             |  48 ++++
 kernel/events/core.c                    |   2 +
 7 files changed, 490 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/i915/i915_perf.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 118aefa..87240aa 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -52,8 +52,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ns2501.o \
 	  i915_gem_dmabuf.o
 
+i915-$(CONFIG_PERF_EVENTS) += i915_perf.o
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
-
 i915-$(CONFIG_ACPI)	+= intel_acpi.o
 
 obj-$(CONFIG_DRM_I915)  += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3d28b7f..b227374 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1673,6 +1673,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
 
+	i915_perf_register(dev);
+
 	return 0;
 
 out_power_well:
@@ -1712,6 +1714,8 @@ int i915_driver_unload(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	i915_perf_unregister(dev);
+
 	intel_gpu_ips_teardown();
 
 	if (HAS_POWER_WELL(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2ef9ae7..688353b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -42,6 +42,7 @@
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/perf_event.h>
 #include <linux/pm_qos.h>
 #include <linux/mmu_notifier.h>
 
@@ -1177,6 +1178,16 @@ struct i915_package_c8 {
 	} regsave;
 };
 
+enum {
+	__I915_SAMPLE_FREQ_ACT = 0,
+	__I915_SAMPLE_FREQ_REQ,
+	__I915_SAMPLE_STATISTIC_0,
+	__I915_SAMPLE_STATISTIC_8 = __I915_SAMPLE_STATISTIC_0 + 8,
+	__I915_SAMPLE_INSTDONE_0,
+	__I915_SAMPLE_INSTDONE_63 = __I915_SAMPLE_INSTDONE_0 + 63,
+	__I915_NUM_PMU_SAMPLERS
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1370,6 +1381,12 @@ typedef struct drm_i915_private {
 
 	struct i915_package_c8 pc8;
 
+	struct pmu pmu;
+	struct hrtimer pmu_timer;
+	u64 pmu_enable;
+	u64 pmu_instdone;
+	u64 pmu_sample[__I915_NUM_PMU_SAMPLERS];
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -2198,6 +2215,15 @@ void i915_destroy_error_state(struct drm_device *dev);
 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
 const char *i915_cache_level_str(int type);
 
+/* i915_perf.c */
+#ifdef CONFIG_PERF_EVENTS
+extern void i915_perf_register(struct drm_device *dev);
+extern void i915_perf_unregister(struct drm_device *dev);
+#else
+static inline void i915_perf_register(struct drm_device *dev) {}
+static inline void i915_perf_unregister(struct drm_device *dev) {}
+#endif
+
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
new file mode 100644
index 0000000..6fc1952
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -0,0 +1,407 @@
+#include <linux/perf_event.h>
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
+
+#define RING_MASK 0xffffffff
+#define RING_MAX 32
+
+#define INSTDONE_ENABLE 0x8
+#define STATISTICS_MASK ((u64)0x1ff << I915_PERF_STATISTIC_0)
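+
+/*
+ * Event configs below RING_MAX select the per-ring samples: each ring is
+ * given four config slots, of which only busy/wait/sema are used, so the
+ * leftover bit 3 of pmu_enable doubles up as the internal INSTDONE enable.
+ * The global counters start at config 32 and map directly onto bits of
+ * pmu_enable.
+ */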
+
+static bool gpu_active(struct drm_i915_private *dev_priv)
+{
+	struct intel_ring_buffer *ring;
+	bool active = false;
+	int i;
+
+	for_each_ring(ring, dev_priv, i) {
+		active = !list_empty(&ring->request_list);
+		if (active)
+			break;
+	}
+
+	return active;
+}
+
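+/*
+ * Read a 64-bit counter exposed as a pair of 32-bit registers: re-read the
+ * high dword until it is stable around the read of the low dword, so that
+ * a carry between the two reads cannot be observed.
+ */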
+static u64 __read64(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 high, low;
+
+	do {
+		high = I915_READ_NOTRACE(reg + 4);
+		low = I915_READ_NOTRACE(reg);
+	} while (high != I915_READ_NOTRACE(reg + 4));
+
+	return (u64)high << 32 | low;
+}
+
+static void rings_sample(struct drm_i915_private *dev_priv)
+{
+	struct intel_ring_buffer *ring;
+	int i;
+
+	if ((dev_priv->pmu_enable & (RING_MASK | STATISTICS_MASK)) == 0)
+		return;
+
+	if (!gpu_active(dev_priv))
+		return;
+
+	if (dev_priv->info->gen >= 6)
+		gen6_gt_force_wake_get(dev_priv);
+
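+	/*
+	 * Over each sampling period, a ring with outstanding requests is
+	 * counted as busy if HEAD != TAIL, as waiting if the ring CTL
+	 * register reports a wait-for-event stall, and as blocked if it
+	 * reports a semaphore wait.
+	 */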
+	for_each_ring(ring, dev_priv, i) {
+		u32 head, tail, ctrl;
+
+		if ((dev_priv->pmu_enable & (0x7 << (4*i))) == 0)
+			continue;
+
+		if (list_empty(&ring->request_list))
+			continue;
+
+		head = I915_READ_NOTRACE(RING_HEAD((ring)->mmio_base));
+		tail = I915_READ_NOTRACE(RING_TAIL((ring)->mmio_base));
+		ctrl = I915_READ_NOTRACE(RING_CTL((ring)->mmio_base));
+
+		if ((head ^ tail) & HEAD_ADDR)
+			ring->pmu_sample[I915_SAMPLE_BUSY] += PERIOD;
+		if (ctrl & RING_WAIT)
+			ring->pmu_sample[I915_SAMPLE_WAIT] += PERIOD;
+		if (ctrl & RING_WAIT_SEMAPHORE)
+			ring->pmu_sample[I915_SAMPLE_SEMA] += PERIOD;
+	}
+
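+	/*
+	 * For every INSTDONE bit selected by an active event, accumulate the
+	 * time for which that bit was observed set.
+	 */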
+	if (dev_priv->pmu_enable & INSTDONE_ENABLE) {
+		u64 instdone;
+
+		if (dev_priv->info->gen < 4) {
+			instdone = I915_READ_NOTRACE(INSTDONE);
+		} else if (dev_priv->info->gen < 7) {
+			instdone  = I915_READ_NOTRACE(INSTDONE_I965);
+			instdone |= (u64)I915_READ_NOTRACE(INSTDONE1) << 32;
+		} else {
+			instdone  = I915_READ_NOTRACE(GEN7_INSTDONE_1);
+			instdone |= (u64)(I915_READ_NOTRACE(GEN7_SC_INSTDONE) & 0xff) << 32;
+			instdone |= (u64)(I915_READ_NOTRACE(GEN7_SAMPLER_INSTDONE) & 0xff) << 40;
+			instdone |= (u64)(I915_READ_NOTRACE(GEN7_ROW_INSTDONE) & 0xff) << 48;
+		}
+
+		for (instdone &= dev_priv->pmu_instdone, i = 0; instdone; instdone >>= 1, i++) {
+			if ((instdone & 1) == 0)
+				continue;
+
+			dev_priv->pmu_sample[__I915_SAMPLE_INSTDONE_0 + i] += PERIOD;
+		}
+	}
+
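+	/*
+	 * The nine 64-bit pipeline statistics counters (vertex/primitive
+	 * counts, shader invocations, depth-test results) form a contiguous
+	 * block of registers starting at 0x2310 on gen4+.
+	 */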
+	if (dev_priv->pmu_enable & STATISTICS_MASK) {
+		const u32 reg0 = 0x2310;
+
+		for (i = 0; i < 9; i++) {
+			if ((dev_priv->pmu_enable & ((u64)1 << I915_PERF_STATISTIC(i))) == 0)
+				continue;
+
+			dev_priv->pmu_sample[__I915_SAMPLE_STATISTIC_0 + i] = __read64(dev_priv, reg0+8*i);
+		}
+	}
+
+	if (dev_priv->info->gen >= 6)
+		gen6_gt_force_wake_put(dev_priv);
+}
+
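+/*
+ * Accumulate frequency multiplied by time so that userspace can derive the
+ * average frequency over an interval by dividing by the interval length.
+ */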
+static void frequency_sample(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->pmu_enable & ((u64)1 << I915_PERF_ACTUAL_FREQUENCY)) {
+		u64 val;
+
+		if (gpu_active(dev_priv)) {
+			val = I915_READ_NOTRACE(GEN6_RPSTAT1);
+			if (dev_priv->info->is_haswell)
+				val = (val & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+			else
+				val = (val & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+		} else
+			val = dev_priv->rps.cur_delay; /* minor white lie to save power */
+
+		dev_priv->pmu_sample[__I915_SAMPLE_FREQ_ACT] += val * GT_FREQUENCY_MULTIPLIER * PERIOD;
+	}
+
+	if (dev_priv->pmu_enable & ((u64)1 << I915_PERF_REQUESTED_FREQUENCY))
+		dev_priv->pmu_sample[__I915_SAMPLE_FREQ_REQ] += (u64)dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER * PERIOD;
+}
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(hrtimer, struct drm_i915_private, pmu_timer);
+
+	if (dev_priv->pmu_enable == 0)
+		return HRTIMER_NORESTART;
+
+	rings_sample(dev_priv);
+	frequency_sample(dev_priv);
+
+	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
+	return HRTIMER_RESTART;
+}
+
+static void i915_perf_event_destroy(struct perf_event *event)
+{
+	WARN_ON(event->parent);
+}
+
+static int ring_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu);
+	int ring = event->attr.config >> 2;
+	int sample = event->attr.config & 3;
+
+	switch (sample) {
+	case I915_SAMPLE_BUSY:
+		break;
+	case I915_SAMPLE_WAIT:
+		if (i915->info->gen < 3)
+			return -ENODEV;
+		break;
+	case I915_SAMPLE_SEMA:
+		if (i915->info->gen < 6)
+			return -ENODEV;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (ring >= I915_NUM_RINGS)
+		return -ENOENT;
+
+	if (i915->ring[ring].obj == NULL)
+		return -ENODEV;
+
+	return 0;
+}
+
+static int i915_perf_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu);
+	int ret;
+
+	/* XXX ideally only want pid == -1 && cpu == -1 */
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	ret = 0;
+	if (event->attr.config < RING_MAX) {
+		ret = ring_event_init(event);
+	} else switch (event->attr.config) {
+	case I915_PERF_ACTUAL_FREQUENCY:
+	case I915_PERF_REQUESTED_FREQUENCY:
+	case I915_PERF_ENERGY:
+	case I915_PERF_RC6_RESIDENCY:
+	case I915_PERF_RC6p_RESIDENCY:
+	case I915_PERF_RC6pp_RESIDENCY:
+		if (i915->info->gen < 6)
+			ret = -ENODEV;
+		break;
+	case I915_PERF_STATISTIC_0...I915_PERF_STATISTIC_8:
+		if (i915->info->gen < 4)
+			ret = -ENODEV;
+		break;
+	case I915_PERF_INSTDONE_0...I915_PERF_INSTDONE_63:
+		if (i915->info->gen < 4 &&
+		    event->attr.config - I915_PERF_INSTDONE_0 >= 32) {
+			ret = -ENODEV;
+			break;
+		}
+	}
+	if (ret)
+		return ret;
+
+	if (!event->parent) {
+		event->destroy = i915_perf_event_destroy;
+	}
+
+	return 0;
+}
+
+static inline bool is_instdone_event(struct perf_event *event)
+{
+	return (event->attr.config >= I915_PERF_INSTDONE_0 &&
+		event->attr.config <= I915_PERF_INSTDONE_63);
+}
+
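+/*
+ * The sampling timer only runs while at least one event is enabled; once
+ * pmu_enable drops back to zero, i915_sample() lets the timer expire.
+ */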
+static void i915_perf_enable(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu);
+	u64 mask;
+
+	if (i915->pmu_enable == 0)
+		__hrtimer_start_range_ns(&i915->pmu_timer,
+					 ns_to_ktime(PERIOD), 0,
+					 HRTIMER_MODE_REL_PINNED, 0);
+
+	if (is_instdone_event(event)) {
+		i915->pmu_instdone |= (u64)1 << (event->attr.config - I915_PERF_INSTDONE_0);
+		mask = INSTDONE_ENABLE;
+	} else
+		mask = (u64)1 << event->attr.config;
+
+	i915->pmu_enable |= mask;
+}
+
+static void i915_perf_disable(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu);
+	u64 mask;
+
+	if (is_instdone_event(event)) {
+		i915->pmu_instdone &= ~((u64)1 << (event->attr.config - I915_PERF_INSTDONE_0));
+		mask = i915->pmu_instdone == 0 ? INSTDONE_ENABLE : 0;
+	} else
+		mask = (u64)1 << event->attr.config;
+
+	i915->pmu_enable &= ~mask;
+}
+
+static int i915_perf_event_add(struct perf_event *event, int flags)
+{
+	if (flags & PERF_EF_START)
+		i915_perf_enable(event);
+	return 0;
+}
+
+static void i915_perf_event_del(struct perf_event *event, int flags)
+{
+	i915_perf_disable(event);
+}
+
+static void i915_perf_event_start(struct perf_event *event, int flags)
+{
+	//i915_perf_enable(event);
+}
+
+static void i915_perf_event_stop(struct perf_event *event, int flags)
+{
+	//i915_perf_disable(event);
+}
+
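+/*
+ * Scale the cumulative energy status counter by the RAPL energy unit
+ * (MSR_RAPL_POWER_UNIT bits 12:8, i.e. 1/2^n Joules) to report microjoules.
+ */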
+static u64 read_energy_uJ(struct drm_i915_private *dev_priv)
+{
+	u64 power;
+	u32 units;
+
+	if (dev_priv->info->gen < 6)
+		return 0;
+
+	rdmsrl(MSR_RAPL_POWER_UNIT, power);
+	power = (power & 0x1f00) >> 8;
+	units = 1000000 / (1 << power); /* convert to uJ */
+	power = I915_READ_NOTRACE(MCH_SECP_NRG_STTS);
+	power *= units;
+
+	return power;
+}
+
+static inline u64 calc_residency(struct drm_i915_private *dev_priv, const u32 reg)
+{
+	if (dev_priv->info->gen >= 6) {
+		u64 raw_time = (u64)I915_READ_NOTRACE(reg) * (u64)128;
+		return DIV_ROUND_UP_ULL(raw_time, 100000);
+	} else
+		return 0;
+}
+
+static void i915_perf_event_read(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu);
+	u64 val = 0;
+
+	if (event->attr.config < 32) {
+		int ring = event->attr.config >> 2;
+		int sample = event->attr.config & 3;
+		val = i915->ring[ring].pmu_sample[sample];
+	} else switch (event->attr.config) {
+	case I915_PERF_ACTUAL_FREQUENCY:
+		val = i915->pmu_sample[__I915_SAMPLE_FREQ_ACT];
+		break;
+	case I915_PERF_REQUESTED_FREQUENCY:
+		val = i915->pmu_sample[__I915_SAMPLE_FREQ_REQ];
+		break;
+	case I915_PERF_ENERGY:
+		val = read_energy_uJ(i915);
+		break;
+	case I915_PERF_INTERRUPTS:
+		val = atomic_read(&i915->irq_received);
+		break;
+
+	case I915_PERF_RC6_RESIDENCY:
+		val = calc_residency(i915, GEN6_GT_GFX_RC6);
+		break;
+
+	case I915_PERF_RC6p_RESIDENCY:
+		val = calc_residency(i915, GEN6_GT_GFX_RC6p);
+		break;
+
+	case I915_PERF_RC6pp_RESIDENCY:
+		val = calc_residency(i915, GEN6_GT_GFX_RC6pp);
+		break;
+
+	case I915_PERF_STATISTIC_0...I915_PERF_STATISTIC_8:
+		val = i915->pmu_sample[event->attr.config - I915_PERF_STATISTIC_0 + __I915_SAMPLE_STATISTIC_0];
+		break;
+
+	case I915_PERF_INSTDONE_0...I915_PERF_INSTDONE_63:
+		val = i915->pmu_sample[event->attr.config - I915_PERF_INSTDONE_0 + __I915_SAMPLE_INSTDONE_0];
+		break;
+	}
+
+	local64_set(&event->count, val);
+}
+
+static int i915_perf_event_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
+void i915_perf_register(struct drm_device *dev)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+
+	i915->pmu.task_ctx_nr	= perf_sw_context;
+	i915->pmu.event_init	= i915_perf_event_init;
+	i915->pmu.add		= i915_perf_event_add;
+	i915->pmu.del		= i915_perf_event_del;
+	i915->pmu.start		= i915_perf_event_start;
+	i915->pmu.stop		= i915_perf_event_stop;
+	i915->pmu.read		= i915_perf_event_read;
+	i915->pmu.event_idx	= i915_perf_event_event_idx;
+
+	hrtimer_init(&i915->pmu_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	i915->pmu_timer.function = i915_sample;
+	i915->pmu_enable = 0;
+
+	if (perf_pmu_register(&i915->pmu, "i915", -1))
+		i915->pmu.event_init = NULL;
+}
+
+void i915_perf_unregister(struct drm_device *dev)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+
+	if (i915->pmu.event_init == NULL)
+		return;
+
+	perf_pmu_unregister(&i915->pmu);
+	i915->pmu.event_init = NULL;
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4..a614c76 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -146,6 +146,8 @@ struct  intel_ring_buffer {
 	bool gpu_caches_dirty;
 	bool fbc_dirty;
 
+	u64 pmu_sample[3];
+
 	wait_queue_head_t irq_queue;
 
 	/**
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 18d109d..d4e4852 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -58,6 +58,54 @@
 #define I915_ERROR_UEVENT		"ERROR"
 #define I915_RESET_UEVENT		"RESET"
 
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
+ *
+ * The event config selects either a per-ring sample (busy, wait, semaphore)
+ * or one of the global counters defined below.
+ */
+#define I915_SAMPLE_BUSY	0
+#define I915_SAMPLE_WAIT	1
+#define I915_SAMPLE_SEMA	2
+
+#define I915_SAMPLE_RCS		0
+#define I915_SAMPLE_VCS		1
+#define I915_SAMPLE_BCS		2
+#define I915_SAMPLE_VECS	3
+
+#define __I915_PERF_COUNT(ring, id) ((ring) << 2 | (id))
+
+#define I915_PERF_COUNT_RCS_BUSY __I915_PERF_COUNT(I915_SAMPLE_RCS, I915_SAMPLE_BUSY)
+#define I915_PERF_COUNT_RCS_WAIT __I915_PERF_COUNT(I915_SAMPLE_RCS, I915_SAMPLE_WAIT)
+#define I915_PERF_COUNT_RCS_SEMA __I915_PERF_COUNT(I915_SAMPLE_RCS, I915_SAMPLE_SEMA)
+
+#define I915_PERF_COUNT_VCS_BUSY __I915_PERF_COUNT(I915_SAMPLE_VCS, I915_SAMPLE_BUSY)
+#define I915_PERF_COUNT_VCS_WAIT __I915_PERF_COUNT(I915_SAMPLE_VCS, I915_SAMPLE_WAIT)
+#define I915_PERF_COUNT_VCS_SEMA __I915_PERF_COUNT(I915_SAMPLE_VCS, I915_SAMPLE_SEMA)
+
+#define I915_PERF_COUNT_BCS_BUSY __I915_PERF_COUNT(I915_SAMPLE_BCS, I915_SAMPLE_BUSY)
+#define I915_PERF_COUNT_BCS_WAIT __I915_PERF_COUNT(I915_SAMPLE_BCS, I915_SAMPLE_WAIT)
+#define I915_PERF_COUNT_BCS_SEMA __I915_PERF_COUNT(I915_SAMPLE_BCS, I915_SAMPLE_SEMA)
+
+#define I915_PERF_COUNT_VECS_BUSY __I915_PERF_COUNT(I915_SAMPLE_VECS, I915_SAMPLE_BUSY)
+#define I915_PERF_COUNT_VECS_WAIT __I915_PERF_COUNT(I915_SAMPLE_VECS, I915_SAMPLE_WAIT)
+#define I915_PERF_COUNT_VECS_SEMA __I915_PERF_COUNT(I915_SAMPLE_VECS, I915_SAMPLE_SEMA)
+
+#define I915_PERF_ACTUAL_FREQUENCY 32
+#define I915_PERF_REQUESTED_FREQUENCY 33
+#define I915_PERF_ENERGY 34
+#define I915_PERF_INTERRUPTS 35
+
+#define I915_PERF_RC6_RESIDENCY		40
+#define I915_PERF_RC6p_RESIDENCY	41
+#define I915_PERF_RC6pp_RESIDENCY	42
+
+#define I915_PERF_STATISTIC_0	48
+#define I915_PERF_STATISTIC(n)	(I915_PERF_STATISTIC_0+(n))
+#define I915_PERF_STATISTIC_8	I915_PERF_STATISTIC(8)
+
+#define I915_PERF_INSTDONE_0	64
+#define I915_PERF_INSTDONE(n)	(I915_PERF_INSTDONE_0+(n))
+#define I915_PERF_INSTDONE_63	I915_PERF_INSTDONE(63)
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
 #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f86599e..cac360e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6384,6 +6384,7 @@ free_pdc:
 	free_percpu(pmu->pmu_disable_count);
 	goto unlock;
 }
+EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
@@ -6405,6 +6406,7 @@ void perf_pmu_unregister(struct pmu *pmu)
 	put_device(pmu->dev);
 	free_pmu_context(pmu);
 }
+EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
 struct pmu *perf_init_event(struct perf_event *event)
 {
-- 
1.8.4.rc3



