[Intel-gfx] [PATCH v15 14/14] drm/i915/perf: notify sseu configuration changes

Lionel Landwerlin lionel.g.landwerlin at intel.com
Wed May 31 12:33:55 UTC 2017


This adds the ability for userspace to request that the kernel track and
record SSEU configuration changes. These changes are inserted into the
perf stream so that userspace can interpret the OA reports using the
configuration applied at the time the OA reports were generated.
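
For illustration, userspace opts into these records at stream open time
via the new property. A minimal sketch (not part of the patch; drm_fd and
metrics_set are placeholders, error handling elided):

    uint64_t properties[] = {
        /* Include OA reports in samples. */
        DRM_I915_PERF_PROP_SAMPLE_OA, 1,

        /* OA unit configuration (placeholder values). */
        DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
        DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
        DRM_I915_PERF_PROP_OA_EXPONENT, 16,

        /* New in this patch: also record SSEU configuration changes. */
        DRM_I915_PERF_PROP_SSEU_CHANGE, 1,
    };
    struct drm_i915_perf_open_param param = {
        .flags = I915_PERF_FLAG_FD_CLOEXEC,
        .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
        .properties_ptr = (uintptr_t)properties,
    };
    int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);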

v2: Handle timestamps wrapping around 32 bits (Lionel)
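
The wrap handling mentioned in v2 compares 32-bit GPU timestamps with
signed arithmetic; the helper added in i915_perf.c boils down to this
(a worked example with hypothetical values):

    static bool timestamp_passed(uint32_t t0, uint32_t t1)
    {
        /* True iff t0 is at or after t1, even across a 32-bit wrap:
         * the unsigned subtraction wraps as well, and the sign of the
         * difference tells us on which side of t1 we are.
         */
        return (int32_t)(t0 - t1) >= 0;
    }

    /* e.g. t1 = 0xfffffff0 just before the wrap and t0 = 0x10 just
     * after it: t0 - t1 = 0x20, and (int32_t)0x20 >= 0, so t0 is
     * correctly seen as later than t1.
     */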

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  45 ++++
 drivers/gpu/drm/i915/i915_gem_context.c |   3 +
 drivers/gpu/drm/i915/i915_gem_context.h |  21 ++
 drivers/gpu/drm/i915/i915_perf.c        | 459 ++++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_lrc.c        |   7 +-
 include/uapi/drm/i915_drm.h             |  30 +++
 6 files changed, 538 insertions(+), 27 deletions(-)
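
A consumer of the stream might demultiplex the new record type roughly as
follows (a sketch against the uapi changes below; update_sseu() is a
hypothetical callback, buffer sizing and error handling are elided):

    uint8_t data[4096];
    ssize_t len = read(stream_fd, data, sizeof(data));

    for (ssize_t offset = 0; offset < len;) {
        const struct drm_i915_perf_record_header *header =
            (const void *)(data + offset);

        if (header->type == DRM_I915_PERF_RECORD_SSEU_CHANGE) {
            const struct drm_i915_perf_sseu_change *change =
                (const void *)(header + 1);

            /* OA reports read after this record were generated with
             * the SSEU configuration described here.
             */
            update_sseu(change->hw_id,
                        change->sseu.packed.slice_mask,
                        change->sseu.packed.subslice_mask);
        }

        offset += header->size;
    }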

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 499b2f9aa4be..b2e4df1efb12 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1963,6 +1963,12 @@ struct i915_perf_stream {
 	struct i915_gem_context *ctx;
 
 	/**
+	 * @has_sseu: Whether the stream emits SSEU configuration change
+	 * reports.
+	 */
+	bool has_sseu;
+
+	/**
 	 * @enabled: Whether the stream is currently enabled, considering
 	 * whether the stream was opened in a disabled state and based
 	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
@@ -2493,6 +2499,44 @@ struct drm_i915_private {
 			const struct i915_oa_format *oa_formats;
 			int n_builtin_sets;
 		} oa;
+
+		struct {
+			/**
+			 * Buffer containing change reports of the SSEU
+			 * configuration.
+			 */
+			struct i915_vma *vma;
+			u8 *vaddr;
+
+			/**
+			 * The scheduler writes at the head, and the perf
+			 * driver reads from the tail.
+			 */
+			u32 head;
+			u32 tail;
+
+			/**
+			 * Whether the SSEU buffer is enabled.
+			 */
+			bool enabled;
+
+			/**
+			 * Whether the buffer has overflowed.
+			 */
+			bool overflow;
+
+			/**
+			 * Keeps track of how many times the OA unit has been
+			 * enabled. This number is used to discard stale
+			 * @perf_sseu in @i915_gem_context.
+			 */
+			atomic64_t enable_no;
+
+			/**
+			 * Lock writes & tail pointer updates on this buffer.
+			 */
+			spinlock_t ptr_lock;
+		} sseu_buffer;
 	} perf;
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
@@ -3537,6 +3581,7 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 			    struct i915_gem_context *ctx,
 			    uint32_t *reg_state);
 int i915_oa_emit_noa_config_locked(struct drm_i915_gem_request *req);
+void i915_perf_emit_sseu_config(struct drm_i915_gem_request *req);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 60d0bbf0ae2b..368bb527a75a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -510,6 +510,9 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags)
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
+	/* Notify perf of possible change in SSEU configuration. */
+	i915_perf_emit_sseu_config(req);
+
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
 		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index e8247e2f6011..9c005be4933a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -194,6 +194,27 @@ struct i915_gem_context {
 
 	/** remap_slice: Bitmask of cache lines that need remapping */
 	u8 remap_slice;
+
+	/**
+	 * @perf_sseu: last SSEU configuration notified to userspace
+	 *
+	 * SSEU configuration changes are notified to userspace through the
+	 * perf infrastructure. We keep track here of the last notified
+	 * configuration for a given context since configuration can change
+	 * per engine.
+	 */
+	struct sseu_dev_info perf_sseu;
+
+	/**
+	 * @perf_enable_no: last perf enable identifier
+	 *
+	 * Userspace can enable/disable the perf infrastructure whenever it
+	 * wants without reconfiguring the OA unit. This number is updated at
+	 * the same time as @perf_sseu by copying
+	 * dev_priv->perf.sseu_buffer.enable_no, which changes every time the
+	 * user enables the OA unit.
+	 */
+	u64 perf_enable_no;
 };
 
 static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 4229c74baa22..325b9ac54bd5 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -212,6 +212,14 @@
  */
 #define OA_BUFFER_SIZE		SZ_16M
 
+/* Dimension the SSEU buffer based on the size of the OA buffer to ensure we
+ * don't run out of space before the OA unit notifies us that new data is
+ * available.
+ */
+#define SSEU_BUFFER_ITEM_SIZE	sizeof(struct perf_sseu_change)
+#define SSEU_BUFFER_SIZE	(OA_BUFFER_SIZE * SSEU_BUFFER_ITEM_SIZE / 256)
+#define SSEU_BUFFER_NB_ITEM	(SSEU_BUFFER_SIZE / SSEU_BUFFER_ITEM_SIZE)
+
 #define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
 
 /**
@@ -350,6 +358,8 @@ struct perf_open_properties {
 	u64 single_context:1;
 	u64 ctx_handle;
 
+	bool sseu_enable;
+
 	/* OA sampling state */
 	int metrics_set;
 	int oa_format;
@@ -357,6 +367,16 @@ struct perf_open_properties {
 	int oa_period_exponent;
 };
 
+struct perf_sseu_change {
+	u32 timestamp;
+	struct drm_i915_perf_sseu_change change;
+};
+
+static bool timestamp_passed(u32 t0, u32 t1)
+{
+	return (s32)(t0 - t1) >= 0;
+}
+
 static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
 {
 	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
@@ -569,6 +589,88 @@ static int append_oa_sample(struct i915_perf_stream *stream,
 	return 0;
 }
 
+static int append_sseu_change(struct i915_perf_stream *stream,
+			      char __user *buf,
+			      size_t count,
+			      size_t *offset,
+			      const struct perf_sseu_change *sseu_report)
+{
+	struct drm_i915_perf_record_header header;
+
+	header.type = DRM_I915_PERF_RECORD_SSEU_CHANGE;
+	header.pad = 0;
+	header.size = sizeof(struct drm_i915_perf_record_header) +
+		sizeof(struct drm_i915_perf_sseu_change);
+
+	if ((count - *offset) < header.size)
+		return -ENOSPC;
+
+	buf += *offset;
+	if (copy_to_user(buf, &header, sizeof(header)))
+		return -EFAULT;
+	buf += sizeof(header);
+
+	if (copy_to_user(buf, &sseu_report->change,
+			 sizeof(struct drm_i915_perf_sseu_change)))
+		return -EFAULT;
+
+	(*offset) += header.size;
+
+	return 0;
+}
+
+static struct perf_sseu_change *
+sseu_buffer_peek(struct drm_i915_private *dev_priv, u32 head, u32 tail)
+{
+	if (head == tail)
+		return NULL;
+
+	return (struct perf_sseu_change *) (dev_priv->perf.sseu_buffer.vaddr +
+					    tail * SSEU_BUFFER_ITEM_SIZE);
+}
+
+static struct perf_sseu_change *
+sseu_buffer_advance(struct drm_i915_private *dev_priv, u32 head, u32 *tail)
+{
+	*tail = (*tail + 1) % SSEU_BUFFER_NB_ITEM;
+
+	return sseu_buffer_peek(dev_priv, head, *tail);
+}
+
+static void
+sseu_buffer_read_pointers(struct i915_perf_stream *stream, u32 *head, u32 *tail)
+{
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
+	if (!stream->has_sseu) {
+		*head = 0;
+		*tail = 0;
+		return;
+	}
+
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	*head = dev_priv->perf.sseu_buffer.head;
+	*tail = dev_priv->perf.sseu_buffer.tail;
+
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+}
+
+static void
+sseu_buffer_write_tail_pointer(struct i915_perf_stream *stream, u32 tail)
+{
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+
+	if (!stream->has_sseu)
+		return;
+
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	dev_priv->perf.sseu_buffer.tail = tail;
+
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+}
+
 /**
  * Copies all buffered OA reports into userspace read() buffer.
  * @stream: An i915-perf stream opened for OA metrics
@@ -604,6 +706,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 	unsigned int aged_tail_idx;
 	u32 head, tail;
 	u32 taken;
+	struct perf_sseu_change *sseu_report;
+	u32 sseu_head, sseu_tail;
 	int ret = 0;
 
 	if (WARN_ON(!stream->enabled))
@@ -641,6 +745,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 		      head, tail))
 		return -EIO;
 
+	/* Extract first SSEU report. */
+	sseu_buffer_read_pointers(stream, &sseu_head, &sseu_tail);
+	sseu_report = sseu_buffer_peek(dev_priv, sseu_head, sseu_tail);
 
 	for (/* none */;
 	     (taken = OA_TAKEN(tail, head));
@@ -679,6 +786,25 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 			continue;
 		}
 
+		while (sseu_report &&
+		       !timestamp_passed(sseu_report->timestamp, report32[1])) {
+			/* While filtering for a single context we avoid
+			 * leaking the IDs of other contexts.
+			 */
+			if (stream->ctx &&
+			    stream->ctx->hw_id != sseu_report->change.hw_id) {
+				sseu_report->change.hw_id = INVALID_CTX_ID;
+			}
+
+			ret = append_sseu_change(stream, buf, count,
+						 offset, sseu_report);
+			if (ret)
+				break;
+
+			sseu_report = sseu_buffer_advance(dev_priv, sseu_head,
+							  &sseu_tail);
+		}
+
 		/* XXX: Just keep the lower 21 bits for now since I'm not
 		 * entirely sure if the HW touches any of the higher bits in
 		 * this field
@@ -770,6 +896,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
 	}
 
+	/* Update tail pointer of the sseu buffer. */
+	sseu_buffer_write_tail_pointer(stream, sseu_tail);
+
 	return ret;
 }
 
@@ -799,12 +928,32 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 			size_t *offset)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
+	bool overflow = false;
 	u32 oastatus;
 	int ret;
 
 	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
 		return -EIO;
 
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+	overflow = dev_priv->perf.sseu_buffer.overflow;
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	if (overflow) {
+		DRM_DEBUG("SSEU buffer overflow\n");
+
+		/* copy_to_user() in append_oa_status() may fault, so
+		 * report the overflow outside the spinlock.
+		 */
+		ret = append_oa_status(stream, buf, count, offset,
+				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
+		if (ret)
+			return ret;
+
+		dev_priv->perf.oa.ops.oa_disable(dev_priv);
+		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+	}
+
 	oastatus = I915_READ(GEN8_OASTATUS);
 
 	/* We treat OABUFFER_OVERFLOW as a significant error:
@@ -821,16 +970,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 	 * that something has gone quite badly wrong.
 	 */
 	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
-		ret = append_oa_status(stream, buf, count, offset,
-				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
-		if (ret)
-			return ret;
-
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		if (!overflow) {
+			ret = append_oa_status(stream, buf, count, offset,
+					       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
+			if (ret)
+				return ret;
+
+			dev_priv->perf.oa.ops.oa_disable(dev_priv);
+			dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		}
 
 		/* Note: .oa_enable() is expected to re-init the oabuffer
 		 * and reset GEN8_OASTATUS for us
@@ -839,10 +990,12 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 	}
 
 	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
-		ret = append_oa_status(stream, buf, count, offset,
-				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
-		if (ret)
-			return ret;
+		if (!overflow) {
+			ret = append_oa_status(stream, buf, count, offset,
+					       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
+			if (ret)
+				return ret;
+		}
 		I915_WRITE(GEN8_OASTATUS,
 			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
 	}
@@ -885,6 +1038,8 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 	unsigned int aged_tail_idx;
 	u32 head, tail;
 	u32 taken;
+	struct perf_sseu_change *sseu_report;
+	u32 sseu_head, sseu_tail;
 	int ret = 0;
 
 	if (WARN_ON(!stream->enabled))
@@ -922,6 +1077,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 		      head, tail))
 		return -EIO;
 
+	/* Extract first SSEU report. */
+	sseu_buffer_read_pointers(stream, &sseu_head, &sseu_tail);
+	sseu_report = sseu_buffer_peek(dev_priv, sseu_head, sseu_tail);
 
 	for (/* none */;
 	     (taken = OA_TAKEN(tail, head));
@@ -954,6 +1112,26 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 			continue;
 		}
 
+		/* Emit any pending SSEU report. */
+		while (sseu_report &&
+		       !timestamp_passed(sseu_report->timestamp, report32[1])) {
+			/* While filtering for a single context we avoid
+			 * leaking the IDs of other contexts.
+			 */
+			if (stream->ctx &&
+			    stream->ctx->hw_id != sseu_report->change.hw_id) {
+				sseu_report->change.hw_id = INVALID_CTX_ID;
+			}
+
+			ret = append_sseu_change(stream, buf, count,
+						 offset, sseu_report);
+			if (ret)
+				break;
+
+			sseu_report = sseu_buffer_advance(dev_priv, sseu_head,
+							  &sseu_tail);
+		}
+
 		ret = append_oa_sample(stream, buf, count, offset, report);
 		if (ret)
 			break;
@@ -983,6 +1161,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
 	}
 
+	/* Update tail pointer of the sseu buffer. */
+	sseu_buffer_write_tail_pointer(stream, sseu_tail);
+
 	return ret;
 }
 
@@ -1008,12 +1189,32 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 			size_t *offset)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
+	bool overflow = false;
 	u32 oastatus1;
 	int ret;
 
 	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
 		return -EIO;
 
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+	overflow = dev_priv->perf.sseu_buffer.overflow;
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	if (overflow) {
+		DRM_DEBUG("SSEU buffer overflow\n");
+
+		/* copy_to_user() in append_oa_status() may fault, so
+		 * report the overflow outside the spinlock.
+		 */
+		ret = append_oa_status(stream, buf, count, offset,
+				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
+		if (ret)
+			return ret;
+
+		dev_priv->perf.oa.ops.oa_disable(dev_priv);
+		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+	}
+
 	oastatus1 = I915_READ(GEN7_OASTATUS1);
 
 	/* XXX: On Haswell we don't have a safe way to clear oastatus1
@@ -1044,25 +1245,30 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 	 *   now.
 	 */
 	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
-		ret = append_oa_status(stream, buf, count, offset,
-				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
-		if (ret)
-			return ret;
-
 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
 			  dev_priv->perf.oa.period_exponent);
 
-		dev_priv->perf.oa.ops.oa_disable(dev_priv);
-		dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		if (!overflow) {
+			ret = append_oa_status(stream, buf, count, offset,
+					       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
+			if (ret)
+				return ret;
+
+			dev_priv->perf.oa.ops.oa_disable(dev_priv);
+			dev_priv->perf.oa.ops.oa_enable(dev_priv);
+		}
 
 		oastatus1 = I915_READ(GEN7_OASTATUS1);
 	}
 
 	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
-		ret = append_oa_status(stream, buf, count, offset,
-				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
-		if (ret)
-			return ret;
+		if (!overflow) {
+			ret = append_oa_status(stream, buf, count, offset,
+					       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
+			if (ret)
+				return ret;
+		}
+
 		dev_priv->perf.oa.gen7_latched_oastatus1 |=
 			GEN7_OASTATUS1_REPORT_LOST;
 	}
@@ -1217,18 +1423,25 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 }
 
 static void
-free_oa_buffer(struct drm_i915_private *i915)
+free_sseu_buffer(struct drm_i915_private *i915)
 {
-	mutex_lock(&i915->drm.struct_mutex);
+	i915_gem_object_unpin_map(i915->perf.sseu_buffer.vma->obj);
+	i915_vma_unpin(i915->perf.sseu_buffer.vma);
+	i915_gem_object_put(i915->perf.sseu_buffer.vma->obj);
 
+	i915->perf.sseu_buffer.vma = NULL;
+	i915->perf.sseu_buffer.vaddr = NULL;
+}
+
+static void
+free_oa_buffer(struct drm_i915_private *i915)
+{
 	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
 	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
 	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
 
 	i915->perf.oa.oa_buffer.vma = NULL;
 	i915->perf.oa.oa_buffer.vaddr = NULL;
-
-	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
@@ -1244,8 +1457,15 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 
 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
 
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	if (stream->has_sseu)
+		free_sseu_buffer(dev_priv);
+
 	free_oa_buffer(dev_priv);
 
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	intel_runtime_pm_put(dev_priv);
 
@@ -1258,6 +1478,97 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 	}
 }
 
+static void init_sseu_buffer(struct drm_i915_private *dev_priv,
+			     bool need_lock)
+{
+	/* Clean up the auxiliary buffer. */
+	memset(dev_priv->perf.sseu_buffer.vaddr, 0, SSEU_BUFFER_SIZE);
+
+	atomic64_inc(&dev_priv->perf.sseu_buffer.enable_no);
+
+	if (need_lock)
+		spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	/* Initialize head & tail pointers. */
+	dev_priv->perf.sseu_buffer.head = 0;
+	dev_priv->perf.sseu_buffer.tail = 0;
+	dev_priv->perf.sseu_buffer.overflow = false;
+
+	if (need_lock)
+		spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+}
+
+static int alloc_sseu_buffer(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *bo;
+	struct i915_vma *vma;
+	int ret = 0;
+
+	bo = i915_gem_object_create(dev_priv, SSEU_BUFFER_SIZE);
+	if (IS_ERR(bo)) {
+		DRM_ERROR("Failed to allocate SSEU buffer\n");
+		ret = PTR_ERR(bo);
+		goto out;
+	}
+
+	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SSEU_BUFFER_SIZE, 0);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+	dev_priv->perf.sseu_buffer.vma = vma;
+
+	dev_priv->perf.sseu_buffer.vaddr =
+		i915_gem_object_pin_map(bo, I915_MAP_WB);
+	if (IS_ERR(dev_priv->perf.sseu_buffer.vaddr)) {
+		ret = PTR_ERR(dev_priv->perf.sseu_buffer.vaddr);
+		goto err_unpin;
+	}
+
+	atomic64_set(&dev_priv->perf.sseu_buffer.enable_no, 0);
+
+	init_sseu_buffer(dev_priv, true);
+
+	DRM_DEBUG_DRIVER("OA SSEU Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
+			 i915_ggtt_offset(dev_priv->perf.sseu_buffer.vma),
+			 dev_priv->perf.sseu_buffer.vaddr);
+
+	goto out;
+
+err_unpin:
+	__i915_vma_unpin(vma);
+
+err_unref:
+	i915_gem_object_put(bo);
+
+	dev_priv->perf.sseu_buffer.vaddr = NULL;
+	dev_priv->perf.sseu_buffer.vma = NULL;
+
+out:
+	return ret;
+}
+
+static void sseu_enable(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	init_sseu_buffer(dev_priv, false);
+
+	dev_priv->perf.sseu_buffer.enabled = true;
+
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+}
+
+static void sseu_disable(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	dev_priv->perf.sseu_buffer.enabled = false;
+	dev_priv->perf.sseu_buffer.overflow = false;
+
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+}
+
 static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
 {
 	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
@@ -1740,6 +2051,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 		struct intel_context *ce = &ctx->engine[RCS];
 		u32 *regs;
 
+		memset(&ctx->perf_sseu, 0, sizeof(ctx->perf_sseu));
+		ctx->perf_enable_no = 0;
+
 		/* OA settings will be set upon first use */
 		if (!ce->state)
 			continue;
@@ -1898,6 +2212,9 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
+	if (stream->has_sseu)
+		sseu_enable(dev_priv);
+
 	dev_priv->perf.oa.ops.oa_enable(dev_priv);
 
 	if (dev_priv->perf.oa.periodic)
@@ -1928,6 +2245,9 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 
+	if (stream->has_sseu)
+		sseu_disable(dev_priv);
+
 	dev_priv->perf.oa.ops.oa_disable(dev_priv);
 
 	if (dev_priv->perf.oa.periodic)
@@ -2053,6 +2373,14 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 			return ret;
 	}
 
+	if (props->sseu_enable) {
+		ret = alloc_sseu_buffer(dev_priv);
+		if (ret)
+			goto err_sseu_buf_alloc;
+
+		stream->has_sseu = true;
+	}
+
 	ret = alloc_oa_buffer(dev_priv);
 	if (ret)
 		goto err_oa_buf_alloc;
@@ -2085,6 +2413,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 err_enable:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	intel_runtime_pm_put(dev_priv);
 	free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
+	if (stream->has_sseu)
+		free_sseu_buffer(dev_priv);
+
+err_sseu_buf_alloc:
@@ -2172,6 +2503,75 @@ int i915_oa_emit_noa_config_locked(struct drm_i915_gem_request *req)
 }
 
 /**
+ * i915_perf_emit_sseu_config - emit the SSEU configuration of a gem request
+ * @req: the request
+ *
+ * If the OA unit is enabled, write the SSEU configuration of a given
+ * request into a circular buffer. The buffer is read at the same time
+ * as the OA buffer and its elements are inserted into the perf stream
+ * to notify userspace of SSEU configuration changes. This is needed to
+ * interpret values from the OA reports.
+ */
+void i915_perf_emit_sseu_config(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_private *dev_priv = req->i915;
+	struct perf_sseu_change *report;
+	const struct sseu_dev_info *sseu;
+	u32 head, timestamp;
+	u64 perf_enable_no;
+
+	/* Perf not supported. */
+	if (!dev_priv->perf.initialized)
+		return;
+
+	/* Nothing's changed, no need to signal userspace. */
+	sseu = &req->ctx->engine[req->engine->id].sseu;
+	perf_enable_no = atomic64_read(&dev_priv->perf.sseu_buffer.enable_no);
+	if (perf_enable_no == req->ctx->perf_enable_no &&
+	    memcmp(&req->ctx->perf_sseu, sseu, sizeof(*sseu)) == 0)
+		return;
+
+	memcpy(&req->ctx->perf_sseu, sseu, sizeof(*sseu));
+	req->ctx->perf_enable_no = perf_enable_no;
+
+	/* Read the timestamp outside the spin lock. */
+	timestamp = I915_READ(RING_TIMESTAMP(RENDER_RING_BASE));
+
+	spin_lock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+
+	if (!dev_priv->perf.sseu_buffer.enabled)
+		goto out;
+
+	head = dev_priv->perf.sseu_buffer.head;
+	dev_priv->perf.sseu_buffer.head =
+		(dev_priv->perf.sseu_buffer.head + 1) % SSEU_BUFFER_NB_ITEM;
+
+	/* This should never happen, unless we've wrongly dimensioned the SSEU
+	 * buffer. */
+	if (dev_priv->perf.sseu_buffer.head == dev_priv->perf.sseu_buffer.tail) {
+		dev_priv->perf.sseu_buffer.overflow = true;
+		goto out;
+	}
+
+	report = (struct perf_sseu_change *) (dev_priv->perf.sseu_buffer.vaddr +
+					      head * SSEU_BUFFER_ITEM_SIZE);
+	memset(report, 0, sizeof(*report));
+
+	report->timestamp = timestamp;
+	report->change.hw_id = req->ctx->hw_id;
+
+	report->change.sseu.packed.slice_mask = sseu->slice_mask;
+	report->change.sseu.packed.subslice_mask = sseu->subslice_mask;
+	report->change.sseu.packed.min_eu_per_subslice =
+		sseu->min_eu_per_subslice;
+	report->change.sseu.packed.max_eu_per_subslice =
+		sseu->max_eu_per_subslice;
+
+out:
+	spin_unlock_irq(&dev_priv->perf.sseu_buffer.ptr_lock);
+}
+
+/**
  * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
  * @stream: An i915 perf stream
  * @file: An i915 perf stream file
@@ -2818,6 +3218,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
 			props->oa_periodic = true;
 			props->oa_period_exponent = value;
 			break;
+		case DRM_I915_PERF_PROP_SSEU_CHANGE:
+			props->sseu_enable = true;
+			break;
 		case DRM_I915_PERF_PROP_MAX:
 			MISSING_CASE(id);
 			return -EINVAL;
@@ -3178,6 +3581,10 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 		mutex_init(&dev_priv->perf.lock);
 		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
 
+		spin_lock_init(&dev_priv->perf.sseu_buffer.ptr_lock);
+		dev_priv->perf.sseu_buffer.enabled = false;
+		atomic64_set(&dev_priv->perf.sseu_buffer.enable_no, 0);
+
 		oa_sample_rate_hard_limit =
 			dev_priv->perf.oa.timestamp_frequency / 2;
 		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ad37e2b236b0..e2f83a12337b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -350,8 +350,10 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 		rq = port_unpack(&port[n], &count);
 		if (rq) {
 			GEM_BUG_ON(count > !n);
-			if (!count++)
+			if (!count++) {
+				i915_perf_emit_sseu_config(rq);
 				execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+			}
 			port_set(&port[n], port_pack(rq, count));
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
@@ -1406,6 +1408,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
+	/* Emit NOA config */
+	i915_oa_emit_noa_config_locked(req);
+
 	cs = intel_ring_begin(req, 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d561b1f98b20..ca06d46a01af 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -439,6 +439,16 @@ typedef struct drm_i915_setparam {
 	int value;
 } drm_i915_setparam_t;
 
+union drm_i915_gem_param_sseu {
+	struct {
+		__u8 slice_mask;
+		__u8 subslice_mask;
+		__u8 min_eu_per_subslice;
+		__u8 max_eu_per_subslice;
+	} packed;
+	__u64 value;
+};
+
 /* A memory manager for regions of shared memory:
  */
 #define I915_MEM_REGION_AGP 1
@@ -1358,6 +1368,11 @@ enum drm_i915_perf_property_id {
 	 */
 	DRM_I915_PERF_PROP_OA_EXPONENT,
 
+	/**
+	 * Configure the stream to monitor SSEU configuration changes.
+	 */
+	DRM_I915_PERF_PROP_SSEU_CHANGE,
+
 	DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
@@ -1441,9 +1456,24 @@ enum drm_i915_perf_record_type {
 	 */
 	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
 
+	/**
+	 * A change in the SSEU configuration happened.
+	 *
+	 * struct {
+	 *     struct drm_i915_perf_record_header header;
+	 *     { struct drm_i915_perf_sseu_change change; } && DRM_I915_PERF_PROP_SSEU_CHANGE
+	 * };
+	 */
+	DRM_I915_PERF_RECORD_SSEU_CHANGE = 4,
+
 	DRM_I915_PERF_RECORD_MAX /* non-ABI */
 };
 
+struct drm_i915_perf_sseu_change {
+	__u32 hw_id;
+	union drm_i915_gem_param_sseu sseu;
+};
+
 #if defined(__cplusplus)
 }
 #endif
-- 
2.11.0