[Intel-gfx] [PATCH v3 6/6] drm/i915: expose rcs topology through query uAPI

Lionel Landwerlin <lionel.g.landwerlin at intel.com>
Fri Jan 12 16:00:36 UTC 2018


With the introduction of asymmetric slices in CNL, we cannot rely on
the previous SUBSLICE_MASK getparam to tell userspace which subslices
are available. Here we introduce a more detailed way of querying the
GPU topology that doesn't aggregate the numbers.

This is essential for monitoring parts of the GPU with the OA unit,
because counters need to be normalized to the number of
EUs/subslices/slices. The current aggregated numbers like EU_TOTAL do
not give us sufficient information.
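
To make the normalization concrete, here is a minimal userspace
sketch (error handling elided) that counts the available EUs with the
new query: issue the query once with length = 0 to learn the required
size, allocate, then issue it again to have the data written. It
assumes the DRM_IOCTL_I915_QUERY ioctl introduced earlier in this
series, libdrm's drmIoctl(), and userspace fallbacks for the kernel
BIT()/ALIGN() macros that the availability macros expand to:

    #include <stdint.h>
    #include <stdlib.h>
    #include <xf86drm.h>   /* drmIoctl() from libdrm */
    #include <i915_drm.h>

    /* The *_AVAILABLE() macros expand to the kernel's BIT()/ALIGN(),
     * which plain userspace may need to provide itself. */
    #ifndef BIT
    #define BIT(n) (1u << (n))
    #endif
    #ifndef ALIGN
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #endif

    static uint32_t count_available_eus(int fd)
    {
        struct drm_i915_query_item item = {
            .query_id = DRM_I915_QUERY_ID_EUS_INFO,
        };
        struct drm_i915_query query = {
            .num_items = 1,
            .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_eus_info *info;
        uint32_t s, ss, eu, n_eus = 0;

        /* First call: length == 0, the kernel fills in the size of the
         * struct plus the EU mask data following it. */
        drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);

        info = calloc(1, item.length);
        item.data_ptr = (uintptr_t)info;

        /* Second call: the kernel writes max_slices/max_subslices/
         * max_eus_per_subslice followed by the availability bits. */
        drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);

        for (s = 0; s < info->max_slices; s++)
            for (ss = 0; ss < info->max_subslices; ss++)
                for (eu = 0; eu < info->max_eus_per_subslice; eu++)
                    n_eus += DRM_I915_QUERY_EU_AVAILABLE(info, s, ss, eu);

        free(info);
        return n_eus;
    }

The same two-step pattern works for DRM_I915_QUERY_ID_SLICES_INFO and
DRM_I915_QUERY_ID_SUBSLICES_INFO together with
DRM_I915_QUERY_SLICE_AVAILABLE() / DRM_I915_QUERY_SUBSLICE_AVAILABLE().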

As a bonus, we can draw representations of the GPU:

        https://imgur.com/a/vuqpa
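
For anyone wanting to build such a representation by hand, the mask
bytes written after each *_info struct are packed as described by the
macros below. A worked example, assuming a hypothetical part with
max_slices = 2, max_subslices = 4 and max_eus_per_subslice = 8:

    DRM_I915_QUERY_ID_SUBSLICES_INFO:
      stride = ALIGN(max_subslices, 8) / 8 = 1 byte per slice, 2 bytes total
      data[0] bits 0-3 -> subslices of slice 0
      data[1] bits 0-3 -> subslices of slice 1

    DRM_I915_QUERY_ID_EUS_INFO:
      stride = ALIGN(max_eus_per_subslice, 8) / 8 = 1 byte per subslice,
      2 * 4 * 1 = 8 bytes total
      data[slice * 4 + subslice], bit (eu % 8) -> EU "eu" of that subslice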

v2: Rename uapi struct s/_mask/_info/ (Tvrtko)
    Report max_slice/subslice/eus_per_subslice rather than strides (Tvrtko)
    Add uapi macros to read data from *_info structs (Tvrtko)

v3: Use !!(v & BIT()) for uapi macros instead of custom shifts (Tvrtko)

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
---
 drivers/gpu/drm/i915/i915_query.c | 134 ++++++++++++++++++++++++++++++++++++++
 include/uapi/drm/i915_drm.h       |  51 +++++++++++++++
 2 files changed, 185 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 5694cfea4553..465ec18a472f 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -25,8 +25,129 @@
 #include "i915_drv.h"
 #include <uapi/drm/i915_drm.h>
 
+static int query_slices_info(struct drm_i915_private *dev_priv,
+			     struct drm_i915_query_item *query_item)
+{
+	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	struct drm_i915_query_slices_info slices_info;
+	u32 data_length, length;
+
+	if (sseu->max_slices == 0)
+		return -ENODEV;
+
+	data_length = sizeof(sseu->slice_mask);
+	length = sizeof(slices_info) + data_length;
+
+	/*
+	 * If we ever change the internal slice mask data type, we'll need to
+	 * update this function.
+	 */
+	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
+
+	if (query_item->length == 0) {
+		query_item->length = length;
+		return 0;
+	}
+
+	if (query_item->length != length)
+		return -EINVAL;
+
+	memset(&slices_info, 0, sizeof(slices_info));
+	slices_info.max_slices = sseu->max_slices;
+
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &slices_info,
+			 sizeof(slices_info)))
+		return -EFAULT;
+
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+					 offsetof(struct drm_i915_query_slices_info, data)),
+			 &sseu->slice_mask, data_length))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int query_subslices_info(struct drm_i915_private *dev_priv,
+				struct drm_i915_query_item *query_item)
+{
+	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	struct drm_i915_query_subslices_info subslices_info;
+	u32 data_length, length;
+
+	if (sseu->max_slices == 0)
+		return -ENODEV;
+
+	memset(&subslices_info, 0, sizeof(subslices_info));
+	subslices_info.max_slices = sseu->max_slices;
+	subslices_info.max_subslices = sseu->max_subslices;
+
+	data_length = subslices_info.max_slices *
+		DIV_ROUND_UP(subslices_info.max_subslices,
+			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+	length = sizeof(subslices_info) + data_length;
+
+	if (query_item->length == 0) {
+		query_item->length = length;
+		return 0;
+	}
+
+	if (query_item->length != length)
+		return -EINVAL;
+
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &subslices_info,
+			 sizeof(subslices_info)))
+		return -EFAULT;
+
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+					 offsetof(struct drm_i915_query_subslices_info, data)),
+			 sseu->subslice_mask, data_length))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int query_eus_info(struct drm_i915_private *dev_priv,
+			  struct drm_i915_query_item *query_item)
+{
+	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+	struct drm_i915_query_eus_info eus_info;
+	u32 data_length, length;
+
+	if (sseu->max_slices == 0)
+		return -ENODEV;
+
+	memset(&eus_info, 0, sizeof(eus_info));
+	eus_info.max_slices = sseu->max_slices;
+	eus_info.max_subslices = sseu->max_subslices;
+	eus_info.max_eus_per_subslice = sseu->max_eus_per_subslice;
+
+	data_length = eus_info.max_slices * eus_info.max_subslices *
+		DIV_ROUND_UP(eus_info.max_eus_per_subslice, BITS_PER_BYTE);
+	length = sizeof(eus_info) + data_length;
+
+	if (query_item->length == 0) {
+		query_item->length = length;
+		return 0;
+	}
+
+	if (query_item->length != length)
+		return -EINVAL;
+
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &eus_info,
+			 sizeof(eus_info)))
+		return -EFAULT;
+
+	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+					 offsetof(struct drm_i915_query_eus_info, data)),
+			 sseu->eu_mask, data_length))
+		return -EFAULT;
+
+	return 0;
+}
+
 int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_query *args = data;
 	struct drm_i915_query_item __user *user_item_ptr =
 		u64_to_user_ptr(args->items_ptr);
@@ -34,15 +155,28 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
 		struct drm_i915_query_item item;
+		int ret;
 
 		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
 			return -EFAULT;
 
 		switch (item.query_id) {
+		case DRM_I915_QUERY_ID_SLICES_INFO:
+			ret = query_slices_info(dev_priv, &item);
+			break;
+		case DRM_I915_QUERY_ID_SUBSLICES_INFO:
+			ret = query_subslices_info(dev_priv, &item);
+			break;
+		case DRM_I915_QUERY_ID_EUS_INFO:
+			ret = query_eus_info(dev_priv, &item);
+			break;
 		default:
 			return -EINVAL;
 		}
 
+		if (ret)
+			return ret;
+
 		if (copy_to_user(user_item_ptr, &item, sizeof(item)))
 			return -EFAULT;
 	}
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 39e93f10f2cd..b0e664349cbf 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1618,6 +1618,9 @@ struct drm_i915_perf_oa_config {
 
 struct drm_i915_query_item {
 	__u64 query_id;
+#define DRM_I915_QUERY_ID_SLICES_INFO    0x01
+#define DRM_I915_QUERY_ID_SUBSLICES_INFO 0x02
+#define DRM_I915_QUERY_ID_EUS_INFO       0x03
 
 	/*
 	 * When set to zero by userspace, this is filled with the size of the
@@ -1644,6 +1647,54 @@ struct drm_i915_query {
 	__u64 items_ptr;
 };
 
+/* Data written by the kernel with query DRM_I915_QUERY_ID_SLICES_INFO:
+ *
+ * data: each bit indicates whether a slice is available (1) or fused off (0).
+ *       Use DRM_I915_QUERY_SLICE_AVAILABLE() to query a given slice's
+ *       availability.
+ */
+struct drm_i915_query_slices_info {
+	__u32 max_slices;
+
+#define DRM_I915_QUERY_SLICE_AVAILABLE(info, slice) \
+	!!((info)->data[(slice) / 8] & BIT((slice) % 8))
+	__u8 data[];
+};
+
+/* Data written by the kernel with query DRM_I915_QUERY_ID_SUBSLICES_INFO:
+ *
+ * data: each bit indicates whether a subslice is available (1) or fused off
+ *       (0). Use DRM_I915_QUERY_SUBSLICE_AVAILABLE() to query a given
+ *       subslice's availability.
+ */
+struct drm_i915_query_subslices_info {
+	__u32 max_slices;
+	__u32 max_subslices;
+
+#define DRM_I915_QUERY_SUBSLICE_AVAILABLE(info, slice, subslice) \
+	!!((info)->data[(slice) * ALIGN((info)->max_subslices, 8) / 8 + \
+			(subslice) / 8] & BIT((subslice) % 8))
+	__u8 data[];
+};
+
+/* Data written by the kernel with query DRM_I915_QUERY_ID_EUS_INFO:
+ *
+ * data: each bit indicates whether an EU is available (1) or fused off
+ *       (0). Use DRM_I915_QUERY_EU_AVAILABLE() to query a given EU's
+ *       availability.
+ */
+struct drm_i915_query_eus_info {
+	__u32 max_slices;
+	__u32 max_subslices;
+	__u32 max_eus_per_subslice;
+
+#define DRM_I915_QUERY_EU_AVAILABLE(info, slice, subslice, eu) \
+	!!((info)->data[(slice) * ALIGN((info)->max_eus_per_subslice, 8) / 8 * (info)->max_subslices + \
+			(subslice) * ALIGN((info)->max_eus_per_subslice, 8) / 8 + \
+			(eu) / 8] & BIT((eu) % 8))
+	__u8 data[];
+};
+
 #if defined(__cplusplus)
 }
 #endif
-- 
2.15.1


