[Intel-gfx] [PATCH v6 6/6] drm/i915: expose rcs topology through query uAPI
Lionel Landwerlin
lionel.g.landwerlin at intel.com
Tue Jan 16 16:02:51 UTC 2018
With the introduction of asymmetric slices in CNL, we cannot rely on
the previous SUBSLICE_MASK getparam to tell userspace what subslices
are available. Here we introduce a more detailed way of querying the
Gen's GPU topology that doesn't aggregate numbers.
This is essential for monitoring parts of the GPU with the OA unit,
because counters need to be normalized to the number of
EUs/subslices/slices. The current aggregated numbers like EU_TOTAL do
not give us sufficient information.
As a bonus we can draw representations of the GPU :
https://imgur.com/a/vuqpa
v2: Rename uapi struct s/_mask/_info/ (Tvrtko)
Report max_slice/subslice/eus_per_subslice rather than strides (Tvrtko)
Add uapi macros to read data from *_info structs (Tvrtko)
v3: Use !!(v & DRM_I915_BIT()) for uapi macros instead of custom shifts (Tvrtko)
v4: factorize query item writing (Tvrtko)
tweak uapi struct/define names (Tvrtko)
v5: Replace ALIGN() macro (Chris)
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
---
drivers/gpu/drm/i915/i915_query.c | 108 +++++++++++++++++++++++++++++++++++++-
include/uapi/drm/i915_drm.h | 54 +++++++++++++++++++
2 files changed, 161 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 6468ca613d27..81367c8224ee 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -25,8 +25,100 @@
#include "i915_drv.h"
#include <uapi/drm/i915_drm.h>
+static int copy_query_data(struct drm_i915_query_item *query_item,
+ const void *item_ptr, u32 item_length,
+ const void *data_ptr, u32 data_length)
+{
+ u32 total_length = item_length + data_length;
+
+ if (query_item->length == 0)
+ return total_length;
+
+ if (query_item->length != total_length)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ item_ptr, item_length))
+ return -EFAULT;
+
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + item_length),
+ data_ptr, data_length))
+ return -EFAULT;
+
+ return total_length;
+}
+
+static int query_slice_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ struct drm_i915_query_slice_info slice_info;
+
+ if (sseu->max_slices == 0)
+ return -ENODEV;
+
+ /*
+ * If we ever change the internal slice mask data type, we'll need to
+ * update this function.
+ */
+ BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
+
+ memset(&slice_info, 0, sizeof(slice_info));
+ slice_info.max_slices = sseu->max_slices;
+
+ return copy_query_data(query_item, &slice_info, sizeof(slice_info),
+ &sseu->slice_mask, sizeof(sseu->slice_mask));
+}
+
+static int query_subslice_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ struct drm_i915_query_subslice_info subslice_info;
+ u32 data_length;
+
+ if (sseu->max_slices == 0)
+ return -ENODEV;
+
+ memset(&subslice_info, 0, sizeof(subslice_info));
+ subslice_info.max_slices = sseu->max_slices;
+ subslice_info.max_subslices = sseu->max_subslices;
+
+ data_length = subslice_info.max_slices *
+ DIV_ROUND_UP(subslice_info.max_subslices,
+ sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+
+ return copy_query_data(query_item,
+ &subslice_info, sizeof(subslice_info),
+ sseu->subslice_mask, data_length);
+}
+
+static int query_eu_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ struct drm_i915_query_eu_info eu_info;
+ u32 data_length;
+
+ if (sseu->max_slices == 0)
+ return -ENODEV;
+
+ memset(&eu_info, 0, sizeof(eu_info));
+ eu_info.max_slices = sseu->max_slices;
+ eu_info.max_subslices = sseu->max_subslices;
+ eu_info.max_eus_per_subslice = sseu->max_eus_per_subslice;
+
+ data_length = eu_info.max_slices * eu_info.max_subslices *
+ DIV_ROUND_UP(eu_info.max_eus_per_subslice, BITS_PER_BYTE);
+
+ return copy_query_data(query_item,
+ &eu_info, sizeof(eu_info),
+ sseu->eu_mask, data_length);
+}
+
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_query *args = data;
struct drm_i915_query_item __user *user_item_ptr =
u64_to_user_ptr(args->items_ptr);
@@ -37,6 +129,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < args->num_items; i++, user_item_ptr++) {
struct drm_i915_query_item item;
+ int ret;
if (copy_from_user(&item, user_item_ptr, sizeof(item)))
return -EFAULT;
@@ -45,11 +138,24 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return -EINVAL;
switch (item.query_id) {
+ case DRM_I915_QUERY_SLICE_INFO:
+ ret = query_slice_info(dev_priv, &item);
+ break;
+ case DRM_I915_QUERY_SUBSLICE_INFO:
+ ret = query_subslice_info(dev_priv, &item);
+ break;
+ case DRM_I915_QUERY_EU_INFO:
+ ret = query_eu_info(dev_priv, &item);
+ break;
default:
return -EINVAL;
}
- if (put_user(item.length, &user_item_ptr->length))
+ if (ret < 0)
+ return ret;
+
+ /* Only write the length back to userspace if they differ. */
+ if (ret != item.length && put_user(ret, &user_item_ptr->length))
return -EFAULT;
}
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 07a2a35a4277..e3008824ad26 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1618,6 +1618,9 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
+#define DRM_I915_QUERY_SLICE_INFO 0x01
+#define DRM_I915_QUERY_SUBSLICE_INFO 0x02
+#define DRM_I915_QUERY_EU_INFO 0x03
/*
* When set to zero by userspace, this is filled with the size of the
@@ -1645,6 +1648,57 @@ struct drm_i915_query {
__u64 items_ptr;
};
+#define DRM_I915_BIT(bit) ((__u32)1 << (bit))
+#define DRM_I915_DIV_ROUND_UP(val, div) (((val) + (div) - 1) / (div))
+
+/* Data written by the kernel with query DRM_I915_QUERY_SLICE_INFO :
+ *
+ * data: each bit indicates whether a slice is available (1) or fused off (0).
+ * Use DRM_I915_QUERY_SLICE_AVAILABLE() to query a given slice's
+ * availability.
+ */
+struct drm_i915_query_slice_info {
+ __u32 max_slices;
+
+#define DRM_I915_QUERY_SLICE_AVAILABLE(info, slice) \
+ !!((info)->data[(slice) / 8] & DRM_I915_BIT((slice) % 8))
+ __u8 data[];
+};
+
+/* Data written by the kernel with query DRM_I915_QUERY_SUBSLICE_INFO :
+ *
+ * data: each bit indicates whether a subslice is available (1) or fused off
+ * (0). Use DRM_I915_QUERY_SUBSLICE_AVAILABLE() to query a given
+ * subslice's availability.
+ */
+struct drm_i915_query_subslice_info {
+ __u32 max_slices;
+ __u32 max_subslices;
+
+#define DRM_I915_QUERY_SUBSLICE_AVAILABLE(info, slice, subslice) \
+ !!((info)->data[(slice) * DRM_I915_DIV_ROUND_UP((info)->max_subslices, 8) + \
+ (subslice) / 8] & DRM_I915_BIT((subslice) % 8))
+ __u8 data[];
+};
+
+/* Data written by the kernel with query DRM_I915_QUERY_EU_INFO :
+ *
+ * data: Each bit indicates whether an EU is available (1) or fused off
+ * (0). Use DRM_I915_QUERY_EU_AVAILABLE() to query a given EU's
+ * availability.
+ */
+struct drm_i915_query_eu_info {
+ __u32 max_slices;
+ __u32 max_subslices;
+ __u32 max_eus_per_subslice;
+
+#define DRM_I915_QUERY_EU_AVAILABLE(info, slice, subslice, eu) \
+ !!((info)->data[(slice) * DRM_I915_DIV_ROUND_UP((info)->max_eus_per_subslice, 8) * (info)->max_subslices + \
+ (subslice) * DRM_I915_DIV_ROUND_UP((info)->max_eus_per_subslice, 8) + \
+ (eu) / 8] & DRM_I915_BIT((eu) % 8))
+ __u8 data[];
+};
+
#if defined(__cplusplus)
}
#endif
--
2.15.1
More information about the Intel-gfx
mailing list