[Intel-gfx] [PATCH v2 6/6] drm/i915: expose rcs topology through query uAPI
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Fri Jan 12 12:27:58 UTC 2018
On 11/01/2018 19:53, Lionel Landwerlin wrote:
> With the introduction of asymmetric slices in CNL, we cannot rely on
> the previous SUBSLICE_MASK getparam to tell userspace what subslices
> are available. Here we introduce a more detailed way of querying the
> Gen's GPU topology that doesn't aggregate numbers.
>
> This is essential for monitoring parts of the GPU with the OA unit,
> because counters need to be normalized to the number of
> EUs/subslices/slices. The current aggregated numbers like EU_TOTAL do
> not give us sufficient information.
>
> As a bonus we can draw representations of the GPU:
>
> https://imgur.com/a/vuqpa
>
> v2: Rename uapi struct s/_mask/_info/ (Tvrtko)
> Report max_slice/subslice/eus_per_subslice rather than strides (Tvrtko)
> Add uapi macros to read data from *_info structs (Tvrtko)
>
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> ---
> drivers/gpu/drm/i915/i915_query.c | 133 ++++++++++++++++++++++++++++++++
> drivers/gpu/drm/i915/intel_query_info.c | 88 +++++++++++++++++++++
> include/uapi/drm/i915_drm.h | 51 ++++++++++++
> 3 files changed, 272 insertions(+)
> create mode 100644 drivers/gpu/drm/i915/intel_query_info.c
>
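For context, a minimal sketch of how I read the intended userspace flow
(two-pass length negotiation, then decode with the helper macros added
further below). DRM_IOCTL_I915_QUERY is assumed from the earlier patch in
this series, the include path is whatever your setup provides, everything
else is named as in this patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h> /* or wherever the updated uapi header lives */

static int print_slices(int fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_ID_SLICES_INFO,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_slices_info *info;
        unsigned int s;

        /* First pass: item.length == 0 asks the kernel for the required size. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query))
                return -1;

        info = calloc(1, item.length);
        if (!info)
                return -1;
        item.data_ptr = (uintptr_t)info;

        /* Second pass: the kernel writes the struct followed by the slice mask. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) {
                free(info);
                return -1;
        }

        for (s = 0; s < info->max_slices; s++)
                printf("slice%u: %s\n", s,
                       DRM_I915_QUERY_SLICE_AVAILABLE(info, s) ?
                       "available" : "fused off");

        free(info);
        return 0;
}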
> diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
> index 5694cfea4553..1d9f5a15323c 100644
> --- a/drivers/gpu/drm/i915/i915_query.c
> +++ b/drivers/gpu/drm/i915/i915_query.c
> @@ -25,8 +25,128 @@
> #include "i915_drv.h"
> #include <uapi/drm/i915_drm.h>
>
> +static int query_slices_info(struct drm_i915_private *dev_priv,
> + struct drm_i915_query_item *query_item)
> +{
> + const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
> + struct drm_i915_query_slices_info slices_info;
> + u32 data_length, length;
> +
> + if (sseu->max_slices == 0)
> + return -ENODEV;
> +
> + data_length = sizeof(u8);
sizeof(sseu->slice_mask) ?
> + length = sizeof(slices_info) + data_length;
> +
> + /*
> + * If we ever change the internal slice mask data type, we'll need to
> + * update this function.
> + */
> + BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
> +
> + if (query_item->length == 0) {
> + query_item->length = length;
> + return 0;
> + }
> +
> + if (query_item->length != length)
> + return -EINVAL;
> +
> + memset(&slices_info, 0, sizeof(slices_info));
> + slices_info.max_slices = sseu->max_slices;
> +
> + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &slices_info,
> + sizeof(slices_info)))
> + return -EFAULT;
> +
> + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
> + offsetof(struct drm_i915_query_slices_info, data)),
> + &sseu->slice_mask, data_length))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int query_subslices_info(struct drm_i915_private *dev_priv,
> + struct drm_i915_query_item *query_item)
> +{
> + const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
> + struct drm_i915_query_subslices_info subslices_info;
> + u32 data_length, length;
> +
> + if (sseu->max_slices == 0)
> + return -ENODEV;
> +
> + memset(&subslices_info, 0, sizeof(subslices_info));
> + subslices_info.max_slices = sseu->max_slices;
> + subslices_info.max_subslices = sseu->max_subslices;
> +
> + data_length = subslices_info.max_slices *
> + DIV_ROUND_UP(subslices_info.max_subslices, BITS_PER_BYTE);
s/BITS_PER_BYTE/sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE/ ?
> + length = sizeof(subslices_info) + data_length;
> +
> + if (query_item->length == 0) {
> + query_item->length = length;
> + return 0;
> + }
> +
> + if (query_item->length != length)
> + return -EINVAL;
> +
> + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &subslices_info,
> + sizeof(subslices_info)))
> + return -EFAULT;
> +
> + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
> + offsetof(struct drm_i915_query_subslices_info, data)),
> + sseu->subslice_mask, data_length))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int query_eus_info(struct drm_i915_private *dev_priv,
> + struct drm_i915_query_item *query_item)
> +{
> + const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
> + struct drm_i915_query_eus_info eus_info;
> + u32 data_length, length;
> +
> + if (sseu->max_slices == 0)
> + return -ENODEV;
> +
> + memset(&eus_info, 0, sizeof(eus_info));
> + eus_info.max_slices = sseu->max_slices;
> + eus_info.max_subslices = sseu->max_subslices;
> + eus_info.max_eus_per_subslice = sseu->max_eus_per_subslice;
> +
> + data_length = eus_info.max_slices * eus_info.max_subslices *
> + DIV_ROUND_UP(eus_info.max_eus_per_subslice, BITS_PER_BYTE);
> + length = sizeof(eus_info) + data_length;
> +
> + if (query_item->length == 0) {
> + query_item->length = length;
> + return 0;
> + }
> +
> + if (query_item->length != length)
> + return -EINVAL;
> +
> + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &eus_info,
> + sizeof(eus_info)))
> + return -EFAULT;
> +
> + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
> + offsetof(struct drm_i915_query_eus_info, data)),
> + sseu->eu_mask, data_length))
> + return -EFAULT;
> +
> + return 0;
> +}
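Related, a hedged sketch of how userspace might walk the EU masks returned
here, using the DRM_I915_QUERY_EU_AVAILABLE() helper from the header hunk
below. Note the helper relies on ALIGN(), which the uapi header does not
provide, so userspace would need its own definition (everything else is
named as in this patch):

/* Userspace stand-in for the kernel's ALIGN(); not part of the uapi header. */
#ifndef ALIGN
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#endif

static unsigned int count_enabled_eus(const struct drm_i915_query_eus_info *info)
{
        unsigned int s, ss, eu, n_eus = 0;

        for (s = 0; s < info->max_slices; s++)
                for (ss = 0; ss < info->max_subslices; ss++)
                        for (eu = 0; eu < info->max_eus_per_subslice; eu++)
                                if (DRM_I915_QUERY_EU_AVAILABLE(info, s, ss, eu))
                                        n_eus++;

        return n_eus;
}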
> +
> int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> {
> + struct drm_i915_private *dev_priv = to_i915(dev);
> struct drm_i915_query *args = data;
> struct drm_i915_query_item __user *user_item_ptr =
> u64_to_user_ptr(args->items_ptr);
> @@ -34,15 +154,28 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>
> for (i = 0; i < args->num_items; i++, user_item_ptr++) {
> struct drm_i915_query_item item;
> + int ret;
>
> if (copy_from_user(&item, user_item_ptr, sizeof(item)))
> return -EFAULT;
>
> switch (item.query_id) {
> + case DRM_I915_QUERY_ID_SLICES_INFO:
> + ret = query_slices_info(dev_priv, &item);
> + break;
> + case DRM_I915_QUERY_ID_SUBSLICES_INFO:
> + ret = query_subslices_info(dev_priv, &item);
> + break;
> + case DRM_I915_QUERY_ID_EUS_INFO:
> + ret = query_eus_info(dev_priv, &item);
> + break;
> default:
> return -EINVAL;
> }
>
> + if (ret)
> + return ret;
> +
> if (copy_to_user(user_item_ptr, &item, sizeof(item)))
> return -EFAULT;
> }
> diff --git a/drivers/gpu/drm/i915/intel_query_info.c b/drivers/gpu/drm/i915/intel_query_info.c
> new file mode 100644
> index 000000000000..79b03be9f51a
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_query_info.c
Ooops! Looks like a leftover file from the previous version sneaked into
this patch.
> @@ -0,0 +1,88 @@
> +/*
> + * Copyright © 2017 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + */
> +
> +#include "i915_drv.h"
> +#include <uapi/drm/i915_drm.h>
> +
> +static int query_info_rcs_topology(struct drm_i915_private *dev_priv,
> + struct drm_i915_query_info *args)
> +{
> + const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
> + struct drm_i915_rcs_topology_info __user *user_topology =
> + u64_to_user_ptr(args->info_ptr);
> + struct drm_i915_rcs_topology_info topology;
> + u32 data_size, total_size;
> + const u8 *data = NULL;
> + int ret;
> +
> + /* Not supported on gen < 8. */
> + if (sseu->max_slices == 0)
> + return -ENODEV;
> +
> + switch (args->query_params[0]) {
> + case I915_RCS_TOPOLOGY_SLICE:
> + topology.params[0] = sseu->max_slices;
> + data_size = sizeof(sseu->slice_mask);
> + data = &sseu->slice_mask;
> + break;
> +
> + case I915_RCS_TOPOLOGY_SUBSLICE:
> + topology.params[0] = sseu->max_slices;
> + topology.params[1] = ALIGN(sseu->max_subslices, 8) / 8;
> + data_size = sseu->max_slices * topology.params[1];
> + data = sseu->subslices_mask;
> + break;
> +
> + case I915_RCS_TOPOLOGY_EU:
> + topology.params[2] = ALIGN(sseu->max_eus_per_subslice, 8) / 8;
> + topology.params[1] = sseu->max_subslices * topology.params[2];
> + topology.params[0] = sseu->max_slices;
> + data_size = sseu->max_slices * topology.params[1];
> + data = sseu->eu_mask;
> + break;
> +
> + default:
> + return -EINVAL;
> + }
> +
> + total_size = sizeof(topology) + data_size;
> +
> + if (args->info_ptr_len == 0) {
> + args->info_ptr_len = total_size;
> + return 0;
> + }
> +
> + if (args->info_ptr_len < total_size)
> + return -EINVAL;
> +
> + ret = copy_to_user(user_topology, &topology, sizeof(topology));
> + if (ret)
> + return -EFAULT;
> +
> + ret = copy_to_user(user_topology + 1, data, data_size);
> + if (ret)
> + return -EFAULT;
> +
> + return 0;
> +}
> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
> index 39e93f10f2cd..5a8e2f7d5dc3 100644
> --- a/include/uapi/drm/i915_drm.h
> +++ b/include/uapi/drm/i915_drm.h
> @@ -1618,6 +1618,9 @@ struct drm_i915_perf_oa_config {
>
> struct drm_i915_query_item {
> __u64 query_id;
> +#define DRM_I915_QUERY_ID_SLICES_INFO 0x01
> +#define DRM_I915_QUERY_ID_SUBSLICES_INFO 0x02
> +#define DRM_I915_QUERY_ID_EUS_INFO 0x03
>
> /*
> * When set to zero by userspace, this is filled with the size of the
> @@ -1644,6 +1647,54 @@ struct drm_i915_query {
> __u64 items_ptr;
> };
>
> +/* Data written by the kernel with query DRM_I915_QUERY_ID_SLICES_INFO :
> + *
> + * data: each bit indicates whether a slice is available (1) or fused off (0).
> + * Use DRM_I915_QUERY_SLICE_AVAILABLE() to query a given slice's
> + * availability.
> + */
> +struct drm_i915_query_slices_info {
> + __u32 max_slices;
> +
> +#define DRM_I915_QUERY_SLICE_AVAILABLE(info, slice) \
> + (((info)->data[(slice) / 8] >> ((slice) % 8)) & 1)
Alternatively, (data[slice / 8] & BIT(slice % 8)), but it is probably a
personal preference as to what is more readable.
> + __u8 data[];
> +};
> +
> +/* Data written by the kernel with query DRM_I915_QUERY_ID_SUBSLICES_INFO :
> + *
> + * data: each bit indicates whether a subslice is available (1) or fused off
> + * (0). Use DRM_I915_QUERY_SUBSLICE_AVAILABLE() to query a given
> + * subslice's availability.
> + */
> +struct drm_i915_query_subslices_info {
> + __u32 max_slices;
> + __u32 max_subslices;
> +
> +#define DRM_I915_QUERY_SUBSLICE_AVAILABLE(info, slice, subslice) \
> + (((info)->data[(slice) * ALIGN((info)->max_subslices, 8) / 8 + \
> + (subslice) / 8] >> ((subslice) % 8)) & 1)
> + __u8 data[];
> +};
> +
> +/* Data written by the kernel with query DRM_I915_QUERY_ID_EUS_INFO :
> + *
> + * data: Each bit indicates whether an EU is available (1) or fused off
> + * (0). Use DRM_I915_QUERY_EU_AVAILABLE() to query a given EU's
> + * availability.
> + */
> +struct drm_i915_query_eus_info {
> + __u32 max_slices;
> + __u32 max_subslices;
> + __u32 max_eus_per_subslice;
> +
> +#define DRM_I915_QUERY_EU_AVAILABLE(info, slice, subslice, eu) \
> + (((info)->data[(slice) * ALIGN((info)->max_eus_per_subslice, 8) / 8 * (info)->max_subslices + \
> + (subslice) * ALIGN((info)->max_eus_per_subslice, 8) / 8 + \
> + (eu) / 8] >> ((eu) % 8)) & 1)
> + __u8 data[];
> +};
Too many hardcoded eights in all of the above, but I am not sure what to
suggest. Depending on whether the zero-length array stays or goes, it will
end up looking slightly different anyway.
Do you want to prefix the macros with !! so they return 0/1?
Is the ALIGN(x, 8) / 8 pattern equal to DIV_ROUND_UP(x, 8)? I think I
commented about that already somewhere.
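For what it is worth, they should be equivalent for the unsigned sizes used
here, since dividing by 8 already discards the low three bits that
ALIGN(x, 8) clears. A quick standalone check (plain C, not part of the
patch, macro bodies copied from the kernel definitions):

#include <assert.h>

#define ALIGN8(x)        (((x) + 7u) & ~7u)  /* ALIGN(x, 8) */
#define DIV_ROUND_UP8(x) (((x) + 7u) / 8u)   /* DIV_ROUND_UP(x, 8) */

int main(void)
{
        unsigned int x;

        for (x = 0; x < 1024; x++)
                assert(ALIGN8(x) / 8 == DIV_ROUND_UP8(x));

        return 0;
}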
> +
> #if defined(__cplusplus)
> }
> #endif
>
Regards,
Tvrtko