[Intel-gfx] [PATCH 7/7] drm/i915: Engine queues query
Lionel Landwerlin
lionel.g.landwerlin at intel.com
Thu Apr 5 13:05:52 UTC 2018
Looks fine to me; I would just add some comments on the uAPI.
Otherwise:
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
On 05/04/18 13:39, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>
> As well as exposing active requests on engines via PMU, we can also export
> the current raw values (as tracked by i915 command submission) via a
> dedicated query.
>
> This is to satisfy customers who have userspace load balancing solutions
> implemented on top of their custom kernel patches.
>
> Userspace is now able to include DRM_I915_QUERY_ENGINE_QUEUES in its
> query list, pointing to an initialized struct drm_i915_query_engine_queues
> entry. The fields describing the engine class and instance userspace wants
> to query must be filled in, and i915 will fill in the rest.
>
> Multiple engines can be queried in one go by having multiple queries in
> the query list.
>
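As an aside, for anyone wiring this up from userspace: a minimal sketch of what a caller could look like, assuming the existing DRM_IOCTL_I915_QUERY plumbing plus the uAPI addition below. The helper name, include paths and error handling are mine, not part of the patch.

#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <xf86drm.h>
#include <drm/i915_drm.h>	/* needs the additions from this patch */

/* Query the queue depths of one engine; returns 0 or a negative errno. */
static int
read_engine_queues(int fd, __u16 class, __u16 instance,
		   struct drm_i915_query_engine_queues *info)
{
	struct drm_i915_query_item item = { };
	struct drm_i915_query q = { };

	memset(info, 0, sizeof(*info));
	info->class = class;		/* e.g. I915_ENGINE_CLASS_VIDEO */
	info->instance = instance;	/* e.g. 0 for vcs0 */

	item.query_id = DRM_I915_QUERY_ENGINE_QUEUES;
	item.length = sizeof(*info);
	item.data_ptr = (uintptr_t)info;

	q.num_items = 1;
	q.items_ptr = (uintptr_t)&item;

	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q))
		return -errno;

	/* Per-item errors are written back into item.length by the ioctl. */
	return item.length < 0 ? item.length : 0;
}

On success the current counts land in info->queued / info->runnable / info->running.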
> v2:
> * Use EINVAL for reporting insufficient buffer space. (Chris Wilson)
>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> Cc: Dmitry Rogozhkin <dmitry.v.rogozhkin at intel.com>
> ---
> drivers/gpu/drm/i915/i915_query.c | 43 +++++++++++++++++++++++++++++++++++++++
> include/uapi/drm/i915_drm.h       | 26 +++++++++++++++++++++++
> 2 files changed, 69 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
> index 3ace929dd90f..798672f5c104 100644
> --- a/drivers/gpu/drm/i915/i915_query.c
> +++ b/drivers/gpu/drm/i915/i915_query.c
> @@ -82,9 +82,52 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
> 	return total_length;
> }
>
> +static int
> +query_engine_queues(struct drm_i915_private *i915,
> +		    struct drm_i915_query_item *query_item)
> +{
> +	struct drm_i915_query_engine_queues __user *query_ptr =
> +			u64_to_user_ptr(query_item->data_ptr);
> +	struct drm_i915_query_engine_queues query;
> +	struct intel_engine_cs *engine;
> +	const int len = sizeof(query);
> +	unsigned int i;
> +
> +	if (query_item->flags)
> +		return -EINVAL;
> +
> +	if (!query_item->length)
> +		return len;
> +	else if (query_item->length < len)
> +		return -EINVAL;
> +
> +	if (copy_from_user(&query, query_ptr, len))
> +		return -EFAULT;
> +
> +	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
> +		if (query.rsvd[i])
> +			return -EINVAL;
> +	}
> +
> +	engine = intel_engine_lookup_user(i915, query.class, query.instance);
> +	if (!engine)
> +		return -ENOENT;
> +
> +	query.queued = atomic_read(&engine->request_stats.queued);
> +	query.runnable = engine->request_stats.runnable;
> +	query.running = intel_engine_last_submit(engine) -
> +			intel_engine_get_seqno(engine);
> +
> +	if (copy_to_user(query_ptr, &query, len))
> +		return -EFAULT;
> +
> +	return len;
> +}
> +
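Worth a mention in the uAPI comments too: the !query_item->length branch above lets userspace discover the expected struct size without passing a buffer at all. A hypothetical probe helper (name is mine, same headers and caveats as the sketch further up):

static int engine_queues_item_size(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_ENGINE_QUEUES,
		/* .length left at 0: ask only for the required size */
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q))
		return -errno;

	return item.length;	/* size in bytes, or a negative error code */
}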
> static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
> 					struct drm_i915_query_item *query_item) = {
> 	query_topology_info,
> +	query_engine_queues,
> };
>
> int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
> index 9a00c30e4071..064c3d620286 100644
> --- a/include/uapi/drm/i915_drm.h
> +++ b/include/uapi/drm/i915_drm.h
> @@ -1637,6 +1637,7 @@ struct drm_i915_perf_oa_config {
> struct drm_i915_query_item {
> 	__u64 query_id;
> #define DRM_I915_QUERY_TOPOLOGY_INFO 1
> +#define DRM_I915_QUERY_ENGINE_QUEUES 2
>
> 	/*
> 	 * When set to zero by userspace, this is filled with the size of the
> @@ -1734,6 +1735,31 @@ struct drm_i915_query_topology_info {
> 	__u8 data[];
> };
>
> +/**
> + * struct drm_i915_query_engine_queues
> + *
> + * Engine queues query enables userspace to query current counts of active
> + * requests in their different states.
> + */
> +struct drm_i915_query_engine_queues {
> +	/** Engine class as in enum drm_i915_gem_engine_class. */
> +	__u16 class;
> +
> +	/** Engine instance number. */
> +	__u16 instance;
Probably want to add that the previous two fields are set by userspace (see the sketch after the struct).
> +
> +	/** Number of requests with unresolved fences and dependencies. */
> +	__u32 queued;
> +
> +	/** Number of ready requests waiting on a slot on GPU. */
> +	__u32 runnable;
> +
> +	/** Number of requests executing on the GPU. */
> +	__u32 running;
> +
> +	__u32 rsvd[5];
Joonas made me add a comment for fields that are supposed to be cleared; that probably applies to rsvd here too (also covered in the sketch after the struct).
> +};
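To make both comment suggestions concrete, the struct could end up documented roughly like this (wording is only a suggestion, not something the patch has to adopt verbatim):

struct drm_i915_query_engine_queues {
	/**
	 * Engine class as in enum drm_i915_gem_engine_class.
	 * Set by userspace before the query.
	 */
	__u16 class;

	/** Engine instance number. Set by userspace before the query. */
	__u16 instance;

	/** Number of requests with unresolved fences and dependencies. */
	__u32 queued;

	/** Number of ready requests waiting on a slot on GPU. */
	__u32 runnable;

	/** Number of requests executing on the GPU. */
	__u32 running;

	/** Reserved, must be zeroed by userspace. */
	__u32 rsvd[5];
};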
> +
> #if defined(__cplusplus)
> }
> #endif