[PATCH] drm/xe/oa/uapi: Expose an unblock after N reports OA property
Cavitt, Jonathan
jonathan.cavitt at intel.com
Thu Dec 12 22:59:35 UTC 2024
-----Original Message-----
From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of Ashutosh Dixit
Sent: Thursday, December 12, 2024 2:49 PM
To: intel-xe at lists.freedesktop.org
Cc: Nerlige Ramappa, Umesh <umesh.nerlige.ramappa at intel.com>
Subject: [PATCH] drm/xe/oa/uapi: Expose an unblock after N reports OA property
>
> Expose an "unblock after N reports" OA property, to allow userspace threads
> to be woken up less frequently.
>
> Co-developed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>
> Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
> ---
> drivers/gpu/drm/xe/xe_oa.c | 30 ++++++++++++++++++++++++++----
> drivers/gpu/drm/xe/xe_oa_types.h | 3 +++
> drivers/gpu/drm/xe/xe_query.c | 2 +-
> include/uapi/drm/xe_drm.h | 7 +++++++
> 4 files changed, 37 insertions(+), 5 deletions(-)
>
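Not a comment on the patch itself, but for anyone wiring this up from userspace: I'd expect the new property to be chained on stream open roughly as in the sketch below. This is untested and omits the metric set, exec queue and sync properties a real stream would also pass; the struct and ioctl names are taken from the existing xe_drm.h observation uapi.
"""
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h>

/*
 * Minimal sketch: open an OA stream that only wakes poll()/read() once at
 * least 32 reports have accumulated in the OA buffer. A real stream would
 * also chain DRM_XE_OA_PROPERTY_OA_METRIC_SET and friends.
 */
static int oa_stream_open_batched(int drm_fd, uint64_t oa_unit_id)
{
	struct drm_xe_ext_set_property wait_reports = {
		.base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
		.property = DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
		.value = 32,
	};
	struct drm_xe_ext_set_property unit = {
		.base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
		.base.next_extension = (uintptr_t)&wait_reports,
		.property = DRM_XE_OA_PROPERTY_OA_UNIT_ID,
		.value = oa_unit_id,
	};
	struct drm_xe_observation_param param = {
		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
		.param = (uintptr_t)&unit,
	};

	/* Returns the OA stream fd on success, -1 with errno set on failure. */
	return ioctl(drm_fd, DRM_IOCTL_XE_OBSERVATION, &param);
}
"""
If the property is omitted, the patch defaults wait_num_reports to 1, which preserves the current wake-on-first-report behaviour.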
> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
> index ec88b18e9baa2..56bf375a9d4bc 100644
> --- a/drivers/gpu/drm/xe/xe_oa.c
> +++ b/drivers/gpu/drm/xe/xe_oa.c
> @@ -97,6 +97,7 @@ struct xe_oa_open_param {
> int num_syncs;
> struct xe_sync_entry *syncs;
> size_t oa_buffer_size;
> + int wait_num_reports;
> };
>
> struct xe_oa_config_bo {
> @@ -241,11 +242,10 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
> static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
> {
> u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
> + u32 tail, hw_tail, partial_report_size, available;
> int report_size = stream->oa_buffer.format->size;
> - u32 tail, hw_tail;
> unsigned long flags;
> bool pollin;
> - u32 partial_report_size;
>
> spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
>
> @@ -289,8 +289,8 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
>
> stream->oa_buffer.tail = tail;
>
> - pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
> - stream->oa_buffer.head) >= report_size;
> + available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
> + pollin = available >= stream->wait_num_reports * report_size;
>
> spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
>
> @@ -1285,6 +1285,17 @@ static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value,
> return 0;
> }
>
> +static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value,
> + struct xe_oa_open_param *param)
> +{
> + if (!value) {
> + drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value);
> + return -EINVAL;
> + }
> + param->wait_num_reports = value;
> + return 0;
> +}
> +
> static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
> struct xe_oa_open_param *param)
> {
> @@ -1306,6 +1317,7 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
> [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
> [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
> [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size,
> + [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports,
> };
>
> static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
> @@ -1321,6 +1333,7 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
> [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
> [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
> [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval,
> + [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval,
> };
>
> static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
> @@ -1797,6 +1810,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
> stream->periodic = param->period_exponent > 0;
> stream->period_exponent = param->period_exponent;
> stream->no_preempt = param->no_preempt;
> + stream->wait_num_reports = param->wait_num_reports;
>
> stream->xef = xe_file_get(param->xef);
> stream->num_syncs = param->num_syncs;
> @@ -2156,6 +2170,14 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
> if (!param.oa_buffer_size)
> param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE;
>
> + if (!param.wait_num_reports)
> + param.wait_num_reports = 1;
> + if (param.wait_num_reports > param.oa_buffer_size / f->size) {
> + drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports);
> + ret = -EINVAL;
> + goto err_exec_q;
> + }
> +
> ret = xe_oa_parse_syncs(oa, &param);
> if (ret)
> goto err_exec_q;
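For reference, the bound here works out to the number of reports that fit in the OA buffer: e.g. with a 16 MiB buffer and 256-byte reports, wait_num_reports can be at most 65536.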
> diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
> index df77939156288..2dcd3b9562e97 100644
> --- a/drivers/gpu/drm/xe/xe_oa_types.h
> +++ b/drivers/gpu/drm/xe/xe_oa_types.h
> @@ -218,6 +218,9 @@ struct xe_oa_stream {
> /** @pollin: Whether there is data available to read */
> bool pollin;
>
> + /** @wait_num_reports: Number of reports to wait for before signalling pollin */
> + int wait_num_reports;
> +
> /** @periodic: Whether periodic sampling is currently enabled */
> bool periodic;
>
> diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
> index d2a816f71bf26..226b234d07110 100644
> --- a/drivers/gpu/drm/xe/xe_query.c
> +++ b/drivers/gpu/drm/xe/xe_query.c
> @@ -672,7 +672,7 @@ static int query_oa_units(struct xe_device *xe,
> du->oa_unit_type = u->type;
> du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
> du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
> - DRM_XE_OA_CAPS_OA_BUFFER_SIZE;
> + DRM_XE_OA_CAPS_OA_BUFFER_SIZE | DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
This line is probably over the line-length limit now; it should be wrapped like so:
"""
du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
"""
But otherwise:
Reviewed-by: Jonathan Cavitt <jonathan.cavitt at intel.com>
-Jonathan Cavitt
>
> j = 0;
> for_each_hw_engine(hwe, gt, hwe_id) {
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 0383b52cbd869..f62689ca861a4 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -1487,6 +1487,7 @@ struct drm_xe_oa_unit {
> #define DRM_XE_OA_CAPS_BASE (1 << 0)
> #define DRM_XE_OA_CAPS_SYNCS (1 << 1)
> #define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
> +#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
>
> /** @oa_timestamp_freq: OA timestamp freq */
> __u64 oa_timestamp_freq;
> @@ -1660,6 +1661,12 @@ enum drm_xe_oa_property_id {
> * buffer is allocated by default.
> */
> DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE,
> +
> + /**
> + * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait
> + * for before unblocking poll or read
> + */
> + DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
> };
>
> /**
> --
> 2.47.1
>
>