[PATCH 2/3] drm/xe: Exec queue ops to enable/disable preemption and timeslicing
Umesh Nerlige Ramappa
umesh.nerlige.ramappa at intel.com
Tue Jun 25 21:57:15 UTC 2024
On Tue, Jun 25, 2024 at 01:15:17PM -0700, Ashutosh Dixit wrote:
>Introduce set_no_preempt and clear_no_preempt exec queue ops.
>set_no_preempt allows an exec queue to run uninterrupted (without
>preemption or timeslicing). clear_no_preempt re-enables preemption and
>timeslicing. This functionality is used by OA in the next patch.
>
>Cc: Matthew Brost <matthew.brost at intel.com>
>Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
>---
> drivers/gpu/drm/xe/xe_exec_queue_types.h |  4 +++
> drivers/gpu/drm/xe/xe_execlist.c         | 14 ++++++++
> drivers/gpu/drm/xe/xe_guc_submit.c       | 45 ++++++++++++++++++++++++
> 3 files changed, 63 insertions(+)
>
>diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>index 201588ec33c3..8d64c2277e96 100644
>--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
>+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>@@ -164,6 +164,10 @@ struct xe_exec_queue_ops {
> int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
> /** @set_preempt_timeout: Set preemption timeout for exec queue */
> int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
>+ /** @set_no_preempt: Disable preemption and timeslicing for exec queue */
>+ int (*set_no_preempt)(struct xe_exec_queue *q);
>+ /** @clear_no_preempt: Re-enable preemption and timeslicing for exec queue */
>+ int (*clear_no_preempt)(struct xe_exec_queue *q);
> /**
> * @suspend: Suspend exec queue from executing, allowed to be called
> * multiple times in a row before resume with the caveat that
>diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
>index db906117db6d..04a2c7e86019 100644
>--- a/drivers/gpu/drm/xe/xe_execlist.c
>+++ b/drivers/gpu/drm/xe/xe_execlist.c
>@@ -416,6 +416,18 @@ static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
> return 0;
> }
>
>+static int execlist_exec_queue_set_no_preempt(struct xe_exec_queue *q)
>+{
>+ /* NIY */
>+ return 0;
>+}
>+
>+static int execlist_exec_queue_clear_no_preempt(struct xe_exec_queue *q)
>+{
>+ /* NIY */
>+ return 0;
>+}
>+
> static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
> {
> /* NIY */
>@@ -446,6 +458,8 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
> .set_priority = execlist_exec_queue_set_priority,
> .set_timeslice = execlist_exec_queue_set_timeslice,
> .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
>+ .set_no_preempt = execlist_exec_queue_set_no_preempt,
>+ .clear_no_preempt = execlist_exec_queue_clear_no_preempt,
> .suspend = execlist_exec_queue_suspend,
> .suspend_wait = execlist_exec_queue_suspend_wait,
> .resume = execlist_exec_queue_resume,
>diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
>index 373447758a60..3fd5523baf0f 100644
>--- a/drivers/gpu/drm/xe/xe_guc_submit.c
>+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
>@@ -1548,6 +1548,49 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
> return 0;
> }
>
>+static int guc_exec_queue_set_no_preempt(struct xe_exec_queue *q)
>+{
>+ struct xe_sched_msg *msg;
>+
>+ if ((!q->sched_props.preempt_timeout_us && !q->sched_props.timeslice_us) ||
>+ exec_queue_killed_or_banned_or_wedged(q))
>+ return 0;
>+
>+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
>+ if (!msg)
>+ return -ENOMEM;
>+
>+ /* Setting values to 0 will disable preemption and timeslicing */
>+ q->sched_props.preempt_timeout_us = 0;
>+ q->sched_props.timeslice_us = 0;
>+
>+ guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
>+
>+ return 0;
>+}
>+
>+static int guc_exec_queue_clear_no_preempt(struct xe_exec_queue *q)
>+{
>+ struct xe_sched_msg *msg;
>+
>+ if ((q->sched_props.preempt_timeout_us ==
>+ q->hwe->eclass->sched_props.preempt_timeout_us &&
>+ q->sched_props.timeslice_us == q->hwe->eclass->sched_props.timeslice_us) ||
>+ exec_queue_killed_or_banned_or_wedged(q))
>+ return 0;
>+
>+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
>+ if (!msg)
>+ return -ENOMEM;
>+
>+ q->sched_props.preempt_timeout_us = q->hwe->eclass->sched_props.preempt_timeout_us;
>+ q->sched_props.timeslice_us = q->hwe->eclass->sched_props.timeslice_us;
>+
>+ guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
>+
>+ return 0;
>+}
Why not just use the existing .set_timeslice and .set_preempt_timeout
hooks instead of defining new ops that do both?
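Something like this in the OA code, for example (untested sketch;
xe_oa_disable_preempt_timeslice() is a made-up helper name for
illustration, using only the hooks already declared in
xe_exec_queue_types.h):

        static int xe_oa_disable_preempt_timeslice(struct xe_exec_queue *q)
        {
                int ret;

                /* Setting both values to 0 disables preemption and timeslicing */
                ret = q->ops->set_timeslice(q, 0);
                if (ret)
                        return ret;

                return q->ops->set_preempt_timeout(q, 0);
        }

The restore path could then call the same two hooks with the
q->hwe->eclass->sched_props defaults, much like clear_no_preempt
does here.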
Also, how do you check whether this operation succeeded? Is there a
response from the GuC indicating success?
Thanks,
Umesh
>+
> static int guc_exec_queue_suspend(struct xe_exec_queue *q)
> {
> struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
>@@ -1598,6 +1641,8 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
> .set_priority = guc_exec_queue_set_priority,
> .set_timeslice = guc_exec_queue_set_timeslice,
> .set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
>+ .set_no_preempt = guc_exec_queue_set_no_preempt,
>+ .clear_no_preempt = guc_exec_queue_clear_no_preempt,
> .suspend = guc_exec_queue_suspend,
> .suspend_wait = guc_exec_queue_suspend_wait,
> .resume = guc_exec_queue_resume,
>--
>2.41.0
>