[PATCH] drm/xe: Refactor exec queue deregister
Matthew Brost
matthew.brost at intel.com
Fri Apr 18 05:07:05 UTC 2025
On Thu, Apr 17, 2025 at 06:06:29PM +0000, Stuart Summers wrote:
> No functional change here, just combine the two similar
> routines we currently have for submitting the context
> deregistration to GuC.
>
> Signed-off-by: Stuart Summers <stuart.summers at intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
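
For anyone reading along, here is the combined helper once more with a
couple of comments spelling out why the two CT send paths differ. The
code itself is lifted straight from the hunk below; the comments are my
reading of the two former call sites (job timeout path vs. G2H
processing), not part of Stuart's patch.

static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_CONTEXT,
		q->guc->id,
	};

	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));

	trace_xe_exec_queue_deregister(q);

	if (!exec_queue_destroyed(q)) {
		/* First deregister request (formerly __deregister_exec_queue,
		 * reached from the job timeout path): mark the queue destroyed
		 * and send over CT, reserving space for the G2H response. */
		set_exec_queue_destroyed(q);

		xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
			       G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
	} else {
		/* Queue already marked destroyed (formerly the second
		 * deregister_exec_queue), presumably reached from G2H
		 * processing, so use the handler-safe send variant. */
		xe_guc_ct_send_g2h_handler(&guc->ct, action,
					   ARRAY_SIZE(action));
	}
}
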
> ---
> drivers/gpu/drm/xe/xe_guc_submit.c | 62 ++++++++++++------------------
> 1 file changed, 25 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 813c3c0bb250..42c709729352 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -568,6 +568,30 @@ static void register_exec_queue(struct xe_exec_queue *q)
> init_policies(guc, q);
> }
>
> +static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
> +{
> + u32 action[] = {
> + XE_GUC_ACTION_DEREGISTER_CONTEXT,
> + q->guc->id,
> + };
> +
> + xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
> + xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
> + xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
> +
> + trace_xe_exec_queue_deregister(q);
> +
> + if (!exec_queue_destroyed(q)) {
> + set_exec_queue_destroyed(q);
> +
> + xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
> + G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
> + } else {
> + xe_guc_ct_send_g2h_handler(&guc->ct, action,
> + ARRAY_SIZE(action));
> + }
> +}
> +
> static u32 wq_space_until_wrap(struct xe_exec_queue *q)
> {
> return (WQ_SIZE - q->guc->wqi_tail);
> @@ -1026,25 +1050,6 @@ static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
> G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
> }
>
> -static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
> -{
> - u32 action[] = {
> - XE_GUC_ACTION_DEREGISTER_CONTEXT,
> - q->guc->id,
> - };
> -
> - xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
> - xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
> - xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
> - xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
> -
> - set_exec_queue_destroyed(q);
> - trace_xe_exec_queue_deregister(q);
> -
> - xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
> - G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
> -}
> -
> static enum drm_gpu_sched_stat
> guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
> {
> @@ -1213,7 +1218,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
> if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
> set_exec_queue_extra_ref(q);
> xe_exec_queue_get(q);
> - __deregister_exec_queue(guc, q);
> + deregister_exec_queue(guc, q);
> }
>
> /* Stop fence signaling */
> @@ -1857,23 +1862,6 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
> return q;
> }
>
> -static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
> -{
> - u32 action[] = {
> - XE_GUC_ACTION_DEREGISTER_CONTEXT,
> - q->guc->id,
> - };
> -
> - xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
> - xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
> - xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
> - xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
> -
> - trace_xe_exec_queue_deregister(q);
> -
> - xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
> -}
> -
> static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
> u32 runnable_state)
> {
> --
> 2.34.1
>