[PATCH] drm/xe: Separate out sched/deregister_done handling
Matthew Brost
matthew.brost at intel.com
Tue Mar 19 23:54:22 UTC 2024
On Tue, Mar 19, 2024 at 11:41:53AM -0700, Niranjana Vishwanathapura wrote:
> Abstract out the core parts of the sched_done and deregister_done handlers
> into separate functions, decoupling them from the protocol error handling
> and making them more readable.
>
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
Good clean up.
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_guc_submit.c | 64 +++++++++++++++++-------------
> 1 file changed, 37 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 82c955a2a15c..4a2b8e6b81b8 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -1574,28 +1574,8 @@ static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
> xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
> }
>
> -int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> +static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q)
> {
> - struct xe_device *xe = guc_to_xe(guc);
> - struct xe_exec_queue *q;
> - u32 guc_id = msg[0];
> -
> - if (unlikely(len < 2)) {
> - drm_err(&xe->drm, "Invalid length %u", len);
> - return -EPROTO;
> - }
> -
> - q = g2h_exec_queue_lookup(guc, guc_id);
> - if (unlikely(!q))
> - return -EPROTO;
> -
> - if (unlikely(!exec_queue_pending_enable(q) &&
> - !exec_queue_pending_disable(q))) {
> - drm_err(&xe->drm, "Unexpected engine state 0x%04x",
> - atomic_read(&q->guc->state));
> - return -EPROTO;
> - }
> -
> trace_xe_exec_queue_scheduling_done(q);
>
> if (exec_queue_pending_enable(q)) {
> @@ -1615,17 +1595,15 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> deregister_exec_queue(guc, q);
> }
> }
> -
> - return 0;
> }
>
> -int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> +int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> {
> struct xe_device *xe = guc_to_xe(guc);
> struct xe_exec_queue *q;
> u32 guc_id = msg[0];
>
> - if (unlikely(len < 1)) {
> + if (unlikely(len < 2)) {
> drm_err(&xe->drm, "Invalid length %u", len);
> return -EPROTO;
> }
> @@ -1634,13 +1612,20 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> if (unlikely(!q))
> return -EPROTO;
>
> - if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
> - exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
> + if (unlikely(!exec_queue_pending_enable(q) &&
> + !exec_queue_pending_disable(q))) {
> drm_err(&xe->drm, "Unexpected engine state 0x%04x",
> atomic_read(&q->guc->state));
> return -EPROTO;
> }
>
> + handle_sched_done(guc, q);
> +
> + return 0;
> +}
> +
> +static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
> +{
> trace_xe_exec_queue_deregister_done(q);
>
> clear_exec_queue_registered(q);
> @@ -1649,6 +1634,31 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> xe_exec_queue_put(q);
> else
> __guc_exec_queue_fini(guc, q);
> +}
> +
> +int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> +{
> + struct xe_device *xe = guc_to_xe(guc);
> + struct xe_exec_queue *q;
> + u32 guc_id = msg[0];
> +
> + if (unlikely(len < 1)) {
> + drm_err(&xe->drm, "Invalid length %u", len);
> + return -EPROTO;
> + }
> +
> + q = g2h_exec_queue_lookup(guc, guc_id);
> + if (unlikely(!q))
> + return -EPROTO;
> +
> + if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
> + exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
> + drm_err(&xe->drm, "Unexpected engine state 0x%04x",
> + atomic_read(&q->guc->state));
> + return -EPROTO;
> + }
> +
> + handle_deregister_done(guc, q);
>
> return 0;
> }
> --
> 2.43.0
>
More information about the Intel-xe
mailing list