[PATCH] drm/xe: Add timeout to preempt fences
Niranjana Vishwanathapura
niranjana.vishwanathapura at intel.com
Tue Jun 25 05:35:10 UTC 2024
On Tue, Jun 25, 2024 at 05:21:28AM +0000, Matthew Brost wrote:
>On Mon, Jun 24, 2024 at 10:16:21PM -0700, Niranjana Vishwanathapura wrote:
>> On Mon, Jun 24, 2024 at 03:48:44PM -0700, Matthew Brost wrote:
>> > To adhere to dma fencing rules that fences must signal within a
>> > reasonable amount of time, add a 5 second timeout to preempt fences. If
>> > this timeout occurs, kill the associated VM, as this is fatal to the VM.
>> >
>> > Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>> > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
>> > ---
>> > drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++--
>> > drivers/gpu/drm/xe/xe_execlist.c | 3 +-
>> > drivers/gpu/drm/xe/xe_guc_submit.c | 35 ++++++++++++++++++++----
>> > drivers/gpu/drm/xe/xe_preempt_fence.c | 14 +++++++++-
>> > drivers/gpu/drm/xe/xe_vm.c | 10 ++++++-
>> > drivers/gpu/drm/xe/xe_vm.h | 2 ++
>> > 6 files changed, 59 insertions(+), 11 deletions(-)
>> >
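For context on the motivation: dma-fence rules require that a published fence
signals within a bounded time, since memory reclaim can end up blocked on it.
The core of the patch is replacing an unbounded wait_event() with
wait_event_timeout() and mapping a timeout onto an error. A minimal sketch of
that idiom, with hypothetical names (not the driver code itself):

  #include <linux/errno.h>
  #include <linux/jiffies.h>
  #include <linux/wait.h>

  /*
   * Bounded-wait sketch: wait_event_timeout() returns 0 when it times
   * out with the condition still false, otherwise the remaining
   * jiffies (>= 1), so a zero return maps cleanly onto -ETIME.
   */
  static int bounded_wait(wait_queue_head_t *wq, bool *pending)
  {
          long timeout = wait_event_timeout(*wq, !READ_ONCE(*pending),
                                            HZ * 5);

          return timeout ? 0 : -ETIME;
  }
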
>> > diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>> > index 201588ec33c3..1e51c978db7a 100644
>> > --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
>> > +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
>> > @@ -172,9 +172,11 @@ struct xe_exec_queue_ops {
>> > int (*suspend)(struct xe_exec_queue *q);
>> > /**
>> > * @suspend_wait: Wait for an exec queue to suspend executing, should be
>> > - * call after suspend.
>> > + * called after suspend. In the dma-fencing path this must return within a
>> > + * reasonable amount of time. A non-zero return shall indicate an error
>> > + * waiting for suspend.
>> > */
>> > - void (*suspend_wait)(struct xe_exec_queue *q);
>> > + int (*suspend_wait)(struct xe_exec_queue *q);
>> > /**
>> > * @resume: Resume exec queue execution, exec queue must be in a suspended
>> > * state and dma fence returned from most recent suspend call must be
>> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
>> > index db906117db6d..7502e3486eaf 100644
>> > --- a/drivers/gpu/drm/xe/xe_execlist.c
>> > +++ b/drivers/gpu/drm/xe/xe_execlist.c
>> > @@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
>> > return 0;
>> > }
>> >
>> > -static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
>> > +static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
>> >
>> > {
>> > /* NIY */
>> > + return 0;
>> > }
>> >
>> > static void execlist_exec_queue_resume(struct xe_exec_queue *q)
>> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
>> > index 373447758a60..56e7a340696e 100644
>> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
>> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
>> > @@ -1301,6 +1301,16 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
>> > kfree(msg);
>> > }
>> >
>> > +static void __suspend_fence_signal(struct xe_exec_queue *q)
>> > +{
>> > + if (!q->guc->suspend_pending)
>> > + return;
>> > +
>> > + q->guc->suspend_pending = false;
>> > + smp_wmb();
>> > + wake_up(&q->guc->suspend_wait);
>> > +}
>> > +
>> > static void suspend_fence_signal(struct xe_exec_queue *q)
>> > {
>> > struct xe_guc *guc = exec_queue_to_guc(q);
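As an aside for readers, the helper factored out here is the usual
flag-publish/wake pairing; a simplified self-contained sketch of that pattern
(hypothetical types, not the xe structures):

  #include <linux/wait.h>

  struct signal_state {
          wait_queue_head_t wq;
          bool pending;
  };

  /* Publisher: clear the flag, order the store, then wake any waiter. */
  static void publish_done(struct signal_state *s)
  {
          s->pending = false;
          smp_wmb();      /* flag store visible before the wakeup */
          wake_up(&s->wq);
  }

  /*
   * Consumer: wait_event() re-checks the condition after every wakeup,
   * so spurious or early wakeups are harmless.
   */
  static void wait_done(struct signal_state *s)
  {
          wait_event(s->wq, !s->pending);
  }
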
>> > @@ -1310,9 +1320,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
>> > guc_read_stopped(guc));
>> > xe_assert(xe, q->guc->suspend_pending);
>> >
>> > - q->guc->suspend_pending = false;
>> > - smp_wmb();
>> > - wake_up(&q->guc->suspend_wait);
>> > + __suspend_fence_signal(q);
>> > }
>> >
>> > static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
>> > @@ -1465,6 +1473,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
>> > {
>> > trace_xe_exec_queue_kill(q);
>> > set_exec_queue_killed(q);
>> > + __suspend_fence_signal(q);
>> > xe_guc_exec_queue_trigger_cleanup(q);
>> > }
>> >
>> > @@ -1561,12 +1570,26 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
>> > return 0;
>> > }
>> >
>> > -static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
>> > +static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
>> > {
>> > struct xe_guc *guc = exec_queue_to_guc(q);
>> > + int ret;
>> > +
>> > + ret = wait_event_timeout(q->guc->suspend_wait,
>> > + !q->guc->suspend_pending ||
>> > + exec_queue_killed(q) ||
>> > + guc_read_stopped(guc),
>> > + HZ * 5);
>>
>> Do we need exec_queue_killed(q) here, given that we already check
>> for '!q->guc->suspend_pending'?
>>
>
>Probably not? There might be a goofy race where suspend_pending is set
>after the exec queue is killed, though; I'd have to really think about
>this. For safety I'd rather keep it as is.
>
Seems fine to keep.
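For the archives, the hazard being guarded against is a lost wakeup. Roughly
(illustrative interleaving only, not exact call paths):

  /*
   *   waiter (suspend path)            killer
   *   ---------------------            -----------------------------
   *   q->guc->suspend_pending = true;
   *   checks condition, sleeps         set_exec_queue_killed(q);
   *                                    __suspend_fence_signal(q);
   *                                      -> clears suspend_pending,
   *                                         wakes the waiter
   *
   * Without the wakeup in guc_exec_queue_kill(), the waiter would only
   * be rescued by the 5 second timeout; the exec_queue_killed() check
   * in the wait condition covers a kill that lands between the flag
   * write and the sleep.
   */
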
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura at intel.com>
>Matt
>
>> Other than that, the change looks fine to me.
>>
>> Niranjana
>>
>> >
>> > - wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
>> > - guc_read_stopped(guc));
>> > + if (!ret) {
>> > + xe_gt_warn(guc_to_gt(guc),
>> > + "Suspend fence, guc_id=%d, failed to respond",
>> > + q->guc->id);
>> > + /* XXX: Trigger GT reset? */
>> > + return -ETIME;
>> > + }
>> > +
>> > + return 0;
>> > }
>> >
>> > static void guc_exec_queue_resume(struct xe_exec_queue *q)
>> > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
>> > index e8b8ae5c6485..8356d9798206 100644
>> > --- a/drivers/gpu/drm/xe/xe_preempt_fence.c
>> > +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
>> > @@ -16,11 +16,23 @@ static void preempt_fence_work_func(struct work_struct *w)
>> > struct xe_preempt_fence *pfence =
>> > container_of(w, typeof(*pfence), preempt_work);
>> > struct xe_exec_queue *q = pfence->q;
>> > + int err = 0;
>> >
>> > if (pfence->error)
>> > dma_fence_set_error(&pfence->base, pfence->error);
>> > + else if (!q->ops->reset_status(q))
>> > + err = q->ops->suspend_wait(q);
>> > else
>> > - q->ops->suspend_wait(q);
>> > + dma_fence_set_error(&pfence->base, -ENOENT);
>> > +
>> > + if (err) {
>> > + dma_fence_set_error(&pfence->base, err);
>> > +
>> > + down_write(&q->vm->lock);
>> > + xe_vm_kill(q->vm, false);
>> > + up_write(&q->vm->lock);
>> > + }
>> > +
>> >
>> > dma_fence_signal(&pfence->base);
>> > /*
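One dma-fence API note on this hunk: dma_fence_set_error() must be called
before the fence is signaled, so every path records its error first and
dma_fence_signal() runs exactly once at the end. A minimal sketch of that
ordering (hypothetical helper, assuming only the core dma-fence API):

  #include <linux/dma-fence.h>

  /*
   * Record any error while the fence is still unsignaled, then signal
   * exactly once; calling dma_fence_set_error() after the signal would
   * violate the dma-fence API.
   */
  static void finish_fence(struct dma_fence *fence, int err)
  {
          if (err)
                  dma_fence_set_error(fence, err);
          dma_fence_signal(fence);
  }
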
>> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> > index 5b166fa03684..6b8ff13f0aff 100644
>> > --- a/drivers/gpu/drm/xe/xe_vm.c
>> > +++ b/drivers/gpu/drm/xe/xe_vm.c
>> > @@ -311,7 +311,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
>> >
>> > #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
>> >
>> > -static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
>> > +/**
>> > + * xe_vm_kill() - VM Kill
>> > + * @vm: The VM.
>> > + * @unlocked: Flag indicating the VM's dma-resv is not held
>> > + *
>> > + * Kill the VM by setting the banned flag, indicating it is no longer usable.
>> > + * If in preempt fence mode, also kill all exec queues attached to the VM.
>> > + */
>> > +void xe_vm_kill(struct xe_vm *vm, bool unlocked)
>> > {
>> > struct xe_exec_queue *q;
>> >
>> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
>> > index b481608b12f1..c864dba35e1d 100644
>> > --- a/drivers/gpu/drm/xe/xe_vm.h
>> > +++ b/drivers/gpu/drm/xe/xe_vm.h
>> > @@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
>> > return drm_gpuvm_resv(&vm->gpuvm);
>> > }
>> >
>> > +void xe_vm_kill(struct xe_vm *vm, bool unlocked);
>> > +
>> > /**
>> > * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
>> > * @vm: The vm
>> > --
>> > 2.34.1
>> >