[PATCH v3 7/7] drm/xe/vf: Post migration, repopulate ring area for pending request
Michał Winiarski
michal.winiarski at intel.com
Wed May 28 10:54:38 UTC 2025
On Tue, May 20, 2025 at 01:19:25AM +0200, Tomasz Lis wrote:
> The commands within the ring area allocated for a request may contain
> references to GGTT. These references require an update after VF
> migration, in order to continue any preempted LRCs, or jobs which
> were emitted to the ring but not sent to GuC yet.
>
> This change calls the emit function again for all such jobs,
> as part of post-migration recovery.
>
> v2: Moved a few functions to better files
>
> Signed-off-by: Tomasz Lis <tomasz.lis at intel.com>
> Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
> ---
> drivers/gpu/drm/xe/xe_exec_queue.c | 17 +++++++++++++++++
> drivers/gpu/drm/xe/xe_exec_queue.h | 2 ++
> drivers/gpu/drm/xe/xe_guc_submit.c | 19 +++++++++++++++++++
> drivers/gpu/drm/xe/xe_guc_submit.h | 2 ++
> drivers/gpu/drm/xe/xe_sriov_vf.c | 13 ++++++++++++-
> 5 files changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 9c3e568400e0..0488d80d5b99 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -1056,3 +1056,20 @@ void xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q)
> xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
> }
> }
> +
> +/**
> + * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on given queue.
> + * @q: the &xe_exec_queue struct instance
> + */
> +void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q)
> +{
> + struct xe_gpu_scheduler *sched = &q->guc->sched;
> + struct xe_sched_job *job;
> +
> + list_for_each_entry(job, &sched->base.pending_list, drm.list) {
> + if (xe_sched_job_is_error(job))
> + continue;
> +
> + q->ring_ops->emit_job(job);
> + }
Shouldn't we take the lock that protects sched->base.pending_list?
I know we're under guc->submission_state.lock, but that doesn't protect
it, right?
Other than that - LGTM.
Thanks,
-Michał
> +}
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
> index 1d399a33c5c0..67c2baa42c0f 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.h
> @@ -92,4 +92,6 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
>
> void xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q);
>
> +void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q);
> +
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 990f3265c7ad..a60e0575cc56 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -766,6 +766,25 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> return fence;
> }
>
> +/**
> + * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending
> + * on all queues under a guc.
> + * @guc: the &xe_guc struct instance
> + */
> +void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
> +{
> + struct xe_exec_queue *q;
> + unsigned long index;
> +
> + mutex_lock(&guc->submission_state.lock);
> + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
> + if (exec_queue_killed_or_banned_or_wedged(q))
> + continue;
> + xe_exec_queue_jobs_ring_restore(q);
> + }
> + mutex_unlock(&guc->submission_state.lock);
> +}
> +
> static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
> {
> struct xe_sched_job *job = to_xe_sched_job(drm_job);
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
> index 2cc44298465f..e31680a08dba 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.h
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.h
> @@ -33,6 +33,8 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
> int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
> int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
>
> +void xe_guc_jobs_ring_rebase(struct xe_guc *guc);
> +
> struct xe_guc_submit_exec_queue_snapshot *
> xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
> void
> diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
> index 0a9761b6ffb5..3e7eb365f2e9 100644
> --- a/drivers/gpu/drm/xe/xe_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
> @@ -8,6 +8,7 @@
> #include "xe_assert.h"
> #include "xe_device.h"
> #include "xe_exec_queue_types.h"
> +#include "xe_guc_exec_queue_types.h"
> #include "xe_gt.h"
> #include "xe_gt_sriov_printk.h"
> #include "xe_gt_sriov_vf.h"
> @@ -16,6 +17,7 @@
> #include "xe_irq.h"
> #include "xe_lrc.h"
> #include "xe_pm.h"
> +#include "xe_sched_job_types.h"
> #include "xe_sriov.h"
> #include "xe_sriov_printk.h"
> #include "xe_sriov_vf.h"
> @@ -245,6 +247,15 @@ static void vf_post_migration_fixup_contexts(struct xe_device *xe)
> }
> }
>
> +static void vf_post_migration_fixup_jobs(struct xe_device *xe)
> +{
> + struct xe_gt *gt;
> + unsigned int id;
> +
> + for_each_gt(gt, xe, id)
> +	xe_guc_jobs_ring_rebase(&gt->uc.guc);
> +}
> +
> static void vf_post_migration_fixup_ctb(struct xe_device *xe)
> {
> struct xe_gt *gt;
> @@ -327,7 +338,7 @@ static void vf_post_migration_recovery(struct xe_device *xe)
> need_fixups = vf_post_migration_fixup_ggtt_nodes(xe);
> if (need_fixups) {
> vf_post_migration_fixup_contexts(xe);
> - /* FIXME: add the recovery steps */
> + vf_post_migration_fixup_jobs(xe);
> vf_post_migration_fixup_ctb(xe);
> }
>
> --
> 2.25.1
>
More information about the Intel-xe
mailing list