[PATCH v1 7/7] drm/xe/vf: Post migration, repopulate ring area for pending request
Lis, Tomasz
tomasz.lis at intel.com
Thu May 15 22:08:23 UTC 2025
On 14.05.2025 20:49, Michal Wajdeczko wrote:
>
> On 14.05.2025 00:49, Tomasz Lis wrote:
>> The commands within ring area allocated for a request may contain
>> references to GGTT. These references require update after VF
>> migration, in order to continue any preempted LRCs, or jobs which
>> were emitted to the ring but not sent to GuC yet.
>>
>> This change calls the emit function again for all such jobs,
>> as part of post-migration recovery.
>>
>> Signed-off-by: Tomasz Lis <tomasz.lis at intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_guc_submit.c | 20 ++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_guc_submit.h | 2 ++
>> drivers/gpu/drm/xe/xe_sriov_vf.c | 23 +++++++++++++++++++++++
>> 3 files changed, 45 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
>> index c485272829a6..238b6691d575 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
>> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
>> @@ -766,6 +766,26 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
>> return fence;
>> }
>>
>> +/**
>> + * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on given queue.
>> + * @eq: the &xe_exec_queue struct instance
>> + */
>> +void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *eq)
> are you sure this function shouldn't be placed in xe_exec_queue.c ?
Will move.
>> +{
>> + struct xe_gpu_scheduler *sched = &eq->guc->sched;
>> + struct xe_sched_job *job;
>> +
>> + if (exec_queue_killed_or_banned_or_wedged(eq))
> this condition likely can be checked by the caller (in xe_guc_submit.c)
Since keeping the check in the moved function would require adding extra
#includes, yes, this makes sense (rough sketch of the resulting helper
below, after the quoted hunk).
>> + return;
>> +
>> + list_for_each_entry(job, &sched->base.pending_list, drm.list) {
>> + if (xe_sched_job_is_error(job))
>> + continue;
>> +
>> + eq->ring_ops->emit_job(job);
>> + }
>> +}
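To illustrate what I have in mind for the moved helper in xe_exec_queue.c,
roughly (untested sketch, final shape may differ):

void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *eq)
{
	struct xe_gpu_scheduler *sched = &eq->guc->sched;
	struct xe_sched_job *job;

	/*
	 * Caller is expected to hold the GuC submission state lock and to
	 * have skipped killed/banned/wedged queues before calling in here.
	 */
	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
		if (xe_sched_job_is_error(job))
			continue;

		eq->ring_ops->emit_job(job);
	}
}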
>> +
>> static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
>> {
>> struct xe_sched_job *job = to_xe_sched_job(drm_job);
>> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
>> index 2c2d2936440d..55398e292b79 100644
>> --- a/drivers/gpu/drm/xe/xe_guc_submit.h
>> +++ b/drivers/gpu/drm/xe/xe_guc_submit.h
>> @@ -33,6 +33,8 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
>> int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
>> int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
>>
>> +void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *eq);
>> +
>> struct xe_guc_submit_exec_queue_snapshot *
>> xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
>> void
>> diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
>> index c08c44dbd383..2ff1383f0b1a 100644
>> --- a/drivers/gpu/drm/xe/xe_sriov_vf.c
>> +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
>> @@ -8,6 +8,7 @@
>> #include "xe_assert.h"
>> #include "xe_device.h"
>> #include "xe_exec_queue_types.h"
>> +#include "xe_guc_exec_queue_types.h"
>> #include "xe_gt.h"
>> #include "xe_gt_sriov_printk.h"
>> #include "xe_gt_sriov_vf.h"
>> @@ -16,6 +17,7 @@
>> #include "xe_irq.h"
>> #include "xe_lrc.h"
>> #include "xe_pm.h"
>> +#include "xe_sched_job_types.h"
>> #include "xe_sriov.h"
>> #include "xe_sriov_printk.h"
>> #include "xe_sriov_vf.h"
>> @@ -266,6 +268,26 @@ static void vf_post_migration_fixup_contexts(struct xe_device *xe)
>> }
>> }
>>
>> +static void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
> and this one in xe_guc_submit.c ?
OK, will do (rough sketch after the quoted hunk below).
>> +{
>> + struct xe_exec_queue *eq;
>> + unsigned long index;
>> +
>> + mutex_lock(&guc->submission_state.lock);
>> + xa_for_each(&guc->submission_state.exec_queue_lookup, index, eq)
>> + xe_exec_queue_jobs_ring_restore(eq);
>> + mutex_unlock(&guc->submission_state.lock);
>> +}
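With the killed/banned/wedged check pulled up from the per-queue helper,
I would expect the moved function to end up roughly like this in
xe_guc_submit.c (untested sketch, naming still open):

static void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
{
	struct xe_exec_queue *eq;
	unsigned long index;

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, eq) {
		/* skip queues which will not be resumed anyway */
		if (exec_queue_killed_or_banned_or_wedged(eq))
			continue;

		xe_exec_queue_jobs_ring_restore(eq);
	}
	mutex_unlock(&guc->submission_state.lock);
}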
>> +
>> +static void vf_post_migration_fixup_jobs(struct xe_device *xe)
>> +{
>> + struct xe_gt *gt;
>> + unsigned int id;
>> +
>> + for_each_gt(gt, xe, id)
>> + xe_guc_jobs_ring_rebase(&gt->uc.guc);
>> +}
>> +
>> static void vf_post_migration_fixup_ctb(struct xe_device *xe)
>> {
>> struct xe_gt *gt;
>> @@ -348,6 +370,7 @@ static void vf_post_migration_recovery(struct xe_device *xe)
>> need_fixups = vf_post_migration_fixup_ggtt_nodes(xe);
>> if (need_fixups) {
>> vf_post_migration_fixup_contexts(xe);
>> + vf_post_migration_fixup_jobs(xe);
> in patch 5/7 you've dropped FIXME so I'm surprised by this step ;)
Yes, I was considering merging this with the contexts fixup, as it
iterates through the same list under the same lock.
Will make sure the comment is removed here.
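If I do merge them, it would look roughly like the below (illustrative
only; the function name and the context-side call are placeholders for
whatever helper patch 5/7 ends up exporting):

static void xe_guc_contexts_and_jobs_fixup(struct xe_guc *guc)
{
	struct xe_exec_queue *eq;
	unsigned long index;

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, eq) {
		if (exec_queue_killed_or_banned_or_wedged(eq))
			continue;

		/* placeholder name for the LRC/context fixup from patch 5/7 */
		xe_exec_queue_contexts_fixup(eq);
		/* re-emit ring commands of jobs pending on this queue */
		xe_exec_queue_jobs_ring_restore(eq);
	}
	mutex_unlock(&guc->submission_state.lock);
}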
-Tomasz
>> vf_post_migration_fixup_ctb(xe);
>> }
>>