[PATCH 6/6] drm/v3d: Protect per-fd reset counter against fd release
Maíra Canal
mcanal at igalia.com
Sat Jul 19 13:24:45 UTC 2025
The per-fd reset counter tracks the number of GPU resets caused by jobs
submitted through a specific file descriptor. However, there is a race
condition: the file descriptor can be closed while its jobs are still
running, so the timeout handler may dereference freed memory when it
updates the per-fd reset counter.

Ensure that the per-fd reset counter is only updated while the file
descriptor is still alive, by taking the queue's queue_lock around the
update and skipping it once the per-fd state is gone, preventing a
use-after-free during GPU reset handling.
Signed-off-by: Maíra Canal <mcanal at igalia.com>
---
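A note for reviewers, spelling out the window this closes (the release
side below follows my reading of the earlier patches in this series,
which tear down the per-fd state under the per-queue queue_lock):

  scheduler timeout handler            fd release
  -------------------------            ----------
  v3d_priv = job->file_priv;
                                       v3d_file_priv is freed
  v3d_priv->reset_counter++;           <-- write to freed memory

With this patch the increment is performed under the queue's
queue_lock and skipped once the per-fd state is gone, so the timeout
handler can no longer touch freed memory.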
 drivers/gpu/drm/v3d/v3d_sched.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)
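For illustration only, a minimal userspace sketch of the locking
pattern the patch applies, with a pthread mutex standing in for the
per-queue spinlock (spin_lock_irqsave() on v3d->queue[q].queue_lock in
the driver); all names below are mine, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct file_priv { int reset_counter; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct file_priv *priv;	/* NULL once the "fd" is released */

/* Timeout path: update the per-fd counter only while it still exists. */
static void gpu_reset_for_timeout(void)
{
	pthread_mutex_lock(&queue_lock);
	if (priv)
		priv->reset_counter++;
	pthread_mutex_unlock(&queue_lock);
}

/* Release path: free and publish NULL under the same lock. */
static void fd_release(void)
{
	pthread_mutex_lock(&queue_lock);
	free(priv);
	priv = NULL;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	priv = calloc(1, sizeof(*priv));
	gpu_reset_for_timeout();	/* safe: state is alive */
	fd_release();
	gpu_reset_for_timeout();	/* safe: NULL check skips the update */
	printf("ok\n");
	return 0;
}

Because both paths serialize on the same lock, the timeout path either
sees the per-fd state fully alive or observes NULL and skips the
update; it can never see it mid-teardown.
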
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 715cdc3ada62a271e6dbd0584cdbac24e23c63d6..9f57ab41a7980ab097ea4cfed613f0cf6a1e6359 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -724,17 +724,19 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
 }
 
 static enum drm_gpu_sched_stat
-v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job,
+			  enum v3d_queue q)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
 	struct v3d_file_priv *v3d_priv = job->file_priv;
-	enum v3d_queue q;
+	unsigned long irqflags;
+	enum v3d_queue i;
 
 	mutex_lock(&v3d->reset_lock);
 
 	/* block scheduler */
-	for (q = 0; q < V3D_MAX_QUEUES; q++)
-		drm_sched_stop(&v3d->queue[q].sched, sched_job);
+	for (i = 0; i < V3D_MAX_QUEUES; i++)
+		drm_sched_stop(&v3d->queue[i].sched, sched_job);
 
 	if (sched_job)
 		drm_sched_increase_karma(sched_job);
@@ -743,15 +745,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 	v3d_reset(v3d);
 
 	v3d->reset_counter++;
-	v3d_priv->reset_counter++;
+	spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
+	if (v3d_priv)
+		v3d_priv->reset_counter++;
+	spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);
 
-	for (q = 0; q < V3D_MAX_QUEUES; q++)
-		drm_sched_resubmit_jobs(&v3d->queue[q].sched);
+	for (i = 0; i < V3D_MAX_QUEUES; i++)
+		drm_sched_resubmit_jobs(&v3d->queue[i].sched);
 
 	/* Unblock schedulers and restart their jobs. */
-	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_start(&v3d->queue[q].sched, 0);
-	}
+	for (i = 0; i < V3D_MAX_QUEUES; i++)
+		drm_sched_start(&v3d->queue[i].sched, 0);
 
 	mutex_unlock(&v3d->reset_lock);
 
@@ -779,7 +783,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
 		return DRM_GPU_SCHED_STAT_NO_HANG;
 	}
 
-	return v3d_gpu_reset_for_timeout(v3d, sched_job);
+	return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
 }
 
 static enum drm_gpu_sched_stat
@@ -805,7 +809,7 @@ v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
 
-	return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+	return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU);
 }
 
 static enum drm_gpu_sched_stat
@@ -824,7 +828,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
 		return DRM_GPU_SCHED_STAT_NO_HANG;
 	}
 
-	return v3d_gpu_reset_for_timeout(v3d, sched_job);
+	return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD);
 }
 
 static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
--
2.50.0