[Intel-xe] [PATCH v10 8/9] drm/xe: add lockdep annotation for xe_device_mem_access_get()
Matthew Auld
matthew.auld at intel.com
Wed May 24 17:57:00 UTC 2023
The atomics here might hide potential issues, so add a dummy lock with
the idea that xe_pm_runtime_resume() is eventually going to be called
when we are holding it. This only needs to happen once, and then lockdep
can validate all callers and their locks.
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/xe/xe_device.c | 20 ++++++++++++++++++++
drivers/gpu/drm/xe/xe_device_types.h | 8 ++++++++
2 files changed, 28 insertions(+)
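
For illustration only (not part of the patch; struct foo, its lock and
foo_do_vram_work() are made up), the kind of dependency chain this
annotation lets lockdep build looks roughly like:

	/* Hypothetical caller-side lock; stands in for any lock held
	 * around xe_device_mem_access_get().
	 */
	struct foo {
		struct mutex lock;
	};

	static void foo_do_vram_work(struct foo *foo, struct xe_device *xe)
	{
		/* lockdep records foo->lock -> xe->mem_access.lock here */
		mutex_lock(&foo->lock);
		xe_device_mem_access_get(xe);
		/* ...touch VRAM... */
		xe_device_mem_access_put(xe);
		mutex_unlock(&foo->lock);
	}

Since the runtime_resume callback runs with mem_access.lock.dep_map held,
if that callback (or anything it calls) were ever to grab foo->lock,
lockdep records xe->mem_access.lock -> foo->lock and flags the inversion
against the chain above, even if the 0 -> 1 race needed for the real
deadlock never happens during testing.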
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 695419a64ec0..2e246d7c2ba2 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -224,6 +224,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	if (WARN_ON(err))
 		goto err_put;
 
+	drmm_mutex_init(&xe->drm, &xe->mem_access.lock);
+
 	return xe;
 
 err_put:
@@ -436,6 +438,22 @@ void xe_device_mem_access_get(struct xe_device *xe)
 	if (xe_pm_read_callback_task(xe) == current)
 		return;
 
+	/*
+	 * Since the resume here is synchronous it can be quite easy to deadlock
+	 * if we are not careful. Also in practice it might be quite timing
+	 * sensitive to ever see the 0 -> 1 transition with the callers locks
+	 * held, so deadlocks might exist but are hard for lockdep to ever see.
+	 * With this in mind, help lockdep learn about the potentially scary
+	 * stuff that can happen inside the runtime_resume callback by acquiring
+	 * a dummy lock (it doesn't protect anything and gets compiled out on
+	 * non-debug builds). Lockdep then only needs to see the
+	 * mem_access.lock -> runtime_resume callback once, and then can
+	 * hopefully validate all the (callers_locks) -> mem_access.lock. For
+	 * example if the (callers_locks) are ever grabbed in the runtime_resume
+	 * callback, lockdep should give us a nice splat.
+	 */
+	lock_map_acquire(&xe->mem_access.lock.dep_map);
+
 	if (!atomic_inc_not_zero(&xe->mem_access.ref)) {
 		bool hold_rpm = xe_pm_runtime_resume_and_get(xe);
 		int ref;
@@ -448,6 +466,8 @@ void xe_device_mem_access_get(struct xe_device *xe)
 	} else {
 		XE_WARN_ON(atomic_read(&xe->mem_access.ref) == S32_MAX);
 	}
+
+	lock_map_release(&xe->mem_access.lock.dep_map);
 }
 
 void xe_device_mem_access_put(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9e79857c267c..fa33b3093dba 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -260,6 +260,14 @@ struct xe_device {
 	 * triggering additional actions when they occur.
 	 */
 	struct {
+		/**
+		 * @lock: Dummy lock used as a lockdep aid to hopefully ensure
+		 * that lockdep can more easily see any potential deadlocks when
+		 * calling xe_device_mem_access_get().
+		 *
+		 * Doesn't protect anything.
+		 */
+		struct mutex lock;
 		/** @ref: ref count of memory accesses */
 		atomic_t ref;
 		/** @hold_rpm: need to put rpm ref back at the end */
--
2.40.1