[Intel-gfx] [PATCH 1/5] mutex: Export an interface to wrap a mutex lock
Chris Wilson
chris at chris-wilson.co.uk
Tue Apr 7 09:31:35 PDT 2015
In i915, we have a big mutex around our device struct: every time we
attempt to communicate with the GPU, we first acquire the mutex. This makes
it a convenient juncture at which to place our GPU error handling - before
taking the mutex we check whether the GPU is hung or whether we are in
the process of recovering from a GPU hang. So we wrap the call to
mutex_lock() together with our additional error handling routines.
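For illustration, the wrapping pattern looks roughly like the sketch below;
the error-check helpers are hypothetical stand-ins, not the actual i915
routines:

  /* Illustrative sketch only: i915_gpu_is_wedged() and
   * i915_gpu_reset_in_progress() stand in for the driver's real
   * GPU error-state checks. */
  static int example_mutex_lock_interruptible(struct drm_device *dev)
  {
  	if (i915_gpu_is_wedged(dev))		/* hung beyond recovery */
  		return -EIO;

  	if (i915_gpu_reset_in_progress(dev))	/* recovery still running */
  		return -EAGAIN;

  	return mutex_lock_interruptible(&dev->struct_mutex);
  }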
The downside of using a wrapper around mutex_lock() is that lockdep and
lockstat can no longer discriminate the true callers of mutex_lock(),
unless we provide a means for the wrapper to pass that information down,
which is what this patch exports.
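With such an interface, the wrapper only needs to forward its own return
address so that lockdep/lockstat charge the acquisition to the wrapper's
caller; roughly (the function name here is illustrative):

  static int wrapped_lock(struct mutex *lock)
  {
  	/* _RET_IP_ is the address wrapped_lock() was called from;
  	 * passing it down lets lockdep/lockstat report that call site
  	 * instead of this wrapper. */
  	return mutex_lock_wrapper(lock, TASK_INTERRUPTIBLE, _RET_IP_);
  }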
It also appears that i915 is almost unique in this manner of wrapping
mutex_lock(), with only one or two other potential candidates for using
this interface.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky at intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c |  4 +++-
 include/linux/mutex.h           |  9 +++++++++
 kernel/locking/mutex.c          | 36 ++++++++++++++++++++++++++++++++++++
 3 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 267fdf0f46ae..7ab8e0039790 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -135,7 +135,9 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_wrapper(&dev->struct_mutex,
+				 TASK_INTERRUPTIBLE,
+				 _RET_IP_);
 	if (ret)
 		return ret;
 
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531e7d7a..3f6030b3f5aa 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -142,10 +142,15 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_wrapper_nested(struct mutex *lock,
+					unsigned int subclass,
+					long state,
+					unsigned long ip);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_wrapper(lock, state, ip) mutex_lock_wrapper_nested(lock, 0, state, ip)
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
 do {									\
@@ -157,10 +162,14 @@ do { \
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
 extern int __must_check mutex_lock_killable(struct mutex *lock);
+extern int __must_check mutex_lock_wrapper(struct mutex *lock,
+					   long state,
+					   unsigned long ip);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_wrapper_nested(lock, subclass, state, ip) mutex_lock_wrapper(lock, state, ip)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
 
 #endif
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 94674e5919cb..098b9e71ada1 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -658,6 +658,17 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+int __sched
+mutex_lock_wrapper_nested(struct mutex *lock, unsigned int subclass,
+			  long state, unsigned long ip)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, state,
+				   subclass, NULL, ip, NULL, 0);
+}
+
+EXPORT_SYMBOL_GPL(mutex_lock_wrapper_nested);
+
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -780,6 +791,9 @@ __mutex_lock_killable_slowpath(struct mutex *lock);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock);
 
+static noinline int __sched
+__mutex_lock_wrapper_slowpath(struct mutex *lock, long state, unsigned long ip);
+
 /**
  * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
@@ -806,6 +820,21 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int __sched mutex_lock_wrapper(struct mutex *lock, long state, unsigned long ip)
+{
+	int ret;
+
+	might_sleep();
+	ret = __mutex_fastpath_lock_retval(&lock->count);
+	if (likely(!ret)) {
+		mutex_set_owner(lock);
+		return 0;
+	} else
+		return __mutex_lock_wrapper_slowpath(lock, state, ip);
+}
+
+EXPORT_SYMBOL(mutex_lock_wrapper);
+
 int __sched mutex_lock_killable(struct mutex *lock)
 {
 	int ret;
@@ -844,6 +873,13 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock)
 }
 
 static noinline int __sched
+__mutex_lock_wrapper_slowpath(struct mutex *lock, long state, unsigned long ip)
+{
+	return __mutex_lock_common(lock, state, 0,
+				   NULL, ip, NULL, 0);
+}
+
+static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
--
2.1.4