[Intel-gfx] [PATCH] lockdep: finer-grained completion key for kthread

Daniel Vetter <daniel.vetter@ffwll.ch>
Wed Nov 29 11:29:26 UTC 2017


Ideally we'd create the key through a macro at the real callers and
pass it all the way down. This would give us better coverage for cases
where a bunch of kthreads are created for the same thing.
But this gets the job done in the meantime and unblocks our CI. Refining
it later is always possible.

v2:
- make it compile
- use the right map (Tvrtko)

v3:

lockdep insists on a static key, so the cheap way didn't work. Wire
two keys through all the callers.
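
As an illustration, here is a minimal (hypothetical) call-site sketch of
what the per-call-site static keys buy; demo_thread_fn() and
demo_create_two_threads() are made-up names, only the kthread_create()
wrapper touched by this patch is real:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int demo_thread_fn(void *data)
    {
            return 0;
    }

    static void demo_create_two_threads(void)
    {
            struct task_struct *a, *b;

            /*
             * Each textual use of kthread_create() expands the wrapper
             * macro and therefore gets its own pair of static
             * lock_class_keys, so the exited/parked completions of 'a'
             * and 'b' land in separate lockdep classes instead of one
             * class shared by every kthread.
             */
            a = kthread_create(demo_thread_fn, NULL, "demo-a");
            b = kthread_create(demo_thread_fn, NULL, "demo-b");

            if (!IS_ERR(a))
                    wake_up_process(a);
            if (!IS_ERR(b))
                    wake_up_process(b);
    }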

I didn't extend this up to alloc_workqueue because the
lockdep_invariant_state() call should separate the work functions from
the workqueue kthread logic and prevent cross-release state from
leaking between unrelated work queues that happen to reuse the same
kthreads.

v4: CI found more compile failures :-/

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Marta Lofstedt <marta.lofstedt@intel.com>
References: https://bugs.freedesktop.org/show_bug.cgi?id=103950
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 include/linux/kthread.h | 48 ++++++++++++++++++++++++++++-----
 kernel/kthread.c        | 70 ++++++++++++++++++++++++++++++++++---------------
 2 files changed, 90 insertions(+), 28 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..7a9463f0be5c 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -6,10 +6,12 @@
 #include <linux/sched.h>
 #include <linux/cgroup.h>
 
-__printf(4, 5)
-struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+__printf(6, 7)
+struct task_struct *_kthread_create_on_node(int (*threadfn)(void *data),
 					   void *data,
 					   int node,
+					   struct lock_class_key *exited_key,
+					   struct lock_class_key *parked_key,
 					   const char namefmt[], ...);
 
 /**
@@ -25,12 +27,27 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
  */
 #define kthread_create(threadfn, data, namefmt, arg...) \
 	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
+#define kthread_create_on_node(threadfn, data, node, namefmt, arg...)	\
+({									\
+	static struct lock_class_key __exited_key, __parked_key;	\
+	_kthread_create_on_node(threadfn, data, node, &__exited_key,	\
+			       &__parked_key, namefmt, ##arg);		\
+})
 
 
-struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+struct task_struct *_kthread_create_on_cpu(int (*threadfn)(void *data),
 					  void *data,
 					  unsigned int cpu,
+					  struct lock_class_key *exited_key,
+					  struct lock_class_key *parked_key,
 					  const char *namefmt);
+#define kthread_create_on_cpu(threadfn, data, cpu, namefmt)		\
+({									\
+	static struct lock_class_key __exited_key, __parked_key;	\
+	_kthread_create_on_cpu(threadfn, data, cpu, &__exited_key,\
+			       &__parked_key, namefmt);			\
+})
+
 
 /**
  * kthread_run - create and wake a thread.
@@ -171,13 +188,30 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 
 int kthread_worker_fn(void *worker_ptr);
 
-__printf(2, 3)
+__printf(4, 5)
 struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+_kthread_create_worker(unsigned int flags,
+		       struct lock_class_key *exited_key,
+		       struct lock_class_key *parked_key,
+		       const char namefmt[], ...);
+#define kthread_create_worker(flags, namefmt...)				\
+({									\
+	static struct lock_class_key __exited_key, __parked_key;	\
+	_kthread_create_worker(flags, &__exited_key, &__parked_key,	\
+			       ##namefmt);				\
+})
 
-__printf(3, 4) struct kthread_worker *
-kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+__printf(5, 6) struct kthread_worker *
+_kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+		       struct lock_class_key *exited_key,
+		       struct lock_class_key *parked_key,
 			     const char namefmt[], ...);
+#define kthread_create_worker_on_cpu(cpu, flags, namefmt...)	\
+({									\
+	static struct lock_class_key __exited_key, __parked_key;	\
+	_kthread_create_worker_on_cpu(cpu, flags, &__exited_key, &__parked_key,\
+			       ##namefmt);				\
+})
 
 bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index cd50e99202b0..9022806818fc 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -32,6 +32,7 @@ struct kthread_create_info
 	int (*threadfn)(void *data);
 	void *data;
 	int node;
+	struct lock_class_key *exited_key, *parked_key;
 
 	/* Result passed back to kthread_create() from kthreadd. */
 	struct task_struct *result;
@@ -221,8 +222,17 @@ static int kthread(void *_create)
 	}
 
 	self->data = data;
-	init_completion(&self->exited);
-	init_completion(&self->parked);
 +	/* Use the caller-provided keys: one lockdep class per call site
 +	 * instead of a single class shared by every kthread in the system. */
+	lockdep_init_map_crosslock(&self->exited.map.map,
+			"(kthread completion)->exited",
+			create->exited_key, 0);
+	init_completion_map(&self->exited, &self->exited.map.map);
+	lockdep_init_map_crosslock(&self->parked.map.map,
+			"(kthread completion)->parked",
+			create->parked_key, 0);
 +	init_completion_map(&self->parked, &self->parked.map.map);
+
 	current->vfork_done = &self->exited;
 
 	/* OK, tell user we're spawned, wait for stop or wakeup */
@@ -272,9 +282,11 @@ static void create_kthread(struct kthread_create_info *create)
 	}
 }
 
-static __printf(4, 0)
+static __printf(6, 0)
 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 						    void *data, int node,
+						    struct lock_class_key *exited_key,
+						    struct lock_class_key *parked_key,
 						    const char namefmt[],
 						    va_list args)
 {
@@ -289,6 +301,8 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 	create->data = data;
 	create->node = node;
 	create->done = &done;
+	create->exited_key = exited_key;
+	create->parked_key = parked_key;
 
 	spin_lock(&kthread_create_lock);
 	list_add_tail(&create->list, &kthread_create_list);
@@ -353,21 +367,24 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
  *
  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
  */
-struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-					   void *data, int node,
-					   const char namefmt[],
-					   ...)
+struct task_struct *_kthread_create_on_node(int (*threadfn)(void *data),
+					    void *data, int node,
+					    struct lock_class_key *exited_key,
+					    struct lock_class_key *parked_key,
+					    const char namefmt[],
+					    ...)
 {
 	struct task_struct *task;
 	va_list args;
 
 	va_start(args, namefmt);
-	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
+	task = __kthread_create_on_node(threadfn, data, node,
+					exited_key, parked_key, namefmt, args);
 	va_end(args);
 
 	return task;
 }
-EXPORT_SYMBOL(kthread_create_on_node);
+EXPORT_SYMBOL(_kthread_create_on_node);
 
 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 {
@@ -421,14 +438,16 @@ EXPORT_SYMBOL(kthread_bind);
  * Description: This helper function creates and names a kernel thread
  * The thread will be woken and put into park mode.
  */
-struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+struct task_struct *_kthread_create_on_cpu(int (*threadfn)(void *data),
 					  void *data, unsigned int cpu,
+					  struct lock_class_key *exited_key,
+					  struct lock_class_key *parked_key,
 					  const char *namefmt)
 {
 	struct task_struct *p;
 
-	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
-				   cpu);
+	p = _kthread_create_on_node(threadfn, data, cpu_to_node(cpu),
+				    exited_key, parked_key, namefmt, cpu);
 	if (IS_ERR(p))
 		return p;
 	kthread_bind(p, cpu);
@@ -649,8 +668,10 @@ int kthread_worker_fn(void *worker_ptr)
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
-static __printf(3, 0) struct kthread_worker *
+static __printf(5, 0) struct kthread_worker *
 __kthread_create_worker(int cpu, unsigned int flags,
+			struct lock_class_key *exited_key,
+			struct lock_class_key *parked_key,
 			const char namefmt[], va_list args)
 {
 	struct kthread_worker *worker;
@@ -666,8 +687,8 @@ __kthread_create_worker(int cpu, unsigned int flags,
 	if (cpu >= 0)
 		node = cpu_to_node(cpu);
 
-	task = __kthread_create_on_node(kthread_worker_fn, worker,
-						node, namefmt, args);
+	task = __kthread_create_on_node(kthread_worker_fn, worker, node,
+					exited_key, parked_key, namefmt, args);
 	if (IS_ERR(task))
 		goto fail_task;
 
@@ -694,18 +715,22 @@ __kthread_create_worker(int cpu, unsigned int flags,
  * when the worker was SIGKILLed.
  */
 struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...)
+_kthread_create_worker(unsigned int flags,
+		       struct lock_class_key *exited_key,
+		       struct lock_class_key *parked_key,
+		       const char namefmt[], ...)
 {
 	struct kthread_worker *worker;
 	va_list args;
 
 	va_start(args, namefmt);
-	worker = __kthread_create_worker(-1, flags, namefmt, args);
+	worker = __kthread_create_worker(-1, flags, exited_key, parked_key,
+					 namefmt, args);
 	va_end(args);
 
 	return worker;
 }
-EXPORT_SYMBOL(kthread_create_worker);
+EXPORT_SYMBOL(_kthread_create_worker);
 
 /**
  * kthread_create_worker_on_cpu - create a kthread worker and bind it
@@ -725,19 +750,22 @@ EXPORT_SYMBOL(kthread_create_worker);
  * when the worker was SIGKILLed.
  */
 struct kthread_worker *
-kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+_kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+			      struct lock_class_key *exited_key,
+			      struct lock_class_key *parked_key,
 			     const char namefmt[], ...)
 {
 	struct kthread_worker *worker;
 	va_list args;
 
 	va_start(args, namefmt);
-	worker = __kthread_create_worker(cpu, flags, namefmt, args);
+	worker = __kthread_create_worker(cpu, flags, exited_key, parked_key,
+					 namefmt, args);
 	va_end(args);
 
 	return worker;
 }
-EXPORT_SYMBOL(kthread_create_worker_on_cpu);
+EXPORT_SYMBOL(_kthread_create_worker_on_cpu);
 
 /*
  * Returns true when the work could not be queued at the moment.
-- 
2.15.0


