[PATCH 08/17] drm/amdkfd: Use wait_queue_t to implement event waiting

Felix Kuehling <Felix.Kuehling at amd.com>
Fri Oct 27 23:35:25 UTC 2017


Use standard wait queues for waiting and waking up waiting threads
instead of inventing our own. We still have our own wait loop
because the HSA event semantics require the ability to have one
thread waiting on multiple wait queues (events) at the same time.

Signed-off-by: Kent Russell <kent.russell at amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
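
Not part of the patch itself, just an illustration for reviewers: a rough,
self-contained sketch of the pattern described above, i.e. one
wait_queue_entry_t per event plus our own sleep loop so a single thread can
wait on several wait queues at once. The multi_waiter, multi_wake_all and
multi_wait_any names are invented for this example; the real code in
kfd_events.c differs in detail, e.g. it serializes wakers and waiters with
p->event_mutex rather than the wait-queue spinlock used here.

/*
 * Hypothetical sketch only -- not the driver code. One wait entry per
 * wait queue, marked "activated" by the waker, plus a hand-rolled
 * sleep loop on the waiter side.
 */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

struct multi_waiter {
	wait_queue_entry_t wait;	/* one entry per wait queue */
	bool activated;			/* set by the waker before wake_up */
};

/* Waker side: mark every waiter queued on @wq, then wake them all. */
static void multi_wake_all(wait_queue_head_t *wq)
{
	struct multi_waiter *w;
	unsigned long flags;

	spin_lock_irqsave(&wq->lock, flags);
	list_for_each_entry(w, &wq->head, wait.entry)
		w->activated = true;
	spin_unlock_irqrestore(&wq->lock, flags);

	wake_up_all(wq);
}

/* Sleeper side: block until any one of @n wait queues has woken us. */
static long multi_wait_any(wait_queue_head_t **wqs,
			   struct multi_waiter *waiters,
			   unsigned int n, long timeout)
{
	unsigned int i;
	long ret = 0;

	/* Add one wait entry for the current task to each queue. */
	for (i = 0; i < n; i++) {
		init_waitqueue_entry(&waiters[i].wait, current);
		waiters[i].activated = false;
		add_wait_queue(wqs[i], &waiters[i].wait);
	}

	for (;;) {
		bool any = false;

		/* Go to sleep state before checking, to avoid lost wake-ups. */
		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0; i < n; i++)
			any |= READ_ONCE(waiters[i].activated);
		if (any)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!timeout) {
			ret = -ETIME;
			break;
		}
		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* Detach from every queue again (no auto-remove wake function). */
	for (i = 0; i < n; i++)
		remove_wait_queue(wqs[i], &waiters[i].wait);

	return ret;
}
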
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 59 ++++++++++++---------------------
 drivers/gpu/drm/amd/amdkfd/kfd_events.h |  3 +-
 2 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index b4cda92..7dae26f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -33,22 +33,12 @@
 #include <linux/device.h>
 
 /*
- * A task can only be on a single wait_queue at a time, but we need to support
- * waiting on multiple events (any/all).
- * Instead of each event simply having a wait_queue with sleeping tasks, it
- * has a singly-linked list of tasks.
- * A thread that wants to sleep creates an array of these, one for each event
- * and adds one to each event's waiter chain.
+ * Wrapper around wait_queue_entry_t
  */
 struct kfd_event_waiter {
-	struct list_head waiters;
-	struct task_struct *sleeping_task;
-
-	/* Transitions to true when the event this belongs to is signaled. */
-	bool activated;
-
-	/* Event */
-	struct kfd_event *event;
+	wait_queue_entry_t wait;
+	struct kfd_event *event; /* Event to wait for */
+	bool activated;		 /* Becomes true when event is signaled */
 };
 
 /*
@@ -344,17 +334,12 @@ void kfd_event_init_process(struct kfd_process *p)
 
 static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
 {
-	/* Wake up pending waiters. They will return failure */
-	while (!list_empty(&ev->waiters)) {
-		struct kfd_event_waiter *waiter =
-			list_first_entry(&ev->waiters, struct kfd_event_waiter,
-					 waiters);
+	struct kfd_event_waiter *waiter;
 
+	/* Wake up pending waiters. They will return failure */
+	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
 		waiter->event = NULL;
-		/* _init because free_waiters will call list_del */
-		list_del_init(&waiter->waiters);
-		wake_up_process(waiter->sleeping_task);
-	}
+	wake_up_all(&ev->wq);
 
 	if (ev->signal_page) {
 		release_event_notification_slot(ev->signal_page,
@@ -424,7 +409,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
 	ev->auto_reset = auto_reset;
 	ev->signaled = false;
 
-	INIT_LIST_HEAD(&ev->waiters);
+	init_waitqueue_head(&ev->wq);
 
 	*event_page_offset = 0;
 
@@ -482,19 +467,18 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
 static void set_event(struct kfd_event *ev)
 {
 	struct kfd_event_waiter *waiter;
-	struct kfd_event_waiter *next;
 
-	/* Auto reset if the list is non-empty and we're waking someone. */
-	ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);
+	/* Auto reset if the list is non-empty and we're waking
+	 * someone. waitqueue_active is safe here because we're
+	 * protected by the p->event_mutex, which is also held when
+	 * updating the wait queues in kfd_wait_on_events.
+	 */
+	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
 
-	list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
+	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
 		waiter->activated = true;
 
-		/* _init because free_waiters will call list_del */
-		list_del_init(&waiter->waiters);
-
-		wake_up_process(waiter->sleeping_task);
-	}
+	wake_up_all(&ev->wq);
 }
 
 /* Assumes that p is current. */
@@ -614,8 +598,7 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
 					GFP_KERNEL);
 
 	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
-		INIT_LIST_HEAD(&event_waiters[i].waiters);
-		event_waiters[i].sleeping_task = current;
+		init_wait(&event_waiters[i].wait);
 		event_waiters[i].activated = false;
 	}
 
@@ -646,7 +629,7 @@ static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
 	 * wait on this event.
 	 */
 	if (!waiter->activated)
-		list_add(&waiter->waiters, &ev->waiters);
+		add_wait_queue(&ev->wq, &waiter->wait);
 }
 
 /* test_event_condition - Test condition of events being waited for
@@ -736,7 +719,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
 	uint32_t i;
 
 	for (i = 0; i < num_events; i++)
-		list_del(&waiters[i].waiters);
+		if (waiters[i].event)
+			remove_wait_queue(&waiters[i].event->wq,
+					  &waiters[i].wait);
 
 	kfree(waiters);
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 28f6838..96f9122 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -27,6 +27,7 @@
 #include <linux/hashtable.h>
 #include <linux/types.h>
 #include <linux/list.h>
+#include <linux/wait.h>
 #include "kfd_priv.h"
 #include <uapi/linux/kfd_ioctl.h>
 
@@ -56,7 +57,7 @@ struct kfd_event {
 
 	int type;
 
-	struct list_head waiters; /* List of kfd_event_waiter by waiters. */
+	wait_queue_head_t wq; /* List of event waiters. */
 
 	/* Only for signal events. */
 	struct signal_page *signal_page;
-- 
2.7.4


