[PATCH 09/16] drm/amdkfd: Use wait_queue_t to implement event waiting
Felix Kuehling
felix.kuehling at amd.com
Wed Oct 25 16:02:46 UTC 2017
On 2017-10-25 06:28 AM, Oded Gabbay wrote:
> On Sat, Oct 21, 2017 at 3:23 AM, Felix Kuehling <Felix.Kuehling at amd.com> wrote:
>> Signed-off-by: Kent Russell <kent.russell at amd.com>
>> Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
> I would like some more explanation in the commit message about this change.
> - What is the general idea in this solution.
> - Why it was done, i.e. what is the improvement
It's mostly about not duplicating the wait queue mechanism the kernel
already provides. I'm not sure how much more there is to say about it,
but here's the gist.
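As a condensed sketch (illustration only: kfd_attach_waiter and
kfd_signal_waiters are made-up names, the patch itself spreads this
across init_event_waiter_add_to_waitlist and set_event): each waiter
embeds a wait_queue_entry_t, so the generic wait-queue helpers take
over the list handling and wakeups that the old code open-coded with
its own singly-linked waiter list.

/* Illustration only: these helpers are not in the patch, they just
 * condense what it does with the standard wait-queue API.
 */
static void kfd_attach_waiter(struct kfd_event *ev,
                              struct kfd_event_waiter *waiter)
{
        init_wait(&waiter->wait);               /* binds the entry to current */
        add_wait_queue(&ev->wq, &waiter->wait); /* replaces the old list_add() */
}

static void kfd_signal_waiters(struct kfd_event *ev)
{
        struct kfd_event_waiter *waiter;

        /* Mark every waiter, then let the wait-queue core wake them. */
        list_for_each_entry(waiter, &ev->wq.head, wait.entry)
                waiter->activated = true;
        wake_up_all(&ev->wq);   /* replaces the open-coded wake_up_process() loop */
}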
> Thanks,
> Oded
>
>
>> ---
>> drivers/gpu/drm/amd/amdkfd/kfd_events.c | 53 +++++++++++----------------------
>> drivers/gpu/drm/amd/amdkfd/kfd_events.h | 3 +-
>> 2 files changed, 19 insertions(+), 37 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
>> index 949b80a..f178248 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
>> @@ -33,22 +33,12 @@
>> #include <linux/device.h>
>>
>> /*
>> - * A task can only be on a single wait_queue at a time, but we need to support
>> - * waiting on multiple events (any/all).
>> - * Instead of each event simply having a wait_queue with sleeping tasks, it
>> - * has a singly-linked list of tasks.
>> - * A thread that wants to sleep creates an array of these, one for each event
>> - * and adds one to each event's waiter chain.
>> + * Wrapper around wait_queue_entry_t
>> */
>> struct kfd_event_waiter {
>> - struct list_head waiters;
>> - struct task_struct *sleeping_task;
>> -
>> - /* Transitions to true when the event this belongs to is signaled. */
>> - bool activated;
>> -
>> - /* Event */
>> - struct kfd_event *event;
>> + wait_queue_entry_t wait;
>> + struct kfd_event *event; /* Event to wait for */
>> + bool activated; /* Becomes true when event is signaled */
>> };
>>
>> /*
>> @@ -344,17 +334,12 @@ void kfd_event_init_process(struct kfd_process *p)
>>
>> static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
>> {
>> - /* Wake up pending waiters. They will return failure */
>> - while (!list_empty(&ev->waiters)) {
>> - struct kfd_event_waiter *waiter =
>> - list_first_entry(&ev->waiters, struct kfd_event_waiter,
>> - waiters);
>> + struct kfd_event_waiter *waiter;
>>
>> + /* Wake up pending waiters. They will return failure */
>> + list_for_each_entry(waiter, &ev->wq.head, wait.entry)
>> waiter->event = NULL;
>> - /* _init because free_waiters will call list_del */
>> - list_del_init(&waiter->waiters);
>> - wake_up_process(waiter->sleeping_task);
>> - }
>> + wake_up_all(&ev->wq);
>>
>> if (ev->signal_page) {
>> release_event_notification_slot(ev->signal_page,
>> @@ -424,7 +409,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
>> ev->auto_reset = auto_reset;
>> ev->signaled = false;
>>
>> - INIT_LIST_HEAD(&ev->waiters);
>> + init_waitqueue_head(&ev->wq);
>>
>> *event_page_offset = 0;
>>
>> @@ -482,19 +467,14 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
>> static void set_event(struct kfd_event *ev)
>> {
>> struct kfd_event_waiter *waiter;
>> - struct kfd_event_waiter *next;
>>
>> /* Auto reset if the list is non-empty and we're waking someone. */
>> - ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);
>> + ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
> This line gives a warning in the checkpatch script. Per Linus's patch
> (http://www.spinics.net/lists/mm-commits/msg111660.html), please add a
> comment explaining the use of waitqueue_active and why it's not racy
> with the caller.
There is a comment there already, but checkpatch still complains. I can
add more to the comment, though I'm not sure that will silence the
warning.
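For example, something like the following might make the intent
explicit (assuming, as I believe holds throughout kfd_events.c, that
both set_event() and the code that adds waiters run under
p->event_mutex; that serialization is what makes the unlocked check
safe):

        /* Auto reset if the list is non-empty and we're waking someone.
         *
         * waitqueue_active() is not racy here: both this check and
         * add_wait_queue() in the waiting path run under p->event_mutex,
         * so no waiter can be added between the check and wake_up_all().
         */
        ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);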
Regards,
Felix
>
> Oded
>
>> - list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
>> + list_for_each_entry(waiter, &ev->wq.head, wait.entry)
>> waiter->activated = true;
>>
>> - /* _init because free_waiters will call list_del */
>> - list_del_init(&waiter->waiters);
>> -
>> - wake_up_process(waiter->sleeping_task);
>> - }
>> + wake_up_all(&ev->wq);
>> }
>>
>> /* Assumes that p is current. */
>> @@ -614,8 +594,7 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
>> GFP_KERNEL);
>>
>> for (i = 0; (event_waiters) && (i < num_events) ; i++) {
>> - INIT_LIST_HEAD(&event_waiters[i].waiters);
>> - event_waiters[i].sleeping_task = current;
>> + init_wait(&event_waiters[i].wait);
>> event_waiters[i].activated = false;
>> }
>>
>> @@ -646,7 +625,7 @@ static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
>> * wait on this event.
>> */
>> if (!waiter->activated)
>> - list_add(&waiter->waiters, &ev->waiters);
>> + add_wait_queue(&ev->wq, &waiter->wait);
>> }
>>
>> /* test_event_condition - Test condition of events being waited for
>> @@ -736,7 +715,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
>> uint32_t i;
>>
>> for (i = 0; i < num_events; i++)
>> - list_del(&waiters[i].waiters);
>> + if (waiters[i].event)
>> + remove_wait_queue(&waiters[i].event->wq,
>> + &waiters[i].wait);
>>
>> kfree(waiters);
>> }
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
>> index 28f6838..96f9122 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
>> @@ -27,6 +27,7 @@
>> #include <linux/hashtable.h>
>> #include <linux/types.h>
>> #include <linux/list.h>
>> +#include <linux/wait.h>
>> #include "kfd_priv.h"
>> #include <uapi/linux/kfd_ioctl.h>
>>
>> @@ -56,7 +57,7 @@ struct kfd_event {
>>
>> int type;
>>
>> - struct list_head waiters; /* List of kfd_event_waiter by waiters. */
>> + wait_queue_head_t wq; /* List of event waiters. */
>>
>> /* Only for signal events. */
>> struct signal_page *signal_page;
>> --
>> 2.7.4
>>