[PATCH 2/3] drm/amdgpu: Add SPSC queue to scheduler.
Andrey Grodzovsky
andrey.grodzovsky at amd.com
Mon Oct 23 10:50:29 UTC 2017
On 2017-10-23 12:06 AM, Liu, Monk wrote:
> If the deadlock issue could be solved I don't see why we give up kfifo and switch to SPSC ......
The deadlock is solved because we no longer block waiting for the
consumer to dequeue items from the queue - something that can only be
achieved with an unbounded container.
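For illustration, here is a minimal usage sketch (not part of the patch;
the example_job type and names are made up for the example, only the
spsc_queue_* calls come from the header below):

/* Hedged sketch: a producer embeds an spsc_node in its work item and
 * pushes it with a single atomic pointer exchange - it never waits for
 * room, so it cannot deadlock against the consumer. */
#include <linux/kernel.h>	/* container_of() */
#include "spsc_queue.h"

struct example_job {
	struct spsc_node queue_node;	/* embedded queue link */
	int payload;
};

static struct spsc_queue example_queue;

/* Producer side: returns true when the queue was empty, i.e. the
 * consumer thread needs a wake-up. */
static bool example_push(struct example_job *job)
{
	return spsc_queue_push(&example_queue, &job->queue_node);
}

/* Consumer side: detach the oldest job, or NULL if the queue is empty. */
static struct example_job *example_pop(void)
{
	struct spsc_node *node = spsc_queue_pop(&example_queue);

	return node ? container_of(node, struct example_job, queue_node) : NULL;
}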
Thanks,
Andrey
>
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces at lists.freedesktop.org] On Behalf Of Andrey Grodzovsky
> Sent: October 20, 2017 21:32
> To: amd-gfx at lists.freedesktop.org
> Cc: Grodzovsky, Andrey <Andrey.Grodzovsky at amd.com>; Koenig, Christian <Christian.Koenig at amd.com>
> Subject: [PATCH 2/3] drm/amdgpu: Add SPSC queue to scheduler.
>
> It is intended to substitute for the bounded fifo we are currently using.
>
> Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky at amd.com>
> ---
> drivers/gpu/drm/amd/scheduler/spsc_queue.h | 120 +++++++++++++++++++++++++++++
> 1 file changed, 120 insertions(+)
> create mode 100644 drivers/gpu/drm/amd/scheduler/spsc_queue.h
>
> diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h
> new file mode 100644
> index 0000000..a3394f1
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/scheduler/spsc_queue.h
> @@ -0,0 +1,120 @@
> +/*
> + * Copyright 2017 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_
> +#define AMD_SCHEDULER_SPSC_QUEUE_H_
> +
> +#include <linux/atomic.h>
> +
> +/** SPSC lockless queue */
> +
> +struct spsc_node {
> +
> +	/* Stores spsc_node* */
> +	struct spsc_node *next;
> +};
> +
> +struct spsc_queue {
> +
> +	struct spsc_node *head;
> +
> +	/* atomic pointer to struct spsc_node* */
> +	atomic_long_t tail;
> +
> +	atomic_t job_count;
> +};
> +
> +static inline void spsc_queue_init(struct spsc_queue *queue)
> +{
> +	queue->head = NULL;
> +	atomic_long_set(&queue->tail, (long)&queue->head);
> +	atomic_set(&queue->job_count, 0);
> +}
> +
> +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
> +{
> +	return queue->head;
> +}
> +
> +static inline int spsc_queue_count(struct spsc_queue *queue)
> +{
> +	return atomic_read(&queue->job_count);
> +}
> +
> +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
> +{
> +	struct spsc_node **tail;
> +
> +	node->next = NULL;
> +
> +	preempt_disable();
> +
> +	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
> +	WRITE_ONCE(*tail, node);
> +	atomic_inc(&queue->job_count);
> +
> +	/*
> +	 * In case of first element verify new node will be visible to the consumer
> +	 * thread when we ping the kernel thread that there is new work to do.
> +	 */
> +	smp_wmb();
> +
> +	preempt_enable();
> +
> +	return tail == &queue->head;
> +}
> +
> +
> +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
> +{
> +	struct spsc_node *next, *node;
> +
> +	/* Verify reading from memory and not the cache */
> +	smp_rmb();
> +
> +	node = READ_ONCE(queue->head);
> +
> +	if (!node)
> +		return NULL;
> +
> +	next = READ_ONCE(node->next);
> +	WRITE_ONCE(queue->head, next);
> +
> +	if (unlikely(!next)) {
> +		/* slowpath for the last element in the queue */
> +
> +		if (atomic_long_cmpxchg(&queue->tail,
> +				(long)&node->next, (long)&queue->head) != (long)&node->next) {
> +			/* Updating tail failed wait for new next to appear */
> +			do {
> +				smp_rmb();
> +			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
> +		}
> +	}
> +
> +	atomic_dec(&queue->job_count);
> +	return node;
> +}
> +
> +
> +
> +#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx