[RFC PATCH 01/29] dma-fence: Add dma_fence_preempt base class

Christian König <christian.koenig at amd.com>
Wed Nov 20 13:31:50 UTC 2024


Am 19.11.24 um 00:37 schrieb Matthew Brost:
> Add a dma_fence_preempt base class with driver ops to implement
> preemption, based on the existing Xe preemptive fence implementation.
>
> Annotated to ensure correct driver usage.
>
> Cc: Dave Airlie <airlied at redhat.com>
> Cc: Simona Vetter <simona.vetter at ffwll.ch>
> Cc: Christian Koenig <christian.koenig at amd.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>   drivers/dma-buf/Makefile            |   2 +-
>   drivers/dma-buf/dma-fence-preempt.c | 133 ++++++++++++++++++++++++++++
>   include/linux/dma-fence-preempt.h   |  56 ++++++++++++
>   3 files changed, 190 insertions(+), 1 deletion(-)
>   create mode 100644 drivers/dma-buf/dma-fence-preempt.c
>   create mode 100644 include/linux/dma-fence-preempt.h
>
> diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
> index 70ec901edf2c..c25500bb38b5 100644
> --- a/drivers/dma-buf/Makefile
> +++ b/drivers/dma-buf/Makefile
> @@ -1,6 +1,6 @@
>   # SPDX-License-Identifier: GPL-2.0-only
>   obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
> -	 dma-fence-unwrap.o dma-resv.o
> +	 dma-fence-preempt.o dma-fence-unwrap.o dma-resv.o
>   obj-$(CONFIG_DMABUF_HEAPS)	+= dma-heap.o
>   obj-$(CONFIG_DMABUF_HEAPS)	+= heaps/
>   obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
> diff --git a/drivers/dma-buf/dma-fence-preempt.c b/drivers/dma-buf/dma-fence-preempt.c
> new file mode 100644
> index 000000000000..6e6ce7ea7421
> --- /dev/null
> +++ b/drivers/dma-buf/dma-fence-preempt.c
> @@ -0,0 +1,133 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#include <linux/dma-fence-preempt.h>
> +#include <linux/dma-resv.h>
> +
> +static void dma_fence_preempt_work_func(struct work_struct *w)
> +{
> +	bool cookie = dma_fence_begin_signalling();
> +	struct dma_fence_preempt *pfence =
> +		container_of(w, typeof(*pfence), work);
> +	const struct dma_fence_preempt_ops *ops = pfence->ops;
> +	int err = pfence->base.error;
> +
> +	if (!err) {
> +		err = ops->preempt_wait(pfence);
> +		if (err)
> +			dma_fence_set_error(&pfence->base, err);
> +	}
> +
> +	dma_fence_signal(&pfence->base);
> +	ops->preempt_finished(pfence);

Why is that callback useful?

> +
> +	dma_fence_end_signalling(cookie);
> +}
> +
> +static const char *
> +dma_fence_preempt_get_driver_name(struct dma_fence *fence)
> +{
> +	return "dma_fence_preempt";
> +}
> +
> +static const char *
> +dma_fence_preempt_get_timeline_name(struct dma_fence *fence)
> +{
> +	return "ordered";
> +}
> +
> +static void dma_fence_preempt_issue(struct dma_fence_preempt *pfence)
> +{
> +	int err;
> +
> +	err = pfence->ops->preempt(pfence);
> +	if (err)
> +		dma_fence_set_error(&pfence->base, err);
> +
> +	queue_work(pfence->wq, &pfence->work);
> +}
> +
> +static void dma_fence_preempt_cb(struct dma_fence *fence,
> +				 struct dma_fence_cb *cb)
> +{
> +	struct dma_fence_preempt *pfence =
> +		container_of(cb, typeof(*pfence), cb);
> +
> +	dma_fence_preempt_issue(pfence);
> +}
> +
> +static void dma_fence_preempt_delay(struct dma_fence_preempt *pfence)
> +{
> +	struct dma_fence *fence;
> +	int err;
> +
> +	fence = pfence->ops->preempt_delay(pfence);

Mhm, why is that useful?

> +	if (WARN_ON_ONCE(!fence || IS_ERR(fence)))
> +		return;
> +
> +	err = dma_fence_add_callback(fence, &pfence->cb, dma_fence_preempt_cb);

You are running into exactly the same bug we had :)

The problem here is that you can't call dma_fence_add_callback() from
the enable_signaling callback. The background is that
dma_fence_ops->enable_signaling is called with the spinlock of the
preemption fence held.

This spinlock can be the same as the one of the user fence, but it
could also be a different one. Either way, calling
dma_fence_add_callback() there takes the user fence's lock while the
preempt fence's lock is already held, and lockdep will print a nice
warning about that.

I tried to solve this by changing the dma_fence code to not call
enable_signaling with the lock held; we wanted to do that anyway to
prevent a bunch of issues with driver unload. But I realized that
getting this upstream would take too long.

Long story short, we moved handling of the user fence into the work item.
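Just to illustrate the idea, a rough sketch of that approach (the
example_* names are made up, this is not our actual code, and error
handling plus fence ownership are left out):

#include <linux/dma-fence-preempt.h>
#include <linux/err.h>

/*
 * Called with pfence->lock held, so don't touch any other fence here,
 * just kick the worker.
 */
static bool example_preempt_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_preempt *pfence =
		container_of(fence, typeof(*pfence), base);

	queue_work(pfence->wq, &pfence->work);
	return true;
}

static void example_preempt_work_func(struct work_struct *w)
{
	struct dma_fence_preempt *pfence =
		container_of(w, typeof(*pfence), work);

	/*
	 * Process context, no fence spinlock held: waiting on the user
	 * fence (or installing a callback on it) is safe here.
	 */
	if (pfence->ops->preempt_delay) {
		struct dma_fence *user_fence =
			pfence->ops->preempt_delay(pfence);

		if (!IS_ERR_OR_NULL(user_fence))
			dma_fence_wait(user_fence, false);
	}

	/* ... then issue the preemption, wait for it and signal as before ... */
}

That way the user fence is only touched from process context without
the preempt fence's spinlock held, and lockdep stays quiet.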

Apart from that, this looks rather solid to me.

Regards,
Christian.

> +	if (err == -ENOENT)
> +		dma_fence_preempt_issue(pfence);
> +}
> +
> +static bool dma_fence_preempt_enable_signaling(struct dma_fence *fence)
> +{
> +	struct dma_fence_preempt *pfence =
> +		container_of(fence, typeof(*pfence), base);
> +
> +	if (pfence->ops->preempt_delay)
> +		dma_fence_preempt_delay(pfence);
> +	else
> +		dma_fence_preempt_issue(pfence);
> +
> +	return true;
> +}
> +
> +static const struct dma_fence_ops preempt_fence_ops = {
> +	.get_driver_name = dma_fence_preempt_get_driver_name,
> +	.get_timeline_name = dma_fence_preempt_get_timeline_name,
> +	.enable_signaling = dma_fence_preempt_enable_signaling,
> +};
> +
> +/**
> + * dma_fence_is_preempt() - Is preempt fence
> + *
> + * @fence: Preempt fence
> + *
> + * Return: True if preempt fence, False otherwise
> + */
> +bool dma_fence_is_preempt(const struct dma_fence *fence)
> +{
> +	return fence->ops == &preempt_fence_ops;
> +}
> +EXPORT_SYMBOL(dma_fence_is_preempt);
> +
> +/**
> + * dma_fence_preempt_init() - Initialize a preempt fence
> + *
> + * @fence: Preempt fence
> + * @ops: Preempt fence operations
> + * @wq: Work queue for preempt wait, should have WQ_MEM_RECLAIM set
> + * @context: Fence context
> + * @seqno: Fence sequence number
> + */
> +void dma_fence_preempt_init(struct dma_fence_preempt *fence,
> +			    const struct dma_fence_preempt_ops *ops,
> +			    struct workqueue_struct *wq,
> +			    u64 context, u64 seqno)
> +{
> +	/*
> +	 * XXX: We really want to check wq for WQ_MEM_RECLAIM here but
> +	 * workqueue_struct is private.
> +	 */
> +
> +	fence->ops = ops;
> +	fence->wq = wq;
> +	INIT_WORK(&fence->work, dma_fence_preempt_work_func);
> +	spin_lock_init(&fence->lock);
> +	dma_fence_init(&fence->base, &preempt_fence_ops,
> +		       &fence->lock, context, seqno);
> +}
> +EXPORT_SYMBOL(dma_fence_preempt_init);
> diff --git a/include/linux/dma-fence-preempt.h b/include/linux/dma-fence-preempt.h
> new file mode 100644
> index 000000000000..28d803f89527
> --- /dev/null
> +++ b/include/linux/dma-fence-preempt.h
> @@ -0,0 +1,56 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#ifndef __LINUX_DMA_FENCE_PREEMPT_H
> +#define __LINUX_DMA_FENCE_PREEMPT_H
> +
> +#include <linux/dma-fence.h>
> +#include <linux/workqueue.h>
> +
> +struct dma_fence_preempt;
> +struct dma_resv;
> +
> +/**
> + * struct dma_fence_preempt_ops - Preempt fence operations
> + *
> + * These functions should be implemented by the driver.
> + */
> +struct dma_fence_preempt_ops {
> +	/** @preempt_delay: Preempt execution with a delay */
> +	struct dma_fence *(*preempt_delay)(struct dma_fence_preempt *fence);
> +	/** @preempt: Preempt execution */
> +	int (*preempt)(struct dma_fence_preempt *fence);
> +	/** @preempt_wait: Wait for preempt of execution to complete */
> +	int (*preempt_wait)(struct dma_fence_preempt *fence);
> +	/** @preempt_finished: Signal that the preempt has finished */
> +	void (*preempt_finished)(struct dma_fence_preempt *fence);
> +};
> +
> +/**
> + * struct dma_fence_preempt - Embedded preempt fence base class
> + */
> +struct dma_fence_preempt {
> +	/** @base: Fence base class */
> +	struct dma_fence base;
> +	/** @lock: Spinlock for fence handling */
> +	spinlock_t lock;
> +	/** @cb: Callback for preempt delay */
> +	struct dma_fence_cb cb;
> +	/** @ops: Preempt fence operations */
> +	const struct dma_fence_preempt_ops *ops;
> +	/** @wq: Work queue for preempt wait */
> +	struct workqueue_struct *wq;
> +	/** @work: Work struct for preempt wait */
> +	struct work_struct work;
> +};
> +
> +bool dma_fence_is_preempt(const struct dma_fence *fence);
> +
> +void dma_fence_preempt_init(struct dma_fence_preempt *fence,
> +			    const struct dma_fence_preempt_ops *ops,
> +			    struct workqueue_struct *wq,
> +			    u64 context, u64 seqno);
> +
> +#endif


