[PATCH 4/7] drm/xe/oa: Signal output fences
Matthew Brost
matthew.brost at intel.com
Wed Aug 21 15:49:53 UTC 2024
On Wed, Aug 21, 2024 at 08:28:27AM -0700, Ashutosh Dixit wrote:
> Introduce 'struct xe_oa_fence', which embeds the dma_fence used to signal
> output fences in the xe_sync array. The fences are signaled
> asynchronously. When there are no output fences to signal, the wait for
> the OA configuration to take effect is re-introduced synchronously into
> the ioctl.
>
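
Side note for anyone wiring this up from userspace: the point of the
output fence is that the config-change ioctl no longer blocks until the
new configuration takes effect; instead you pass a syncobj with
DRM_XE_SYNC_FLAG_SIGNAL and wait on it whenever convenient. Rough sketch
below, assuming the sync property names from the parsing patch earlier
in this series and the usual libdrm syncobj helpers -- treat it as
illustrative rather than exact uapi:

/*
 * Illustrative only: attach an output syncobj to an OA config change
 * and wait for the new configuration to become active. Property and
 * ioctl names are assumed from earlier patches in this series.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include "xe_drm.h"

static int oa_change_config_async(int drm_fd, int oa_stream_fd,
				  uint64_t metric_set_id)
{
	struct drm_xe_ext_set_property props[3] = {};
	struct drm_xe_sync sync = {};
	uint32_t syncobj;
	int ret;

	/* Output fence: signaled once the new OA config is active */
	ret = drmSyncobjCreate(drm_fd, 0, &syncobj);
	if (ret)
		return ret;

	sync.type = DRM_XE_SYNC_TYPE_SYNCOBJ;
	sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
	sync.handle = syncobj;

	/* Extension chain: new metric set + the sync array */
	props[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
	props[0].base.next_extension = (uintptr_t)&props[1];
	props[0].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
	props[0].value = metric_set_id;

	props[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
	props[1].base.next_extension = (uintptr_t)&props[2];
	props[1].property = DRM_XE_OA_PROPERTY_NUM_SYNCS;
	props[1].value = 1;

	props[2].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
	props[2].property = DRM_XE_OA_PROPERTY_SYNCS;
	props[2].value = (uintptr_t)&sync;

	/* Returns without waiting for the config batch to execute */
	ret = ioctl(oa_stream_fd, DRM_XE_OBSERVATION_IOCTL_CONFIG, &props[0]);
	if (ret)
		return ret;

	/* ... do other work ..., then wait for the config to be active */
	return drmSyncobjWait(drm_fd, &syncobj, 1, INT64_MAX, 0, NULL);
}
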
> v2: Don't wait in the work, use callback + delayed work (Matt B)
> Use a single, not a per-fence spinlock (Matt Brost)
> v3: Move ofence alloc before job submission (Matt)
> Assert, don't fail, from dma_fence_add_callback (Matt)
> Additional dma_fence_get for dma_fence_wait (Matt)
> Change dma_fence_wait to non-interruptible (Matt)
>
> Suggested-by: Matthew Brost <matthew.brost at intel.com>
> Reviewed-by: Jonathan Cavitt <jonathan.cavitt at intel.com>
> Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_oa.c | 101 ++++++++++++++++++++++++++-----
> drivers/gpu/drm/xe/xe_oa_types.h | 3 +
> 2 files changed, 90 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
> index cad8f54500a10..0c55c1b577f70 100644
> --- a/drivers/gpu/drm/xe/xe_oa.c
> +++ b/drivers/gpu/drm/xe/xe_oa.c
> @@ -100,6 +100,15 @@ struct xe_oa_config_bo {
> struct xe_bb *bb;
> };
>
> +struct xe_oa_fence {
> + /* @base: dma fence base */
> + struct dma_fence base;
> + /* @work: work to signal @base */
> + struct delayed_work work;
> + /* @cb: callback to schedule @work */
> + struct dma_fence_cb cb;
> +};
> +
> #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x
>
> static const struct xe_oa_format oa_formats[] = {
> @@ -945,40 +954,103 @@ xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_c
> return oa_bo;
> }
>
> +static void xe_oa_fence_work_fn(struct work_struct *w)
> +{
> + struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
> +
> + /* Signal fence to indicate new OA configuration is active */
> + dma_fence_signal(&ofence->base);
> + dma_fence_put(&ofence->base);
> +}
> +
> +static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
> +{
> + /* Additional empirical delay needed for NOA programming after registers are written */
> +#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
> +
> + struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
> +
> + INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
> + queue_delayed_work(system_unbound_wq, &ofence->work,
> + usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
> + dma_fence_put(fence);
> +}
> +
> +static const char *xe_oa_get_driver_name(struct dma_fence *fence)
> +{
> + return "xe_oa";
> +}
> +
> +static const char *xe_oa_get_timeline_name(struct dma_fence *fence)
> +{
> + return "unbound";
> +}
> +
> +static const struct dma_fence_ops xe_oa_fence_ops = {
> + .get_driver_name = xe_oa_get_driver_name,
> + .get_timeline_name = xe_oa_get_timeline_name,
> +};
> +
> static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
> {
> #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
> struct xe_oa_config_bo *oa_bo;
> - int err = 0, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
> + struct xe_oa_fence *ofence;
> + int i, err, num_signal = 0;
> struct dma_fence *fence;
> - long timeout;
>
> - /* Emit OA configuration batch */
> + ofence = kzalloc(sizeof(*ofence), GFP_KERNEL);
> + if (!ofence) {
> + err = -ENOMEM;
> + goto exit;
> + }
> +
> oa_bo = xe_oa_alloc_config_buffer(stream, config);
> if (IS_ERR(oa_bo)) {
> err = PTR_ERR(oa_bo);
> goto exit;
> }
>
> + /* Emit OA configuration batch */
> fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
> if (IS_ERR(fence)) {
> err = PTR_ERR(fence);
> goto exit;
> }
>
> - /* Wait till all previous batches have executed */
> - timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);
> - dma_fence_put(fence);
> - if (timeout < 0)
> - err = timeout;
> - else if (!timeout)
> - err = -ETIME;
> - if (err)
> - drm_dbg(&stream->oa->xe->drm, "dma_fence_wait_timeout err %d\n", err);
> + /* Point of no return: initialize and set fence to signal */
> + dma_fence_init(&ofence->base, &xe_oa_fence_ops, &stream->oa_fence_lock, 0, 0);
>
> - /* Additional empirical delay needed for NOA programming after registers are written */
> - usleep_range(us, 2 * us);
> + for (i = 0; i < stream->num_syncs; i++) {
> + if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
> + num_signal++;
> + xe_sync_entry_signal(&stream->syncs[i], &ofence->base);
> + }
> +
> + /* Additional dma_fence_get in case we dma_fence_wait */
> + if (!num_signal)
> + dma_fence_get(&ofence->base);
> +
> + /* Add job fence callback to schedule work to signal ofence->base */
> + err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
> + xe_gt_assert(stream->gt, !err || err == -ENOENT);
> + if (err == -ENOENT)
> + xe_oa_config_cb(fence, &ofence->cb);
> +
> + /* If nothing needs to be signaled we wait synchronously */
> + if (!num_signal) {
> + dma_fence_wait(&ofence->base, false);
> + dma_fence_put(&ofence->base);
> + }
> +
> + /* Done with syncs */
> + for (i = 0; i < stream->num_syncs; i++)
> + xe_sync_entry_cleanup(&stream->syncs[i]);
> + kfree(stream->syncs);
> +
> + return 0;
> exit:
> + kfree(ofence);
> return err;
> }
>
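
One non-blocking note for readers: xe_oa_fence_ops has no .release hook,
so the final dma_fence_put() falls through to dma_fence_free(), which
frees the allocation after an RCU grace period. That works here because
@base is the first member of struct xe_oa_fence, so freeing &ofence->base
frees the whole kzalloc'd object. If a member were ever added in front of
@base, an explicit release along these lines would be needed (illustrative
sketch only, not something this patch requires):

static void xe_oa_fence_release(struct dma_fence *fence)
{
	struct xe_oa_fence *ofence = container_of(fence, typeof(*ofence), base);

	/* Free after an RCU grace period, as dma_fence_free() would */
	kfree_rcu(ofence, base.rcu);
}

wired up via a .release = xe_oa_fence_release entry in xe_oa_fence_ops.
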
> @@ -1480,6 +1552,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
> goto err_free_oa_buf;
> }
>
> + spin_lock_init(&stream->oa_fence_lock);
> ret = xe_oa_enable_metric_set(stream);
> if (ret) {
> drm_dbg(&stream->oa->xe->drm, "Unable to enable metric set\n");
> diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
> index c1ca960af9305..412f1460c1437 100644
> --- a/drivers/gpu/drm/xe/xe_oa_types.h
> +++ b/drivers/gpu/drm/xe/xe_oa_types.h
> @@ -239,6 +239,9 @@ struct xe_oa_stream {
> /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
> u32 no_preempt;
>
> + /** @oa_fence_lock: Lock for struct xe_oa_fence */
> + spinlock_t oa_fence_lock;
> +
> /** @num_syncs: size of @syncs array */
> u32 num_syncs;
>
> --
> 2.41.0
>