[PATCH 1/7] drm/xe/oa: Separate batch submission from waiting for completion

Umesh Nerlige Ramappa umesh.nerlige.ramappa at intel.com
Mon Sep 30 20:34:36 UTC 2024


On Wed, Sep 18, 2024 at 12:53:23PM -0700, Ashutosh Dixit wrote:
>When we introduce xe_syncs, we don't wait for internal OA programming
>batches to complete. That is, xe_syncs are signaled asynchronously. In
>anticipation for this, separate out batch submission from waiting for
>completion of those batches.
>
>v2: Change return type of xe_oa_submit_bb to "struct dma_fence *" (Matt B)
>v3: Retain init "int err = 0;" in xe_oa_submit_bb (Jose)
>
>Reviewed-by: Jonathan Cavitt <jonathan.cavitt at intel.com>
>Signed-off-by: Ashutosh Dixit <ashutosh.dixit at intel.com>
>---
> drivers/gpu/drm/xe/xe_oa.c | 57 +++++++++++++++++++++++++++++---------
> 1 file changed, 44 insertions(+), 13 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
>index 354ee9045efc8..52805e159e6fd 100644
>--- a/drivers/gpu/drm/xe/xe_oa.c
>+++ b/drivers/gpu/drm/xe/xe_oa.c
>@@ -570,11 +570,10 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
> 	return ret;
> }
>
>-static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
>+static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
> {
> 	struct xe_sched_job *job;
> 	struct dma_fence *fence;
>-	long timeout;
> 	int err = 0;
>
> 	/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
>@@ -588,14 +587,9 @@ static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
> 	fence = dma_fence_get(&job->drm.s_fence->finished);
> 	xe_sched_job_push(job);
>
>-	timeout = dma_fence_wait_timeout(fence, false, HZ);
>-	dma_fence_put(fence);
>-	if (timeout < 0)
>-		err = timeout;
>-	else if (!timeout)
>-		err = -ETIME;
>+	return fence;
> exit:
>-	return err;
>+	return ERR_PTR(err);
> }
>
> static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
>@@ -659,6 +653,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
> static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
> 				  const struct flex *flex, u32 count)
> {
>+	struct dma_fence *fence;
> 	struct xe_bb *bb;
> 	int err;
>
>@@ -670,7 +665,16 @@ static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lr
>
> 	xe_oa_store_flex(stream, lrc, bb, flex, count);
>
>-	err = xe_oa_submit_bb(stream, bb);
>+	fence = xe_oa_submit_bb(stream, bb);
>+	if (IS_ERR(fence)) {
>+		err = PTR_ERR(fence);
>+		goto free_bb;
>+	}
>+	xe_bb_free(bb, fence);
>+	dma_fence_put(fence);
>+
>+	return 0;
>+free_bb:
> 	xe_bb_free(bb, NULL);
> exit:
> 	return err;

Just a nit - if you initialize fence and err (and reset fence to NULL on
the error path, since dma_fence_put must never be handed an ERR_PTR), the
function simplifies to the version below.  Note that dma_fence_put is a
no-op when fence is NULL, and passing fence to xe_bb_free keeps the bb
alive until the batch completes.

{
	struct dma_fence *fence = NULL;
	struct xe_bb *bb;
	int err = 0;
...
	fence = xe_oa_submit_bb(stream, bb);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		fence = NULL;	/* dma_fence_put(ERR_PTR) would be fatal */
	}

	xe_bb_free(bb, fence);	/* defers the free until fence signals; immediate if NULL */
	dma_fence_put(fence);
exit:
	return err;
}

otherwise, lgtm

Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>

	

>@@ -678,6 +682,7 @@ static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lr
>
> static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
> {
>+	struct dma_fence *fence;
> 	struct xe_bb *bb;
> 	int err;
>
>@@ -689,7 +694,16 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re
>
> 	write_cs_mi_lri(bb, reg_lri, 1);
>
>-	err = xe_oa_submit_bb(stream, bb);
>+	fence = xe_oa_submit_bb(stream, bb);
>+	if (IS_ERR(fence)) {
>+		err = PTR_ERR(fence);
>+		goto free_bb;
>+	}
>+	xe_bb_free(bb, fence);
>+	dma_fence_put(fence);
>+
>+	return 0;
>+free_bb:
> 	xe_bb_free(bb, NULL);
> exit:
> 	return err;
>@@ -922,15 +936,32 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config
> {
> #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
> 	struct xe_oa_config_bo *oa_bo;
>-	int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
>+	int err = 0, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
>+	struct dma_fence *fence;
>+	long timeout;
>
>+	/* Emit OA configuration batch */
> 	oa_bo = xe_oa_alloc_config_buffer(stream, config);
> 	if (IS_ERR(oa_bo)) {
> 		err = PTR_ERR(oa_bo);
> 		goto exit;
> 	}
>
>-	err = xe_oa_submit_bb(stream, oa_bo->bb);
>+	fence = xe_oa_submit_bb(stream, oa_bo->bb);
>+	if (IS_ERR(fence)) {
>+		err = PTR_ERR(fence);
>+		goto exit;
>+	}
>+
>+	/* Wait till all previous batches have executed */
>+	timeout = dma_fence_wait_timeout(fence, false, 5 * HZ);
>+	dma_fence_put(fence);
>+	if (timeout < 0)
>+		err = timeout;
>+	else if (!timeout)
>+		err = -ETIME;
>+	if (err)
>+		drm_dbg(&stream->oa->xe->drm, "dma_fence_wait_timeout err %d\n", err);
>
> 	/* Additional empirical delay needed for NOA programming after registers are written */
> 	usleep_range(us, 2 * us);
>-- 
>2.41.0
>


More information about the Intel-xe mailing list