[PATCH 5/5] dma-buf/fence-chain: Wait on each tested chain link

Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
Tue Aug 12 08:56:06 UTC 2025


Userspace may build dma_fence chains of arbitrary length step by step via
drm_syncobj IOCTLs, and at each step it may immediately start waiting on the
chain link it has just added.

Adjust the wait_* selftests to cover such extreme use cases: instead of a
single waiter on the chain tail, spawn a separate waiter for each chain link.

Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
---
 drivers/dma-buf/st-dma-fence-chain.c | 176 +++++++++++++++++++++------
 1 file changed, 140 insertions(+), 36 deletions(-)
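
For reference, here is a rough userspace-side sketch of the scenario described
in the commit message, built on libdrm's syncobj helpers (drmSyncobjCreate(),
drmSyncobjTimelineWait() with DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, and
drmSyncobjTimelineSignal()). It is illustrative only and not part of the
patch: the device node and chain length are placeholder assumptions, error
handling is omitted, and the points are signalled directly from the CPU,
whereas real userspace would attach each timeline point to submitted GPU work.

/* build: gcc sketch.c $(pkg-config --cflags --libs libdrm) -lpthread */
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <xf86drm.h>

#define CHAIN_LEN 64

struct waiter {
	int fd;
	uint32_t handle;
	uint64_t point;
	pthread_t thread;
};

static void *wait_on_point(void *data)
{
	struct waiter *w = data;
	uint32_t first;

	/* Block until the chain link for this timeline point signals. */
	drmSyncobjTimelineWait(w->fd, &w->handle, &w->point, 1, INT64_MAX,
			       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, &first);
	return NULL;
}

int main(void)
{
	struct waiter waiters[CHAIN_LEN];
	uint32_t handle;
	uint64_t point;
	int fd, i;

	fd = open("/dev/dri/card0", O_RDWR);	/* placeholder device node */
	drmSyncobjCreate(fd, 0, &handle);

	/*
	 * Start a waiter on each future timeline point (chain link) up
	 * front; WAIT_FOR_SUBMIT lets a wait be issued before the point has
	 * materialised, mirroring the selftests that spawn one waiter
	 * kthread per chain link before signalling anything.
	 */
	for (i = 0; i < CHAIN_LEN; i++) {
		waiters[i] = (struct waiter){ .fd = fd, .handle = handle,
					      .point = i + 1 };
		pthread_create(&waiters[i].thread, NULL, wait_on_point,
			       &waiters[i]);
	}

	/* Signal the timeline points in order, growing the chain link by link. */
	for (i = 0; i < CHAIN_LEN; i++) {
		point = i + 1;
		drmSyncobjTimelineSignal(fd, &handle, &point, 1);
	}

	for (i = 0; i < CHAIN_LEN; i++)
		pthread_join(waiters[i].thread, NULL);

	drmSyncobjDestroy(fd, handle);
	return 0;
}

Each timeline point above corresponds to a dma_fence_chain link in the kernel,
which is the situation the adjusted wait_forward(), wait_backward() and
wait_random() selftests now exercise with one waiter kthread per link.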

diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index b431acd4db04b..27b5241fc9403 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -600,31 +600,50 @@ static int __wait_fence_chains(void *arg)
 static int wait_forward(void *arg)
 {
 	uint64_t (*seqno_fn)(unsigned int) = seqno_inc;
+	struct task_struct **tsk;
 	struct dma_fence *chain;
 	struct fence_chains fc;
-	struct task_struct *tsk;
+	uint64_t seqno;
 	ktime_t dt;
+	int i = 0;
 	int err;
-	int i;
 
 	err = fence_chains_init(&fc, CHAIN_SZ, seqno_fn);
 	if (err)
 		return err;
 
-	tsk = kthread_run(__wait_fence_chains, fc.tail, "dmabuf/wait");
-	if (IS_ERR(tsk)) {
-		err = PTR_ERR(tsk);
+	tsk = kmalloc_array(fc.chain_length, sizeof(*tsk), GFP_KERNEL);
+	if (!tsk) {
+		err = -ENOMEM;
 		goto err;
 	}
-	get_task_struct(tsk);
-	yield_to(tsk, true);
+
+	for (i = 0; i < fc.chain_length; i++) {
+		chain = dma_fence_get(fc.tail);
+		seqno = seqno_fn(i);
+		err = dma_fence_chain_find_seqno(&chain, seqno);
+		dma_fence_put(chain);
+		if (err) {
+			pr_err("Reported %d for dma_fence_chain_find_seqno(%llu)!\n", err, seqno);
+			goto err;
+		}
+
+		tsk[i] = kthread_run(__wait_fence_chains, chain, "dmabuf/wait-%llu", seqno);
+		if (IS_ERR(tsk[i])) {
+			err = PTR_ERR(tsk[i]);
+			pr_err("Reported %d for kthread_run(%llu)!\n", err, seqno);
+			goto err;
+		}
+		get_task_struct(tsk[i]);
+		yield_to(tsk[i], true);
+	}
 
 	dt = -ktime_get();
 	for (i = 0; i < fc.chain_length; i++) {
-		uint64_t seqno = seqno_fn(i);
 		int fence_err;
 
 		chain = dma_fence_get(fc.tail);
+		seqno = seqno_fn(i);
 		fence_err = dma_fence_chain_find_seqno(&chain, seqno);
 		dma_fence_put(chain);
 		if (fence_err) {
@@ -635,50 +654,101 @@ static int wait_forward(void *arg)
 			continue;
 		}
 
-		dma_fence_signal(dma_fence_chain_contained(chain));
+		fence_err = dma_fence_signal(dma_fence_chain_contained(chain));
+		if (fence_err) {
+			pr_err("Reported %d for dma_fence_signal(%llu)!\n", fence_err, seqno);
+			if (!err)
+				err = fence_err;
+		}
 	}
 	dt += ktime_get();
 
 	pr_info("%s: %d signals in %llu ns\n", __func__, fc.chain_length, ktime_to_ns(dt));
 
-	err = kthread_stop_put(tsk);
-
 err:
+	while (i--) {
+		int tsk_err = kthread_stop_put(tsk[i]);
+
+		if (tsk_err)
+			pr_err("Reported %d for kthread_stop_put(%llu)!\n", tsk_err, seqno_fn(i));
+
+		if (!err)
+			err = tsk_err;
+	}
+	kfree(tsk);
+
 	fence_chains_fini(&fc);
 	return err;
 }
 
 static int wait_backward(void *arg)
 {
+	uint64_t (*seqno_fn)(unsigned int) = seqno_inc;
+	struct task_struct **tsk;
 	struct dma_fence *chain;
 	struct fence_chains fc;
-	struct task_struct *tsk;
 	ktime_t dt;
+	int i = 0;
 	int err;
 
-	err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+	err = fence_chains_init(&fc, CHAIN_SZ, seqno_fn);
 	if (err)
 		return err;
 
-	tsk = kthread_run(__wait_fence_chains, fc.tail, "dmabuf/wait");
-	if (IS_ERR(tsk)) {
-		err = PTR_ERR(tsk);
+	tsk = kmalloc_array(fc.chain_length, sizeof(*tsk), GFP_KERNEL);
+	if (!tsk) {
+		err = -ENOMEM;
 		goto err;
 	}
-	get_task_struct(tsk);
-	yield_to(tsk, true);
+
+	for (i = 0; i < fc.chain_length; i++) {
+		uint64_t seqno = seqno_fn(i);
+
+		chain = dma_fence_get(fc.tail);
+		err = dma_fence_chain_find_seqno(&chain, seqno);
+		dma_fence_put(chain);
+		if (err) {
+			pr_err("Reported %d for dma_fence_chain_find_seqno(%llu)!\n", err, seqno);
+			goto err;
+		}
+
+		tsk[i] = kthread_run(__wait_fence_chains, chain, "dmabuf/wait-%llu", seqno);
+		if (IS_ERR(tsk[i])) {
+			err = PTR_ERR(tsk[i]);
+			pr_err("Reported %d for kthread_run(%llu)!\n", err, seqno);
+			goto err;
+		}
+		get_task_struct(tsk[i]);
+		yield_to(tsk[i], true);
+	}
 
 	dt = -ktime_get();
-	dma_fence_chain_for_each(chain, fc.tail)
-		dma_fence_signal(dma_fence_chain_contained(chain));
+	dma_fence_chain_for_each(chain, fc.tail) {
+		int fence_err = dma_fence_signal(dma_fence_chain_contained(chain));
+
+		if (fence_err) {
+			pr_err("Reported %d for dma_fence_signal(%llu)!\n", fence_err, chain->seqno);
+			if (!err)
+				err = fence_err;
+		}
+	}
 	dma_fence_put(chain);
 	dt += ktime_get();
 
 	pr_info("%s: %d signals in %llu ns\n", __func__, fc.chain_length, ktime_to_ns(dt));
 
-	err = kthread_stop_put(tsk);
-
 err:
+	while (i--) {
+		int tsk_err = kthread_stop_put(tsk[i]);
+
+		if (tsk_err)
+			pr_err("Reported %d for kthread_stop_put(%llu)!\n", tsk_err, seqno_fn(i));
+
+		if (!err)
+			err = tsk_err;
+	}
+	kfree(tsk);
+
 	fence_chains_fini(&fc);
 	return err;
 }
@@ -701,33 +771,53 @@ static void randomise_fences(struct fence_chains *fc)
 
 static int wait_random(void *arg)
 {
+	uint64_t (*seqno_fn)(unsigned int) = seqno_inc;
+	struct task_struct **tsk;
 	struct dma_fence *chain;
 	struct fence_chains fc;
-	struct task_struct *tsk;
+	uint64_t seqno;
 	ktime_t dt;
+	int i = 0;
 	int err;
-	int i;
 
-	err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+	err = fence_chains_init(&fc, CHAIN_SZ, seqno_fn);
 	if (err)
 		return err;
 
-	randomise_fences(&fc);
-
-	tsk = kthread_run(__wait_fence_chains, fc.tail, "dmabuf/wait");
-	if (IS_ERR(tsk)) {
-		err = PTR_ERR(tsk);
+	tsk = kmalloc_array(fc.chain_length, sizeof(*tsk), GFP_KERNEL);
+	if (!tsk) {
+		err = -ENOMEM;
 		goto err;
 	}
-	get_task_struct(tsk);
-	yield_to(tsk, true);
+
+	for (i = 0; i < fc.chain_length; i++) {
+		chain = dma_fence_get(fc.tail);
+		seqno = seqno_fn(i);
+		err = dma_fence_chain_find_seqno(&chain, seqno);
+		dma_fence_put(chain);
+		if (err) {
+			pr_err("Reported %d for dma_fence_chain_find_seqno(%llu)!\n", err, seqno);
+			goto err;
+		}
+
+		tsk[i] = kthread_run(__wait_fence_chains, chain, "dmabuf/wait-%llu", seqno);
+		if (IS_ERR(tsk[i])) {
+			err = PTR_ERR(tsk[i]);
+			pr_err("Reported %d for kthread_run(%llu)!\n", err, seqno);
+			goto err;
+		}
+		get_task_struct(tsk[i]);
+		yield_to(tsk[i], true);
+	}
+
+	randomise_fences(&fc);
 
 	dt = -ktime_get();
 	for (i = 0; i < fc.chain_length; i++) {
-		uint64_t seqno = fc.chains[i]->seqno;
 		int fence_err;
 
 		chain = dma_fence_get(fc.tail);
+		seqno = fc.chains[i]->seqno;
 		fence_err = dma_fence_chain_find_seqno(&chain, seqno);
 		dma_fence_put(chain);
 		if (fence_err) {
@@ -738,15 +828,29 @@ static int wait_random(void *arg)
 			continue;
 		}
 
-		dma_fence_signal(dma_fence_chain_contained(chain));
+		fence_err = dma_fence_signal(dma_fence_chain_contained(chain));
+		if (fence_err) {
+			pr_err("Reported %d for dma_fence_signal(%llu)!\n", fence_err, seqno);
+			if (!err)
+				err = fence_err;
+		}
 	}
 	dt += ktime_get();
 
 	pr_info("%s: %d signals in %llu ns\n", __func__, fc.chain_length, ktime_to_ns(dt));
 
-	err = kthread_stop_put(tsk);
-
 err:
+	while (i--) {
+		int tsk_err = kthread_stop_put(tsk[i]);
+
+		if (tsk_err) {
+			pr_err("Reported %d for kthread_stop_put(%llu)!\n", tsk_err, seqno_fn(i));
+			if (!err)
+				err = tsk_err;
+		}
+	}
+	kfree(tsk);
+
 	fence_chains_fini(&fc);
 	return err;
 }
-- 
2.50.1


