[PATCH 5/5] drm/amdgpu: Acquire BO locks only during BO processing

Christian König christian.koenig at amd.com
Fri May 2 13:07:56 UTC 2025


On 4/9/25 07:48, Arunpravin Paneer Selvam wrote:
> Fix lockdep warnings.
> 
> [  +0.000024] WARNING: CPU: 10 PID: 1909 at drivers/gpu/drm/drm_syncobj.c:456 drm_syncobj_find_fence+0x58c/0x6e0
> [  +0.000519] CPU: 10 UID: 1000 PID: 1909 Comm: gnome-shel:cs0 Tainted: G        W  OE      6.12.0+ #18
> [  +0.000008] Tainted: [W]=WARN, [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
> [  +0.000004] Hardware name: ASUS System Product Name/TUF GAMING B650-PLUS, BIOS 3072 12/20/2024
> [  +0.000004] RIP: 0010:drm_syncobj_find_fence+0x58c/0x6e0
> [  +0.000006] RSP: 0018:ffff88846d9ef680 EFLAGS: 00010202
> [  +0.000008] RAX: 0000000000000000 RBX: 0000000000001388 RCX: 0000000000000001
> [  +0.000004] RDX: 1ffff1108f5ad1da RSI: 0000000000000001 RDI: ffff88847ad68ed0
> [  +0.000005] RBP: ffff88846d9ef770 R08: 0000000000000000 R09: 0000000000000000
> [  +0.000004] R10: 0000000000000000 R11: 0000000000000000 R12: ffff88847ad68000
> [  +0.000004] R13: 0000000000000002 R14: ffff888149353d00 R15: 000000000000000f
> [  +0.000005] FS:  00007269977fe6c0(0000) GS:ffff888409500000(0000) knlGS:0000000000000000
> [  +0.000005] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [  +0.000004] CR2: 0000643a866b50d0 CR3: 0000000469eb2000 CR4: 0000000000f50ef0
> [  +0.000005] PKRU: 55555554
> [  +0.000004] Call Trace:
> [  +0.000004]  <TASK>
> [  +0.000005]  ? show_regs+0x6c/0x80
> [  +0.000010]  ? __warn+0xd2/0x2d0
> [  +0.000008]  ? drm_syncobj_find_fence+0x58c/0x6e0
> [  +0.000009]  ? report_bug+0x282/0x2f0
> [  +0.000014]  ? handle_bug+0x6e/0xc0
> [  +0.000008]  ? exc_invalid_op+0x18/0x50
> [  +0.000007]  ? asm_exc_invalid_op+0x1b/0x20
> [  +0.000020]  ? drm_syncobj_find_fence+0x58c/0x6e0
> [  +0.000012]  ? __pfx_drm_syncobj_find_fence+0x10/0x10
> [  +0.000012]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000006]  ? lock_is_held_type+0xa3/0x130
> [  +0.000016]  amdgpu_userq_wait_ioctl+0x92d/0x2200 [amdgpu]
> [  +0.000257]  ? amdgpu_userq_wait_ioctl+0x92d/0x2200 [amdgpu]
> [  +0.000180]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000006]  ? __lock_acquire+0x1b19/0x69c0
> [  +0.000022]  ? __pfx_amdgpu_userq_wait_ioctl+0x10/0x10 [amdgpu]
> [  +0.000179]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000006]  ? __kasan_check_read+0x11/0x20
> [  +0.000007]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? mark_lock+0xfd/0x17c0
> [  +0.000012]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? lock_acquire.part.0+0x116/0x360
> [  +0.000006]  ? drm_dev_enter+0x51/0x190
> [  +0.000008]  ? __pfx___lock_acquire+0x10/0x10
> [  +0.000008]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? find_held_lock+0x36/0x140
> [  +0.000021]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? lock_acquire+0x7c/0xc0
> [  +0.000005]  ? drm_dev_enter+0x51/0x190
> [  +0.000027]  drm_ioctl_kernel+0x18b/0x330
> [  +0.000008]  ? __pfx_amdgpu_userq_wait_ioctl+0x10/0x10 [amdgpu]
> [  +0.000181]  ? __pfx_drm_ioctl_kernel+0x10/0x10
> [  +0.000005]  ? lock_acquire+0x7c/0xc0
> [  +0.000009]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? __kasan_check_write+0x14/0x30
> [  +0.000005]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000010]  drm_ioctl+0x589/0xd00
> [  +0.000006]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000006]  ? __pfx_amdgpu_userq_wait_ioctl+0x10/0x10 [amdgpu]
> [  +0.000187]  ? __pfx_drm_ioctl+0x10/0x10
> [  +0.000007]  ? __pm_runtime_resume+0x80/0x110
> [  +0.000021]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? trace_hardirqs_on+0x53/0x60
> [  +0.000007]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? _raw_spin_unlock_irqrestore+0x51/0x80
> [  +0.000013]  amdgpu_drm_ioctl+0xd2/0x1c0 [amdgpu]
> [  +0.000182]  __x64_sys_ioctl+0x13a/0x1c0
> [  +0.000012]  x64_sys_call+0x11ad/0x25f0
> [  +0.000007]  do_syscall_64+0x91/0x180
> [  +0.000007]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? do_syscall_64+0x9d/0x180
> [  +0.000005]  ? syscall_exit_to_user_mode+0x95/0x260
> [  +0.000008]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? do_syscall_64+0x9d/0x180
> [  +0.000009]  ? __pfx___do_sys_prctl+0x10/0x10
> [  +0.000012]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000009]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000007]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? syscall_exit_to_user_mode+0x95/0x260
> [  +0.000008]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? do_syscall_64+0x9d/0x180
> [  +0.000009]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000009]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000007]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? irqentry_exit_to_user_mode+0x8b/0x260
> [  +0.000007]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? irqentry_exit+0x77/0xb0
> [  +0.000005]  ? srso_alias_return_thunk+0x5/0xfbef5
> [  +0.000005]  ? exc_page_fault+0x93/0x150
> [  +0.000009]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
> [  +0.000005] RIP: 0033:0x7269b0324ded
> [  +0.000006] Code: 04 25 28 00 00 00 48 89 45 c8 31 c0 48 8d 45 10 c7 45 b0 10 00 00 00 48 89 45 b8 48 8d 45 d0 48 89 45 c0 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1a 48 8b 45 c8 64 48 2b 04 25 28 00 00 00
> [  +0.000005] RSP: 002b:00007269977fc9b0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
> [  +0.000008] RAX: ffffffffffffffda RBX: 00007269977fcb00 RCX: 00007269b0324ded
> [  +0.000004] RDX: 00007269977fcb00 RSI: 00000000c0406458 RDI: 000000000000000d
> [  +0.000004] RBP: 00007269977fca00 R08: 00007269977fcc1c R09: 0000000000000000
> [  +0.000005] R10: 000000000000000f R11: 0000000000000246 R12: 000000000000000d
> [  +0.000004] R13: 00005bce7c309a50 R14: 00007269977fca30 R15: 0000000000000000
> [  +0.000021]  </TASK>
> [  +0.000005] irq event stamp: 1359
> [  +0.000004] hardirqs last  enabled at (1365): [<ffffffffaa8693a9>] __up_console_sem+0x79/0xa0
> [  +0.000007] hardirqs last disabled at (1370): [<ffffffffaa86938e>] __up_console_sem+0x5e/0xa0
> [  +0.000005] softirqs last  enabled at (756): [<ffffffffaa67377e>] __irq_exit_rcu+0x17e/0x1d0
> [  +0.000006] softirqs last disabled at (749): [<ffffffffaa67377e>] __irq_exit_rcu+0x17e/0x1d0
> [  +0.000005] ---[ end trace 0000000000000000 ]---
> 
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
> ---
>  .../gpu/drm/amd/amdgpu/amdgpu_userq_fence.c   | 116 +++++++++++-------
>  1 file changed, 69 insertions(+), 47 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> index d5b35b5df527..c5de39a8ff98 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
> @@ -395,6 +395,43 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
>  	dma_fence_put(fence);
>  }
>  
> +static int amdgpu_userq_exec_lock(struct drm_exec *exec, u32 flags,
> +				  struct drm_gem_object **rgobj,
> +				  struct drm_gem_object **wgobj,
> +				  u32 num_read_handles,
> +				  u32 num_write_handles,
> +				  unsigned int num_fences)
num_fences should always be 1 if I'm not completely mistaken.
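I.e. the parameter could probably just be dropped, roughly like this (only a sketch, rest of the signature kept as-is):

static int amdgpu_userq_exec_lock(struct drm_exec *exec, u32 flags,
				  struct drm_gem_object **rgobj,
				  struct drm_gem_object **wgobj,
				  u32 num_read_handles,
				  u32 num_write_handles)

and then hand a constant 1 to drm_exec_prepare_array() below.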

> +{
> +	int r;
> +
> +	if (!exec || !rgobj || !wgobj)
> +		return -EINVAL;
> +
> +	drm_exec_init(exec, flags,
> +		      (num_read_handles + num_write_handles));
> +
> +	/* Lock all BOs with retry handling */
> +	drm_exec_until_all_locked(exec) {
> +		r = drm_exec_prepare_array(exec, rgobj, num_read_handles, num_fences);
> +		drm_exec_retry_on_contention(exec);
> +		if (r)
> +			drm_exec_fini(exec);

Don't call drm_exec_fini() here. Rather just return the error.
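Something like this (untested sketch, assuming the caller then does the drm_exec_fini() in its cleanup path):

	drm_exec_until_all_locked(exec) {
		r = drm_exec_prepare_array(exec, rgobj, num_read_handles, 1);
		drm_exec_retry_on_contention(exec);
		if (r)
			return r;

		r = drm_exec_prepare_array(exec, wgobj, num_write_handles, 1);
		drm_exec_retry_on_contention(exec);
		if (r)
			return r;
	}

	return 0;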

> +
> +		r = drm_exec_prepare_array(exec, wgobj, num_write_handles, num_fences);
> +		drm_exec_retry_on_contention(exec);
> +		if (r)
> +			drm_exec_fini(exec);
Same here.

> +	}
> +
> +	return r;
> +}
> +
> +static void amdgpu_userq_exec_unlock(struct drm_exec *exec)
> +{
> +	if (exec)
> +		drm_exec_fini(exec);
> +}
> +
That looks superfluous. Just call drm_exec_fini() in the caller.
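I.e. in the callers just do this directly (sketch based on the signal hunk below, untested):

	dma_resv_add_fence(gobj_write[i]->resv, fence,
			   DMA_RESV_USAGE_WRITE);
	}
	drm_exec_fini(&exec);

instead of going through the extra wrapper.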

Regards,
Christian.

>  int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
>  			      struct drm_file *filp)
>  {
> @@ -511,24 +548,14 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
>  	queue->last_fence = dma_fence_get(fence);
>  	mutex_unlock(&userq_mgr->userq_mutex);
>  
> -	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
> -		      (num_read_bo_handles + num_write_bo_handles));
> -
> -	/* Lock all BOs with retry handling */
> -	drm_exec_until_all_locked(&exec) {
> -		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
> -		drm_exec_retry_on_contention(&exec);
> -		if (r) {
> -			amdgpu_userq_fence_cleanup(fence);
> -			goto exec_fini;
> -		}
> -
> -		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
> -		drm_exec_retry_on_contention(&exec);
> -		if (r) {
> -			amdgpu_userq_fence_cleanup(fence);
> -			goto exec_fini;
> -		}
> +	r = amdgpu_userq_exec_lock(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
> +				   gobj_read, gobj_write,
> +				   num_read_bo_handles,
> +				   num_write_bo_handles,
> +				   1);
> +	if (r) {
> +		amdgpu_userq_fence_cleanup(fence);
> +		goto put_gobj_write;
>  	}
>  
>  	for (i = 0; i < num_read_bo_handles; i++) {
> @@ -546,6 +573,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
>  		dma_resv_add_fence(gobj_write[i]->resv, fence,
>  				   DMA_RESV_USAGE_WRITE);
>  	}
> +	amdgpu_userq_exec_unlock(&exec);
>  
>  	/* Add the created fence to syncobj/BO's */
>  	for (i = 0; i < num_syncobj_handles; i++)
> @@ -554,8 +582,6 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
>  	/* drop the reference acquired in fence creation function */
>  	dma_fence_put(fence);
>  
> -exec_fini:
> -	drm_exec_fini(&exec);
>  put_gobj_write:
>  	while (wentry-- > 0)
>  		drm_gem_object_put(gobj_write[wentry]);
> @@ -666,26 +692,6 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  		}
>  	}
>  
> -	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
> -		      (num_read_bo_handles + num_write_bo_handles));
> -
> -	/* Lock all BOs with retry handling */
> -	drm_exec_until_all_locked(&exec) {
> -		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
> -		drm_exec_retry_on_contention(&exec);
> -		if (r) {
> -			drm_exec_fini(&exec);
> -			goto put_gobj_write;
> -		}
> -
> -		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
> -		drm_exec_retry_on_contention(&exec);
> -		if (r) {
> -			drm_exec_fini(&exec);
> -			goto put_gobj_write;
> -		}
> -	}
> -
>  	if (!wait_info->num_fences) {
>  		if (num_points) {
>  			struct dma_fence_unwrap iter;
> @@ -698,7 +704,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>  							   &fence);
>  				if (r)
> -					goto exec_fini;
> +					goto put_gobj_write;
>  
>  				dma_fence_unwrap_for_each(f, &iter, fence)
>  					num_fences++;
> @@ -716,12 +722,20 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>  						   &fence);
>  			if (r)
> -				goto exec_fini;
> +				goto put_gobj_write;
>  
>  			num_fences++;
>  			dma_fence_put(fence);
>  		}
>  
> +		r = amdgpu_userq_exec_lock(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
> +					   gobj_read, gobj_write,
> +					   num_read_bo_handles,
> +					   num_write_bo_handles,
> +					   1);
> +		if (r)
> +			goto put_gobj_write;
> +
>  		/* Count GEM objects fence */
>  		for (i = 0; i < num_read_bo_handles; i++) {
>  			struct dma_resv_iter resv_cursor;
> @@ -740,7 +754,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  						DMA_RESV_USAGE_WRITE, fence)
>  				num_fences++;
>  		}
> -
> +		amdgpu_userq_exec_unlock(&exec);
>  		/*
>  		 * Passing num_fences = 0 means that userspace doesn't want to
>  		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
> @@ -753,7 +767,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
>  		if (!fence_info) {
>  			r = -ENOMEM;
> -			goto exec_fini;
> +			goto put_gobj_write;
>  		}
>  
>  		/* Array of fences */
> @@ -763,6 +777,14 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  			goto free_fence_info;
>  		}
>  
> +		r = amdgpu_userq_exec_lock(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
> +					   gobj_read, gobj_write,
> +					   num_read_bo_handles,
> +					   num_write_bo_handles,
> +					   1);
> +		if (r)
> +			goto free_fences;
> +
>  		/* Retrieve GEM read objects fence */
>  		for (i = 0; i < num_read_bo_handles; i++) {
>  			struct dma_resv_iter resv_cursor;
> @@ -772,6 +794,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  						DMA_RESV_USAGE_READ, fence) {
>  				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
>  					r = -EINVAL;
> +					amdgpu_userq_exec_unlock(&exec);
>  					goto free_fences;
>  				}
>  
> @@ -789,6 +812,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  						DMA_RESV_USAGE_WRITE, fence) {
>  				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
>  					r = -EINVAL;
> +					amdgpu_userq_exec_unlock(&exec);
>  					goto free_fences;
>  				}
>  
> @@ -796,6 +820,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  				dma_fence_get(fence);
>  			}
>  		}
> +		amdgpu_userq_exec_unlock(&exec);
>  
>  		if (num_points) {
>  			struct dma_fence_unwrap iter;
> @@ -901,7 +926,6 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  		kfree(fence_info);
>  	}
>  
> -	drm_exec_fini(&exec);
>  	for (i = 0; i < num_read_bo_handles; i++)
>  		drm_gem_object_put(gobj_read[i]);
>  	kfree(gobj_read);
> @@ -924,8 +948,6 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
>  	kfree(fences);
>  free_fence_info:
>  	kfree(fence_info);
> -exec_fini:
> -	drm_exec_fini(&exec);
>  put_gobj_write:
>  	while (wentry-- > 0)
>  		drm_gem_object_put(gobj_write[wentry]);


