[PATCH] drm/xe: Convert USM lock to rwsem

Matthew Auld matthew.auld at intel.com
Wed Sep 18 07:56:26 UTC 2024


On 18/09/2024 06:44, Matthew Brost wrote:
> Remove contention from GPU fault path for ASID->VM lookup.
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>   drivers/gpu/drm/xe/xe_device.c       | 4 +---
>   drivers/gpu/drm/xe/xe_device_types.h | 2 +-
>   drivers/gpu/drm/xe/xe_gt_pagefault.c | 8 ++++----
>   drivers/gpu/drm/xe/xe_vm.c           | 8 ++++----
>   4 files changed, 10 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index 4d3c794f134c..38eade07a004 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -335,9 +335,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
>   
>   	init_waitqueue_head(&xe->ufence_wq);
>   
> -	err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
> -	if (err)
> -		goto err;
> +	init_rwsem(&xe->usm.lock);
>   
>   	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
>   
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index c92df0a2423f..4507a5756a05 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -395,7 +395,7 @@ struct xe_device {
>   		/** @usm.next_asid: next ASID, used to cyclically allocate ASIDs */
>   		u32 next_asid;
>   		/** @usm.lock: protects USM state */
> -		struct mutex lock;
> +		struct rw_semaphore lock;
>   	} usm;
>   
>   	/** @pinned: pinned BO state */
> diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> index 00af059a8971..5c3af2bb5402 100644
> --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> @@ -198,13 +198,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
>   		return -EFAULT;
>   
>   	/* ASID to VM */
> -	mutex_lock(&xe->usm.lock);
> +	down_read(&xe->usm.lock);
>   	vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
>   	if (vm && xe_vm_in_fault_mode(vm))
>   		xe_vm_get(vm);
>   	else
>   		vm = NULL;
> -	mutex_unlock(&xe->usm.lock);
> +	up_read(&xe->usm.lock);
>   	if (!vm)
>   		return -EINVAL;
>   
> @@ -549,11 +549,11 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
>   		return -EINVAL;
>   
>   	/* ASID to VM */
> -	mutex_lock(&xe->usm.lock);
> +	down_read(&xe->usm.lock);
>   	vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
>   	if (vm)
>   		xe_vm_get(vm);
> -	mutex_unlock(&xe->usm.lock);
> +	up_read(&xe->usm.lock);
>   	if (!vm || !xe_vm_in_fault_mode(vm))
>   		return -EINVAL;

Looks like we potentially leak the vm here: if the lookup succeeds but 
the vm turns out not to be in fault mode, we return -EINVAL without 
dropping the reference we just took. Could maybe make this the same as 
above, i.e. check xe_vm_in_fault_mode() under the lock before grabbing 
the reference? Maybe even a small helper shared by both callers, 
something like the sketch below.
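
(Untested sketch, just to illustrate; asid_to_vm() is a name I made up 
here. It folds the lookup, the fault-mode check and the reference grab 
under the read lock, so both fault paths behave the same.)

static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);	/* caller drops this with xe_vm_put() */
	else
		vm = NULL;	/* no ref taken, nothing to clean up */
	up_read(&xe->usm.lock);

	return vm;
}

Both handle_pagefault() and handle_acc() would then reduce to:

	vm = asid_to_vm(xe, pf->asid);	/* or acc->asid */
	if (!vm)
		return -EINVAL;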

Anyway,
Reviewed-by: Matthew Auld <matthew.auld at intel.com>


>   
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 7acd5fc9d032..a3d7cb7cfd22 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>   
>   	up_write(&vm->lock);
>   
> -	mutex_lock(&xe->usm.lock);
> +	down_write(&xe->usm.lock);
>   	if (vm->usm.asid) {
>   		void *lookup;
>   
> @@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>   		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
>   		xe_assert(xe, lookup == vm);
>   	}
> -	mutex_unlock(&xe->usm.lock);
> +	up_write(&xe->usm.lock);
>   
>   	for_each_tile(tile, xe, id)
>   		xe_range_fence_tree_fini(&vm->rftree[id]);
> @@ -1772,11 +1772,11 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
>   		goto err_close_and_put;
>   
>   	if (xe->info.has_asid) {
> -		mutex_lock(&xe->usm.lock);
> +		down_write(&xe->usm.lock);
>   		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
>   				      XA_LIMIT(1, XE_MAX_ASID - 1),
>   				      &xe->usm.next_asid, GFP_KERNEL);
> -		mutex_unlock(&xe->usm.lock);
> +		up_write(&xe->usm.lock);
>   		if (err < 0)
>   			goto err_free_id;
>   

