[PATCH] drm/xe: Skip VMAs pin when requesting signal to the last XE_EXEC

Matthew Brost matthew.brost at intel.com
Wed Mar 13 17:34:45 UTC 2024


On Wed, Mar 13, 2024 at 10:13:18AM -0700, José Roberto de Souza wrote:
> An XE_EXEC with num_batch_buffer == 0 causes the syncs passed as
> arguments to be signaled when the last real XE_EXEC completes.
> But to do that it was first pinning all VMAs in drm_gpuvm_exec_lock();
> this patch removes that pinning, as it is not required.
> 
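For context, the userspace side of this looks roughly like the
following. This is an untested sketch, not Mesa's actual code: the
open DRM fd and exec queue setup are assumed, wait_last_exec() is a
made-up helper name, and the uapi structs are the ones from
include/uapi/drm/xe_drm.h.

#include <stdint.h>
#include <xf86drm.h>		/* drmIoctl(), drmSyncobjCreate(), drmSyncobjWait() */
#include <drm/xe_drm.h>		/* struct drm_xe_exec, struct drm_xe_sync */

/* Signal (and wait on) a syncobj once the last real XE_EXEC completes. */
static int wait_last_exec(int fd, uint32_t exec_queue_id)
{
	uint32_t syncobj;
	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
	};
	struct drm_xe_exec exec = {
		.exec_queue_id = exec_queue_id,
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
		.num_batch_buffer = 0,	/* no batch: just plumb the sync */
	};
	int err;

	err = drmSyncobjCreate(fd, 0, &syncobj);
	if (err)
		return err;
	sync.handle = syncobj;

	err = drmIoctl(fd, DRM_IOCTL_XE_EXEC, &exec);
	if (err)
		return err;

	/* Blocks until the last real XE_EXEC on this queue has completed. */
	return drmSyncobjWait(fd, &syncobj, 1, INT64_MAX, 0, NULL);
}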
> This change also helps Mesa implement memory over-commit recovery:
> Mesa needs to unbind VMAs that are no longer needed when the whole VM
> can't fit in GPU memory, but it can only do the unbinding once the
> last XE_EXEC has completed.
> So with this change Mesa can get the signal it wants without hitting
> out-of-memory errors.
> 
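The recovery flow then becomes: submit the batchless XE_EXEC with a
signal sync as above, wait for it, and only then unbind. A hedged
sketch building on the previous snippet, under the same assumptions
(evict_vma() is a made-up name; vm_id/addr/range are placeholders for
whichever VMA Mesa decides to evict):

/* Unbind a no-longer-needed VMA once the last real exec is done. */
static int evict_vma(int fd, uint32_t exec_queue_id, uint32_t vm_id,
		     uint64_t addr, uint64_t range)
{
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.op = DRM_XE_VM_BIND_OP_UNMAP,
			.addr = addr,	/* GPU VA of the mapping */
			.range = range,	/* size of the mapping */
		},
	};
	int err;

	err = wait_last_exec(fd, exec_queue_id);	/* sketch above */
	if (err)
		return err;

	return drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}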
> Cc: Thomas Hellstrom <thomas.hellstrom at linux.intel.com>
> Co-developed-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: José Roberto de Souza <jose.souza at intel.com>

Reviewed-by: Matthew Brost <matthew.brost at intel.com>

> ---
>  drivers/gpu/drm/xe/xe_exec.c | 41 ++++++++++++++++++++----------------
>  1 file changed, 23 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
> index 952496c6260df..826c8b3896725 100644
> --- a/drivers/gpu/drm/xe/xe_exec.c
> +++ b/drivers/gpu/drm/xe/xe_exec.c
> @@ -235,6 +235,29 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  			goto err_unlock_list;
>  	}
>  
> +	if (!args->num_batch_buffer) {
> +		err = xe_vm_lock(vm, true);
> +		if (err)
> +			goto err_unlock_list;
> +
> +		if (!xe_vm_in_lr_mode(vm)) {
> +			struct dma_fence *fence;
> +
> +			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
> +			if (IS_ERR(fence)) {
> +				err = PTR_ERR(fence);
> +				goto err_unlock_list;
> +			}
> +			for (i = 0; i < num_syncs; i++)
> +				xe_sync_entry_signal(&syncs[i], NULL, fence);
> +			xe_exec_queue_last_fence_set(q, vm, fence);
> +			dma_fence_put(fence);
> +		}
> +
> +		xe_vm_unlock(vm);
> +		goto err_unlock_list;
> +	}
> +
>  	vm_exec.vm = &vm->gpuvm;
>  	vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
>  	if (xe_vm_in_lr_mode(vm)) {
> @@ -254,24 +277,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		goto err_exec;
>  	}
>  
> -	if (!args->num_batch_buffer) {
> -		if (!xe_vm_in_lr_mode(vm)) {
> -			struct dma_fence *fence;
> -
> -			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
> -			if (IS_ERR(fence)) {
> -				err = PTR_ERR(fence);
> -				goto err_exec;
> -			}
> -			for (i = 0; i < num_syncs; i++)
> -				xe_sync_entry_signal(&syncs[i], NULL, fence);
> -			xe_exec_queue_last_fence_set(q, vm, fence);
> -			dma_fence_put(fence);
> -		}
> -
> -		goto err_exec;
> -	}
> -
>  	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
>  		err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */
>  		skip_retry = true;
> -- 
> 2.44.0
> 

