[PATCH] drm/xe: Only allow 1 ufence per exec / bind IOCTL

Welty, Brian brian.welty at intel.com
Wed Jan 24 01:30:44 UTC 2024


On 1/23/2024 8:34 AM, Matthew Brost wrote:
> The way exec ufences are currently coded, only 1 ufence per IOCTL will be
> signaled. It is possible to fix this, but for current use cases 1 ufence
> per IOCTL is sufficient. Enforce a limit of 1 ufence per IOCTL (for both
> exec and bind, to keep them uniform).

Interesting.  Makes sense to me.
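
For anyone following along from the userspace side, below is roughly what the
newly rejected case looks like. This is only a sketch assuming the merged xe
uAPI names in xe_drm.h (struct drm_xe_sync with an addr union member,
DRM_XE_SYNC_TYPE_USER_FENCE, DRM_XE_SYNC_FLAG_SIGNAL, DRM_IOCTL_XE_EXEC), and
it assumes fd / exec queue / batch / ufence BO setup happens elsewhere, so
treat the details as illustrative rather than authoritative:

  #include <errno.h>
  #include <stdint.h>
  #include <sys/ioctl.h>

  #include "xe_drm.h"	/* uAPI header; field names assumed from the merged xe uAPI */

  /*
   * Illustration only: submit an exec carrying two user-fence syncs, both
   * asking to be signaled. With this patch applied the IOCTL is expected to
   * return -EINVAL instead of silently signaling only one of the fences.
   */
  static int exec_with_two_ufences(int fd, uint32_t exec_queue_id,
  				 uint64_t batch_addr,
  				 uint64_t ufence_addr0, uint64_t ufence_addr1)
  {
  	struct drm_xe_sync syncs[2] = {};
  	struct drm_xe_exec exec = {};

  	syncs[0].type = DRM_XE_SYNC_TYPE_USER_FENCE;
  	syncs[0].flags = DRM_XE_SYNC_FLAG_SIGNAL;
  	syncs[0].addr = ufence_addr0;
  	syncs[0].timeline_value = 1;

  	syncs[1] = syncs[0];
  	syncs[1].addr = ufence_addr1;	/* second ufence in one IOCTL -> now rejected */

  	exec.exec_queue_id = exec_queue_id;
  	exec.num_syncs = 2;
  	exec.syncs = (uintptr_t)syncs;
  	exec.address = batch_addr;
  	exec.num_batch_buffer = 1;

  	if (ioctl(fd, DRM_IOCTL_XE_EXEC, &exec))
  		return -errno;	/* expected: -EINVAL with this patch */
  	return 0;
  }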

Reviewed-by: Brian Welty <brian.welty at intel.com>


> 
> Cc: Mika Kahola <mika.kahola at intel.com>
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>   drivers/gpu/drm/xe/xe_exec.c | 10 +++++++++-
>   drivers/gpu/drm/xe/xe_sync.h |  5 +++++
>   drivers/gpu/drm/xe/xe_vm.c   | 10 +++++++++-
>   3 files changed, 23 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
> index 59fd9bb40c18..952496c6260d 100644
> --- a/drivers/gpu/drm/xe/xe_exec.c
> +++ b/drivers/gpu/drm/xe/xe_exec.c
> @@ -150,7 +150,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>   	u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
>   	struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
>   	struct drm_exec *exec = &vm_exec.exec;
> -	u32 i, num_syncs = 0;
> +	u32 i, num_syncs = 0, num_ufence = 0;
>   	struct xe_sched_job *job;
>   	struct dma_fence *rebind_fence;
>   	struct xe_vm *vm;
> @@ -196,6 +196,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>   					   SYNC_PARSE_FLAG_LR_MODE : 0));
>   		if (err)
>   			goto err_syncs;
> +
> +		if (xe_sync_is_ufence(&syncs[i]))
> +			num_ufence++;
> +	}
> +
> +	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
> +		err = -EINVAL;
> +		goto err_syncs;
>   	}
>   
>   	if (xe_exec_queue_is_parallel(q)) {
> diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
> index d284afbe917c..f43cdcaca6c5 100644
> --- a/drivers/gpu/drm/xe/xe_sync.h
> +++ b/drivers/gpu/drm/xe/xe_sync.h
> @@ -33,4 +33,9 @@ struct dma_fence *
>   xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
>   		     struct xe_exec_queue *q, struct xe_vm *vm);
>   
> +static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
> +{
> +	return !!sync->ufence;
> +}
> +
>   #endif
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index d096a8c00bd4..8576535c4b6a 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2851,7 +2851,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>   	struct drm_gpuva_ops **ops = NULL;
>   	struct xe_vm *vm;
>   	struct xe_exec_queue *q = NULL;
> -	u32 num_syncs;
> +	u32 num_syncs, num_ufence = 0;
>   	struct xe_sync_entry *syncs = NULL;
>   	struct drm_xe_vm_bind_op *bind_ops;
>   	LIST_HEAD(ops_list);
> @@ -2988,6 +2988,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>   					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
>   		if (err)
>   			goto free_syncs;
> +
> +		if (xe_sync_is_ufence(&syncs[num_syncs]))
> +			num_ufence++;
> +	}
> +
> +	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
> +		err = -EINVAL;
> +		goto free_syncs;
>   	}
>   
>   	if (!args->num_binds) {
