[RFC v1 1/1] drm/xe: Allow fault injection in vm create and vm bind IOCTLs

Matthew Brost matthew.brost at intel.com
Fri Nov 8 16:16:50 UTC 2024


On Fri, Nov 08, 2024 at 05:11:56PM +0100, Francois Dugast wrote:
> Use the fault injection infrastructure to allow specific functions to
> be configured over debugfs so that they fail during the execution of
> xe_vm_create_ioctl() and xe_vm_bind_ioctl(). This allows more thorough
> testing from user space by exercising error handling and unwinding
> code paths which cannot be reached by simply passing invalid IOCTL
> arguments. This can help increase code robustness.
> 
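
For reference, once these annotations are in place a user space test
should be able to arm one of them through the generic fail_function
debugfs knobs. Rough sketch, untested, assuming CONFIG_FAIL_FUNCTION is
enabled and debugfs is mounted at /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FAIL_FN "/sys/kernel/debug/fail_function"

static void write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return;
        if (write(fd, val, strlen(val)) < 0)
                perror(path);
        close(fd);
}

int main(void)
{
        /* Arm injection for one of the annotated functions */
        write_str(FAIL_FN "/inject", "xe_vma_ops_alloc");
        /* Inject -ENOMEM, i.e. the output of printf %#x -12 */
        write_str(FAIL_FN "/xe_vma_ops_alloc/retval", "0xfffffffffffffff4");
        /* Fail every hit, with no limit on the number of failures */
        write_str(FAIL_FN "/probability", "100");
        write_str(FAIL_FN "/times", "-1");

        /* ... issue the vm bind ioctl here and expect -ENOMEM ... */

        /* Disarm */
        write_str(FAIL_FN "/inject", "!xe_vma_ops_alloc");
        return 0;
}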

Let's also add xe_pt_update_ops_prepare and xe_pt_update_ops_run if possible.
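
Something along these lines in xe_pt.c, right after each function, same
pattern as the hunks above (assuming both still report failures as an
errno / ERR_PTR, so ERRNO is the matching injection type):

        ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
        ...
        ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);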

Matt

> Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_exec_queue.c | 1 +
>  drivers/gpu/drm/xe/xe_pt.c         | 1 +
>  drivers/gpu/drm/xe/xe_vm.c         | 4 ++++
>  3 files changed, 6 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index fd0f3b3c9101..b999db5f5c19 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -240,6 +240,7 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
>  
>  	return q;
>  }
> +ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
>  
>  void xe_exec_queue_destroy(struct kref *ref)
>  {
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index f27f579f4d85..e111698abbd9 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -136,6 +136,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
>  	xe_pt_free(pt);
>  	return ERR_PTR(err);
>  }
> +ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
>  
>  /**
>   * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 624133fae5f5..2e67648ed512 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -740,6 +740,7 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
>  
>  	return 0;
>  }
> +ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
>  
>  static void xe_vma_ops_fini(struct xe_vma_ops *vops)
>  {
> @@ -1352,6 +1353,7 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
>  
>  	return 0;
>  }
> +ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
>  
>  static void xe_vm_free_scratch(struct xe_vm *vm)
>  {
> @@ -1978,6 +1980,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>  
>  	return ops;
>  }
> +ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
>  
>  static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>  			      u16 pat_index, unsigned int flags)
> @@ -2697,6 +2700,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
>  	drm_exec_fini(&exec);
>  	return err;
>  }
> +ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
>  
>  #define SUPPORTED_FLAGS_STUB  \
>  	(DRM_XE_VM_BIND_FLAG_READONLY | \
> -- 
> 2.43.0
> 

