[Intel-xe] [PATCH] drm/xe/uapi: Separate VM_BIND's operation and flag

Rodrigo Vivi rodrigo.vivi at intel.com
Wed Sep 6 14:34:35 UTC 2023


On Wed, Sep 06, 2023 at 01:49:24PM +0000, Francois Dugast wrote:
> Use separate members in drm_xe_vm_bind_op for the operation and for the
> flags, as is done in other structures.
> 
> The type is left as u32 to leave enough room for future operations and
> flags.
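
So after this patch the relevant part of the uapi struct would look roughly
like this (a sketch pieced together from the hunks below, with the define
blocks elided):

	struct drm_xe_vm_bind_op {
		...
		/** @op: Bind operation to perform, one of XE_VM_BIND_OP_* */
		__u32 op;
		/** @flags: Bind flags, a mask of XE_VM_BIND_FLAG_* */
		__u32 flags;
		/** @region: Memory region to prefetch VMA to */
		__u32 region;
		...
	};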
> 
> Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 29 ++++++++++++++++-------------
>  include/uapi/drm/xe_drm.h  |  6 ++++--
>  2 files changed, 20 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 1d9aa5c40659..d0e5974b5292 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2159,11 +2159,11 @@ static void vm_set_async_error(struct xe_vm *vm, int err)
>  }
>  
>  static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
> -				    u64 addr, u64 range, u32 op)
> +				    u64 addr, u64 range, u32 op, u32 flags)
>  {
>  	struct xe_device *xe = vm->xe;
>  	struct xe_vma *vma;
> -	bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
> +	bool async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
>  
>  	lockdep_assert_held(&vm->lock);
>  
> @@ -2264,7 +2264,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
>  static struct drm_gpuva_ops *
>  vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>  			 u64 bo_offset_or_userptr, u64 addr, u64 range,
> -			 u32 operation, u8 tile_mask, u32 region)
> +			 u32 operation, u32 flags, u8 tile_mask, u32 region)
>  {
>  	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
>  	struct ww_acquire_ctx ww;
> @@ -2293,10 +2293,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>  
>  			op->tile_mask = tile_mask;
>  			op->map.immediate =
> -				operation & XE_VM_BIND_FLAG_IMMEDIATE;
> +				flags & XE_VM_BIND_FLAG_IMMEDIATE;
>  			op->map.read_only =
> -				operation & XE_VM_BIND_FLAG_READONLY;
> -			op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
> +				flags & XE_VM_BIND_FLAG_READONLY;
> +			op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
>  		}
>  		break;
>  	case XE_VM_BIND_OP_UNMAP:
> @@ -3116,15 +3116,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
>  		u64 range = (*bind_ops)[i].range;
>  		u64 addr = (*bind_ops)[i].addr;
>  		u32 op = (*bind_ops)[i].op;
> +		u32 flags = (*bind_ops)[i].flags;
>  		u32 obj = (*bind_ops)[i].obj;
>  		u64 obj_offset = (*bind_ops)[i].obj_offset;
>  		u32 region = (*bind_ops)[i].region;
> -		bool is_null = op & XE_VM_BIND_FLAG_NULL;
> +		bool is_null = flags & XE_VM_BIND_FLAG_NULL;
>  
>  		if (i == 0) {
> -			*async = !!(op & XE_VM_BIND_FLAG_ASYNC);
> +			*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
>  		} else if (XE_IOCTL_DBG(xe, !*async) ||
> -			   XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
> +			   XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
>  			   XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
>  					XE_VM_BIND_OP_RESTART)) {
>  			err = -EINVAL;
> @@ -3145,7 +3146,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
>  
>  		if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
>  				 XE_VM_BIND_OP_PREFETCH) ||
> -		    XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
> +		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
>  		    XE_IOCTL_DBG(xe, obj && is_null) ||
>  		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
>  		    XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
> @@ -3360,8 +3361,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		u64 range = bind_ops[i].range;
>  		u64 addr = bind_ops[i].addr;
>  		u32 op = bind_ops[i].op;
> +		u32 flags = bind_ops[i].flags;
>  
> -		err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
> +		err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op, flags);
>  		if (err)
>  			goto free_syncs;
>  	}
> @@ -3370,13 +3372,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		u64 range = bind_ops[i].range;
>  		u64 addr = bind_ops[i].addr;
>  		u32 op = bind_ops[i].op;
> +		u32 flags = bind_ops[i].flags;
>  		u64 obj_offset = bind_ops[i].obj_offset;
>  		u8 tile_mask = bind_ops[i].tile_mask;
>  		u32 region = bind_ops[i].region;
>  
>  		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
> -						  addr, range, op, tile_mask,
> -						  region);
> +						  addr, range, op, flags,
> +						  tile_mask, region);
>  		if (IS_ERR(ops[i])) {
>  			err = PTR_ERR(ops[i]);
>  			ops[i] = NULL;
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 86f16d50e9cc..54123ee736b5 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -591,6 +591,8 @@ struct drm_xe_vm_bind_op {
>  #define XE_VM_BIND_OP_RESTART		0x3
>  #define XE_VM_BIND_OP_UNMAP_ALL		0x4
>  #define XE_VM_BIND_OP_PREFETCH		0x5
> +	/** @op: Bind operation to perform */
> +	__u32 op;
>  
>  #define XE_VM_BIND_FLAG_READONLY	(0x1 << 16)
>  	/*
> @@ -631,8 +633,8 @@ struct drm_xe_vm_bind_op {
>  	 * intended to implement VK sparse bindings.
>  	 */
>  #define XE_VM_BIND_FLAG_NULL		(0x1 << 19)
> -	/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */

Besides adding the flags, I believe it would be better if we also stopped
shifting the flag bits and masking the op (VM_BIND_OP()) in this same patch,
now that the operation and the flags no longer share a single field.
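
Something along these lines, for instance (just a sketch, and the exact bit
positions are arbitrary placeholders):

	#define XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
	#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 1)
	#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
	#define XE_VM_BIND_FLAG_NULL		(0x1 << 3)

The VM_BIND_OP() masking in xe_vm.c could then go away in the same series,
since op would carry nothing but an XE_VM_BIND_OP_* value.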

> -	__u32 op;
> +	/** @flags: Bind flags */
> +	__u32 flags;
>  
>  	/** @mem_region: Memory region to prefetch VMA to, instance not a mask */
>  	__u32 region;
> -- 
> 2.34.1
> 
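
With the split in place, a userspace caller would fill a bind op roughly
like below (sketch only; bo_handle, size and gpu_addr are placeholders):

	struct drm_xe_vm_bind_op bind_op = {
		.obj = bo_handle,		/* GEM handle of the BO to bind */
		.obj_offset = 0,		/* offset into the BO */
		.range = size,			/* size of the mapping */
		.addr = gpu_addr,		/* GPU virtual address */
		.op = XE_VM_BIND_OP_MAP,	/* operation only */
		.flags = XE_VM_BIND_FLAG_ASYNC,	/* flags only */
	};

instead of OR'ing the flags into op, which is the whole point of the change.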

