[Intel-xe] [PATCH 05/17] drm/xe/vm: Remove VM_BIND_OP macro

Matthew Brost matthew.brost at intel.com
Tue Sep 19 14:41:59 UTC 2023


On Tue, Sep 19, 2023 at 10:24:52AM -0400, Rodrigo Vivi wrote:
> From: Francois Dugast <francois.dugast at intel.com>
> 
> This macro was necessary when the bind operation and its flags were
> packed into a single field, but this is no longer the case, so remove
> it to simplify the code.
> 
> Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>

Reviewed-by: Matthew Brost <matthew.brost at intel.com>
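
For anyone reading this without the earlier patches in the series: the
operation used to live in the low 16 bits of a single u32 shared with
its flag bits, which is what VM_BIND_OP() masked out. A rough standalone
sketch of the before/after (the values below are illustrative, not
necessarily the exact uAPI encoding):

#include <stdint.h>
#include <stdio.h>

/* Illustrative reconstruction of the old packed layout: operation in
 * the low 16 bits, flag bits above it.
 */
#define VM_BIND_OP(op)		((op) & 0xffff)
#define XE_VM_BIND_OP_MAP	0x0
#define XE_VM_BIND_FLAG_ASYNC	(0x1 << 17)

int main(void)
{
	uint32_t packed = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC;

	/* Before: flags had to be stripped to recover the operation. */
	if (VM_BIND_OP(packed) == XE_VM_BIND_OP_MAP)
		printf("packed:   op is MAP\n");

	/* After the op/flags split earlier in the series, op arrives in
	 * its own ioctl field, so it can be compared directly and the
	 * macro is dead code.
	 */
	uint32_t op = XE_VM_BIND_OP_MAP;

	if (op == XE_VM_BIND_OP_MAP)
		printf("separate: op is MAP\n");

	return 0;
}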

> ---
>  drivers/gpu/drm/xe/xe_vm.c | 42 +++++++++++++++++---------------------
>  1 file changed, 19 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 7579c9537bfa..ab132b88c135 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2134,8 +2134,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
>  	}
>  }
>  
> -#define VM_BIND_OP(op)	(op & 0xffff)
> -
>  struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
>  {
>  	int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
> @@ -2160,7 +2158,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
>  
>  	lockdep_assert_held(&vm->lock);
>  
> -	switch (VM_BIND_OP(op)) {
> +	switch (op) {
>  	case XE_VM_BIND_OP_MAP:
>  	case XE_VM_BIND_OP_MAP_USERPTR:
>  		vma = xe_vm_find_overlapping_vma(vm, addr, range);
> @@ -2269,10 +2267,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>  
>  	vm_dbg(&vm->xe->drm,
>  	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
> -	       VM_BIND_OP(operation), (ULL)addr, (ULL)range,
> +	       operation, (ULL)addr, (ULL)range,
>  	       (ULL)bo_offset_or_userptr);
>  
> -	switch (VM_BIND_OP(operation)) {
> +	switch (operation) {
>  	case XE_VM_BIND_OP_MAP:
>  	case XE_VM_BIND_OP_MAP_USERPTR:
>  		ops = drm_gpuva_sm_map_ops_create(&vm->mgr, addr, range,
> @@ -3110,50 +3108,48 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
>  			*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
>  		} else if (XE_IOCTL_DBG(xe, !*async) ||
>  			   XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
> -			   XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
> -					XE_VM_BIND_OP_RESTART)) {
> +			   XE_IOCTL_DBG(xe, op == XE_VM_BIND_OP_RESTART)) {
>  			err = -EINVAL;
>  			goto free_bind_ops;
>  		}
>  
>  		if (XE_IOCTL_DBG(xe, !*async &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
> +				 op == XE_VM_BIND_OP_UNMAP_ALL)) {
>  			err = -EINVAL;
>  			goto free_bind_ops;
>  		}
>  
>  		if (XE_IOCTL_DBG(xe, !*async &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
> +				 op == XE_VM_BIND_OP_PREFETCH)) {
>  			err = -EINVAL;
>  			goto free_bind_ops;
>  		}
>  
> -		if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
> -				 XE_VM_BIND_OP_PREFETCH) ||
> +		if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
>  		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
>  		    XE_IOCTL_DBG(xe, obj && is_null) ||
>  		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
> -		    XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
> +		    XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
>  				 is_null) ||
>  		    XE_IOCTL_DBG(xe, !obj &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
> +				 op == XE_VM_BIND_OP_MAP &&
>  				 !is_null) ||
>  		    XE_IOCTL_DBG(xe, !obj &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
> +				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
>  		    XE_IOCTL_DBG(xe, addr &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
> +				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
>  		    XE_IOCTL_DBG(xe, range &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
> +				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
>  		    XE_IOCTL_DBG(xe, obj &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
> +				 op == XE_VM_BIND_OP_MAP_USERPTR) ||
>  		    XE_IOCTL_DBG(xe, obj &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
> +				 op == XE_VM_BIND_OP_PREFETCH) ||
>  		    XE_IOCTL_DBG(xe, region &&
> -				 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
> +				 op != XE_VM_BIND_OP_PREFETCH) ||
>  		    XE_IOCTL_DBG(xe, !(BIT(region) &
>  				       xe->info.mem_region_mask)) ||
>  		    XE_IOCTL_DBG(xe, obj &&
> -				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
> +				 op == XE_VM_BIND_OP_UNMAP)) {
>  			err = -EINVAL;
>  			goto free_bind_ops;
>  		}
> @@ -3161,9 +3157,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
>  		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
>  		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
>  		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
> -		    XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
> +		    XE_IOCTL_DBG(xe, !range && op !=
>  				 XE_VM_BIND_OP_RESTART &&
> -				 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
> +				 op != XE_VM_BIND_OP_UNMAP_ALL)) {
>  			err = -EINVAL;
>  			goto free_bind_ops;
>  		}
> @@ -3227,7 +3223,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		goto release_vm_lock;
>  	}
>  
> -	if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
> +	if (bind_ops[0].op == XE_VM_BIND_OP_RESTART) {
>  		if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
>  			err = -EOPNOTSUPP;
>  		if (XE_IOCTL_DBG(xe, !err && args->num_syncs))
> -- 
> 2.41.0
> 
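
One nice side effect, if I'm reading the checks right: stray high bits
in op used to be masked off before the upper-bound test, so rejecting
them depended on the flags validation; now they fail
op > XE_VM_BIND_OP_PREFETCH directly and the ioctl returns -EINVAL. A
standalone illustration (values are examples, not the exact uAPI
numbers):

#include <stdint.h>
#include <stdio.h>

#define VM_BIND_OP(op)		((op) & 0xffff)	/* the removed macro */
#define XE_VM_BIND_OP_MAP	0x0		/* illustrative values */
#define XE_VM_BIND_OP_PREFETCH	0x5

int main(void)
{
	uint32_t op = XE_VM_BIND_OP_MAP | (1u << 20);	/* bogus high bit */

	/* Old check: the mask hid the bogus bit, so this op passed. */
	printf("old: %s\n", VM_BIND_OP(op) > XE_VM_BIND_OP_PREFETCH ?
	       "rejected" : "accepted");

	/* New check: the raw compare catches it. */
	printf("new: %s\n", op > XE_VM_BIND_OP_PREFETCH ?
	       "rejected" : "accepted");

	return 0;
}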

