[PATCH v2 4/7] drm/xe: Move vm bind bo validation to a helper function

Matthew Brost matthew.brost at intel.com
Fri Apr 12 18:37:25 UTC 2024


On Fri, Apr 12, 2024 at 11:22:46AM +0200, Nirmoy Das wrote:
> Move vm bind bo validation to a helper function to make the
> xe_vm_bind_ioctl() more readable.
> 
> Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 73 +++++++++++++++++++++-----------------
>  1 file changed, 41 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 7991d88fba22..5e633014f017 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2972,6 +2972,44 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
>  	return err;
>  }
>  
> +static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
> +					u64 addr, u64 range, u64 obj_offset,
> +					u16 coh_mode, u16 pat_index)
> +{
> +	if (XE_IOCTL_DBG(xe, range > bo->size) ||
> +	    XE_IOCTL_DBG(xe, obj_offset >
> +			 bo->size - range)) {
> +		return -EINVAL;
> +	}
> +
> +	if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
> +		if (XE_IOCTL_DBG(xe, obj_offset &
> +				 XE_64K_PAGE_MASK) ||
> +		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
> +		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
> +			return  -EINVAL;
> +		}
> +	}
> +
> +	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
> +	if (bo->cpu_caching) {
> +		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
> +				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
> +			return  -EINVAL;
> +		}
> +	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
> +		/*
> +		 * Imported dma-buf from a different device should
> +		 * require 1way or 2way coherency since we don't know
> +		 * how it was mapped on the CPU. Just assume is it
> +		 * potentially cached on CPU side.
> +		 */
> +		return  -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
>  int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  {
>  	struct xe_device *xe = to_xe_device(dev);
> @@ -3067,40 +3105,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		}
>  		bos[i] = gem_to_xe_bo(gem_obj);
>  
> -		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
> -		    XE_IOCTL_DBG(xe, obj_offset >
> -				 bos[i]->size - range)) {
> -			err = -EINVAL;
> +		if (xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
> +						 obj_offset, coh_mode,
> +						 pat_index))

err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
				   obj_offset, coh_mode,
				   pat_index);
if (err)
	goto put_obj;

Otherwise LGTM, thanks for the patch.

Matt

>  			goto put_obj;
> -		}
>  
> -		if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
> -			if (XE_IOCTL_DBG(xe, obj_offset &
> -					 XE_64K_PAGE_MASK) ||
> -			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
> -			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
> -				err = -EINVAL;
> -				goto put_obj;
> -			}
> -		}
> -
> -		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
> -		if (bos[i]->cpu_caching) {
> -			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
> -					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
> -				err = -EINVAL;
> -				goto put_obj;
> -			}
> -		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
> -			/*
> -			 * Imported dma-buf from a different device should
> -			 * require 1way or 2way coherency since we don't know
> -			 * how it was mapped on the CPU. Just assume is it
> -			 * potentially cached on CPU side.
> -			 */
> -			err = -EINVAL;
> -			goto put_obj;
> -		}
>  	}
>  
>  	if (args->num_syncs) {
> -- 
> 2.42.0
> 