[Intel-xe] [PATCH 2/5] drm/xe: Use a flags field instead of bools for VMA create

Thomas Hellström thomas.hellstrom at linux.intel.com
Mon Dec 4 10:45:05 UTC 2023


On 11/16/23 20:40, Matthew Brost wrote:
> Use a flags field instead of several bools for VMA create as it is
> easier to read and less bug-prone.
>
> Suggested-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>

Hmm.

If you feel this was not really an improvement, feel free to
disregard. Anyway,

Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
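
As a side note for anyone skimming the archive: this is the classic
bools-to-bitmask refactor. A minimal standalone sketch of the idea
follows (all names here are illustrative, not the driver's; the patch
itself uses the kernel's BIT() macro for the defines):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the patch's VMA_CREATE_FLAG_* defines, which use BIT(). */
#define CREATE_FLAG_READ_ONLY	(1u << 0)
#define CREATE_FLAG_IS_NULL	(1u << 1)

/* Before: adjacent bools, trivially transposed at a call site. */
static void create_with_bools(bool read_only, bool is_null)
{
	printf("ro=%d null=%d\n", read_only, is_null);
}

/* After: one flags word; every set bit is named at the call site. */
static void create_with_flags(unsigned int flags)
{
	bool read_only = flags & CREATE_FLAG_READ_ONLY;
	bool is_null = flags & CREATE_FLAG_IS_NULL;

	printf("ro=%d null=%d\n", read_only, is_null);
}

int main(void)
{
	create_with_bools(false, true);	/* which bool is which? */
	create_with_flags(CREATE_FLAG_IS_NULL);
	return 0;
}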


> ---
>   drivers/gpu/drm/xe/xe_vm.c | 58 ++++++++++++++++++++------------------
>   1 file changed, 30 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index cd8c2332a22e..13c8a9c6117c 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -858,17 +858,21 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
>   	return fence;
>   }
>   
> +#define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
> +#define VMA_CREATE_FLAG_IS_NULL		BIT(1)
> +
>   static struct xe_vma *xe_vma_create(struct xe_vm *vm,
>   				    struct xe_bo *bo,
>   				    u64 bo_offset_or_userptr,
>   				    u64 start, u64 end,
> -				    bool read_only,
> -				    bool is_null,
> -				    u8 tile_mask)
> +				    u8 tile_mask,
> +				    unsigned int flags)
>   {
>   	struct xe_vma *vma;
>   	struct xe_tile *tile;
>   	u8 id;
> +	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
> +	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
>   
>   	xe_assert(vm->xe, start < end);
>   	xe_assert(vm->xe, end < vm->size);
> @@ -2263,7 +2267,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
>   }
>   
>   static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
> -			      u8 tile_mask, bool read_only, bool is_null)
> +			      u8 tile_mask, unsigned int flags)
>   {
>   	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
>   	struct xe_vma *vma;
> @@ -2278,8 +2282,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
>   	}
>   	vma = xe_vma_create(vm, bo, op->gem.offset,
>   			    op->va.addr, op->va.addr +
> -			    op->va.range - 1, read_only, is_null,
> -			    tile_mask);
> +			    op->va.range - 1, tile_mask, flags);
>   	if (bo)
>   		xe_bo_unlock(bo);
>   
> @@ -2405,7 +2408,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
>   
>   	drm_gpuva_for_each_op(__op, ops) {
>   		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
> +		struct xe_vma *vma;
>   		bool first = list_empty(ops_list);
> +		unsigned int flags = 0;
>   
>   		INIT_LIST_HEAD(&op->link);
>   		list_add_tail(&op->link, ops_list);
> @@ -2421,11 +2426,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
>   		switch (op->base.op) {
>   		case DRM_GPUVA_OP_MAP:
>   		{
> -			struct xe_vma *vma;
> +			flags |= op->map.read_only ?
> +				VMA_CREATE_FLAG_READ_ONLY : 0;
> +			flags |= op->map.is_null ?
> +				VMA_CREATE_FLAG_IS_NULL : 0;
>   
>   			vma = new_vma(vm, &op->base.map,
> -				      op->tile_mask, op->map.read_only,
> -				      op->map.is_null);
> +				      op->tile_mask, flags);
>   			if (IS_ERR(vma))
>   				return PTR_ERR(vma);
>   
> @@ -2441,17 +2448,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
>   			op->remap.range = xe_vma_size(old);
>   
>   			if (op->base.remap.prev) {
> -				struct xe_vma *vma;
> -				bool read_only =
> -					op->base.remap.unmap->va->flags &
> -					XE_VMA_READ_ONLY;
> -				bool is_null =
> -					op->base.remap.unmap->va->flags &
> -					DRM_GPUVA_SPARSE;
> +				flags |= op->base.remap.unmap->va->flags &
> +					XE_VMA_READ_ONLY ?
> +					VMA_CREATE_FLAG_READ_ONLY : 0;
> +				flags |= op->base.remap.unmap->va->flags &
> +					DRM_GPUVA_SPARSE ?
> +					VMA_CREATE_FLAG_IS_NULL : 0;
>   
>   				vma = new_vma(vm, op->base.remap.prev,
> -					      op->tile_mask, read_only,
> -					      is_null);
> +					      op->tile_mask, flags);
>   				if (IS_ERR(vma))
>   					return PTR_ERR(vma);
>   
> @@ -2474,18 +2479,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
>   			}
>   
>   			if (op->base.remap.next) {
> -				struct xe_vma *vma;
> -				bool read_only =
> -					op->base.remap.unmap->va->flags &
> -					XE_VMA_READ_ONLY;
> -
> -				bool is_null =
> -					op->base.remap.unmap->va->flags &
> -					DRM_GPUVA_SPARSE;
> +				flags |= op->base.remap.unmap->va->flags &
> +					XE_VMA_READ_ONLY ?
> +					VMA_CREATE_FLAG_READ_ONLY : 0;
> +				flags |= op->base.remap.unmap->va->flags &
> +					DRM_GPUVA_SPARSE ?
> +					VMA_CREATE_FLAG_IS_NULL : 0;
>   
>   				vma = new_vma(vm, op->base.remap.next,
> -					      op->tile_mask, read_only,
> -					      is_null);
> +					      op->tile_mask, flags);
>   				if (IS_ERR(vma))
>   					return PTR_ERR(vma);
>   
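
One idiom from the remap hunks worth spelling out: flags |= cond ?
FLAG : 0; relies on & binding tighter than ?:, so the masks need no
extra parentheses. Below is a simplified, self-contained sketch of
translating one flag namespace into another, as the prev/next paths
do with the unmapped VMA's flags (stand-in names and bit values, not
the real XE_VMA_* / DRM_GPUVA_* / VMA_CREATE_FLAG_* ones):

#include <stdio.h>

#define SRC_READ_ONLY	(1u << 2)	/* stand-in for XE_VMA_READ_ONLY */
#define SRC_SPARSE	(1u << 3)	/* stand-in for DRM_GPUVA_SPARSE */

#define DST_READ_ONLY	(1u << 0)	/* stand-ins for VMA_CREATE_FLAG_* */
#define DST_IS_NULL	(1u << 1)

/* Translate flag bits from one namespace to another, as the remap
 * hunks do when creating the prev/next VMAs from the unmapped one. */
static unsigned int translate(unsigned int src)
{
	unsigned int dst = 0;

	dst |= src & SRC_READ_ONLY ? DST_READ_ONLY : 0;
	dst |= src & SRC_SPARSE ? DST_IS_NULL : 0;
	return dst;
}

int main(void)
{
	printf("0x%x\n", translate(SRC_READ_ONLY | SRC_SPARSE)); /* 0x3 */
	return 0;
}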

