[PATCH v5 01/23] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Mon Jul 28 06:16:41 UTC 2025



On 28-07-2025 02:48, Matthew Brost wrote:
> On Tue, Jul 22, 2025 at 07:05:04PM +0530, Himal Prasad Ghimiray wrote:
>> - DRM_GPUVM_SM_MAP_NOT_MADVISE: Default sm_map operations for the input
>>   range.
>>
>> - DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE: This flag makes
>>   drm_gpuvm_sm_map_ops_create iterate over the GPUVMAs in the
>>   user-provided range and split an existing non-GEM-object VMA if the
>>   start or end of the input range lies within it. The operations can
>>   create up to 2 REMAPs and 2 MAPs. The Xe driver uses this mode to
>>   assign attributes to GPUVMAs within the user-defined range. Unlike
>>   the default mode, operations with this flag never generate UNMAPs
>>   or merges, and may produce no final MAP operation at all.
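>>
>> For example (illustrative addresses, both VMAs non-GEM), madvising the
>> range [0x2000, 0x6000) across VMAs A = [0x0, 0x4000) and
>> B = [0x4000, 0x8000) would produce:
>>
>>   REMAP(A): keep prev [0x0, 0x2000)
>>   MAP:      [0x2000, 0x4000)
>>   REMAP(B): keep next [0x6000, 0x8000)
>>   MAP:      [0x4000, 0x6000)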
>>
>> v2
>> - use drm_gpuvm_sm_map_ops_create with flags instead of defining new
>>    ops_create (Danilo)
>> - Add doc (Danilo)
>>
>> v3
>> - Fix doc
>> - Fix unmapping check
>>
>> v4
>> - Fix mapping for non madvise ops
>>
>> Cc: Danilo Krummrich <dakr at redhat.com>
>> Cc: Matthew Brost <matthew.brost at intel.com>
>> Cc: Boris Brezillon <bbrezillon at kernel.org>
>> Cc: <dri-devel at lists.freedesktop.org>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>>   drivers/gpu/drm/drm_gpuvm.c            | 93 ++++++++++++++++++++------
>>   drivers/gpu/drm/nouveau/nouveau_uvmm.c |  1 +
>>   drivers/gpu/drm/xe/xe_vm.c             |  1 +
>>   include/drm/drm_gpuvm.h                | 25 ++++++-
>>   4 files changed, 98 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
>> index e89b932e987c..c7779588ea38 100644
>> --- a/drivers/gpu/drm/drm_gpuvm.c
>> +++ b/drivers/gpu/drm/drm_gpuvm.c
>> @@ -2103,10 +2103,13 @@ static int
>>   __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   		   const struct drm_gpuvm_ops *ops, void *priv,
>>   		   u64 req_addr, u64 req_range,
>> +		   enum drm_gpuvm_sm_map_ops_flags flags,
>>   		   struct drm_gem_object *req_obj, u64 req_offset)
>>   {
>>   	struct drm_gpuva *va, *next;
>>   	u64 req_end = req_addr + req_range;
>> +	bool is_madvise_ops = (flags == DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE);
>> +	bool needs_map = !is_madvise_ops;
>>   	int ret;
>>   
>>   	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
>> @@ -2119,26 +2122,35 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   		u64 range = va->va.range;
>>   		u64 end = addr + range;
>>   		bool merge = !!va->gem.obj;
>> +		bool skip_madvise_ops = is_madvise_ops && merge;
>>   
>> +		needs_map = !is_madvise_ops;
>>   		if (addr == req_addr) {
>>   			merge &= obj == req_obj &&
>>   				 offset == req_offset;
>>   
>>   			if (end == req_end) {
>> -				ret = op_unmap_cb(ops, priv, va, merge);
>> -				if (ret)
>> -					return ret;
>> +				if (!is_madvise_ops) {
>> +					ret = op_unmap_cb(ops, priv, va, merge);
>> +					if (ret)
>> +						return ret;
>> +				}
>>   				break;
>>   			}
>>   
>>   			if (end < req_end) {
>> -				ret = op_unmap_cb(ops, priv, va, merge);
>> -				if (ret)
>> -					return ret;
>> +				if (!is_madvise_ops) {
>> +					ret = op_unmap_cb(ops, priv, va, merge);
>> +					if (ret)
>> +						return ret;
>> +				}
>>   				continue;
>>   			}
>>   
>>   			if (end > req_end) {
>> +				if (skip_madvise_ops)
>> +					break;
>> +
>>   				struct drm_gpuva_op_map n = {
>>   					.va.addr = req_end,
>>   					.va.range = range - req_range,
>> @@ -2153,6 +2165,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   				ret = op_remap_cb(ops, priv, NULL, &n, &u);
>>   				if (ret)
>>   					return ret;
>> +
>> +				if (is_madvise_ops)
>> +					needs_map = true;
>>   				break;
>>   			}
>>   		} else if (addr < req_addr) {
>> @@ -2170,20 +2185,42 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   			u.keep = merge;
>>   
>>   			if (end == req_end) {
>> +				if (skip_madvise_ops)
>> +					break;
>> +
>>   				ret = op_remap_cb(ops, priv, &p, NULL, &u);
>>   				if (ret)
>>   					return ret;
>> +
>> +				if (is_madvise_ops)
>> +					needs_map = true;
>> +
>>   				break;
>>   			}
>>   
>>   			if (end < req_end) {
>> +				if (skip_madvise_ops)
>> +					continue;
>> +
>>   				ret = op_remap_cb(ops, priv, &p, NULL, &u);
>>   				if (ret)
>>   					return ret;
>> +
>> +				if (is_madvise_ops) {
>> +					ret = op_map_cb(ops, priv, req_addr,
>> +							min(end - req_addr, req_end - end),
> 
> This doesn't look right.
> 
> This creates a new MAP operation to replace what the REMAP operation
> unmapped but didn't remap. In Xe debug speak, this is where we are:
> 
> REMAP:UNMAP
> REMAP:PREV
> MAP <-- This is the calculation we are doing.
> 
> We want the 'MAP' size here to be:
> 
> 'REMAP:UNMAP.end - REMAP:PREV.end'
> 
> Which is 'end - req_addr'. So delete the min() here and replace it
> with 'end - req_addr'.
> 
> Matt

True, will fix this.
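
Planning something like this for v6 (untested sketch):

	if (is_madvise_ops) {
		/* MAP covers [REMAP:PREV.end, REMAP:UNMAP.end) */
		ret = op_map_cb(ops, priv, req_addr,
				end - req_addr,
				NULL, req_offset);
		if (ret)
			return ret;
	}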

> 
>> +							NULL, req_offset);
>> +					if (ret)
>> +						return ret;
>> +				}
>> +
>>   				continue;
>>   			}
>>   
>>   			if (end > req_end) {
>> +				if (skip_madvise_ops)
>> +					break;
>> +
>>   				struct drm_gpuva_op_map n = {
>>   					.va.addr = req_end,
>>   					.va.range = end - req_end,
>> @@ -2195,6 +2232,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   				ret = op_remap_cb(ops, priv, &p, &n, &u);
>>   				if (ret)
>>   					return ret;
>> +
>> +				if (is_madvise_ops)
>> +					needs_map = true;
>>   				break;
>>   			}
>>   		} else if (addr > req_addr) {
>> @@ -2203,20 +2243,29 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   					   (addr - req_addr);
>>   
>>   			if (end == req_end) {
>> -				ret = op_unmap_cb(ops, priv, va, merge);
>> -				if (ret)
>> -					return ret;
>> +				if (!is_madvise_ops) {
>> +					ret = op_unmap_cb(ops, priv, va, merge);
>> +					if (ret)
>> +						return ret;
>> +				}
>> +
>>   				break;
>>   			}
>>   
>>   			if (end < req_end) {
>> -				ret = op_unmap_cb(ops, priv, va, merge);
>> -				if (ret)
>> -					return ret;
>> +				if (!is_madvise_ops) {
>> +					ret = op_unmap_cb(ops, priv, va, merge);
>> +					if (ret)
>> +						return ret;
>> +				}
>> +
>>   				continue;
>>   			}
>>   
>>   			if (end > req_end) {
>> +				if (skip_madvise_ops)
>> +					break;
>> +
>>   				struct drm_gpuva_op_map n = {
>>   					.va.addr = req_end,
>>   					.va.range = end - req_end,
>> @@ -2231,14 +2280,16 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
>>   				ret = op_remap_cb(ops, priv, NULL, &n, &u);
>>   				if (ret)
>>   					return ret;
>> +
>> +				if (is_madvise_ops)
>> +					return op_map_cb(ops, priv, addr,
>> +							(req_end - addr), NULL, req_offset);
>>   				break;
>>   			}
>>   		}
>>   	}
>> -
>> -	return op_map_cb(ops, priv,
>> -			 req_addr, req_range,
>> -			 req_obj, req_offset);
>> +	return needs_map ? op_map_cb(ops, priv, req_addr,
>> +			   req_range, req_obj, req_offset) : 0;
>>   }
>>   
>>   static int
>> @@ -2337,15 +2388,15 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
>>   		 struct drm_gem_object *req_obj, u64 req_offset)
>>   {
>>   	const struct drm_gpuvm_ops *ops = gpuvm->ops;
>> +	enum drm_gpuvm_sm_map_ops_flags flags = DRM_GPUVM_SM_MAP_NOT_MADVISE;
>>   
>>   	if (unlikely(!(ops && ops->sm_step_map &&
>>   		       ops->sm_step_remap &&
>>   		       ops->sm_step_unmap)))
>>   		return -EINVAL;
>>   
>> -	return __drm_gpuvm_sm_map(gpuvm, ops, priv,
>> -				  req_addr, req_range,
>> -				  req_obj, req_offset);
>> +	return __drm_gpuvm_sm_map(gpuvm, ops, priv, req_addr, req_range,
>> +				  flags, req_obj, req_offset);
>>   }
>>   EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
>>   
>> @@ -2487,6 +2538,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
>>    * @gpuvm: the &drm_gpuvm representing the GPU VA space
>>    * @req_addr: the start address of the new mapping
>>    * @req_range: the range of the new mapping
>> + * @flags: sm_map ops flag, determines whether this is a madvise operation
>>    * @req_obj: the &drm_gem_object to map
>>    * @req_offset: the offset within the &drm_gem_object
>>    *
>> @@ -2517,6 +2569,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
>>   struct drm_gpuva_ops *
>>   drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>>   			    u64 req_addr, u64 req_range,
>> +			    enum drm_gpuvm_sm_map_ops_flags flags,
>>   			    struct drm_gem_object *req_obj, u64 req_offset)
>>   {
>>   	struct drm_gpuva_ops *ops;
>> @@ -2536,7 +2589,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>>   	args.ops = ops;
>>   
>>   	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
>> -				 req_addr, req_range,
>> +				 req_addr, req_range, flags,
>>   				 req_obj, req_offset);
>>   	if (ret)
>>   		goto err_free_ops;
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
>> index 48f105239f42..26e13fcdbdb8 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
>> @@ -1303,6 +1303,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
>>   			op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
>>   							      op->va.addr,
>>   							      op->va.range,
>> +							      DRM_GPUVM_SM_MAP_NOT_MADVISE,
>>   							      op->gem.obj,
>>   							      op->gem.offset);
>>   			if (IS_ERR(op->ops)) {
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index 2035604121e6..b2ed99551b6e 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -2318,6 +2318,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>>   	case DRM_XE_VM_BIND_OP_MAP:
>>   	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
>>   		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
>> +						  DRM_GPUVM_SM_MAP_NOT_MADVISE,
>>   						  obj, bo_offset_or_userptr);
>>   		break;
>>   	case DRM_XE_VM_BIND_OP_UNMAP:
>> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
>> index 2a9629377633..c589b886a4fd 100644
>> --- a/include/drm/drm_gpuvm.h
>> +++ b/include/drm/drm_gpuvm.h
>> @@ -211,6 +211,27 @@ enum drm_gpuvm_flags {
>>   	DRM_GPUVM_USERBITS = BIT(1),
>>   };
>>   
>> +/**
>> + * enum drm_gpuvm_sm_map_ops_flags - flags for drm_gpuvm split/merge ops
>> + */
>> +enum drm_gpuvm_sm_map_ops_flags {
>> +	/**
>> +	 * @DRM_GPUVM_SM_MAP_NOT_MADVISE: default split/merge (sm_map) ops
>> +	 */
>> +	DRM_GPUVM_SM_MAP_NOT_MADVISE = 0,
>> +
>> +	/**
>> +	 * @DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE: This flag makes
>> +	 * drm_gpuvm_sm_map_ops_create iterate over the GPUVMAs in the
>> +	 * user-provided range and split an existing non-GEM-object VMA if
>> +	 * the start or end of the input range lies within it. The operations
>> +	 * can create up to 2 REMAPs and 2 MAPs. Unlike the default mode,
>> +	 * operations with this flag never generate UNMAPs or merges, and
>> +	 * may produce no final MAP operation at all.
>> +	 */
>> +	DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE = BIT(0),
>> +};
>> +
>>   /**
>>    * struct drm_gpuvm - DRM GPU VA Manager
>>    *
>> @@ -1059,8 +1080,8 @@ struct drm_gpuva_ops {
>>   #define drm_gpuva_next_op(op) list_next_entry(op, entry)
>>   
>>   struct drm_gpuva_ops *
>> -drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
>> -			    u64 addr, u64 range,
>> +drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
>> +			    enum drm_gpuvm_sm_map_ops_flags flags,
>>   			    struct drm_gem_object *obj, u64 offset);
>>   struct drm_gpuva_ops *
>>   drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
>> -- 
>> 2.34.1
>>
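
For reference, the madvise side is expected to call the new interface
along these lines (hypothetical sketch; the real caller lands in a
later patch of this series):

	ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
					  DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
					  NULL, 0);
	if (IS_ERR(ops))
		return PTR_ERR(ops);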


