[PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Thu Jun 12 09:02:33 UTC 2025



On 30-05-2025 04:24, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:55PM +0530, Himal Prasad Ghimiray wrote:
>> In the case of the MADVISE ioctl, if the start or end address falls
>> within a VMA and existing SVM ranges are present, remove the existing
>> SVM mappings. Then continue with ops_parse, which creates new VMAs by
>> REMAP unmapping the old one.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_svm.c | 26 ++++++++++++++++++++++++++
>>   drivers/gpu/drm/xe/xe_svm.h |  8 ++++++++
>>   drivers/gpu/drm/xe/xe_vm.c  | 18 +++++++++++++++++-
>>   3 files changed, 51 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index a4d53c24fcbc..5691bb9dbf26 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -942,6 +942,32 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
>>   	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
>>   }
>>   
>> +/**
>> + * xe_svm_range_clean_if_addr_within - Clean SVM mappings and ranges
>> + * @vm: The VM
>> + * @start: start addr
>> + * @end: end addr
>> + *
>> + * This function cleans up SVM ranges if the start or end address is inside them.
>> + */
>> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> +	struct drm_gpusvm_notifier *notifier, *next;
>> +
> 
> lockdep_assert(vm lock in write mode);
> 
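
Noted, will add an assert here in the next revision; presumably

	lockdep_assert_held_write(&vm->lock);

assuming madvise takes the VM lock in write mode before calling this.
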
>> +	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
>> +		struct drm_gpusvm_range *range, *__next;
>> +
>> +		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
>> +			if (start > drm_gpusvm_range_start(range) ||
>> +			    end < drm_gpusvm_range_end(range)) {
>> +				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
>> +					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
> 
> Why evict here? I don't think that is required.

On a subsequent fault, the VRAM allocation for the smaller ranges fails
the first time with -EFAULT in drm_gpusvm_migrate_to_devmem() and
succeeds on retry. But a prefetch that goes through
drm_gpusvm_migrate_to_devmem() bails out, asking userspace to retry.
Hence the evict ensures that prefetches issued after this also work.
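
To spell out the intended ordering of the hunk above (same helpers as
in the diff; the comments are just my reasoning for the evict):

	/*
	 * Migrate the partially-covered range back to system memory
	 * first, so a later prefetch starts from clean SMEM state
	 * instead of hitting the first-attempt -EFAULT in
	 * drm_gpusvm_migrate_to_devmem() that prefetch cannot retry;
	 * then tear down the GPU bindings and free the range.
	 */
	if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
		drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
	__xe_svm_garbage_collector(vm, to_xe_range(range));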


> 
>> +				__xe_svm_garbage_collector(vm, to_xe_range(range));
>> +			}
>> +		}
>> +	}
>> +}
>> +
>>   /**
>>    * xe_svm_bo_evict() - SVM evict BO to system memory
>>    * @bo: BO to evict
>> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
>> index af8f285b6caa..b36f70ab3d03 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.h
>> +++ b/drivers/gpu/drm/xe/xe_svm.h
>> @@ -92,6 +92,9 @@ bool xe_svm_range_validate(struct xe_vm *vm,
>>   u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end,  struct xe_vma *vma);
>>   
>>   u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
>> +
>> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>> +
>>   /**
>>    * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>>    * @range: SVM range
>> @@ -312,6 +315,11 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>>   	return 0;
>>   }
>>   
>> +static inline
>> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> +}
> 
> Maybe...
> 
> s/xe_svm_range_clean_if_addr_within/xe_svm_unmap_address_range/
> 
> Or something better if you can think of one; either way I don't really
> like xe_svm_range_clean_if_addr_within.
> 

Makes sense. Will change.

>> +
>>   #define xe_svm_assert_in_notifier(...) do {} while (0)
>>   #define xe_svm_range_has_dma_mapping(...) false
>>   
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index c220bf904ee0..8208409485f6 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -2359,6 +2359,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>>   			op->map.pat_index = pat_index;
>>   			op->map.invalidate_on_bind =
>>   				__xe_vm_needs_clear_scratch_pages(vm, flags);
>> +		} else if (__op->op == DRM_GPUVA_OP_REMAP) {
>> +			struct xe_vma *old =
>> +				gpuva_to_vma(op->base.remap.unmap->va);
>> +			u64 start = xe_vma_start(old), end = xe_vma_end(old);
>> +
>> +			if (op->base.remap.prev)
>> +				start = op->base.remap.prev->va.addr +
>> +					op->base.remap.prev->va.range;
>> +			if (op->base.remap.next)
>> +				end = op->base.remap.next->va.addr;
>> +
>> +			if (xe_vma_is_cpu_addr_mirror(old) &&
>> +			    xe_svm_has_mapping(vm, start, end)) {
>> +				drm_gpuva_ops_free(&vm->gpuvm, ops);
>> +				return ERR_PTR(-EBUSY);
>> +			}
> 
> How about dropping this.
> 
>>   		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
>>   			struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
>>   			struct xe_svm_range *svm_range;
>> @@ -2662,7 +2678,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>>   
>>   			if (xe_vma_is_cpu_addr_mirror(old) &&
>>   			    xe_svm_has_mapping(vm, start, end))
>> -				return -EBUSY;
>> +				xe_svm_range_clean_if_addr_within(vm, start, end);
>>
> 
> And here add a flag to xe_vma_ops which says we are in madvise.
> 
> e.g. XE_VMA_OPS_FLAG_MADVISE
> 
> Then...
> 
> if (xe_vma_is_cpu_addr_mirror(old) &&
>      xe_svm_has_mapping(vm, start, end)) {
> 	if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
> 		xe_svm_range_clean_if_addr_within(vm, start, end);
> 	else
> 		return -EBUSY;
> }
> 
> Matt
>    
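
That's cleaner, will switch to it in the next revision. Rough sketch of
the wiring (the flags field and flag are new; names may still change):

	/* xe_vm_types.h: new flags field in struct xe_vma_ops */
	#define XE_VMA_OPS_FLAG_MADVISE		BIT(0)

	/* madvise path: mark the ops before parsing them */
	vops.flags |= XE_VMA_OPS_FLAG_MADVISE;

	/* vm_bind_ioctl_ops_parse() */
	if (xe_vma_is_cpu_addr_mirror(old) &&
	    xe_svm_has_mapping(vm, start, end)) {
		if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
			xe_svm_unmap_address_range(vm, start, end);
		else
			return -EBUSY;
	}
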
>>   			op->remap.start = xe_vma_start(old);
>>   			op->remap.range = xe_vma_size(old);
>> -- 
>> 2.34.1
>>


