[PATCH v2 24/32] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise

Matthew Brost matthew.brost at intel.com
Tue May 27 17:32:02 UTC 2025


On Tue, May 20, 2025 at 03:51:26PM +0530, Ghimiray, Himal Prasad wrote:
> 
> 
> On 15-05-2025 00:50, Matthew Brost wrote:
> > On Mon, Apr 07, 2025 at 03:47:11PM +0530, Himal Prasad Ghimiray wrote:
> > > In the case of the MADVISE ioctl, if the start or end address falls
> > > within a VMA and existing SVM ranges are present, remove the existing
> > > SVM mappings. Then continue with ops_parse to create new VMAs via REMAP
> > > unmapping of the old one.
> > > 
> > 
> > I'm quite confused why this patch is needed. Why is invalidating the
> > ranges not sufficient?
> 
> How is madvise supposed to behave if the start or end of the input range
> falls within an existing svm range?
> For example, assume an svm_range of 2 MiB exists covering [offset, offset + SZ_2M),
> and madvise is called with offset as start and offset + SZ_1M as end. In this
> scenario the vma boundaries will change, and the previous svm_ranges need to
> be removed.
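> 
> In code terms, the scenario is something like this (a standalone sketch with
> made-up names and the offset value picked arbitrarily; only offset, SZ_1M and
> SZ_2M come from the example above, nothing here is the actual driver code):
> 
>     #include <stdbool.h>
>     #include <stdint.h>
>     #include <stdio.h>
> 
>     #define SZ_1M (1ull << 20)
>     #define SZ_2M (1ull << 21)
> 
>     int main(void)
>     {
>         uint64_t offset = SZ_2M;                 /* arbitrary aligned VA */
>         /* existing svm_range covers [offset, offset + SZ_2M) */
>         uint64_t range_start = offset, range_end = offset + SZ_2M;
>         /* madvise input covers [offset, offset + SZ_1M) */
>         uint64_t madv_start = offset, madv_end = offset + SZ_1M;
> 
>         /* the new vma boundary at madv_end cuts through the old range */
>         bool partial = madv_start > range_start || madv_end < range_end;
> 
>         printf("old svm_range must be removed: %s\n", partial ? "yes" : "no");
>         return 0;
>     }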
> 

Right, this is a weird corner case that needs to be handled. Will review
this patch in detail in your latest rev.
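
If I'm reading it right, the interesting bit is the partial-overlap check in
the new helper - roughly this (simplified from the patch below, all names
taken from it, with my reading of the logic added as comments):

	drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
		/*
		 * Only tear down ranges the new vma boundary cuts through.
		 * Ranges fully contained in [start, end) are left alone and
		 * covered by the normal invalidation path.
		 */
		if (start > drm_gpusvm_range_start(range) ||
		    end < drm_gpusvm_range_end(range)) {
			/* migrate VRAM-backed ranges to sysmem before freeing */
			if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
				drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
			__xe_svm_garbage_collector(vm, to_xe_range(range));
		}
	}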

Matt

> > 
> > Matt
> > 
> > > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> > > ---
> > >   drivers/gpu/drm/xe/xe_svm.c | 25 +++++++++++++++++++++++++
> > >   drivers/gpu/drm/xe/xe_svm.h |  7 +++++++
> > >   drivers/gpu/drm/xe/xe_vm.c  | 18 +++++++++++++++++-
> > >   3 files changed, 49 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > > index 7ec7ecd7eb1f..efcba4b77250 100644
> > > --- a/drivers/gpu/drm/xe/xe_svm.c
> > > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > > @@ -903,6 +903,31 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
> > >   	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
> > >   }
> > > +/**
> > > + * xe_svm_range_clean_if_addr_within - Clean SVM mappings and ranges
> > > + * @start: start addr
> > > + * @end: end addr
> > > + *
> > > + * This function cleans up svm ranges if start or end address are inside them.
> > > + */
> > > +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
> > > +{
> > > +	struct drm_gpusvm_notifier *notifier, *next;
> > > +
> > > +	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
> > > +		struct drm_gpusvm_range *range, *__next;
> > > +
> > > +		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
> > > +			if (start > drm_gpusvm_range_start(range) ||
> > > +			    end < drm_gpusvm_range_end(range)) {
> > > +				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
> > > +					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
> > > +				__xe_svm_garbage_collector(vm, to_xe_range(range));
> > > +			}
> > > +		}
> > > +	}
> > > +}
> > > +
> > >   /**
> > >    * xe_svm_bo_evict() - SVM evict BO to system memory
> > >    * @bo: BO to evict
> > > diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> > > index d5be8229ca7e..d00ba6d6ba53 100644
> > > --- a/drivers/gpu/drm/xe/xe_svm.h
> > > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > > @@ -98,6 +98,8 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> > >   bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
> > >   					u32 region);
> > > +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
> > > +
> > >   /**
> > >    * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> > >    * @range: SVM range
> > > @@ -291,6 +293,11 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
> > >   	return false;
> > >   }
> > > +static inline
> > > +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
> > > +{
> > > +}
> > > +
> > >   #define xe_svm_assert_in_notifier(...) do {} while (0)
> > >   #define xe_svm_range_has_dma_mapping(...) false
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index c7c012afe9eb..92b8e0cac063 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -2362,6 +2362,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
> > >   			op->map.pat_index = pat_index;
> > >   			op->map.invalidate_on_bind =
> > >   				__xe_vm_needs_clear_scratch_pages(vm, flags);
> > > +		} else if (__op->op == DRM_GPUVA_OP_REMAP) {
> > > +			struct xe_vma *old =
> > > +				gpuva_to_vma(op->base.remap.unmap->va);
> > > +			u64 start = xe_vma_start(old), end = xe_vma_end(old);
> > > +
> > > +			if (op->base.remap.prev)
> > > +				start = op->base.remap.prev->va.addr +
> > > +					op->base.remap.prev->va.range;
> > > +			if (op->base.remap.next)
> > > +				end = op->base.remap.next->va.addr;
> > > +
> > > +			if (xe_vma_is_cpu_addr_mirror(old) &&
> > > +			    xe_svm_has_mapping(vm, start, end)) {
> > > +				drm_gpuva_ops_free(&vm->gpuvm, ops);
> > > +				return ERR_PTR(-EBUSY);
> > > +			}
> > >   		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
> > >   			struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
> > > @@ -2653,7 +2669,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
> > >   			if (xe_vma_is_cpu_addr_mirror(old) &&
> > >   			    xe_svm_has_mapping(vm, start, end))
> > > -				return -EBUSY;
> > > +				xe_svm_range_clean_if_addr_within(vm, start, end);
> > >   			op->remap.start = xe_vma_start(old);
> > >   			op->remap.range = xe_vma_size(old);
> > > -- 
> > > 2.34.1
> > > 
> 
