[PATCH v2] drm/xe: Misc refine for svm

Matthew Brost matthew.brost at intel.com
Fri Aug 8 05:36:15 UTC 2025


On Thu, Aug 07, 2025 at 10:53:43PM -0600, Lin, Shuicheng wrote:
> Hi maintainers,
> Could you please help review and merge this patch?
> Thanks in advance.
> 

Bad timing: I'm going to suggest we hold off to make Himal's life a bit
easier. I think we are a couple of days away from merging [1], and after
that merge we can rebase this one on top.

Matt

[1] https://patchwork.freedesktop.org/series/149550/

> Best Regards
> Shuicheng
> 
> On Mon, July 21, 2025 11:27 AM, Matthew Brost wrote:
> > On Mon, Jul 21, 2025 at 03:33:40PM +0000, Shuicheng Lin wrote:
> > > The changes below should have no functional effect (items 2 and 5
> > > are sketched in isolation after the patch):
> > > 1. Correct the typo of "operation" in the range_debug() macro.
> > > 2. Combine the two spin_lock() calls in xe_svm_garbage_collector()
> > >    into one.
> > > 3. Remove the preferred_region_is_vram check in
> > >    xe_svm_range_needs_migrate_to_vram(), as it is already checked
> > >    at the beginning of the function.
> > > 4. Combine the repeated devmem_possible checks in
> > >    xe_svm_handle_pagefault().
> > > 5. Combine the two xe_vm_unlock() calls in xe_svm_handle_pagefault()
> > >    into one.
> > >
> > > v2: revert !ctx.devmem_only change (Matt)
> > >
> > > Cc: Matthew Brost <matthew.brost at intel.com>
> > 
> > Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> > 
> > > Signed-off-by: Shuicheng Lin <shuicheng.lin at intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/xe_svm.c | 30 ++++++++++++------------------
> > >  1 file changed, 12 insertions(+), 18 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > > index 10c8a1bcb86e..a35bca95ab95 100644
> > > --- a/drivers/gpu/drm/xe/xe_svm.c
> > > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > > @@ -50,11 +50,11 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
> > >  	return gpusvm_to_vm(r->gpusvm);
> > >  }
> > >
> > > -#define range_debug(r__, operaton__)				\
> > > +#define range_debug(r__, operation__)				\
> > >  	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,			\
> > >  	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
> > >  	       "start=0x%014lx, end=0x%014lx, size=%lu",		\
> > > -	       (operaton__), range_to_vm(&(r__)->base)->usm.asid,	\
> > > +	       (operation__), range_to_vm(&(r__)->base)->usm.asid,	\
> > >  	       (r__)->base.gpusvm,					\
> > >  	       xe_svm_range_in_vram((r__)) ? 1 : 0,			\
> > >  	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,		\
> > > @@ -263,8 +263,8 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
> > >  	if (xe_vm_is_closed_or_banned(vm))
> > >  		return -ENOENT;
> > >
> > > -	spin_lock(&vm->svm.garbage_collector.lock);
> > >  	for (;;) {
> > > +		spin_lock(&vm->svm.garbage_collector.lock);
> > >  		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
> > >  						 typeof(*range),
> > >  						 garbage_collector_link);
> > > @@ -282,8 +282,6 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
> > >  			xe_vm_kill(vm, true);
> > >  			return err;
> > >  		}
> > > -
> > > -		spin_lock(&vm->svm.garbage_collector.lock);
> > >  	}
> > >  	spin_unlock(&vm->svm.garbage_collector.lock);
> > >
> > > @@ -763,12 +761,12 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
> > >
> > >  	xe_assert(vm->xe, IS_DGFX(vm->xe));
> > >
> > > -	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
> > > +	if (xe_svm_range_in_vram(range)) {
> > >  		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
> > >  		return false;
> > >  	}
> > >
> > > -	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
> > > +	if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
> > >  		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
> > >  		return false;
> > >  	}
> > > @@ -793,16 +791,14 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> > >  			    struct xe_gt *gt, u64 fault_addr,
> > >  			    bool atomic)
> > >  {
> > > +	int devmem_possible = IS_DGFX(vm->xe) &&
> > > +		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
> > >  	struct drm_gpusvm_ctx ctx = {
> > >  		.read_only = xe_vma_read_only(vma),
> > > -		.devmem_possible = IS_DGFX(vm->xe) &&
> > > -			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
> > > -		.check_pages_threshold = IS_DGFX(vm->xe) &&
> > > -			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
> > > -		.devmem_only = atomic && IS_DGFX(vm->xe) &&
> > > -			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
> > > -		.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
> > > -			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
> > > +		.devmem_possible = devmem_possible,
> > > +		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
> > > +		.devmem_only = atomic && devmem_possible,
> > > +		.timeslice_ms = atomic && devmem_possible ?
> > >  			vm->xe->atomic_svm_timeslice_ms : 0,
> > >  	};
> > >  	struct xe_svm_range *range;
> > > @@ -882,8 +878,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> > >  retry_bind:
> > >  	xe_vm_lock(vm, false);
> > >  	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
> > > +	xe_vm_unlock(vm);
> > >  	if (IS_ERR(fence)) {
> > > -		xe_vm_unlock(vm);
> > >  		err = PTR_ERR(fence);
> > >  		if (err == -EAGAIN) {
> > >  			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
> > > @@ -894,13 +890,11 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> > >  			goto retry_bind;
> > >  		goto err_out;
> > >  	}
> > > -	xe_vm_unlock(vm);
> > >
> > >  	dma_fence_wait(fence, false);
> > >  	dma_fence_put(fence);
> > >
> > >  err_out:
> > > -
> > >  	return err;
> > >  }
> > >
> > > --
> > > 2.49.0
> > >
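For readers skimming the archive: change 2 reshapes the garbage-collector
drain loop so the lock is taken once at the top of each iteration instead
of once before the loop plus again at the bottom of every pass. A minimal,
self-contained sketch of that shape follows; the demo_* names are
hypothetical stand-ins, not the driver's real structures.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for the driver's garbage-collector state. */
struct demo_gc {
	spinlock_t lock;
	struct list_head range_list;
};

struct demo_range {
	struct list_head link;
};

static void demo_drain(struct demo_gc *gc)
{
	struct demo_range *range;

	for (;;) {
		/* One lock acquisition per iteration. */
		spin_lock(&gc->lock);
		range = list_first_entry_or_null(&gc->range_list,
						 typeof(*range), link);
		if (!range)
			break;	/* leave the loop with the lock held */

		list_del(&range->link);
		spin_unlock(&gc->lock);

		/* ... process the detached range without the lock ... */
	}
	spin_unlock(&gc->lock);	/* pairs with the final iteration's lock */
}

The gain is purely structural: the lock/unlock pairing is visible within
a single iteration instead of straddling the loop boundary.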
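Change 5 relies on the fact that IS_ERR()/PTR_ERR() only decode the
returned pointer value and need no lock, so xe_vm_unlock() can move above
the error check and serve both paths. A hedged sketch of that shape with
a plain mutex; demo_bind() and demo_op() are hypothetical names.

#include <linux/err.h>
#include <linux/mutex.h>

/* demo_op() stands in for a locked operation that returns either a
 * valid pointer or an ERR_PTR()-encoded error. */
static int demo_bind(struct mutex *lock, void *(*demo_op)(void))
{
	void *obj;

	mutex_lock(lock);
	obj = demo_op();
	mutex_unlock(lock);	/* one unlock serves both outcomes */

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* pointer decoding needs no lock */

	/* ... use obj outside the critical section ... */
	return 0;
}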

