[PATCH v2 25/32] drm/xe/svm: Add SVM ranges migration policy on atomic access

Ghimiray, Himal Prasad himal.prasad.ghimiray at intel.com
Tue May 20 10:22:09 UTC 2025



On 15-05-2025 03:51, Matthew Brost wrote:
> On Mon, Apr 07, 2025 at 03:47:12PM +0530, Himal Prasad Ghimiray wrote:
>> If the platform does not support atomic access on system memory, and the
>> ranges are in system memory, but the user requires atomic accesses on
>> the VMA, then migrate the ranges to VRAM. Apply this policy for prefetch
>> operations as well.
>>
> 
> I think the baseline was changed a bit here, but I believe it mostly
> makes sense. Will review again on the rebase.
> 
> One nit below.
> 
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_pt.c         |  9 +++++++--
>>   drivers/gpu/drm/xe/xe_svm.c        | 14 ++++++++++++--
>>   drivers/gpu/drm/xe/xe_vm.c         |  2 ++
>>   drivers/gpu/drm/xe/xe_vm_madvise.c | 11 ++++++++++-
>>   4 files changed, 31 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
>> index 2479d830d90a..ba9b30b25ded 100644
>> --- a/drivers/gpu/drm/xe/xe_pt.c
>> +++ b/drivers/gpu/drm/xe/xe_pt.c
>> @@ -645,13 +645,18 @@ static bool xe_atomic_for_vram(struct xe_vm *vm)
>>   	return true;
>>   }
>>   
>> -static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
>> +static bool xe_atomic_for_system(struct xe_vm *vm,
>> +				 struct xe_bo *bo,
>> +				 struct xe_vma *vma)
>>   {
>>   	struct xe_device *xe = vm->xe;
>>   
>>   	if (!xe->info.has_device_atomics_on_smem)
>>   		return false;
>>   
>> +	if (vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
>> +		return true;
>> +
>>   	/*
>>   	 * If a SMEM+LMEM allocation is backed by SMEM, a device
>>   	 * atomics will cause a gpu page fault and which then
>> @@ -745,7 +750,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
>>   
>>   	if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
>>   		xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
>> -		xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
>> +		xe_walk.default_system_pte = xe_atomic_for_system(vm, bo, vma) ?
>>   			XE_USM_PPGTT_PTE_AE : 0;
>>   	}
>>   
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index efcba4b77250..d40111e29bfe 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -717,6 +717,16 @@ static bool supports_4K_migration(struct xe_device *xe)
>>   	return false;
>>   }
>>   
>> +static bool needs_ranges_in_vram_to_support_atomic(struct xe_device *xe, struct xe_vma *vma)
>> +{
>> +	if (vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_UNDEFINED ||
>> +	    (xe->info.has_device_atomics_on_smem &&
>> +	     vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE))
>> +		return false;
>> +
>> +	return true;
>> +}
>> +
>>   /**
>>    * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
>>    * @range: SVM range for which migration needs to be decided
>> @@ -735,7 +745,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
>>   	if (!range->base.flags.migrate_devmem)
>>   		return false;
>>   
>> -	needs_migrate = region;
>> +	needs_migrate = needs_ranges_in_vram_to_support_atomic(vm->xe, vma) || region;
>>   
>>   	if (needs_migrate && !IS_DGFX(vm->xe)) {
>>   		drm_warn(&vm->xe->drm, "Platform doesn't support VRAM\n");
>> @@ -828,7 +838,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>>   
>>   	}
>>   
>> -	if (atomic)
>> +	if (atomic && needs_ranges_in_vram_to_support_atomic(vm->xe, vma))
>>   		ctx.vram_only = 1;
>>   
>>   	range_debug(range, "GET PAGES");
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index 92b8e0cac063..0f9c45ce82b4 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -2930,6 +2930,7 @@ static int prefetch_ranges_lock_and_prep(struct xe_vm *vm,
>>   		for (i = 0; i < op->prefetch_range.ranges_count; i++) {
>>   			svm_range = xa_load(&op->prefetch_range.range, i);
>>   			if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
>> +				region = region ? region : 1;
>>   				tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
>>   				err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
>>   				if (err) {
>> @@ -2938,6 +2939,7 @@ static int prefetch_ranges_lock_and_prep(struct xe_vm *vm,
>>   					return -ENODATA;
>>   				}
>>   				xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
>> +				ctx.vram_only = 1;
>>   			}
>>   
>>   			err = xe_svm_range_get_pages(vm, svm_range, &ctx);
>> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> index ef50031649e0..7e1a95106cb9 100644
>> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
>> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> @@ -69,7 +69,16 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
>>   			  struct xe_vma **vmas, int num_vmas,
>>   			  struct drm_xe_madvise_ops ops)
>>   {
>> -	/* Implementation pending */
>> +	int i;
>> +
>> +	xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
>> +	xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
>> +		  ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
>> +	vm_dbg(&xe->drm, "attr_value = %d", ops.atomic.val);
> 
> Again, I'm unsure if this debug message has a ton of value without
> knowing the VMA info.

Agreed, will address it in all the places.
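
If we keep the message, printing the VMA range next to the value would make it
more useful, roughly along these lines (just a sketch, assuming the existing
xe_vma_start()/xe_vma_end() helpers and moving the print into the per-VMA loop):

	vm_dbg(&xe->drm, "atomic_access = %d for vma [0x%016llx - 0x%016llx]",
	       ops.atomic.val, xe_vma_start(vmas[i]), xe_vma_end(vmas[i]));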

> 
> Matt
> 
>> +
>> +	for (i = 0; i < num_vmas; i++)
>> +		vmas[i]->attr.atomic_access = ops.atomic.val;
>> +	/* TODO: handle bo backed vmas */
>>   	return 0;
>>   }
>>   
>> -- 
>> 2.34.1
>>
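
For completeness, the policy described in the commit message corresponds to a
simple userspace flow: mark the VMA for device atomics via madvise, and
subsequent GPU faults/prefetches on it migrate the backing SVM ranges to VRAM
whenever the platform lacks atomics on system memory. A rough sketch only; the
ioctl plumbing and the remaining arguments come from the uapi patches earlier
in the series, so everything outside the ops initializer is hypothetical:

	/* Hypothetical userspace sketch: ask for device atomics on a VA range.
	 * With this patch, faults/prefetches on that range then pull the SVM
	 * ranges into VRAM when has_device_atomics_on_smem is not set.
	 */
	struct drm_xe_madvise_ops ops = {
		.type = DRM_XE_VMA_ATTR_ATOMIC,
		.atomic = { .val = DRM_XE_VMA_ATOMIC_DEVICE },
	};
	/* ... then wrap ops together with the VM id and the VA range into the
	 * madvise ioctl arguments and submit it to the driver. */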


