[RFC 06/29] drm/xe/vm: Update xe_vma_ops_incr_pt_update_ops to take an increment value

Matthew Brost matthew.brost at intel.com
Fri Mar 28 02:56:21 UTC 2025


On Fri, Mar 14, 2025 at 01:32:03PM +0530, Himal Prasad Ghimiray wrote:
> Prefetch of SVM ranges may need to increment the PT update op count by
> more than one, hence modify the function to take the increment value
> as an argument.
> 
> Suggested-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 22 +++++++++++-----------
>  1 file changed, 11 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 60303998bd61..53a80c0af8de 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -806,13 +806,13 @@ static void xe_vma_ops_fini(struct xe_vma_ops *vops)
>  		kfree(vops->pt_update_ops[i].ops);
>  }
>  
> -static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
> +static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, u8 inc_val)
>  {
>  	int i;
>  
>  	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
>  		if (BIT(i) & tile_mask)
> -			++vops->pt_update_ops[i].num_ops;
> +			vops->pt_update_ops[i].num_ops += inc_val;
>  }
>  
>  static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
> @@ -842,7 +842,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
>  
>  	xe_vm_populate_rebind(op, vma, tile_mask);
>  	list_add_tail(&op->link, &vops->list);
> -	xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
> +	xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
>  
>  	return 0;
>  }
> @@ -977,7 +977,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
>  
>  	xe_vm_populate_range_rebind(op, vma, range, tile_mask);
>  	list_add_tail(&op->link, &vops->list);
> -	xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
> +	xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
>  
>  	return 0;
>  }
> @@ -1062,7 +1062,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
>  
>  	xe_vm_populate_range_unbind(op, range);
>  	list_add_tail(&op->link, &vops->list);
> -	xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
> +	xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
>  
>  	return 0;
>  }
> @@ -2475,7 +2475,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  			if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
>  			    !op->map.is_cpu_addr_mirror)
>  				xe_vma_ops_incr_pt_update_ops(vops,
> -							      op->tile_mask);
> +							      op->tile_mask, 1);
>  			break;
>  		}
>  		case DRM_GPUVA_OP_REMAP:
> @@ -2536,7 +2536,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  					       (ULL)op->remap.start,
>  					       (ULL)op->remap.range);
>  				} else {
> -					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
> +					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
>  				}
>  			}
>  
> @@ -2565,11 +2565,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  					       (ULL)op->remap.start,
>  					       (ULL)op->remap.range);
>  				} else {
> -					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
> +					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
>  				}
>  			}
>  			if (!skip)
> -				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
> +				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);

Maybe update the code here (the REMAP case) to call
xe_vma_ops_incr_pt_update_ops just once with an accumulated count. I
feel like that would be a bit cleaner and would use the new interface as
intended.
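
Something along these lines is what I have in mind -- a rough, untested
sketch (num_remap_ops is just a placeholder name) that elides the
unchanged parts of the REMAP case and tallies the increments into a
local before making a single call:

	case DRM_GPUVA_OP_REMAP:
	{
		u8 num_remap_ops = 0;

		...

		if (op->remap.prev) {
			...
			/* in the branch that currently does the increment */
			num_remap_ops++;
		}

		if (op->remap.next) {
			...
			/* likewise */
			num_remap_ops++;
		}

		if (!skip)
			num_remap_ops++;

		xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
		break;
	}

That keeps a single call per GPUVA op and gives the new inc_val argument
something to do even before the SVM prefetch patches start passing
values greater than one.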

Matt

>  			break;
>  		}
>  		case DRM_GPUVA_OP_UNMAP:
> @@ -2581,7 +2581,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  				return -EBUSY;
>  
>  			if (!xe_vma_is_cpu_addr_mirror(vma))
> -				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
> +				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
>  			break;
>  		case DRM_GPUVA_OP_PREFETCH:
>  			vma = gpuva_to_vma(op->base.prefetch.va);
> @@ -2593,7 +2593,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>  			}
>  
>  			if (!xe_vma_is_cpu_addr_mirror(vma))
> -				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
> +				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
>  			break;
>  		default:
>  			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
> -- 
> 2.34.1
> 

