[PATCH v5 17/23] drm/xe/bo: Update atomic_access attribute on madvise
Matthew Brost
matthew.brost at intel.com
Tue Jul 29 04:18:23 UTC 2025
On Tue, Jul 22, 2025 at 07:05:20PM +0530, Himal Prasad Ghimiray wrote:
> Update the bo's atomic_access attribute based on user-provided input and use
> it to determine whether the bo should be migrated to smem on a CPU fault.
>
> v2 (Matthew Brost)
> - Avoid CPU unmapping if bo is already in smem
> - Check atomics on smem too for ioctl
> - Add comments
>
> v3
> - Avoid migration in prefetch
>
> v4 (Matthew Brost)
> - Make sanity check function bool
> - Add assert for smem placement
> - Fix doc
>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
> drivers/gpu/drm/xe/xe_bo.c | 29 +++++++++++--
> drivers/gpu/drm/xe/xe_gt_pagefault.c | 2 +-
> drivers/gpu/drm/xe/xe_vm.c | 5 ++-
> drivers/gpu/drm/xe/xe_vm_madvise.c | 62 +++++++++++++++++++++++++++-
> 4 files changed, 91 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index 4e0355d0f406..f133fc54664e 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -1685,6 +1685,18 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
> }
> }
>
> +static bool should_migrate_to_smem(struct xe_bo *bo)
> +{
> + /*
> + * NOTE: The following atomic checks are platform-specific. For example,
> + * if a device supports CXL atomics, these may not be necessary or
> + * may behave differently.
> + */
> +
> + return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
> + bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
> +}
> +
> static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> {
> struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
> @@ -1693,7 +1705,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> struct xe_bo *bo = ttm_to_xe_bo(tbo);
> bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
> vm_fault_t ret;
> - int idx;
> + int idx, r = 0;
>
> if (needs_rpm)
> xe_pm_runtime_get(xe);
> @@ -1705,8 +1717,19 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> if (drm_dev_enter(ddev, &idx)) {
> trace_xe_bo_cpu_fault(bo);
>
> - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
> - TTM_BO_VM_NUM_PREFAULT);
> + if (should_migrate_to_smem(bo)) {
> + xe_assert(xe, bo->flags & XE_BO_FLAG_SYSTEM);
> +
> + r = xe_bo_migrate(bo, XE_PL_TT);
> + if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
> + ret = VM_FAULT_NOPAGE;
> + else if (r)
> + ret = VM_FAULT_SIGBUS;
> + }
> + if (!ret)
> + ret = ttm_bo_vm_fault_reserved(vmf,
> + vmf->vma->vm_page_prot,
> + TTM_BO_VM_NUM_PREFAULT);
> drm_dev_exit(idx);
> } else {
> ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
> diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> index 5a75d56d8558..c1cb69c6ada8 100644
> --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> @@ -84,7 +84,7 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
> if (err)
> return err;
>
> - if (atomic && IS_DGFX(vm->xe)) {
> + if (xe_vma_need_vram_for_atomic(vm->xe, vma, atomic)) {
Same as patch #11: on an atomic fault with DRM_XE_ATOMIC_CPU we nack the
fault, so I think the helper should also be defined to figure out what to
nack; rough sketch below.
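Something along these lines is what I have in mind, completely untested and
only a sketch; the nack out-parameter is made up for illustration:

static bool xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma,
					bool is_atomic, bool *nack)
{
	u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
		vma->attr.atomic_access;

	*nack = false;

	if (!is_atomic || !IS_DGFX(xe))
		return false;

	/* Same platform-specific caveat as the comment in the patch */
	switch (atomic_access) {
	case DRM_XE_ATOMIC_CPU:
		/* CPU-only atomics can never be serviced from a GPU fault */
		*nack = true;
		return false;
	case DRM_XE_ATOMIC_DEVICE:
		return !xe->info.has_device_atomics_on_smem;
	case DRM_XE_ATOMIC_GLOBAL:
		return true;
	default:
		return true;	/* keep whatever v5 does for the default */
	}
}

(Ordering of the !IS_DGFX() check versus the CPU nack probably needs more
thought, the above is just to show the shape.)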
> if (xe_vma_is_userptr(vma)) {
> err = -EACCES;
I think DRM_XE_ATOMIC_DEVICE now works for userptr too when
xe->info.has_device_atomics_on_smem is set; see the caller-side sketch after
this hunk.
> return err;
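On the caller side that would look roughly like the below, again untested and
assuming the helper sketched above. With this, a userptr using
DRM_XE_ATOMIC_DEVICE on a platform with has_device_atomics_on_smem never hits
the -EACCES path, which I think is what we want:

	bool nack;

	if (xe_vma_need_vram_for_atomic(vm->xe, vma, atomic, &nack)) {
		if (xe_vma_is_userptr(vma)) {
			/* Userptrs cannot be migrated to VRAM */
			err = -EACCES;
			return err;
		}
		/* existing VRAM migration path */
	} else if (nack) {
		/* e.g. DRM_XE_ATOMIC_CPU on a GPU atomic fault */
		err = -EACCES;
		return err;
	}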
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 2226b1eb46f1..5dc7cd7769f8 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -4200,6 +4200,9 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
> */
> bool xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
> {
> + u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
> + vma->attr.atomic_access;
> +
> if (!IS_DGFX(xe))
> return false;
>
> @@ -4208,7 +4211,7 @@ bool xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool
> * instance, on a device supporting CXL atomics, these would ideally
> * work universally without additional handling.
> */
> - switch (vma->attr.atomic_access) {
> + switch (atomic_access) {
> case DRM_XE_ATOMIC_DEVICE:
> return !xe->info.has_device_atomics_on_smem;
>
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index 1dc4d19a5f2a..727833780b4b 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -98,14 +98,28 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> struct xe_vma **vmas, int num_vmas,
> struct drm_xe_madvise *op)
> {
> + struct xe_bo *bo;
> int i;
>
> xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC);
> xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
>
> - for (i = 0; i < num_vmas; i++)
> + for (i = 0; i < num_vmas; i++) {
> vmas[i]->attr.atomic_access = op->atomic.val;
> - /*TODO: handle bo backed vmas */
> +
> + bo = xe_vma_bo(vmas[i]);
> + if (!bo)
> + continue;
> +
> + xe_bo_assert_held(bo);
> + bo->attr.atomic_access = op->atomic.val;
> +
> + /* Invalidate cpu page table, so bo can migrate to smem in next access */
> + if (xe_bo_is_vram(bo) &&
> + (bo->attr.atomic_access == DRM_XE_ATOMIC_CPU ||
> + bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL))
> + ttm_bo_unmap_virtual(&bo->ttm);
> + }
> }
>
> static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> @@ -253,6 +267,41 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv
> return true;
> }
>
> +static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas,
> + int num_vmas, u32 atomic_val)
> +{
> + struct xe_device *xe = vm->xe;
> + struct xe_bo *bo;
> + int i;
> +
> + for (i = 0; i < num_vmas; i++) {
> + bo = xe_vma_bo(vmas[i]);
> + if (!bo)
> + continue;
I think for userptr VMAs we should reject DRM_XE_ATOMIC_GLOBAL, since that
cannot work given we can't migrate userptrs. Likewise, DRM_XE_ATOMIC_DEVICE
should be rejected for a userptr when xe->info.has_device_atomics_on_smem is
clear; see the sketch below.
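Untested, and only meant to show the shape, at the top of the loop in place
of the plain continue:

	bo = xe_vma_bo(vmas[i]);
	if (!bo) {
		if (XE_IOCTL_DBG(xe, xe_vma_is_userptr(vmas[i]) &&
				 atomic_val == DRM_XE_ATOMIC_GLOBAL))
			return false;

		if (XE_IOCTL_DBG(xe, xe_vma_is_userptr(vmas[i]) &&
				 atomic_val == DRM_XE_ATOMIC_DEVICE &&
				 !xe->info.has_device_atomics_on_smem))
			return false;

		continue;
	}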
Matt
> + /*
> + * NOTE: The following atomic checks are platform-specific. For example,
> + * if a device supports CXL atomics, these may not be necessary or
> + * may behave differently.
> + */
> + if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_CPU &&
> + !(bo->flags & XE_BO_FLAG_SYSTEM)))
> + return false;
> +
> + if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_DEVICE &&
> + !(bo->flags & XE_BO_FLAG_VRAM0) &&
> + !(bo->flags & XE_BO_FLAG_VRAM1) &&
> + !(bo->flags & XE_BO_FLAG_SYSTEM &&
> + xe->info.has_device_atomics_on_smem)))
> + return false;
> +
> + if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_GLOBAL &&
> + (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
> + (!(bo->flags & XE_BO_FLAG_VRAM0) &&
> + !(bo->flags & XE_BO_FLAG_VRAM1)))))
> + return false;
> + }
> + return true;
> +}
> /**
> * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
> * @dev: DRM device pointer
> @@ -302,6 +351,15 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
> goto unlock_vm;
>
> if (madvise_range.has_bo_vmas) {
> + if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
> + if (!check_bo_args_are_sane(vm, madvise_range.vmas,
> + madvise_range.num_vmas,
> + args->atomic.val)) {
> + err = -EINVAL;
> + goto unlock_vm;
> + }
> + }
> +
> drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
> drm_exec_until_all_locked(&exec) {
> for (int i = 0; i < madvise_range.num_vmas; i++) {
> --
> 2.34.1
>