[PATCH v6 3/6] drm/xe/xe_vm: Add per VM pagefault info
Lin, Shuicheng
shuicheng.lin at intel.com
Fri Mar 7 23:19:29 UTC 2025
On Fri, Mar 7, 2025 2:41 PM, Cavitt, Jonathan wrote:
> Add additional information to each VM so they can report up to the first
> 50 seen pagefaults. Only failed pagefaults are saved this way, as successful
> pagefaults should recover and not need to be reported to userspace.
>
> v2:
> - Free vm after use (Shuicheng)
> - Compress pf copy logic (Shuicheng)
> - Update fault_unsuccessful before storing (Shuicheng)
> - Fix old struct name in comments (Shuicheng)
> - Keep first 50 pagefaults instead of last 50 (Jianxun)
>
> Signed-off-by: Jonathan Cavitt <jonathan.cavitt at intel.com>
> Suggested-by: Matthew Brost <matthew.brost at intel.com>
> CC: Shuicheng Lin <shuicheng.lin at intel.com>
> CC: Zhang Jianxun <jianxun.zhang at intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt_pagefault.c | 17 +++++++++++
> drivers/gpu/drm/xe/xe_vm.c | 42 ++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm.h | 6 ++++
> drivers/gpu/drm/xe/xe_vm_types.h | 20 +++++++++++++
> 4 files changed, 85 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> index c8a9058aa09f..964ca1efb698 100644
> --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> @@ -343,6 +343,22 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
> return full ? -ENOSPC : 0;
> }
>
> +static void save_pagefault_to_vm(struct xe_device *xe, struct xe_pagefault *pf)
> +{
> + struct xe_vm *vm;
> + struct xe_pagefault *store;
> +
> + vm = asid_to_vm(xe, pf->asid);
> + if (IS_ERR(vm))
> + return;
> +
> + spin_lock(&vm->pfs.lock);
> + store = kmemdup(pf, sizeof(*pf), GFP_KERNEL);
I still prefer that we add a NULL check for the store pointer here, since kmemdup() can fail.
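Something like this would do it, as a rough sketch only (keeping the surrounding locking exactly as it is in the patch):

	spin_lock(&vm->pfs.lock);
	store = kmemdup(pf, sizeof(*pf), GFP_KERNEL);
	/* Only record the fault if the copy succeeded; skip it otherwise. */
	if (store)
		xe_vm_add_pf_entry(vm, store);
	spin_unlock(&vm->pfs.lock);
	xe_vm_put(vm);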
Shuicheng
> + xe_vm_add_pf_entry(vm, store);
> + spin_unlock(&vm->pfs.lock);
> + xe_vm_put(vm);
> +}
> +
> #define USM_QUEUE_MAX_RUNTIME_MS 20
>
> static void pf_queue_work_func(struct work_struct *w)
> @@ -362,6 +378,7 @@ static void pf_queue_work_func(struct work_struct *w)
> if (unlikely(ret)) {
> print_pagefault(xe, &pf);
> pf.fault_unsuccessful = 1;
> + save_pagefault_to_vm(xe, &pf);
> drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
> }
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 22a26aff3a6e..eada3ecc2364 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -778,6 +778,43 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
> 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
> }
>
> +void xe_vm_add_pf_entry(struct xe_vm *vm, struct xe_pagefault *pf)
> +{
> + struct xe_vm_pf_entry *e = NULL;
> +
> + e = kzalloc(sizeof(*e), GFP_KERNEL);
> + xe_assert(vm->xe, e);
> +
The second parameter, pf, is never assigned to e->pf, so the entry is added to the list without the pagefault data it is supposed to carry.
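For example (sketch only, based on the code quoted above):

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	xe_assert(vm->xe, e);

	/* Keep the duplicated fault with the entry; xe_vm_remove_pf_entries() kfrees it. */
	e->pf = pf;

	spin_lock(&vm->pfs.lock);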
Shuicheng
> + spin_lock(&vm->pfs.lock);
> +
> + /**
> + * Limit the number of pfs in the pf list to prevent memory overuse.
> + */
> + if (vm->pfs.len >= MAX_PFS) {
> + kfree(e);
> + spin_unlock(&vm->pfs.lock);
> + return;
> + }
> +
> + list_add_tail(&e->list, &vm->pfs.list);
> + vm->pfs.len++;
> + spin_unlock(&vm->pfs.lock);
> +}
> +
> +void xe_vm_remove_pf_entries(struct xe_vm *vm)
> +{
> + struct xe_vm_pf_entry *e, *tmp;
> +
> + spin_lock(&vm->pfs.lock);
> + list_for_each_entry_safe(e, tmp, &vm->pfs.list, list) {
> + list_del(&e->list);
> + kfree(e->pf);
> + kfree(e);
> + }
> + vm->pfs.len = 0;
> + spin_unlock(&vm->pfs.lock);
> +}
> +
> static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
> {
> int i;
> @@ -1660,6 +1697,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> init_rwsem(&vm->userptr.notifier_lock);
> spin_lock_init(&vm->userptr.invalidated_lock);
>
> + INIT_LIST_HEAD(&vm->pfs.list);
> + spin_lock_init(&vm->pfs.lock);
> +
> ttm_lru_bulk_move_init(&vm->lru_bulk_move);
>
> INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
> @@ -1930,6 +1970,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
> }
> up_write(&xe->usm.lock);
>
> + xe_vm_remove_pf_entries(vm);
> +
> for_each_tile(tile, xe, id)
> xe_range_fence_tree_fini(&vm->rftree[id]);
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 0ef811fc2bde..fe068e4448a6 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -12,6 +12,8 @@
> #include "xe_map.h"
> #include "xe_vm_types.h"
>
> +#define MAX_PFS 50
> +
> struct drm_device;
> struct drm_printer;
> struct drm_file;
> @@ -257,6 +259,10 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
>
> int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
>
> +void xe_vm_add_pf_entry(struct xe_vm *vm, struct xe_pagefault *pf);
> +
> +void xe_vm_remove_pf_entries(struct xe_vm *vm);
> +
> bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
>
> int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 84fa41b9fa20..3a2329f54fb1 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -19,6 +19,7 @@
> #include "xe_range_fence.h"
>
> struct xe_bo;
> +struct xe_pagefault;
> struct xe_svm_range;
> struct xe_sync_entry;
> struct xe_user_fence;
> @@ -142,6 +143,13 @@ struct xe_userptr_vma {
>
> struct xe_device;
>
> +struct xe_vm_pf_entry {
> + /** @pf: observed pagefault */
> + struct xe_pagefault *pf;
> + /** @list: link into @xe_vm.pfs.list */
> + struct list_head list;
> +};
> +
> struct xe_vm {
> /** @gpuvm: base GPUVM used to track VMAs */
> struct drm_gpuvm gpuvm;
> @@ -305,6 +313,18 @@ struct xe_vm {
> bool capture_once;
> } error_capture;
>
> + /**
> + * @pfs: List of all pagefaults associated with this VM
> + */
> + struct {
> + /** @lock: lock protecting @pfs.list */
> + spinlock_t lock;
> + /** @list: list of xe_vm_pf_entry entries */
> + struct list_head list;
> + /** @len: length of @pfs.list */
> + unsigned int len;
> + } pfs;
> +
> /**
> * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
> * protected by the vm resv.
> --
> 2.43.0