[PATCH] drm/xe: Add stats for vma page faults

Francois Dugast francois.dugast at intel.com
Thu Feb 6 11:02:20 UTC 2025


Add new entries in stats for vma page faults. If CONFIG_DEBUG_FS is
enabled, the count and number of bytes can be viewed per GT in the
stats debugfs file. This helps when testing, to confirm page faults
have been triggered as expected. It also helps when looking at the
performance impact of page faults. Data is simply collected when
entering the page fault handler so there is no indication whether
it completed successfully, with or without retries, etc.

Example output:

    cat /sys/kernel/debug/dri/0/gt0/stats
    tlb_inval_count: 129
    vma_pagefault_count: 12
    vma_pagefault_bytes: 98304

Signed-off-by: Francois Dugast <francois.dugast at intel.com>
---
 drivers/gpu/drm/xe/xe_gt_pagefault.c   | 9 +++++++--
 drivers/gpu/drm/xe/xe_gt_stats.c       | 2 ++
 drivers/gpu/drm/xe/xe_gt_stats_types.h | 2 ++
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 8c18e9a278fb..1bc7f4f0affa 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -15,6 +15,7 @@
 #include "abi/guc_actions_abi.h"
 #include "xe_bo.h"
 #include "xe_gt.h"
+#include "xe_gt_stats.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
@@ -126,15 +127,19 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
 	return 0;
 }
 
-static int handle_vma_pagefault(struct xe_tile *tile, struct xe_vma *vma,
+static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
 				bool atomic)
 {
 	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_tile *tile = gt_to_tile(gt);
 	struct drm_exec exec;
 	struct dma_fence *fence;
 	ktime_t end = 0;
 	int err;
 
+	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
+	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_BYTES, xe_vma_size(vma));
+
 	lockdep_assert_held_write(&vm->lock);
 	trace_xe_vma_pagefault(vma);
 
@@ -239,7 +244,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 		err = xe_svm_handle_pagefault(vm, vma, tile,
 					      pf->page_addr, atomic);
 	else
-		err = handle_vma_pagefault(tile, vma, atomic);
+		err = handle_vma_pagefault(gt, vma, atomic);
 
 unlock_vm:
 	if (!err)
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 7a6c1d808e41..2e9879ea4674 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -28,6 +28,8 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
 
 static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
 	"tlb_inval_count",
+	"vma_pagefault_count",
+	"vma_pagefault_bytes",
 };
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index 2fc055e39f27..b072bd80c4b9 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -8,6 +8,8 @@
 
 enum xe_gt_stats_id {
 	XE_GT_STATS_ID_TLB_INVAL,
+	XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
+	XE_GT_STATS_ID_VMA_PAGEFAULT_BYTES,
 	/* must be the last entry */
 	__XE_GT_STATS_NUM_IDS,
 };
-- 
2.43.0



More information about the Intel-xe mailing list