[RFC v2 6/6] drm/xe: Implement DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE
Carlos Santa
carlos.santa at intel.corp-partner.google.com
Tue Feb 11 02:20:06 UTC 2025
From: Matthew Brost <matthew.brost at intel.com>
Implement DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE which sets the exec
queue default state to user data passed in. The intent is for a Mesa
tool to use this to replay GPU hangs.
v2:
- Enable the flag DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE
- Fix the page size math calculation to avoid a crash
Cc: José Roberto de Souza <jose.souza at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue.c | 32 ++++++++++++++++--
drivers/gpu/drm/xe/xe_exec_queue_types.h | 3 ++
drivers/gpu/drm/xe/xe_execlist.c | 2 +-
drivers/gpu/drm/xe/xe_lrc.c | 42 +++++++++++++++++-------
drivers/gpu/drm/xe/xe_lrc.h | 3 +-
5 files changed, 67 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 6051db78d706..63042bfd5bba 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -47,6 +47,7 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
if (q->xef)
xe_file_put(q->xef);
+ kvfree(q->replay_state);
kfree(q);
}
@@ -139,7 +140,8 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
}
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
+ q->lrc[i] = xe_lrc_create(q->hwe, q->vm, q->replay_state,
+ SZ_16K, q->msix_vec, flags);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
goto err_unlock;
@@ -459,6 +461,30 @@ exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value
return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}
+static int exec_queue_set_hang_replay_state(struct xe_device *xe,
+ struct xe_exec_queue *q,
+ u64 value)
+{
+ size_t size = xe_gt_lrc_hang_replay_size(q->gt, q->class);
+ u64 __user *address = u64_to_user_ptr(value);
+ void *ptr;
+ int err;
+
+ ptr = kvmalloc(size, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ err = copy_from_user(ptr, address, size);
+ if (XE_IOCTL_DBG(xe, err)) {
+ kvfree(ptr);
+ return -EFAULT;
+ }
+
+ kvfree(q->replay_state);
+ q->replay_state = ptr;
+ return 0;
+}
+
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value);
@@ -467,6 +493,7 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
+ [DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE] = exec_queue_set_hang_replay_state,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -487,7 +514,8 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
- ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 6eb7ff091534..60360256588a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -138,6 +138,9 @@ struct xe_exec_queue {
struct list_head link;
} pxp;
+ /** @replay_state: GPU hang replay state */
+ void *replay_state;
+
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 779a52daf3d7..d3c59e6f4553 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -269,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
port->hwe = hwe;
- port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
+ port->lrc = xe_lrc_create(hwe, NULL, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index a77fbce523e6..cc685fcc9058 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -38,6 +38,7 @@
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
+#define LRC_PPHWSP_SIZE SZ_4K
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
@@ -45,7 +46,16 @@ lrc_to_xe(struct xe_lrc *lrc)
return gt_to_xe(lrc->fence_ctx.gt);
}
-size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
+/**
+ * xe_gt_lrc_hang_replay_size() - Hang replay size
+ * @gt: The GT
+ * @class: Hardware engine class
+ *
+ * Determine size of GPU hang replay state for a GT and hardware engine class.
+ *
+ * Return: Size of GPU hang replay size
+ */
+size_t xe_gt_lrc_hang_replay_size(struct xe_gt *gt, enum xe_engine_class class)
{
struct xe_device *xe = gt_to_xe(gt);
size_t size;
@@ -74,11 +84,18 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
size = 2 * SZ_4K;
}
+ return size - LRC_PPHWSP_SIZE;
+}
+
+size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
+{
+ size_t size = xe_gt_lrc_hang_replay_size(gt, class);
+
/* Add indirect ring state page */
if (xe_gt_has_indirect_ring_state(gt))
size += LRC_INDIRECT_RING_STATE_SIZE;
- return size;
+ return size + LRC_PPHWSP_SIZE;
}
/*
@@ -650,7 +667,6 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_PPHWSP_SIZE SZ_4K
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -883,7 +899,8 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size, u16 msix_vec,
+ struct xe_vm *vm, void *replay_state, u32 ring_size,
+ u16 msix_vec,
u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
@@ -897,9 +914,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
kref_init(&lrc->refcount);
lrc->flags = 0;
- lrc->replay_size = xe_gt_lrc_size(gt, hwe->class);
- if (xe_gt_has_indirect_ring_state(gt))
- lrc->replay_size -= LRC_INDIRECT_RING_STATE_SIZE;
+ lrc->replay_size = xe_gt_lrc_hang_replay_size(gt, hwe->class);
lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
@@ -925,7 +940,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
- if (!gt->default_lrc[hwe->class]) {
+ if (!gt->default_lrc[hwe->class] && !replay_state) {
init_data = empty_lrc_data(hwe);
if (!init_data) {
err = -ENOMEM;
@@ -938,7 +953,11 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
* values
*/
map = __xe_lrc_pphwsp_map(lrc);
- if (!init_data) {
+ if (replay_state) {
+ xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
+ xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE, replay_state,
+ xe_gt_lrc_hang_replay_size(gt, hwe->class));
+ } else if (!init_data) {
xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
@@ -1033,6 +1052,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
* xe_lrc_create - Create a LRC
* @hwe: Hardware Engine
* @vm: The VM (address space)
+ * @replay_state: GPU hang replay state
* @ring_size: LRC ring size
* @msix_vec: MSI-X interrupt vector (for platforms that support it)
* @flags: LRC initialization flags
@@ -1043,7 +1063,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size, u16 msix_vec, u32 flags)
+ void *replay_state, u32 ring_size, u16 msix_vec, u32 flags)
{
struct xe_lrc *lrc;
int err;
@@ -1052,7 +1072,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec, flags);
+ err = xe_lrc_init(lrc, hwe, vm, replay_state, ring_size, msix_vec, flags);
if (err) {
kfree(lrc);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 2d6838645858..1ee1bff40569 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -46,7 +46,7 @@ struct xe_lrc_snapshot {
#define XE_LRC_CREATE_RUNALONE 0x1
#define XE_LRC_CREATE_PXP 0x2
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size, u16 msix_vec, u32 flags);
+ void *replay_state, u32 ring_size, u16 msix_vec, u32 flags);
void xe_lrc_destroy(struct kref *ref);
/**
@@ -73,6 +73,7 @@ static inline void xe_lrc_put(struct xe_lrc *lrc)
kref_put(&lrc->refcount, xe_lrc_destroy);
}
+size_t xe_gt_lrc_hang_replay_size(struct xe_gt *gt, enum xe_engine_class class);
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
u32 xe_lrc_regs_offset(struct xe_lrc *lrc);
--
2.43.0
More information about the Intel-xe
mailing list