[PATCH v5 9/9] drm/xe: Implement capture of HWSP and HWCTX

Souza, Jose jose.souza at intel.com
Wed Feb 21 17:26:48 UTC 2024


On Wed, 2024-02-21 at 14:30 +0100, Maarten Lankhorst wrote:
> Capture both the HWSP and the HW ctx page to allow easier replaying of
> error dumps for debugging.

This patch is causing the lockdep splat below:

[  831.459199] xe 0000:00:02.0: [drm] Timedout job: seqno=338, guc_id=4, flags=0x10
[  831.459911] irq event stamp: 31052
[  831.459947] hardirqs last  enabled at (31051): [<ffffffff81d2edba>] _raw_spin_unlock_irqrestore+0x4a/0x70
[  831.459997] hardirqs last disabled at (31052): [<ffffffff81150014>] queue_work_node+0x74/0xb0
[  831.460059] softirqs last  enabled at (30876): [<ffffffff81131982>] irq_exit_rcu+0x82/0xe0
[  831.460120] softirqs last disabled at (30867): [<ffffffff81131982>] irq_exit_rcu+0x82/0xe0

[  831.459959] ======================================================
[  831.460025] WARNING: possible circular locking dependency detected
[  831.460085] 6.8.0-rc5-zeh-xe+ #1233 Not tainted
[  831.460142] ------------------------------------------------------
[  831.460207] kworker/u16:6/1404 is trying to acquire lock:
[  831.460233] ffffffff827742e0 (fs_reclaim){+.+.}-{0:0}, at: kmalloc_trace+0x4d/0x300
[  831.460285]
               but task is already holding lock:
[  831.460321] ffffffff827face0 (dma_fence_map){++++}-{0:0}, at: xe_devcoredump+0x125/0x2d0 [xe]
[  831.460427]
               which lock already depends on the new lock.

[  831.460482]
               the existing dependency chain (in reverse order) is:
[  831.460523]
               -> #2 (dma_fence_map){++++}-{0:0}:
[  831.460563]        __dma_fence_might_wait+0x39/0xb0
[  831.460593]        dma_resv_lockdep+0x1b5/0x300
[  831.460615]        do_one_initcall+0x53/0x240
[  831.460638]        kernel_init_freeable+0x188/0x2e0
[  831.460664]        kernel_init+0x11/0x190
[  831.460687]        ret_from_fork+0x28/0x40
[  831.460716]        ret_from_fork_asm+0x11/0x20
[  831.460743]
               -> #1 (mmu_notifier_invalidate_range_start){+.+.}-{0:0}:
[  831.460805]        fs_reclaim_acquire+0x68/0xd0
[  831.460832]        kmalloc_trace+0x4d/0x300
[  831.460855]        __kthread_create_worker+0x2a/0xc0
[  831.460882]        kthread_create_worker+0x58/0x70
[  831.460905]        workqueue_init+0x16/0x2a0
[  831.460937]        kernel_init_freeable+0x59/0x2e0
[  831.460967]        kernel_init+0x11/0x190
[  831.460990]        ret_from_fork+0x28/0x40
[  831.461021]        ret_from_fork_asm+0x11/0x20
[  831.461054]
               -> #0 (fs_reclaim){+.+.}-{0:0}:
[  831.461091]        __lock_acquire+0x1735/0x28f0
[  831.461114]        lock_acquire+0xd3/0x2d0
[  831.461133]        fs_reclaim_acquire+0x99/0xd0
[  831.461159]        kmalloc_trace+0x4d/0x300
[  831.461183]        xe_lrc_snapshot_capture+0x2c/0x3b0 [xe]
[  831.461292]        xe_guc_exec_queue_snapshot_capture+0xe3/0x3b0 [xe]
[  831.461404]        xe_devcoredump+0x190/0x2d0 [xe]
[  831.461500]        guc_exec_queue_timedout_job+0x98/0x5a0 [xe]
[  831.461611]        drm_sched_job_timedout+0x77/0xe0 [gpu_sched]
[  831.461653]        process_one_work+0x1f4/0x4d0
[  831.461681]        worker_thread+0x1d8/0x3c0
[  831.461705]        kthread+0xfb/0x130
[  831.461726]        ret_from_fork+0x28/0x40
[  831.461750]        ret_from_fork_asm+0x11/0x20
[  831.461778]
               other info that might help us debug this:

[  831.461839] Chain exists of:
                 fs_reclaim --> mmu_notifier_invalidate_range_start --> dma_fence_map

[  831.461923]  Possible unsafe locking scenario:

[  831.463506]        CPU0                    CPU1
[  831.464888]        ----                    ----
[  831.466256]   rlock(dma_fence_map);
[  831.467594]                                lock(mmu_notifier_invalidate_range_start);
[  831.469009]                                lock(dma_fence_map);
[  831.470355]   lock(fs_reclaim);
[  831.471696]
                *** DEADLOCK ***

[  831.475615] 3 locks held by kworker/u16:6/1404:
[  831.477056]  #0: ffff88812fc4d138 ((wq_completion)gt-ordered-wq){+.+.}-{0:0}, at: process_one_work+0x18d/0x4d0
[  831.478585]  #1: ffffc90002663e58 ((work_completion)(&(&sched->work_tdr)->work)){+.+.}-{0:0}, at: process_one_work+0x18d/0x4d0
[  831.479767]  #2: ffffffff827face0 (dma_fence_map){++++}-{0:0}, at: xe_devcoredump+0x125/0x2d0 [xe]
[  831.481022]
               stack backtrace:
[  831.483548] CPU: 5 PID: 1404 Comm: kworker/u16:6 Not tainted 6.8.0-rc5-zeh-xe+ #1233
[  831.484844] Hardware name: Dell Inc. Latitude 5420/01M3M4, BIOS 1.27.0 03/17/2023
[  831.486119] Workqueue: gt-ordered-wq drm_sched_job_timedout [gpu_sched]
[  831.487346] Call Trace:
[  831.488767]  <TASK>
[  831.490214]  dump_stack_lvl+0x58/0xb0
[  831.491550]  check_noncircular+0x158/0x170
[  831.493027]  ? lockdep_hardirqs_on+0xbf/0x130
[  831.494468]  __lock_acquire+0x1735/0x28f0
[  831.495811]  lock_acquire+0xd3/0x2d0
[  831.497157]  ? kmalloc_trace+0x4d/0x300
[  831.498465]  ? xe_guc_exec_queue_snapshot_capture+0xae/0x3b0 [xe]
[  831.499826]  fs_reclaim_acquire+0x99/0xd0
[  831.501187]  ? kmalloc_trace+0x4d/0x300
[  831.502452]  kmalloc_trace+0x4d/0x300
[  831.503770]  ? __kmalloc_large_node+0x129/0x170
[  831.505192]  ? xe_lrc_snapshot_capture+0x2c/0x3b0 [xe]
[  831.506551]  ? xe_lrc_snapshot_capture+0x2c/0x3b0 [xe]
[  831.507954]  xe_lrc_snapshot_capture+0x2c/0x3b0 [xe]
[  831.509365]  ? rcu_is_watching+0xd/0x40
[  831.510658]  ? __kmalloc+0x2bd/0x400
[  831.511906]  xe_guc_exec_queue_snapshot_capture+0xe3/0x3b0 [xe]
[  831.513303]  xe_devcoredump+0x190/0x2d0 [xe]
[  831.514652]  guc_exec_queue_timedout_job+0x98/0x5a0 [xe]
[  831.516059]  drm_sched_job_timedout+0x77/0xe0 [gpu_sched]
[  831.517354]  ? process_one_work+0x18d/0x4d0
[  831.518608]  process_one_work+0x1f4/0x4d0
[  831.519916]  worker_thread+0x1d8/0x3c0
[  831.521251]  ? rescuer_thread+0x390/0x390
[  831.522547]  kthread+0xfb/0x130
[  831.523715]  ? kthread_complete_and_exit+0x20/0x20
[  831.524867]  ret_from_fork+0x28/0x40
[  831.525939]  ? kthread_complete_and_exit+0x20/0x20
[  831.527159]  ret_from_fork_asm+0x11/0x20
[  831.528283]  </TASK>
[  831.529699] xe 0000:00:02.0: [drm] Xe device coredump has been created
[  831.531196] xe 0000:00:02.0: [drm] Check your /sys/class/drm/card0/device/devcoredump/data
[  831.532863] xe 0000:00:02.0: [drm] Timedout job: seqno=339, guc_id=4, flags=0x11

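The chain boils down to: xe_devcoredump() holds the dma_fence_map annotation
(fence-signalling critical section) while the snapshot capture does a
GFP_KERNEL allocation, and lockdep knows that direct reclaim may in turn wait
on fences via the MMU notifiers. A minimal sketch of the rule being enforced
here (not the actual xe code, just an illustration; the function name is made
up):

    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    static void signalling_path_example(size_t size)
    {
            bool cookie = dma_fence_begin_signalling();
            void *buf;

            /*
             * GFP_KERNEL may enter direct reclaim, and reclaim may wait on
             * fences (fs_reclaim -> mmu_notifier_invalidate_range_start ->
             * dma_fence_map), which is exactly the cycle reported above.
             */
            buf = kmalloc(size, GFP_KERNEL);

            kfree(buf);
            dma_fence_end_signalling(cookie);
    }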


> 
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_lrc.c | 60 ++++++++++++++++++++++++++++++++++++-
>  1 file changed, 59 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
> index 20f235cdaca34..93c139f8d4fed 100644
> --- a/drivers/gpu/drm/xe/xe_lrc.c
> +++ b/drivers/gpu/drm/xe/xe_lrc.c
> @@ -5,6 +5,8 @@
>  
>  #include "xe_lrc.h"
>  
> +#include <linux/ascii85.h>
> +
>  #include "instructions/xe_mi_commands.h"
>  #include "instructions/xe_gfxpipe_commands.h"
>  #include "regs/xe_engine_regs.h"
> @@ -32,6 +34,10 @@
>  #define ENGINE_INSTANCE_SHIFT			48
>  
>  struct xe_lrc_snapshot {
> +	struct xe_bo *lrc_bo;
> +	void *lrc_snapshot;
> +	unsigned long lrc_size, lrc_offset;
> +
>  	u32 context_desc;
>  	u32 head;
>  	struct {
> @@ -1333,19 +1339,43 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
>  	snapshot->tail.memory = xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
>  	snapshot->start_seqno = xe_lrc_start_seqno(lrc);
>  	snapshot->seqno = xe_lrc_seqno(lrc);
> +	snapshot->lrc_bo = xe_bo_get(lrc->bo);
> +	snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
> +	snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
> +	snapshot->lrc_snapshot = NULL;
>  	return snapshot;
>  }
>  
>  void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
>  {
> +	struct xe_bo *bo;
> +	struct iosys_map src;
> +
>  	if (!snapshot)
>  		return;
>  
> -	/* TODO: Copy status page */
> +	bo = snapshot->lrc_bo;
> +	snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
> +	if (!snapshot->lrc_snapshot)
> +		return;
> +
> +	dma_resv_lock(bo->ttm.base.resv, NULL);
> +	if (!ttm_bo_vmap(&bo->ttm, &src)) {
> +		xe_map_memcpy_from(xe_bo_device(bo),
> +				   snapshot->lrc_snapshot, &src, snapshot->lrc_offset,
> +				   snapshot->lrc_size);
> +		ttm_bo_vunmap(&bo->ttm, &src);
> +	} else {
> +		kvfree(snapshot->lrc_snapshot);
> +		snapshot->lrc_snapshot = NULL;
> +	}
> +	dma_resv_unlock(bo->ttm.base.resv);
>  }
>  
>  void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
>  {
> +	unsigned long i;
> +
>  	if (!snapshot)
>  		return;
>  
> @@ -1355,9 +1385,37 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
>  		   snapshot->tail.internal, snapshot->tail.memory);
>  	drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
>  	drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
> +
> +	if (!snapshot->lrc_snapshot)
> +		return;
> +
> +	drm_printf(p, "\t HWSP length: 0x%x\n", LRC_PPHWSP_SIZE);
> +	drm_puts(p, "\tHWSP data: ");
> +
> +	for (i = 0; i < LRC_PPHWSP_SIZE; i += sizeof(u32)) {
> +		u32 *val = snapshot->lrc_snapshot + i;
> +		char dumped[ASCII85_BUFSZ];
> +
> +		drm_puts(p, ascii85_encode(*val, dumped));
> +	}
> +	drm_printf(p, "\n\tHWCTX length: 0x%lx\n", snapshot->lrc_size - LRC_PPHWSP_SIZE);
> +	drm_puts(p, "\tHWCTX data: ");
> +	for (; i < snapshot->lrc_size; i += sizeof(u32)) {
> +		u32 *val = snapshot->lrc_snapshot + i;
> +		char dumped[ASCII85_BUFSZ];
> +
> +		drm_puts(p, ascii85_encode(*val, dumped));
> +	}
> +	drm_puts(p, "\n");
>  }
>  
>  void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
>  {
> +	if (!snapshot)
> +		return;
> +
> +	kvfree(snapshot->lrc_snapshot);
> +	if (snapshot->lrc_bo)
> +		xe_bo_put(snapshot->lrc_bo);
>  	kfree(snapshot);
>  }
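
FWIW the backtrace points at the allocation at the top of
xe_lrc_snapshot_capture(), which runs while xe_devcoredump() holds the
dma_fence_map annotation, so anything that can enter reclaim on that path will
trip this. One possible direction (untested, just a sketch, not a review of
the series) would be to make that small allocation non-reclaiming and keep the
large copy in the delayed worker, e.g.:

    /* Hypothetical, untested change in xe_lrc_snapshot_capture(): */
    snapshot = kmalloc(sizeof(*snapshot), GFP_NOWAIT);
    if (!snapshot)
            return NULL;

or defer that allocation to the delayed capture path, if that fits the rest of
the series.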


