[Freedreno] [PATCH v5 08/11] drm/msm/a6xx: Remove GMU lock from runpm paths
Rob Clark
robdclark at gmail.com
Tue Aug 22 18:01:55 UTC 2023
From: Rob Clark <robdclark at chromium.org>
The locking is unneeded here as runpm provides sufficient serialization.
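For illustration only (not part of the patch; the example_* names below are
hypothetical and not the msm code): the runtime PM core guarantees that a
device's runtime suspend and resume callbacks never run concurrently with
each other, which is the serialization being relied on here, so a driver
mutex taken around the whole callback body only adds a lock dependency
without adding protection:

    /*
     * Hypothetical sketch, not the real driver code: the runtime PM core
     * serializes a device's runtime suspend/resume callbacks, so the
     * callback body needs no extra mutex for suspend-vs-resume exclusion.
     */
    static int example_runtime_resume(struct device *dev)
    {
            struct example_gpu *gpu = dev_get_drvdata(dev);

            /* Runs serialized against example_runtime_suspend() */
            return example_power_up(gpu);
    }

    static int example_runtime_suspend(struct device *dev)
    {
            struct example_gpu *gpu = dev_get_drvdata(dev);

            /* Runs serialized against example_runtime_resume() */
            return example_power_down(gpu);
    }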
Fixes:
======================================================
WARNING: possible circular locking dependency detected
6.4.3-debug+ #16 Not tainted
------------------------------------------------------
kworker/5:2/211 is trying to acquire lock:
ffffffd577cefb98 (prepare_lock){+.+.}-{3:3}, at: clk_prepare_lock+0x70/0x98
but task is already holding lock:
ffffff809db316c0 (&a6xx_gpu->gmu.lock){+.+.}-{3:3}, at: a6xx_gmu_pm_suspend+0x4c/0xb4 [msm]
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #3 (&a6xx_gpu->gmu.lock){+.+.}-{3:3}:
__mutex_lock+0xc8/0x388
mutex_lock_nested+0x2c/0x38
a6xx_gmu_resume+0xf0/0x7f8 [msm]
a6xx_gmu_pm_resume+0x38/0x158 [msm]
adreno_runtime_resume+0x2c/0x38 [msm]
pm_generic_runtime_resume+0x30/0x44
__rpm_callback+0x4c/0x134
rpm_callback+0x78/0x7c
rpm_resume+0x3a4/0x46c
__pm_runtime_resume+0x78/0xbc
pm_runtime_get_sync.isra.0+0x14/0x20 [msm]
msm_gpu_submit+0x3c/0x130 [msm]
msm_job_run+0x84/0x11c [msm]
drm_sched_main+0x264/0x354 [gpu_sched]
kthread+0xf0/0x100
ret_from_fork+0x10/0x20
-> #2 (dma_fence_map){++++}-{0:0}:
__dma_fence_might_wait+0x74/0xc0
dma_fence_wait_timeout+0x50/0x174
dma_resv_wait_timeout+0x58/0xa8
active_evict+0x30/0x5c [msm]
drm_gem_lru_scan+0x15c/0x1c8
msm_gem_shrinker_scan+0x124/0x204 [msm]
do_shrink_slab+0x194/0x324
shrink_slab+0x270/0x2ec
shrink_node+0x278/0x674
do_try_to_free_pages+0x2dc/0x41c
try_to_free_pages+0x13c/0x1e4
__alloc_pages+0x364/0xb44
__folio_alloc+0x24/0x60
__read_swap_cache_async+0x10c/0x1fc
swap_cluster_readahead+0x1ac/0x234
shmem_swapin+0x6c/0xb0
shmem_swapin_folio+0x208/0x66c
shmem_get_folio_gfp+0x13c/0x650
shmem_read_folio_gfp+0x68/0xb0
shmem_read_mapping_page_gfp+0x20/0x44
drm_gem_get_pages+0xd4/0x1bc
get_pages+0x54/0x1e4 [msm]
msm_gem_pin_pages_locked+0x38/0xac [msm]
msm_gem_pin_vma_locked+0x58/0x88 [msm]
msm_ioctl_gem_submit+0xde4/0x13ac [msm]
drm_ioctl_kernel+0xe0/0x15c
drm_ioctl+0x2e8/0x3f4
vfs_ioctl+0x30/0x50
__arm64_sys_ioctl+0x80/0xb4
invoke_syscall+0x8c/0x128
el0_svc_common.constprop.0+0xdc/0x110
do_el0_svc+0x94/0xa4
el0_svc+0x44/0x88
el0t_64_sync_handler+0xac/0x13c
el0t_64_sync+0x190/0x194
-> #1 (fs_reclaim){+.+.}-{0:0}:
__fs_reclaim_acquire+0x3c/0x48
fs_reclaim_acquire+0x50/0x9c
slab_pre_alloc_hook.constprop.0+0x40/0x250
__kmem_cache_alloc_node+0x60/0x18c
kmalloc_trace+0x44/0x88
clk_rcg2_dfs_determine_rate+0x60/0x214
clk_core_determine_round_nolock+0xb8/0xf0
clk_core_round_rate_nolock+0x84/0x118
clk_core_round_rate_nolock+0xd8/0x118
clk_round_rate+0x6c/0xd0
geni_se_clk_tbl_get+0x78/0xc0
geni_se_clk_freq_match+0x44/0xe4
get_spi_clk_cfg+0x50/0xf4
geni_spi_set_clock_and_bw+0x54/0x104
spi_geni_prepare_message+0x130/0x174
__spi_pump_transfer_message+0x200/0x4d8
__spi_sync+0x13c/0x23c
spi_sync_locked+0x18/0x24
do_cros_ec_pkt_xfer_spi+0x124/0x3f0
cros_ec_xfer_high_pri_work+0x28/0x3c
kthread_worker_fn+0x14c/0x27c
kthread+0xf0/0x100
ret_from_fork+0x10/0x20
-> #0 (prepare_lock){+.+.}-{3:3}:
__lock_acquire+0xdf8/0x109c
lock_acquire+0x234/0x284
__mutex_lock+0xc8/0x388
mutex_lock_nested+0x2c/0x38
clk_prepare_lock+0x70/0x98
clk_unprepare+0x2c/0x48
clk_bulk_unprepare+0x48/0x4c
a6xx_gmu_stop+0x94/0x260 [msm]
a6xx_gmu_pm_suspend+0x54/0xb4 [msm]
adreno_runtime_suspend+0x38/0x44 [msm]
pm_generic_runtime_suspend+0x30/0x44
__rpm_callback+0x4c/0x134
rpm_callback+0x78/0x7c
rpm_suspend+0x28c/0x44c
pm_runtime_work+0xa0/0xa4
process_one_work+0x288/0x3d8
worker_thread+0x1f0/0x260
kthread+0xf0/0x100
ret_from_fork+0x10/0x20
other info that might help us debug this:
Chain exists of:
prepare_lock --> dma_fence_map --> &a6xx_gpu->gmu.lock
Possible unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(&a6xx_gpu->gmu.lock);
                               lock(dma_fence_map);
                               lock(&a6xx_gpu->gmu.lock);
  lock(prepare_lock);
*** DEADLOCK ***
3 locks held by kworker/5:2/211:
#0: ffffff808091d138 ((wq_completion)pm){+.+.}-{0:0}, at: process_one_work+0x1a0/0x3d8
#1: ffffffc00aa73e00 ((work_completion)(&dev->power.work)){+.+.}-{0:0}, at: process_one_work+0x1a0/0x3d8
#2: ffffff809db316c0 (&a6xx_gpu->gmu.lock){+.+.}-{3:3}, at: a6xx_gmu_pm_suspend+0x4c/0xb4 [msm]
stack backtrace:
CPU: 5 PID: 211 Comm: kworker/5:2 Not tainted 6.4.3-debug+ #16
Hardware name: Google Villager (rev1+) with LTE (DT)
Workqueue: pm pm_runtime_work
Call trace:
dump_backtrace+0xb4/0xf0
show_stack+0x20/0x30
dump_stack_lvl+0x60/0x84
dump_stack+0x18/0x24
print_circular_bug+0x1cc/0x234
check_noncircular+0x78/0xac
__lock_acquire+0xdf8/0x109c
lock_acquire+0x234/0x284
__mutex_lock+0xc8/0x388
mutex_lock_nested+0x2c/0x38
clk_prepare_lock+0x70/0x98
clk_unprepare+0x2c/0x48
clk_bulk_unprepare+0x48/0x4c
a6xx_gmu_stop+0x94/0x260 [msm]
a6xx_gmu_pm_suspend+0x54/0xb4 [msm]
adreno_runtime_suspend+0x38/0x44 [msm]
pm_generic_runtime_suspend+0x30/0x44
__rpm_callback+0x4c/0x134
rpm_callback+0x78/0x7c
rpm_suspend+0x28c/0x44c
pm_runtime_work+0xa0/0xa4
process_one_work+0x288/0x3d8
worker_thread+0x1f0/0x260
kthread+0xf0/0x100
ret_from_fork+0x10/0x20
Signed-off-by: Rob Clark <robdclark at chromium.org>
---
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 15 +--------------
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 67dd2eeecf62..3993a5b0067b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1914,9 +1914,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
 
 	trace_msm_gpu_resume(0);
 
-	mutex_lock(&a6xx_gpu->gmu.lock);
 	ret = a6xx_gmu_resume(a6xx_gpu);
-	mutex_unlock(&a6xx_gpu->gmu.lock);
 	if (ret)
 		return ret;
 
@@ -1940,12 +1938,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 
 	trace_msm_gpu_resume(0);
 
-	mutex_lock(&a6xx_gpu->gmu.lock);
-
 	opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
 	if (IS_ERR(opp)) {
-		ret = PTR_ERR(opp);
-		goto err_set_opp;
+		return PTR_ERR(opp);
 	}
 	dev_pm_opp_put(opp);
 
@@ -1969,8 +1964,6 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 		pm_runtime_put(gmu->dev);
 		dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
 	}
-err_set_opp:
-	mutex_unlock(&a6xx_gpu->gmu.lock);
 
 	if (!ret)
 		msm_devfreq_resume(gpu);
@@ -1990,9 +1983,7 @@ static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
 
 	msm_devfreq_suspend(gpu);
 
-	mutex_lock(&a6xx_gpu->gmu.lock);
 	ret = a6xx_gmu_stop(a6xx_gpu);
-	mutex_unlock(&a6xx_gpu->gmu.lock);
 	if (ret)
 		return ret;
 
@@ -2016,8 +2007,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	msm_devfreq_suspend(gpu);
 
-	mutex_lock(&a6xx_gpu->gmu.lock);
-
 	/* Drain the outstanding traffic on memory buses */
 	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
@@ -2030,8 +2019,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
 	pm_runtime_put_sync(gmu->dev);
 
-	mutex_unlock(&a6xx_gpu->gmu.lock);
-
 	if (a6xx_gpu->shadow_bo)
 		for (i = 0; i < gpu->nr_rings; i++)
 			a6xx_gpu->shadow[i] = 0;
--
2.41.0