[Intel-gfx] [PATCH] drm/i915/dp: Take display powerwell before intel_dp_detect_dpcd
Chris Wilson
chris at chris-wilson.co.uk
Fri Nov 29 11:28:45 UTC 2019
Prevent a circular lockdep reference caused by waking up the runtime-pm
from inside intel_dp_aux_xfer(), deep within several layers of mutexes:
<7> [401.102396] [drm:intel_runtime_suspend [i915]] Device suspended
<4> [401.115284]
<4> [401.115288] ======================================================
<4> [401.115289] WARNING: possible circular locking dependency detected
<4> [401.115291] 5.4.0-rc8-CI-Patchwork_15505+ #1 Tainted: G U
<4> [401.115292] ------------------------------------------------------
<4> [401.115293] kworker/1:3/186 is trying to acquire lock:
<4> [401.115294] ffffffff82263a40 (fs_reclaim){+.+.}, at: fs_reclaim_acquire.part.117+0x0/0x30
<4> [401.115300]
but task is already holding lock:
<4> [401.115301] ffff88848816a980 (&aux->hw_mutex){+.+.}, at: drm_dp_dpcd_access+0x62/0x110
<4> [401.115304]
which lock already depends on the new lock.
<4> [401.115305]
the existing dependency chain (in reverse order) is:
<4> [401.115306]
-> #4 (&aux->hw_mutex){+.+.}:
<4> [401.115310] __mutex_lock+0x9a/0x9d0
<4> [401.115312] drm_dp_dpcd_access+0x62/0x110
<4> [401.115313] drm_dp_dpcd_write+0x21/0x90
<4> [401.115375] intel_psr_enable_locked+0x480/0x530 [i915]
<4> [401.115428] intel_psr_enable+0x7d/0xd0 [i915]
<4> [401.115477] intel_enable_ddi+0x147/0x370 [i915]
<4> [401.115528] intel_encoders_enable+0x78/0xa0 [i915]
<4> [401.115576] haswell_crtc_enable+0x3e4/0x8a0 [i915]
<4> [401.115628] intel_update_crtc+0x1de/0x200 [i915]
<4> [401.115690] skl_commit_modeset_enables+0x297/0x430 [i915]
<4> [401.115740] intel_atomic_commit_tail+0x336/0x14e0 [i915]
<4> [401.115785] intel_atomic_commit+0x31d/0x350 [i915]
<4> [401.115788] drm_atomic_helper_commit_duplicated_state+0xc4/0xd0
<4> [401.115857] __intel_display_resume+0x84/0xc0 [i915]
<4> [401.115887] intel_display_resume+0xe0/0x110 [i915]
<4> [401.115900] i915_drm_resume+0xd4/0x130 [i915]
<4> [401.115900] dpm_run_callback+0x64/0x280
<4> [401.115900] device_resume+0xb7/0x1e0
<4> [401.115900] async_resume+0x14/0x40
<4> [401.115900] async_run_entry_fn+0x34/0x160
<4> [401.115900] process_one_work+0x26a/0x620
<4> [401.115900] worker_thread+0x37/0x380
<4> [401.115900] kthread+0x119/0x130
<4> [401.115900] ret_from_fork+0x24/0x50
<4> [401.115900]
-> #3 (&dev_priv->psr.lock){+.+.}:
<4> [401.115900] __mutex_lock+0x9a/0x9d0
<4> [401.115900] intel_psr_flush+0x39/0x180 [i915]
<4> [401.115900] frontbuffer_flush+0x6a/0x80 [i915]
<4> [401.115900] frontbuffer_retire+0x27/0x38 [i915]
<4> [401.115900] __active_retire+0xb4/0x290 [i915]
<4> [401.115900] process_one_work+0x26a/0x620
<4> [401.115900] worker_thread+0x37/0x380
<4> [401.115900] kthread+0x119/0x130
<4> [401.115900] ret_from_fork+0x24/0x50
<4> [401.115900]
-> #2 ((work_completion)(&ref->work)){+.+.}:
<4> [401.115900] __flush_work+0x345/0x4b0
<4> [401.116214] i915_active_wait+0x13f/0x180 [i915]
<4> [401.116214] __i915_vma_unbind+0x1a/0x70 [i915]
<4> [401.116214] i915_vma_unbind+0x2d/0x50 [i915]
<4> [401.116214] eb_lookup_vmas+0x4b6/0x1310 [i915]
<4> [401.116214] i915_gem_do_execbuffer+0x62a/0x2700 [i915]
<4> [401.116214] i915_gem_execbuffer2_ioctl+0x11b/0x460 [i915]
<4> [401.116214] drm_ioctl_kernel+0xa7/0xf0
<4> [401.116214] drm_ioctl+0x2e1/0x390
<4> [401.116214] do_vfs_ioctl+0xa0/0x6f0
<4> [401.116214] ksys_ioctl+0x35/0x60
<4> [401.116214] __x64_sys_ioctl+0x11/0x20
<4> [401.116214] do_syscall_64+0x4f/0x210
<4> [401.116214] entry_SYSCALL_64_after_hwframe+0x49/0xbe
<4> [401.116214]
-> #1 (&vm->mutex/1){+.+.}:
<4> [401.116214] i915_gem_shrinker_taints_mutex+0xa2/0xd0 [i915]
<4> [401.116214] i915_address_space_init+0xa9/0x170 [i915]
<4> [401.116214] __ppgtt_create+0x93/0xa60 [i915]
<4> [401.116214] i915_ppgtt_create+0x7/0xf0 [i915]
<4> [401.116214] i915_gem_create_context+0x278/0x420 [i915]
<4> [401.116214] i915_gem_context_create_kernel+0xa/0xa0 [i915]
<4> [401.116214] i915_gem_init_contexts+0x2b/0xf0 [i915]
<4> [401.116214] i915_gem_init+0x199/0xa50 [i915]
<4> [401.116214] i915_driver_probe+0xb00/0x15f0 [i915]
<4> [401.116214] i915_pci_probe+0x43/0x1c0 [i915]
<4> [401.116214] pci_device_probe+0x9e/0x120
<4> [401.116214] really_probe+0xea/0x420
<4> [401.116214] driver_probe_device+0x10b/0x120
<4> [401.116214] device_driver_attach+0x4a/0x50
<4> [401.116214] __driver_attach+0x97/0x130
<4> [401.116214] bus_for_each_dev+0x74/0xc0
<4> [401.116214] bus_add_driver+0x142/0x220
<4> [401.116214] driver_register+0x56/0xf0
<4> [401.116214] do_one_initcall+0x58/0x2ff
<4> [401.116214] do_init_module+0x56/0x1f8
<4> [401.116214] load_module+0x243e/0x29f0
<4> [401.116214] __do_sys_finit_module+0xe9/0x110
<4> [401.116214] do_syscall_64+0x4f/0x210
<4> [401.116214] entry_SYSCALL_64_after_hwframe+0x49/0xbe
<4> [401.116214]
-> #0 (fs_reclaim){+.+.}:
<4> [401.116214] __lock_acquire+0x1328/0x15d0
<4> [401.116214] lock_acquire+0xa7/0x1c0
<4> [401.116214] fs_reclaim_acquire.part.117+0x24/0x30
<4> [401.116214] __kmalloc+0x48/0x320
<4> [401.116214] acpi_ns_internalize_name+0x44/0x9b
<4> [401.116214] acpi_ns_get_node_unlocked+0x6b/0xd3
<4> [401.116214] acpi_ns_get_node+0x3b/0x50
<4> [401.116214] acpi_get_handle+0x8a/0xb4
<4> [401.116214] acpi_has_method+0x1c/0x40
<4> [401.116214] acpi_pci_set_power_state+0x40/0xe0
<4> [401.116214] pci_platform_power_transition+0x3e/0x90
<4> [401.116214] pci_set_power_state+0x83/0xf0
<4> [401.116214] pci_restore_standard_config+0x22/0x40
<4> [401.116214] pci_pm_runtime_resume+0x23/0xc0
<4> [401.116214] __rpm_callback+0xb1/0x110
<4> [401.116214] rpm_callback+0x1a/0x70
<4> [401.116214] rpm_resume+0x50e/0x790
<4> [401.116214] __pm_runtime_resume+0x42/0x80
<4> [401.116214] __intel_runtime_pm_get+0x15/0x60 [i915]
<4> [401.116214] intel_display_power_get+0x1f/0x60 [i915]
<4> [401.116214] intel_dp_aux_xfer+0xd3/0x8f0 [i915]
<4> [401.116214] intel_dp_aux_transfer+0xa7/0x200 [i915]
<4> [401.116214] drm_dp_dpcd_access+0x76/0x110
<4> [401.116214] drm_dp_dpcd_read+0x29/0xc0
<4> [401.116214] intel_dp_get_dsc_sink_cap+0x54/0xe0 [i915]
<4> [401.116214] intel_dp_detect+0x1be/0x500 [i915]
<4> [401.116214] drm_helper_probe_detect_ctx+0x67/0xd0
<4> [401.116214] drm_helper_hpd_irq_event+0xa5/0x120
<4> [401.116214] i915_hpd_poll_init_work+0xc6/0x100 [i915]
<4> [401.116214] process_one_work+0x26a/0x620
<4> [401.116214] worker_thread+0x37/0x380
<4> [401.116214] kthread+0x119/0x130
<4> [401.116214] ret_from_fork+0x24/0x50
Note that while this is cross-pollination from a work_struct context, it
still seems prudent to take the runtime reference outside, as is done for
the other intel_dp_detect() paths.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Imre Deak <imre.deak at intel.com>
Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
drivers/gpu/drm/i915/display/intel_dp.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index be9e8e4497bf..7917cbd99dbb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5254,7 +5254,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
-intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+__intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
u8 *dpcd = intel_dp->dpcd;
@@ -5307,6 +5307,22 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_disconnected;
}
+static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
+ enum drm_connector_status status = connector_status_unknown;
+ intel_wakeref_t wakeref;
+
+ with_intel_display_power(i915, aux_domain, wakeref)
+ status = __intel_dp_detect_dpcd(intel_dp);
+
+ return status;
+}
+
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
--
2.24.0
More information about the Intel-gfx
mailing list