[PATCH] drm/i915/display: Convert power_domains.lock to spinlock.

Rodrigo Vivi rodrigo.vivi at intel.com
Fri Jul 21 18:34:02 UTC 2023


The irq reset operation unsafely changes the irq enabled state
while possibly racing with display operations such as vblank
enable and page flip enable.

But before we can protect the irq reset with its spinlock, we
first need to convert power_domains->lock from a mutex to a
spinlock, otherwise we hit a [ BUG: Invalid wait context ].

Cc: Matthew Auld <matthew.auld at intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
---
 .../drm/i915/display/intel_display_power.c    | 86 +++++++++----------
 .../drm/i915/display/intel_display_power.h    |  2 +-
 .../i915/display/intel_display_power_well.c   |  8 +-
 3 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 38225e5d311e..e0665dbdfcd1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -261,9 +261,9 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 
 	power_domains = &dev_priv->display.power.domains;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	ret = __intel_display_power_is_enabled(dev_priv, domain);
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	return ret;
 }
@@ -310,7 +310,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
 	bool dc_off_enabled;
 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
 
 	if (drm_WARN_ON(&dev_priv->drm, !power_well))
@@ -335,7 +335,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
 		intel_power_well_disable(dev_priv, power_well);
 
 unlock:
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
@@ -532,9 +532,9 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	__intel_display_power_get_domain(dev_priv, domain);
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	return wakeref;
 }
@@ -563,7 +563,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
 		__intel_display_power_get_domain(dev_priv, domain);
@@ -572,7 +572,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 		is_enabled = false;
 	}
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	if (!is_enabled) {
 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -613,9 +613,9 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
 {
 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	__intel_display_power_put_domain(dev_priv, domain);
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 static void
@@ -672,7 +672,7 @@ intel_display_power_put_async_work(struct work_struct *work)
 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
 	intel_wakeref_t old_work_wakeref = 0;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	/*
 	 * Bail out if all the domain refs pending to be released were grabbed
@@ -707,7 +707,7 @@ intel_display_power_put_async_work(struct work_struct *work)
 out_verify:
 	verify_async_put_domains_state(power_domains);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	if (old_work_wakeref)
 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
@@ -739,7 +739,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
 
 	delay_ms = delay_ms >= 0 ? delay_ms : 100;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	if (power_domains->domain_use_count[domain] > 1) {
 		__intel_display_power_put_domain(i915, domain);
@@ -764,7 +764,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
 out_verify:
 	verify_async_put_domains_state(power_domains);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	if (work_wakeref)
 		intel_runtime_pm_put_raw(rpm, work_wakeref);
@@ -790,7 +790,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
 	struct intel_power_domain_mask async_put_mask;
 	intel_wakeref_t work_wakeref;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
 	if (!work_wakeref)
@@ -803,7 +803,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
 out_verify:
 	verify_async_put_domains_state(power_domains);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	if (work_wakeref)
 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
@@ -1027,7 +1027,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	power_domains->target_dc_state =
 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 
-	mutex_init(&power_domains->lock);
+	spin_lock_init(&power_domains->lock);
 
 	INIT_DELAYED_WORK(&power_domains->async_put_work,
 			  intel_display_power_put_async_work);
@@ -1051,10 +1051,10 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 	struct i915_power_well *power_well;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	for_each_power_well(dev_priv, power_well)
 		intel_power_well_sync_hw(dev_priv, power_well);
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
@@ -1095,14 +1095,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
 	 * as gen9_assert_dbuf_enabled might preempt this when registers
 	 * were already updated, while dev_priv was not.
 	 */
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	for_each_dbuf_slice(dev_priv, slice)
 		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
 
 	dev_priv->display.dbuf.enabled_slices = req_slices;
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
@@ -1448,7 +1448,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 		return;
 
 	/* enable PG1 and Misc I/O */
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 	intel_power_well_enable(dev_priv, well);
@@ -1456,7 +1456,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
 	intel_power_well_enable(dev_priv, well);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	intel_cdclk_init_hw(dev_priv);
 
@@ -1484,7 +1484,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 	/* The spec doesn't call for removing the reset handshake flag */
 	/* disable PG1 and Misc I/O */
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	/*
 	 * BSpec says to keep the MISC IO power well enabled here, only
@@ -1495,7 +1495,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 	intel_power_well_disable(dev_priv, well);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	usleep_range(10, 30);		/* 10 us delay per Bspec */
 }
@@ -1519,12 +1519,12 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
 		return;
 
 	/* Enable PG1 */
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 	intel_power_well_enable(dev_priv, well);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	intel_cdclk_init_hw(dev_priv);
 
@@ -1556,12 +1556,12 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 	 * Note that even though the driver's request is removed power well 1
 	 * may stay enabled after this due to DMC's own request on it.
 	 */
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 	intel_power_well_disable(dev_priv, well);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	usleep_range(10, 30);		/* 10 us delay per Bspec */
 }
@@ -1667,10 +1667,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 	 * 3. Enable Power Well 1 (PG1).
 	 *    The AUX IO power wells will be enabled on demand.
 	 */
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 	intel_power_well_enable(dev_priv, well);
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	if (DISPLAY_VER(dev_priv) == 14)
 		intel_de_rmw(dev_priv, DC_STATE_EN,
@@ -1738,10 +1738,10 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 	 *    The AUX IO power wells are toggled on demand, so they are already
 	 *    disabled at this point.
 	 */
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
 	intel_power_well_disable(dev_priv, well);
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	/* 5. */
 	intel_combo_phy_uninit(dev_priv);
@@ -1924,14 +1924,14 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 	} else if (DISPLAY_VER(i915) == 9) {
 		skl_display_core_init(i915, resume);
 	} else if (IS_CHERRYVIEW(i915)) {
-		mutex_lock(&power_domains->lock);
+		spin_lock(&power_domains->lock);
 		chv_phy_control_init(i915);
-		mutex_unlock(&power_domains->lock);
+		spin_unlock(&power_domains->lock);
 		assert_isp_power_gated(i915);
 	} else if (IS_VALLEYVIEW(i915)) {
-		mutex_lock(&power_domains->lock);
+		spin_lock(&power_domains->lock);
 		vlv_cmnlane_wa(i915);
-		mutex_unlock(&power_domains->lock);
+		spin_unlock(&power_domains->lock);
 		assert_ved_power_gated(i915);
 		assert_isp_power_gated(i915);
 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
@@ -2006,7 +2006,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
 	struct i915_power_domains *power_domains = &i915->display.power.domains;
 	struct i915_power_well *power_well;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	for_each_power_well_reverse(i915, power_well) {
 		if (power_well->desc->always_on || power_well->count ||
@@ -2019,7 +2019,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
 		intel_power_well_disable(i915, power_well);
 	}
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 /**
@@ -2177,7 +2177,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
 	struct i915_power_well *power_well;
 	bool dump_domain_info;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	verify_async_put_domains_state(power_domains);
 
@@ -2220,7 +2220,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
 		}
 	}
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 #else
@@ -2302,7 +2302,7 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m
 	struct i915_power_domains *power_domains = &i915->display.power.domains;
 	int i;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
 	for (i = 0; i < power_domains->power_well_count; i++) {
@@ -2319,7 +2319,7 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m
 				   power_domains->domain_use_count[power_domain]);
 	}
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 struct intel_ddi_port_domains {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index d3b5d04b7b07..e69d1af9898d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -145,7 +145,7 @@ struct i915_power_domains {
 	intel_wakeref_t init_wakeref;
 	intel_wakeref_t disable_wakeref;
 
-	struct mutex lock;
+	spinlock_t lock;
 	int domain_use_count[POWER_DOMAIN_NUM];
 
 	struct delayed_work async_put_work;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 916009894d89..cf3b9f4c3de1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1566,7 +1566,7 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 	bool was_override;
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
 
@@ -1588,7 +1588,7 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 	assert_chv_phy_status(dev_priv);
 
 out:
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 
 	return was_override;
 }
@@ -1601,7 +1601,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
 	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
 
-	mutex_lock(&power_domains->lock);
+	spin_lock(&power_domains->lock);
 
 	dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
 	dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
@@ -1622,7 +1622,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 
 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
 
-	mutex_unlock(&power_domains->lock);
+	spin_unlock(&power_domains->lock);
 }
 
 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
-- 
2.41.0
