[Intel-gfx] [PATCH 4/4] drm/i915: Enable polling when we don't have hpd

Lyude cpaul at redhat.com
Mon Aug 22 15:31:35 UTC 2016


Unfortunately, there are two situations where we lose hpd right now:
 - Runtime suspend
 - When we've shut off all of the power wells on Valleyview/Cherryview

While it would be nice if this didn't cause issues, it can leave us in
some awkward states where a user won't be able to get their display to
turn on. For instance, if we boot a Valleyview system without any
monitors connected, it won't need any of its power wells and will thus
shut them off. Since this causes us to lose HPD, unless the user knows
how to ssh into their machine and do a manual reprobe for monitors,
none of the monitors they connect after booting will actually work.

Eventually we should come up with a better fix than having to enable
polling for this, since polling makes rpm a lot less useful, but for
now the infrastructure in i915 just isn't there yet to get hpd in these
situations.
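
To make the diff below a little easier to follow, here is a heavily
condensed sketch of the idea. The function name is made up, and the
deferred worker, the poll_enabled bookkeeping and the MST special case
from the real patch are all left out:

/* Illustrative sketch only, not the literal patch below */
static void switch_connectors_to_polling(struct drm_device *dev, bool poll)
{
	struct drm_connector *connector;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		/* While hpd is dead, have the output poll worker re-detect
		 * the connector periodically; otherwise go back to relying
		 * on the hpd irq.
		 */
		connector->polled = poll ?
			DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT :
			DRM_CONNECTOR_POLL_HPD;
	}
	if (poll)
		drm_kms_helper_poll_enable_locked(dev);
	mutex_unlock(&dev->mode_config.mutex);

	/* Force a probe so we pick up anything that changed while hpd
	 * was dead.
	 */
	if (!poll)
		drm_helper_hpd_irq_event(dev);
}

Runtime suspend and the vlv/chv display power well teardown take the
"poll" path, and intel_hpd_init() takes the "hpd" path again on the way
back up.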

Changes since v1:
 - Add comment explaining the addition of the if
   (!mode_config->poll_running) in intel_hpd_init()
 - Remove unneeded if (!dev->mode_config.poll_enabled) in
   i915_hpd_poll_init_work()
 - Call to drm_helper_hpd_irq_event() after we disable polling
 - Add cancel_work_sync() call to intel_hpd_cancel_work()

Changes since v2:
 - Apparently dev->mode_config.poll_running doesn't actually reflect
   whether or not a poll is currently in progress, and is actually used
   for dynamic module parameter enabling/disabling. So now we instead
   keep track of our own poll_running variable in dev_priv->hotplug
 - Clean i915_hpd_poll_init_work() a little bit

Changes since v3:
 - Remove the now-redundant connector loop in intel_hpd_init(), just
   rely on intel_hpd_poll_enable() for setting connector->polled
   correctly on each connector
 - Get rid of poll_running
 - Don't assign enabled in i915_hpd_poll_init_work before we actually
   lock dev->mode_config.mutex
 - Wrap enabled assignment in i915_hpd_poll_init_work() in READ_ONCE()
   for doc purposes
 - Do the same for dev_priv->hotplug.poll_enabled with WRITE_ONCE in
   intel_hpd_poll_enable()
 - Add some comments about racing not mattering in intel_hpd_poll_enable

Changes since v4:
 - Rename intel_hpd_poll_enable() to intel_hpd_poll_init()
 - Drop the bool argument from intel_hpd_poll_init()
 - Remove redundant calls to intel_hpd_poll_init()
 - Rename poll_enable_work to poll_init_work
 - Add some kerneldoc for intel_hpd_poll_init()
 - Cross-reference intel_hpd_poll_init() in intel_hpd_init()
 - Just copy the loop from intel_hpd_init() into intel_hpd_poll_init()

Changes since v5:
 - Minor kerneldoc nitpicks

Cc: stable at vger.kernel.org
Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>
Signed-off-by: Lyude <cpaul at redhat.com>
---
 drivers/gpu/drm/i915/i915_drv.c         |  3 ++
 drivers/gpu/drm/i915/i915_drv.h         |  3 ++
 drivers/gpu/drm/i915/intel_drv.h        |  2 +
 drivers/gpu/drm/i915/intel_hotplug.c    | 90 ++++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/intel_runtime_pm.c |  2 +
 5 files changed, 88 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 85c4deb..fd3553b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1578,6 +1578,9 @@ static int intel_runtime_suspend(struct device *device)
 
 	assert_forcewakes_inactive(dev_priv);
 
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		intel_hpd_poll_init(dev_priv);
+
 	DRM_DEBUG_KMS("Device suspended\n");
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f8ea58..0ed5fd3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -281,6 +281,9 @@ struct i915_hotplug {
 	u32 short_port_mask;
 	struct work_struct dig_port_work;
 
+	struct work_struct poll_init_work;
+	bool poll_enabled;
+
 	/*
 	 * if we get a HPD irq from DP and a HPD irq from non-DP
 	 * the non-DP HPD could block the workqueue on a mode config
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 14d1dc6..94144a7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1346,6 +1346,8 @@ void intel_dsi_init(struct drm_device *dev);
 
 /* intel_dvo.c */
 void intel_dvo_init(struct drm_device *dev);
+/* intel_hotplug.c */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 0c9ee3f..2c49458 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -453,20 +453,48 @@ void intel_hpd_irq_handler(struct drm_device *dev,
  *
  * This is a separate step from interrupt enabling to simplify the locking rules
  * in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
  */
 void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
 	int i;
 
 	for_each_hpd_pin(i) {
 		dev_priv->hotplug.stats[i].count = 0;
 		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
 	}
+
+	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+	schedule_work(&dev_priv->hotplug.poll_init_work);
+
+	/*
+	 * Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked checks happy.
+	 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev_priv->dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+static void i915_hpd_poll_init_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     hotplug.poll_init_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	bool enabled;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_connector *intel_connector = to_intel_connector(connector);
+		struct intel_connector *intel_connector =
+			to_intel_connector(connector);
 		connector->polled = intel_connector->polled;
 
 		/* MST has a dynamic intel_connector->encoder and it's reprobing
@@ -475,24 +503,61 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
 			continue;
 
 		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
-		    intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
+		    intel_connector->encoder->hpd_pin > HPD_NONE) {
+			connector->polled = enabled ?
+				DRM_CONNECTOR_POLL_CONNECT |
+				DRM_CONNECTOR_POLL_DISCONNECT :
+				DRM_CONNECTOR_POLL_HPD;
+		}
 	}
 
+	if (enabled)
+		drm_kms_helper_poll_enable_locked(dev);
+
+	mutex_unlock(&dev->mode_config.mutex);
+
 	/*
-	 * Interrupt setup is already guaranteed to be single-threaded, this is
-	 * just to make the assert_spin_locked checks happy.
+	 * We might have missed hotplugs that happened while we were
+	 * in the middle of disabling polling
 	 */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	if (!enabled)
+		drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enables polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the power wells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+	/*
+	 * We might already be holding dev->mode_config.mutex, so do this in a
+	 * separate worker.
+	 * As well, there's no issue if we race here since we always reschedule
+	 * this worker anyway
+	 */
+	schedule_work(&dev_priv->hotplug.poll_init_work);
 }
 
 void intel_hpd_init_work(struct drm_i915_private *dev_priv)
 {
 	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
 	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
 			  intel_hpd_irq_storm_reenable_work);
 }
@@ -509,6 +574,7 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
 
 	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
 	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
 	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4a3fd3a..2592b39 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1007,6 +1007,8 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 	synchronize_irq(dev_priv->dev->irq);
 
 	vlv_power_sequencer_reset(dev_priv);
+
+	intel_hpd_poll_init(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-- 
2.7.4


