[Intel-gfx] [PATCH 23/46] drm/i915: Mark up Ironlake ips with rpm wakerefs
Chris Wilson
chris at chris-wilson.co.uk
Mon Jan 7 11:54:46 UTC 2019
Currently Ironlake operates under the assumption that rpm is always awake
(and so its error checking is disabled). As such, we have missed a few
places where we access registers without taking the rpm wakeref and thus
trigger warnings, intel_ips being one culprit.
As this involved adding a potentially sleeping rpm_get, we have to
rearrange the locking slightly: we now take a reference on the device
(under the RCU read lock) and only hold the spinlock around the actual
state access, rather than holding the spinlock for the whole operation.
To be consistent, we apply the same pattern across the intel_ips
interface, even though it adds a few more atomic operations than strictly
necessary in a few cases.
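
For reference, the locking shape this converges on looks roughly like the
sketch below. It is only an illustration of the pattern, not the i915 code:
the struct and names here (ips_device, ips_dev_get, raise_limit) are invented
for the example, whereas the patch itself publishes the drm_i915_private
pointer, takes the drm_device reference via kref_get_unless_zero() in
mchdev_get(), drops it with drm_dev_put(), and wraps register access in
with_intel_runtime_pm().

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ips_device {
	struct kref ref;
	spinlock_t lock;
	u8 max_delay;
	u8 fmax;
};

/* Published with rcu_assign_pointer() at init, cleared at teardown. */
static struct ips_device __rcu *ips_dev;

static struct ips_device *ips_dev_get(void)
{
	struct ips_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(ips_dev);
	if (dev && !kref_get_unless_zero(&dev->ref))
		dev = NULL;	/* device is already being torn down */
	rcu_read_unlock();

	return dev;		/* caller drops the reference when done */
}

static void ips_dev_release(struct kref *ref)
{
	kfree(container_of(ref, struct ips_device, ref));
}

static bool raise_limit(void)
{
	struct ips_device *dev;

	dev = ips_dev_get();
	if (!dev)
		return false;

	/* The spinlock now only covers the short state update. */
	spin_lock_irq(&dev->lock);
	if (dev->max_delay > dev->fmax)
		dev->max_delay--;
	spin_unlock_irq(&dev->lock);

	kref_put(&dev->ref, ips_dev_release);
	return true;
}

The kref_get_unless_zero() step is what lets the global pointer be looked up
without the spinlock: a lookup racing against unload simply observes a zero
refcount and bails out instead of blocking teardown.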
v2: Sagar noted that the memory barriers around setting mch_dev were
overkill, as we only need ordering there, and that i915_emon_status was
still using struct_mutex for no reason while lacking the rpm wakeref it
actually needed.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula at intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 32 ++----
drivers/gpu/drm/i915/i915_drv.c | 3 +
drivers/gpu/drm/i915/intel_pm.c | 172 ++++++++++++++--------------
3 files changed, 102 insertions(+), 105 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b7be5eb5c062..4257c8f78e27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1741,32 +1741,24 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- unsigned long temp, chipset, gfx;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
intel_wakeref_t wakeref;
- int ret;
- if (!IS_GEN(dev_priv, 5))
+ if (!IS_GEN(i915, 5))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ with_intel_runtime_pm(i915, wakeref) {
+ unsigned long temp, chipset, gfx;
- wakeref = intel_runtime_pm_get(dev_priv);
-
- temp = i915_mch_val(dev_priv);
- chipset = i915_chipset_val(dev_priv);
- gfx = i915_gfx_val(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ temp = i915_mch_val(i915);
+ chipset = i915_chipset_val(i915);
+ gfx = i915_gfx_val(i915);
- intel_runtime_pm_put(dev_priv, wakeref);
-
- seq_printf(m, "GMCH temp: %ld\n", temp);
- seq_printf(m, "Chipset power: %ld\n", chipset);
- seq_printf(m, "GFX power: %ld\n", gfx);
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e2f4753ca21f..4d0a4f028882 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1781,6 +1781,9 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
+ /* Flush any external code that may still be under the RCU lock */
+ synchronize_rcu();
+
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ab7257720c7e..7613ae72df3d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6203,10 +6203,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
-
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
u16 rgvswctl;
@@ -7849,16 +7845,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
-
- val = __i915_chipset_val(dev_priv);
-
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_chipset_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
return val;
}
@@ -7935,14 +7932,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
+ intel_wakeref_t wakeref;
+
if (!IS_GEN(dev_priv, 5))
return;
- spin_lock_irq(&mchdev_lock);
-
- __i915_update_gfx_val(dev_priv);
-
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ __i915_update_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -7984,18 +7983,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
- val = __i915_gfx_val(dev_priv);
+ return val;
+}
- spin_unlock_irq(&mchdev_lock);
+static struct drm_i915_private *i915_mch_dev;
- return val;
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = i915_mch_dev;
+ if (i915 && !kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
}
/**
@@ -8006,23 +8021,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
*/
unsigned long i915_read_mch_val(void)
{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = __i915_chipset_val(dev_priv);
- graphics_val = __i915_gfx_val(dev_priv);
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
- ret = chipset_val + graphics_val;
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
-out_unlock:
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(i915, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __i915_chipset_val(i915);
+ graphics_val = __i915_gfx_val(i915);
+ spin_unlock_irq(&mchdev_lock);
+ }
- return ret;
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
@@ -8033,23 +8049,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
+ struct drm_i915_private *i915;
- if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
- dev_priv->ips.max_delay--;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay > i915->ips.fmax)
+ i915->ips.max_delay--;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@@ -8061,23 +8073,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
+ struct drm_i915_private *i915;
- if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
- dev_priv->ips.max_delay++;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay < i915->ips.min_delay)
+ i915->ips.max_delay++;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@@ -8088,13 +8096,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- bool ret = false;
+ struct drm_i915_private *i915;
+ bool ret;
- spin_lock_irq(&mchdev_lock);
- if (i915_mch_dev)
- ret = i915_mch_dev->gt.awake;
- spin_unlock_irq(&mchdev_lock);
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -8107,24 +8118,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->ips.max_delay = dev_priv->ips.fstart;
+ struct drm_i915_private *i915;
+ bool ret;
- if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
- ret = false;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ i915->ips.max_delay = i915->ips.fstart;
+ ret = ironlake_set_drps(i915, i915->ips.fstart);
spin_unlock_irq(&mchdev_lock);
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@@ -8153,18 +8159,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = dev_priv;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, dev_priv);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, NULL);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
--
2.20.1