[Intel-gfx] [PATCH 2/2] drm/i915: Track all held rpm wakerefs
Chris Wilson
chris at chris-wilson.co.uk
Thu Aug 9 09:16:35 UTC 2018
Every time we take a wakeref, record the stack trace of where it was
taken; clearing the set if we ever drop back to no owners. For debugging
an rpm leak, we can look at all the currently held wakerefs and check
whether each has a matching rpm_put.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
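Notes for reviewers (not for the commit message): the tracking side below
reduces to "snapshot the caller's stack, deduplicate it into a stackdepot
handle, append the handle to an array". A minimal standalone sketch of
that pattern against the current (4.18-era) stacktrace/stackdepot API;
capture_wakeref_stack() is a hypothetical name used only for illustration:

  #include <linux/stackdepot.h>
  #include <linux/stacktrace.h>

  static depot_stack_handle_t capture_wakeref_stack(void)
  {
  	unsigned long entries[12];
  	struct stack_trace trace = {
  		.entries = entries,
  		.max_entries = ARRAY_SIZE(entries),
  		.skip = 1, /* drop this helper's own frame */
  	};

  	/* record the current call chain into entries[] */
  	save_stack_trace(&trace);

  	/* some arches pad the trace with a trailing ULONG_MAX; trim it */
  	if (trace.nr_entries &&
  	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
  		trace.nr_entries--;

  	/* NOWAIT as this may run in atomic context; returns 0 on failure */
  	return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
  }

The handle is an opaque cookie: identical stacks yield identical handles,
which is what lets show_intel_runtime_pm_wakeref() sort and coalesce
repeated offenders.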
drivers/gpu/drm/i915/Kconfig.debug | 13 +++
drivers/gpu/drm/i915/i915_drv.c | 3 +-
drivers/gpu/drm/i915/i915_drv.h | 7 ++
drivers/gpu/drm/i915/intel_drv.h | 1 +
drivers/gpu/drm/i915/intel_runtime_pm.c | 143 +++++++++++++++++++++++-
5 files changed, 165 insertions(+), 2 deletions(-)
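One more note: fetch_and_zero() in the untrack/show paths is i915's
existing helper from i915_utils.h; it reads a variable and resets it to
zero in a single expression, so the owner array and count are detached
under runtime_pm.debug_lock in one go. Paraphrased, it is roughly:

  /* read *ptr, zero it, and hand back the old value */
  #define fetch_and_zero(ptr) ({			\
  	typeof(*ptr) __T = *(ptr);		\
  	*(ptr) = (typeof(*ptr))0;		\
  	__T;					\
  })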
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 459f8f88a34c..ed572a454e01 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -39,6 +39,19 @@ config DRM_I915_DEBUG
If in doubt, say "N".
+config DRM_I915_DEBUG_RPM
+ bool "Insert extra checks into the runtime pm internals"
+ depends on DRM_I915
+ default n
+ select STACKDEPOT
+ help
+ Enable extra sanity checks (including BUGs) along the runtime pm
+ paths that may slow the system down and, if hit, hang the machine.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_DEBUG_GEM
bool "Insert extra checks into the GEM internals"
default n
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 62ef105a241d..63992fe45dbf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1477,7 +1477,7 @@ void i915_driver_unload(struct drm_device *dev)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+ intel_runtime_pm_cleanup(dev_priv);
}
static void i915_driver_release(struct drm_device *dev)
@@ -2603,6 +2603,7 @@ static int intel_runtime_suspend(struct device *kdev)
DRM_DEBUG_KMS("Suspending device\n");
+ intel_runtime_pm_cleanup(dev_priv);
disable_rpm_wakeref_asserts(dev_priv);
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0b10a30b7d96..c8d5b896b155 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -45,6 +45,7 @@
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>
#include <drm/drmP.h>
#include <drm/intel-gtt.h>
@@ -1278,6 +1279,12 @@ struct i915_runtime_pm {
atomic_t wakeref_count;
bool suspended;
bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RPM)
+ spinlock_t debug_lock;
+ depot_stack_handle_t *debug_owners;
+ unsigned long debug_count;
+#endif
};
enum intel_pipe_crc_source {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index dc6c0cec9b36..968c9074f1a8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1957,6 +1957,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b78c3b48aa62..7f555a0ad2ee 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,6 +49,130 @@
* present for a given platform.
*/
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RPM)
+
+#include <linux/sort.h>
+
+#define STACKDEPTH 12
+
+static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ .skip = 2
+ };
+ unsigned long flags;
+ depot_stack_handle_t stack, *stacks;
+
+ if (!HAS_RUNTIME_PM(i915))
+ return;
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries &&
+ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ stack = depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+ if (!stack)
+ return;
+
+ spin_lock_irqsave(&i915->runtime_pm.debug_lock, flags);
+ stacks = krealloc(i915->runtime_pm.debug_owners,
+ (i915->runtime_pm.debug_count + 1) * sizeof(*stacks),
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (stacks) {
+ stacks[i915->runtime_pm.debug_count++] = stack;
+ i915->runtime_pm.debug_owners = stacks;
+ }
+ spin_unlock_irqrestore(&i915->runtime_pm.debug_lock, flags);
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ depot_stack_handle_t *stacks;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->runtime_pm.debug_lock, flags);
+ stacks = fetch_and_zero(&i915->runtime_pm.debug_owners);
+ i915->runtime_pm.debug_count = 0;
+ spin_unlock_irqrestore(&i915->runtime_pm.debug_lock, flags);
+
+ kfree(stacks);
+}
+
+static int cmphandle(const void *_a, const void *_b)
+{
+ const depot_stack_handle_t * const a = _a, * const b = _b;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static void show_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ unsigned long entries[STACKDEPTH];
+ depot_stack_handle_t *stacks;
+ unsigned long flags, count, i;
+ char *buf;
+
+ spin_lock_irqsave(&i915->runtime_pm.debug_lock, flags);
+ stacks = fetch_and_zero(&i915->runtime_pm.debug_owners);
+ count = fetch_and_zero(&i915->runtime_pm.debug_count);
+ spin_unlock_irqrestore(&i915->runtime_pm.debug_lock, flags);
+ if (!count)
+ return;
+
+ DRM_DEBUG_DRIVER("leaked %lu wakerefs\n", count);
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto out_stacks;
+
+ sort(stacks, count, sizeof(*stacks), cmphandle, NULL);
+
+ for (i = 0; i < count; i++) {
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ };
+ depot_stack_handle_t stack = stacks[i];
+ unsigned long rep;
+
+ rep = 1;
+ while (i + 1 < count && stacks[i + 1] == stack)
+ rep++, i++;
+ depot_fetch_stack(stack, &trace);
+ snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
+ DRM_DEBUG_DRIVER("wakeref x%lu at\n%s", rep, buf);
+ }
+
+ kfree(buf);
+out_stacks:
+ kfree(stacks);
+}
+
+#else
+
+static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static void show_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+#endif
+
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -3939,6 +4063,8 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
+
+ track_intel_runtime_pm_wakeref(dev_priv);
}
/**
@@ -3973,6 +4099,8 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
+ track_intel_runtime_pm_wakeref(dev_priv);
+
return true;
}
@@ -4002,6 +4130,8 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
pm_runtime_get_noresume(kdev);
atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+
+ track_intel_runtime_pm_wakeref(dev_priv);
}
/**
@@ -4018,7 +4148,8 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+ if (atomic_dec_and_test(&dev_priv->runtime_pm.wakeref_count))
+ untrack_intel_runtime_pm_wakeref(dev_priv);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
@@ -4080,3 +4211,13 @@ void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
if (!HAS_RUNTIME_PM(dev_priv))
pm_runtime_put(kdev);
}
+
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv)
+{
+ if (WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count))) {
+ show_intel_runtime_pm_wakeref(dev_priv);
+ atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
+ }
+
+ untrack_intel_runtime_pm_wakeref(dev_priv);
+}
--
2.18.0