[Intel-gfx] [RFC 1/8] drm/i915: Introduce intel_irq
Rodrigo Vivi
rodrigo.vivi at intel.com
Thu Apr 18 20:53:40 UTC 2019
Let's start the reorganization of the IRQ code by introducing the
intel_irq structure.
Since irq_lock is used everywhere, let's start by moving this lock
into intel_irq first.
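To make the shape of the change concrete, here is a minimal sketch of
the idea (the names drm_i915_private_sketch and example_irq_update are
hypothetical stand-ins; the real definitions and the mechanical call-site
conversions are in the diff below):

#include <linux/spinlock.h>

/* Simplified sketch; the real struct is added to i915_drv.h below. */
struct intel_irq {
        /* protects the irq masks */
        spinlock_t lock;
};

/* Stand-in for drm_i915_private, reduced to the relevant member. */
struct drm_i915_private_sketch {
        struct intel_irq irq;   /* replaces the bare irq_lock spinlock */
};

/* Call sites then convert mechanically, e.g.: */
static void example_irq_update(struct drm_i915_private_sketch *dev_priv)
{
        spin_lock_irq(&dev_priv->irq.lock);     /* was &dev_priv->irq_lock */
        /* ... update the irq masks here ... */
        spin_unlock_irq(&dev_priv->irq.lock);
}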
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 8 +-
drivers/gpu/drm/i915/i915_drv.c | 6 +-
drivers/gpu/drm/i915/i915_drv.h | 10 +-
drivers/gpu/drm/i915/i915_irq.c | 182 ++++++++++-----------
drivers/gpu/drm/i915/intel_breadcrumbs.c | 8 +-
drivers/gpu/drm/i915/intel_display.c | 4 +-
drivers/gpu/drm/i915/intel_drv.h | 2 +-
drivers/gpu/drm/i915/intel_engine_types.h | 2 +-
drivers/gpu/drm/i915/intel_fifo_underrun.c | 30 ++--
drivers/gpu/drm/i915/intel_hotplug.c | 38 ++---
drivers/gpu/drm/i915/intel_runtime_pm.c | 8 +-
drivers/gpu/drm/i915/intel_tv.c | 8 +-
12 files changed, 155 insertions(+), 151 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5823ffb17821..e2fd3250760e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4336,12 +4336,12 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
else
DRM_DEBUG_KMS("Disabling HPD storm detection\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
hotplug->hpd_storm_threshold = new_threshold;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
flush_delayed_work(&dev_priv->hotplug.reenable_work);
@@ -4414,12 +4414,12 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
hotplug->hpd_short_storm_enabled = new_state;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
flush_delayed_work(&dev_priv->hotplug.reenable_work);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1ad88e6d7c04..fe5c2138a725 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -879,7 +879,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
intel_uncore_init_early(&dev_priv->uncore);
- spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->irq.lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
@@ -2219,10 +2219,10 @@ static int i915_drm_resume(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
intel_dp_mst_resume(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 066fd2a12851..0b4aa818d66b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -647,10 +647,15 @@ struct intel_rps_ei {
u32 media_c0;
};
+struct intel_irq {
+ /* protects the irq masks */
+ spinlock_t lock;
+};
+
struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
+ * dev_priv->irq.lock
*/
struct work_struct work;
bool interrupts_enabled;
@@ -1551,8 +1556,7 @@ struct drm_i915_private {
struct resource mch_res;
- /* protects the irq masks */
- spinlock_t irq_lock;
+ struct intel_irq irq;
bool display_irqs_enabled;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b92cfd69134b..679dc63244d9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -262,7 +262,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
{
u32 val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
@@ -287,9 +287,9 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
u32 mask,
u32 bits)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
static u32
@@ -303,7 +303,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
void __iomem * const regs = i915->uncore.regs;
u32 dw;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&i915->irq.lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
@@ -339,7 +339,7 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
{
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -367,7 +367,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -448,7 +448,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
@@ -485,7 +485,7 @@ static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
I915_WRITE(reg, reset_mask);
I915_WRITE(reg, reset_mask);
@@ -494,7 +494,7 @@ static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
dev_priv->pm_ier |= enable_mask;
write_pm_ier(dev_priv);
@@ -504,7 +504,7 @@ static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mas
static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
@@ -514,22 +514,22 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
;
dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
@@ -539,7 +539,7 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
if (READ_ONCE(rps->interrupts_enabled))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
WARN_ON_ONCE(rps->pm_iir);
if (INTEL_GEN(dev_priv) >= 11)
@@ -550,7 +550,7 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
rps->interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
@@ -560,14 +560,14 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
if (!READ_ONCE(rps->interrupts_enabled))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
rps->interrupts_enabled = false;
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
synchronize_irq(dev_priv->drm.irq);
/* Now that we will not be generating any more work, flush any
@@ -586,35 +586,35 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
assert_rpm_wakelock_held(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
assert_rpm_wakelock_held(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (!dev_priv->guc.interrupts_enabled) {
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
dev_priv->pm_guc_events);
dev_priv->guc.interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
assert_rpm_wakelock_held(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
dev_priv->guc.interrupts_enabled = false;
gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
synchronize_irq(dev_priv->drm.irq);
gen9_reset_guc_interrupts(dev_priv);
@@ -633,7 +633,7 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
u32 new_val;
u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -666,7 +666,7 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
{
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -700,7 +700,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -715,7 +715,7 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if (INTEL_GEN(dev_priv) < 5)
goto out;
@@ -760,7 +760,7 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
@@ -783,7 +783,7 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
@@ -813,14 +813,14 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
if (!i915_has_asle(dev_priv))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_GEN(dev_priv) >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
/*
@@ -1289,12 +1289,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
int new_delay, adj, min, max;
u32 pm_iir = 0;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (rps->interrupts_enabled) {
pm_iir = fetch_and_zero(&rps->pm_iir);
client_boost = atomic_read(&rps->num_waiters);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
@@ -1371,10 +1371,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (rps->interrupts_enabled)
gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
@@ -1452,9 +1452,9 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -1465,9 +1465,9 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
if (!HAS_L3_DPF(dev_priv))
return;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&dev_priv->irq.lock);
gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&dev_priv->irq.lock);
iir &= GT_PARITY_ERROR(dev_priv);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
@@ -1845,7 +1845,7 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
struct intel_rps *rps = &i915->gt_pm.rps;
const u32 events = i915->pm_rps_events & pm_iir;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&i915->irq.lock);
if (unlikely(!events))
return;
@@ -1864,13 +1864,13 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&dev_priv->irq.lock);
gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (rps->interrupts_enabled) {
rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&rps->work);
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&dev_priv->irq.lock);
}
if (INTEL_GEN(dev_priv) >= 8)
@@ -1907,10 +1907,10 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
{
int pipe;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&dev_priv->irq.lock);
if (!dev_priv->display_irqs_enabled) {
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&dev_priv->irq.lock);
return;
}
@@ -1964,7 +1964,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
I915_WRITE(reg, enable_mask);
}
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&dev_priv->irq.lock);
}
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
@@ -2981,7 +2981,7 @@ gen11_gt_engine_identity(struct drm_i915_private * const i915,
u32 timeout_ts;
u32 ident;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&i915->irq.lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
@@ -3065,7 +3065,7 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
unsigned long intr_dw;
unsigned int bit;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&i915->irq.lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
@@ -3085,14 +3085,14 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
{
unsigned int bank;
- spin_lock(&i915->irq_lock);
+ spin_lock(&i915->irq.lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(i915, bank);
}
- spin_unlock(&i915->irq_lock);
+ spin_unlock(&i915->irq.lock);
}
static u32
@@ -3185,9 +3185,9 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
return 0;
}
@@ -3207,10 +3207,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
return 0;
}
@@ -3222,9 +3222,9 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
ilk_enable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
@@ -3240,9 +3240,9 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
@@ -3261,9 +3261,9 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
}
static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
@@ -3281,10 +3281,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
@@ -3294,9 +3294,9 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
}
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
@@ -3304,9 +3304,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq.lock, irqflags);
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, irqflags);
}
static void i945gm_vblank_work_func(struct work_struct *work)
@@ -3484,10 +3484,10 @@ static void valleyview_irq_reset(struct drm_device *dev)
gen5_gt_irq_reset(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3581,10 +3581,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
return;
}
@@ -3593,7 +3593,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
@@ -3602,17 +3602,17 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
return;
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->drm.irq);
@@ -3630,10 +3630,10 @@ static void cherryview_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
@@ -3995,9 +3995,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
* spinlocking not required here for correctness since interrupt
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
return 0;
@@ -4005,7 +4005,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if (dev_priv->display_irqs_enabled)
return;
@@ -4020,7 +4020,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if (!dev_priv->display_irqs_enabled)
return;
@@ -4038,10 +4038,10 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
gen5_gt_irq_postinstall(dev);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
POSTING_READ(VLV_MASTER_IER);
@@ -4241,10 +4241,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
gen8_gt_irq_postinstall(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -4287,10 +4287,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
return 0;
}
@@ -4465,10 +4465,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
i915_enable_asle_pipestat(dev_priv);
@@ -4587,11 +4587,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
i915_enable_asle_pipestat(dev_priv);
@@ -4602,7 +4602,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_en;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..98a55f0a16eb 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -33,9 +33,9 @@ static void irq_enable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->i915->irq.lock);
engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq.lock);
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -44,9 +44,9 @@ static void irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->i915->irq.lock);
engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq.lock);
}
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd40a4a6739..7c8c179146a5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3975,10 +3975,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
ret = __intel_display_resume(dev, state, ctx);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a38b9cff5cd0..417dba7d0a06 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1099,7 +1099,7 @@ struct intel_crtc {
struct intel_crtc_state *config;
- /* Access to these should be protected by dev_priv->irq_lock. */
+ /* Access to these should be protected by dev_priv->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
index 1f970c76b6a6..599991a0067f 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -308,7 +308,7 @@ struct intel_engine_cs {
spinlock_t irq_lock;
struct list_head signalers;
- struct irq_work irq_work; /* for use from inside irq_lock */
+ struct irq_work irq_work; /* for use from inside irq.lock */
unsigned int irq_enabled;
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 74c8b0528294..3dd5c8453f20 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -55,7 +55,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
struct intel_crtc *crtc;
enum pipe pipe;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -73,7 +73,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -91,7 +91,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
i915_reg_t reg = PIPESTAT(crtc->pipe);
u32 enable_mask;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@@ -111,7 +111,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPESTAT(pipe);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
@@ -143,7 +143,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
u32 err_int = I915_READ(GEN7_ERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@@ -209,7 +209,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
enum pipe pch_transcoder = crtc->pipe;
u32 serr_int = I915_READ(SERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@@ -254,7 +254,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@@ -293,10 +293,10 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&dev_priv->irq.lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, flags);
return ret;
}
@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
* crtc on LPT won't cause issues.
*/
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&dev_priv->irq.lock, flags);
old = !crtc->pch_fifo_underrun_disabled;
crtc->pch_fifo_underrun_disabled = !enable;
@@ -347,7 +347,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
pch_transcoder,
enable, old);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq.lock, flags);
return old;
}
@@ -416,7 +416,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
for_each_intel_crtc(&dev_priv->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
@@ -428,7 +428,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
ivybridge_check_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
/**
@@ -443,7 +443,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
for_each_intel_crtc(&dev_priv->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
@@ -453,5 +453,5 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
cpt_check_pch_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b8937c788f03..8260e81f88e7 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -184,7 +184,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
enum hpd_pin pin;
bool hpd_disabled = false;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq.lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
@@ -231,7 +231,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
wakeref = intel_runtime_pm_get(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
for_each_hpd_pin(pin) {
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -260,7 +260,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
intel_runtime_pm_put(dev_priv, wakeref);
}
@@ -303,12 +303,12 @@ static void i915_digport_work_func(struct work_struct *work)
struct intel_encoder *encoder;
u32 old_bits = 0;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
long_port_mask = dev_priv->hotplug.long_port_mask;
dev_priv->hotplug.long_port_mask = 0;
short_port_mask = dev_priv->hotplug.short_port_mask;
dev_priv->hotplug.short_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_digital_port *dig_port;
@@ -335,9 +335,9 @@ static void i915_digport_work_func(struct work_struct *work)
}
if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
dev_priv->hotplug.event_bits |= old_bits;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
schedule_work(&dev_priv->hotplug.hotplug_work);
}
}
@@ -360,7 +360,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
@@ -368,7 +368,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
@@ -421,7 +421,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!pin_mask)
return;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&dev_priv->irq.lock);
/*
* Determine whether ->hpd_pulse() exists for each pin, and
@@ -504,7 +504,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*/
if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&dev_priv->irq.lock);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -549,10 +549,10 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
* just to make the assert_spin_locked checks happy.
*/
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
}
@@ -644,13 +644,13 @@ void intel_hpd_init_work(struct drm_i915_private *dev_priv)
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
dev_priv->hotplug.long_port_mask = 0;
dev_priv->hotplug.short_port_mask = 0;
dev_priv->hotplug.event_bits = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
cancel_work_sync(&dev_priv->hotplug.hotplug_work);
@@ -665,12 +665,12 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
if (pin == HPD_NONE)
return false;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
ret = true;
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
return ret;
}
@@ -680,7 +680,7 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
if (pin == HPD_NONE)
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d4f4262d0fee..5a2d7f2f799d 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -1335,9 +1335,9 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
vlv_init_display_clock_gating(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
/*
* During driver initialization/resume we can avoid restoring the
@@ -1361,9 +1361,9 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->drm.irq);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 5dbba33f4202..04081f029023 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1574,11 +1574,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_disable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1646,11 +1646,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq.lock);
i915_enable_pipestat(dev_priv, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&dev_priv->irq.lock);
}
return type;
--
2.20.1