[Intel-gfx] [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32
Imre Deak
imre.deak at intel.com
Tue Feb 23 14:47:17 UTC 2016
The device needs to be in the D0 power state whenever we call
ioread32()/iowrite32() on one of its mappings, so add the corresponding
assert checks.

I had to move some helpers around due to dependencies introduced by nested
includes.
Signed-off-by: Imre Deak <imre.deak at intel.com>
---
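Note for reviewers: to illustrate the rule the asserts enforce, here is a
minimal sketch of the intended calling pattern. example_hw_write(),
example_special_case_write() and their arguments are made up for
illustration; only the runtime PM helpers and the new I915_IOWRITE32()
macro are real:

	/* Normal case: any HW access must be covered by an RPM wakeref,
	 * which satisfies the assert_rpm_wakelock_held() check inside
	 * I915_IOWRITE32(). */
	static void example_hw_write(struct drm_i915_private *dev_priv,
				     u32 __iomem *addr, u32 val)
	{
		intel_runtime_pm_get(dev_priv);
		I915_IOWRITE32(val, addr);
		intel_runtime_pm_put(dev_priv);
	}

	/* Special case (e.g. an IRQ or suspend/resume handler) where no
	 * wakeref is taken: suppress only the wakelock assert; the
	 * device-not-suspended check stays enabled. */
	static void example_special_case_write(struct drm_i915_private *dev_priv,
					       u32 __iomem *addr, u32 val)
	{
		disable_rpm_wakeref_asserts(dev_priv);
		I915_IOWRITE32(val, addr);
		enable_rpm_wakeref_asserts(dev_priv);
	}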
drivers/gpu/drm/i915/i915_drv.h | 117 +++++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 +-
drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +-
drivers/gpu/drm/i915/i915_irq.c | 8 +-
drivers/gpu/drm/i915/intel_bios.c | 7 +-
drivers/gpu/drm/i915/intel_drv.h | 75 ------------------
drivers/gpu/drm/i915/intel_lrc.c | 3 +-
drivers/gpu/drm/i915/intel_lrc.h | 17 -----
drivers/gpu/drm/i915/intel_overlay.c | 44 ++++++-----
drivers/gpu/drm/i915/intel_ringbuffer.c | 3 +-
drivers/gpu/drm/i915/intel_ringbuffer.h | 13 +---
11 files changed, 161 insertions(+), 138 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9e76bfc..c794664 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3509,6 +3509,123 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+static inline void
+assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(dev_priv->pm.suspended,
+ "Device suspended during HW access\n");
+}
+
+static inline void
+assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
+{
+ assert_rpm_device_not_suspended(dev_priv);
+ /* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
+ * too much noise. */
+ if (!atomic_read(&dev_priv->pm.wakeref_count))
+ DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
+}
+
+static inline int
+assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
+{
+ int seq = atomic_read(&dev_priv->pm.atomic_seq);
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ return seq;
+}
+
+static inline void
+assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
+{
+ WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
+ "HW access outside of RPM atomic section\n");
+}
+
+/**
+ * disable_rpm_wakeref_asserts - disable the RPM assert checks
+ * @dev_priv: i915 device instance
+ *
+ * This function disables the asserts that check if we hold an RPM wakelock
+ * reference, while keeping the device-not-suspended checks still enabled.
+ * It's meant to be used only in special circumstances where our rule about
+ * the wakelock refcount wrt. the device power state doesn't hold. According
+ * to this rule, at any point where we access the HW or want to keep the HW in
+ * an active state we must hold an RPM wakelock reference acquired via one of
+ * the intel_runtime_pm_get() helpers. Currently there are a few special spots
+ * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
+ * forcewake release timer, and the GPU RPS and hangcheck work items. All other
+ * users should avoid using this function.
+ *
+ * Any calls to this function must have a symmetric call to
+ * enable_rpm_wakeref_asserts().
+ */
+static inline void
+disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+{
+ atomic_inc(&dev_priv->pm.wakeref_count);
+}
+
+/**
+ * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
+ * @dev_priv: i915 device instance
+ *
+ * This function re-enables the RPM assert checks after disabling them with
+ * disable_rpm_wakeref_asserts(). It's meant to be used only in special
+ * circumstances; otherwise its use should be avoided.
+ *
+ * Any calls to this function must have a symmetric call to
+ * disable_rpm_wakeref_asserts().
+ */
+static inline void
+enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+{
+ atomic_dec(&dev_priv->pm.wakeref_count);
+}
+
+
+#define I915_IOWRITE32(val, addr) ({ \
+ assert_rpm_wakelock_held(dev_priv); \
+ iowrite32(val, addr); })
+
+#define I915_IOREAD32(addr) ({ \
+ assert_rpm_wakelock_held(dev_priv); \
+ ioread32(addr); })
+
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
+ u32 data)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_ringbuffer *ringbuf = ring->buffer;
+
+ I915_IOWRITE32(data, ringbuf->virtual_start + ringbuf->tail);
+ ringbuf->tail += 4;
+}
+static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+ i915_reg_t reg)
+{
+ intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+}
+/**
+ * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
+ * @ringbuf: Ringbuffer to write to.
+ * @data: DWORD to write.
+ */
+static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
+ u32 data)
+{
+ struct drm_i915_private *dev_priv = to_i915(ringbuf->ring->dev);
+
+ I915_IOWRITE32(data, ringbuf->virtual_start + ringbuf->tail);
+ ringbuf->tail += 4;
+}
+static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
+ i915_reg_t reg)
+{
+ intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
+}
+
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8fd00d2..1ad589c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -332,7 +332,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
offset += reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
offset & PAGE_MASK);
- iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
+ I915_IOWRITE32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
if (INTEL_INFO(dev)->gen >= 8) {
offset += sizeof(uint32_t);
@@ -344,7 +344,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
offset);
}
- iowrite32(upper_32_bits(delta),
+ I915_IOWRITE32(upper_32_bits(delta),
reloc_page + offset_in_page(offset));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9127f8f..66b03dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2357,8 +2357,8 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
#ifdef writeq
writeq(pte, addr);
#else
- iowrite32((u32)pte, addr);
- iowrite32(pte >> 32, addr + 4);
+ I915_IOWRITE32((u32)pte, addr);
+ I915_IOWRITE32(pte >> 32, addr + 4);
#endif
}
@@ -2457,7 +2457,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
+ I915_IOWRITE32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
@@ -2538,7 +2538,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
+ I915_IOWRITE32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef..a188f01 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2908,7 +2908,7 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
head &= ring->buffer->size - 1;
/* This here seems to blow up */
- cmd = ioread32(ring->buffer->virtual_start + head);
+ cmd = I915_IOREAD32(ring->buffer->virtual_start + head);
if (cmd == ipehr)
break;
@@ -2918,11 +2918,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
if (!i)
return NULL;
- *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
+ *seqno = I915_IOREAD32(ring->buffer->virtual_start + head + 4) + 1;
if (INTEL_INFO(ring->dev)->gen >= 8) {
- offset = ioread32(ring->buffer->virtual_start + head + 12);
+ offset = I915_IOREAD32(ring->buffer->virtual_start + head + 12);
offset <<= 32;
- offset = ioread32(ring->buffer->virtual_start + head + 8);
+ offset = I915_IOREAD32(ring->buffer->virtual_start + head + 8);
}
return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index bf62a19..652bc40 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1343,7 +1343,8 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return vbt;
}
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+static const struct vbt_header *find_vbt(struct drm_i915_private *dev_priv,
+ void __iomem *bios, size_t size)
{
size_t i;
@@ -1351,7 +1352,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
for (i = 0; i + 4 < size; i++) {
void *vbt;
- if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+ if (I915_IOREAD32(bios + i) != *((const u32 *) "$VBT"))
continue;
/*
@@ -1397,7 +1398,7 @@ intel_bios_init(struct drm_i915_private *dev_priv)
if (!bios)
return -1;
- vbt = find_vbt(bios, size);
+ vbt = find_vbt(dev_priv, bios, size);
if (!vbt) {
pci_unmap_rom(pdev, bios);
return -1;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4852049..35a0ac8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1441,81 +1441,6 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(dev_priv->pm.suspended,
- "Device suspended during HW access\n");
-}
-
-static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
-{
- assert_rpm_device_not_suspended(dev_priv);
- /* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
- * too much noise. */
- if (!atomic_read(&dev_priv->pm.wakeref_count))
- DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
-}
-
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
- int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
- assert_rpm_wakelock_held(dev_priv);
-
- return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
- WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
- "HW access outside of RPM atomic section\n");
-}
-
-/**
- * disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @dev_priv: i915 device instance
- *
- * This function disable asserts that check if we hold an RPM wakelock
- * reference, while keeping the device-not-suspended checks still enabled.
- * It's meant to be used only in special circumstances where our rule about
- * the wakelock refcount wrt. the device power state doesn't hold. According
- * to this rule at any point where we access the HW or want to keep the HW in
- * an active state we must hold an RPM wakelock reference acquired via one of
- * the intel_runtime_pm_get() helpers. Currently there are a few special spots
- * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
- * forcewake release timer, and the GPU RPS and hangcheck works. All other
- * users should avoid using this function.
- *
- * Any calls to this function must have a symmetric call to
- * enable_rpm_wakeref_asserts().
- */
-static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
-{
- atomic_inc(&dev_priv->pm.wakeref_count);
-}
-
-/**
- * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @dev_priv: i915 device instance
- *
- * This function re-enables the RPM assert checks after disabling them with
- * disable_rpm_wakeref_asserts. It's meant to be used only in special
- * circumstances otherwise its use should be avoided.
- *
- * Any calls to this function must have a symmetric call to
- * disable_rpm_wakeref_asserts().
- */
-static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
-{
- atomic_dec(&dev_priv->pm.wakeref_count);
-}
-
/* TODO: convert users of these to rely instead on proper RPM refcounting */
#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
disable_rpm_wakeref_asserts(dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a03646..ffb8e9e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -804,13 +804,14 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
+ struct drm_i915_private *dev_priv = to_i915(ringbuf->obj->base.dev);
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
- iowrite32(MI_NOOP, virt++);
+ I915_IOWRITE32(MI_NOOP, virt++);
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e6cda3e..1bdc260 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -73,23 +73,6 @@ static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
ringbuf->tail &= ringbuf->size - 1;
}
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
- u32 data)
-{
- iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
- ringbuf->tail += 4;
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
- i915_reg_t reg)
-{
- intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
-
/* Logical Ring Contexts */
/* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9168413..9fcd620 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -594,6 +594,7 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
/* fixed point with a 12 bit shift */
u32 xscale, yscale, xscale_UV, yscale_UV;
#define FP_SHIFT 12
@@ -630,17 +631,17 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- iowrite32(((yscale & FRACT_MASK) << 20) |
+ I915_IOWRITE32(((yscale & FRACT_MASK) << 20) |
((xscale >> FP_SHIFT) << 16) |
((xscale & FRACT_MASK) << 3),
&regs->YRGBSCALE);
- iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ I915_IOWRITE32(((yscale_UV & FRACT_MASK) << 20) |
((xscale_UV >> FP_SHIFT) << 16) |
((xscale_UV & FRACT_MASK) << 3),
&regs->UVSCALE);
- iowrite32((((yscale >> FP_SHIFT) << 16) |
+ I915_IOWRITE32((((yscale >> FP_SHIFT) << 16) |
((yscale_UV >> FP_SHIFT) << 0)),
&regs->UVSCALEV);
@@ -653,6 +654,7 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
u32 key = overlay->color_key;
u32 flags;
@@ -682,8 +684,8 @@ static void update_colorkey(struct intel_overlay *overlay,
break;
}
- iowrite32(key, &regs->DCLRKV);
- iowrite32(flags, &regs->DCLRKM);
+ I915_IOWRITE32(key, &regs->DCLRKV);
+ I915_IOWRITE32(flags, &regs->DCLRKM);
}
static u32 overlay_cmd_reg(struct put_image_params *params)
@@ -735,6 +737,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
int ret, tmp_width;
struct overlay_registers __iomem *regs;
bool scale_changed = false;
@@ -770,7 +773,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
- iowrite32(oconfig, &regs->OCONFIG);
+ I915_IOWRITE32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@@ -784,8 +787,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
- iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
+ I915_IOWRITE32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ I915_IOWRITE32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
@@ -795,7 +798,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+ I915_IOWRITE32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -809,21 +812,21 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+ I915_IOWRITE32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+ I915_IOWRITE32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
- iowrite32(swidth, &regs->SWIDTH);
- iowrite32(swidthsw, &regs->SWIDTHSW);
- iowrite32(sheight, &regs->SHEIGHT);
- iowrite32(ostride, &regs->OSTRIDE);
+ I915_IOWRITE32(swidth, &regs->SWIDTH);
+ I915_IOWRITE32(swidthsw, &regs->SWIDTHSW);
+ I915_IOWRITE32(sheight, &regs->SHEIGHT);
+ I915_IOWRITE32(ostride, &regs->OSTRIDE);
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- iowrite32(overlay_cmd_reg(params), &regs->OCMD);
+ I915_IOWRITE32(overlay_cmd_reg(params), &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
@@ -849,6 +852,7 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
int ret;
@@ -868,7 +872,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
return ret;
regs = intel_overlay_map_regs(overlay);
- iowrite32(0, &regs->OCMD);
+ I915_IOWRITE32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
@@ -1232,9 +1236,11 @@ out_free:
static void update_reg_attrs(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+
+ I915_IOWRITE32((overlay->contrast << 18) | (overlay->brightness & 0xff),
&regs->OCLRC0);
- iowrite32(overlay->saturation, &regs->OCLRC1);
+ I915_IOWRITE32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..12dc8e0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2331,13 +2331,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
+ struct drm_i915_private *dev_priv = to_i915(ringbuf->ring->dev);
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
- iowrite32(MI_NOOP, virt++);
+ I915_IOWRITE32(MI_NOOP, virt++);
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 566b0ae..cae1aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -446,18 +446,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
- u32 data)
-{
- struct intel_ringbuffer *ringbuf = ring->buffer;
- iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
- ringbuf->tail += 4;
-}
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
- i915_reg_t reg)
-{
- intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
+
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
--
2.5.0