[Intel-gfx] [PATCH v5 06/10] drm/i915: Implement LRI based FBC tracking
ville.syrjala at linux.intel.com
Wed Nov 27 16:22:55 CET 2013
From: Ville Syrjälä <ville.syrjala at linux.intel.com>
As per the SNB and HSW PM guides, we should enable FBC render/blitter
tracking only during batches targeting the front buffer.
On SNB we must also update the FBC render tracking address whenever it
changes. And since the register in question is stored in the context,
we need to make sure we reload it with correct data after context
switches.
On IVB/HSW we use the render nuke mechanism, so no render tracking
address updates are needed. However, on the blitter side we need to
enable the blitter tracking like on SNB, and in addition we need
to issue the cache clean messages, which we already do.
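Concretely, the tracking is updated with LRI writes emitted into the
ring (see gen6_render_fbc_tracking()/gen6_blt_fbc_tracking() in the
diff below). A condensed sketch of the SNB render-side update, relying
on the driver's existing ring emission helpers:

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, ILK_FBC_RT_BASE);
	if (ring->fbc_address != -1)
		/* batch writes the front buffer -> point FBC tracking at it */
		intel_ring_emit(ring, ring->fbc_address |
				SNB_FBC_FRONT_BUFFER | ILK_FBC_RT_VALID);
	else
		/* batch doesn't touch the front buffer -> invalidate RT base */
		intel_ring_emit(ring, 0);
	intel_ring_advance(ring);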
v2: Introduce intel_fb_obj_has_fbc()
Fix crtc locking around crtc->fb access
Drop a hunk that was included by accident in v1
Set fbc_address_dirty=false not true after emitting the LRI
v3: Now that fbc hangs on to the fb intel_fb_obj_has_fbc() doesn't
need to upset lockdep anymore
v4: Use |= instead of = to update fbc_address_dirty
v5: |= for fbc_dirty too, kill fbc_obj variable, pack the
intel_ringbuffer dirty bits using bitfields, skip ILK_FBC_RT_BASE
write on SNB+, kill sandybridge_blit_fbc_update(), reorganize
code to make future ILK FBC RT LRI support easier
Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_context.c | 7 ++++
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28 ++++++++++++++
drivers/gpu/drm/i915/intel_display.c | 17 ++++++++-
drivers/gpu/drm/i915/intel_drv.h | 1 +
drivers/gpu/drm/i915/intel_pm.c | 26 +------------
drivers/gpu/drm/i915/intel_ringbuffer.c | 59 +++++++++++++++++++++++++++++-
drivers/gpu/drm/i915/intel_ringbuffer.h | 6 ++-
7 files changed, 115 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4187704..44a1c62 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -402,6 +402,13 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
+ /*
+ * FBC RT address is stored in the context, so we may have just
+ * restored it to an old value. Make sure we emit a new LRI
+ * to update the address.
+ */
+ ring->fbc_address_dirty = true;
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3c90dd1..8db3d87 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -889,6 +889,32 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
}
static void
+i915_gem_execbuffer_mark_fbc_dirty(struct intel_ring_buffer *ring,
+ struct list_head *vmas)
+{
+ struct i915_vma *vma;
+ u32 fbc_address = -1;
+
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (obj->base.pending_write_domain &&
+ intel_fb_obj_has_fbc(obj)) {
+ WARN_ON(fbc_address != -1 &&
+ fbc_address != i915_gem_obj_ggtt_offset(obj));
+ fbc_address = i915_gem_obj_ggtt_offset(obj);
+ }
+ }
+
+ /* need to nuke/cache_clean on IVB+? */
+ ring->fbc_dirty |= fbc_address != -1;
+
+ /* need to update FBC tracking? */
+ ring->fbc_address_dirty |= fbc_address != ring->fbc_address;
+ ring->fbc_address = fbc_address;
+}
+
+static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
@@ -1153,6 +1179,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+ i915_gem_execbuffer_mark_fbc_dirty(ring, &eb->vmas);
+
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7e7348f..1400df2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8157,6 +8157,21 @@ void intel_mark_idle(struct drm_device *dev)
gen6_rps_idle(dev->dev_private);
}
+bool intel_fb_obj_has_fbc(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* check for potential scanout */
+ if (!obj->pin_display)
+ return false;
+
+ if (!dev_priv->fbc.fb)
+ return false;
+
+ return to_intel_framebuffer(dev_priv->fbc.fb)->obj == obj;
+}
+
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -8174,8 +8189,6 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
continue;
intel_increase_pllclock(crtc);
- if (ring && intel_fbc_enabled(dev))
- ring->fbc_dirty = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0231281..119bb95 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -625,6 +625,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
/* intel_display.c */
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
+bool intel_fb_obj_has_fbc(struct drm_i915_gem_object *obj);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
void intel_mark_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed35c35..5619898 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -187,26 +187,6 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
-}
-
static void ironlake_enable_fbc(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
unsigned long interval)
@@ -234,7 +214,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc,
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+ if (IS_GEN5(dev))
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -242,7 +223,6 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc,
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
}
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
@@ -299,8 +279,6 @@ static void gen7_enable_fbc(struct drm_crtc *crtc,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
-
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 69589e4..426d868 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -51,6 +51,57 @@ void __intel_ring_advance(struct intel_ring_buffer *ring)
ring->write_tail(ring, ring->tail);
}
+static int gen6_render_fbc_tracking(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->fbc_address_dirty)
+ return 0;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, ILK_FBC_RT_BASE);
+ if (ring->fbc_address != -1)
+ intel_ring_emit(ring, ring->fbc_address |
+ SNB_FBC_FRONT_BUFFER | ILK_FBC_RT_VALID);
+ else
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ ring->fbc_address_dirty = false;
+
+ return 0;
+}
+
+static int gen6_blt_fbc_tracking(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->fbc_address_dirty)
+ return 0;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN6_BLITTER_ECOSKPD);
+ if (ring->fbc_address != -1)
+ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_BLITTER_FBC_NOTIFY));
+ else
+ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_BLITTER_FBC_NOTIFY));
+ intel_ring_advance(ring);
+
+ ring->fbc_address_dirty = false;
+
+ return 0;
+}
+
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -256,6 +307,9 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
+ if (invalidate_domains)
+ return gen6_render_fbc_tracking(ring);
+
return 0;
}
@@ -298,6 +352,7 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
intel_ring_advance(ring);
ring->fbc_dirty = false;
+
return 0;
}
@@ -1833,7 +1888,9 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
intel_ring_advance(ring);
- if (IS_GEN7(dev) && !invalidate && flush)
+ if (invalidate)
+ return gen6_blt_fbc_tracking(ring);
+ else if (flush && IS_GEN7(dev))
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4..e19d7d3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -143,8 +143,10 @@ struct intel_ring_buffer {
*/
struct drm_i915_gem_request *preallocated_lazy_request;
u32 outstanding_lazy_seqno;
- bool gpu_caches_dirty;
- bool fbc_dirty;
+ u32 fbc_address;
+ bool gpu_caches_dirty:1;
+ bool fbc_dirty:1;
+ bool fbc_address_dirty:1;
wait_queue_head_t irq_queue;
--
1.8.3.2