[Intel-gfx] [PATCH] drm/i915: Fix frontbuffer tracking for PSR

Chris Wilson chris at chris-wilson.co.uk
Sat Jun 14 08:00:52 CEST 2014


This should enable accurate frontbuffer tracking and keep PSR disabled
whilst (and only whilst) the scanout is being directly accessed, either
by the CPU or by the GPU. The important difference is that the tracking
is now accurate and the re-enable worker checks whether the fb is still
being written to before restoring PSR. This should allow legacy display
servers to use PSR effectively, without breaking the special cases where
the scanout is written to directly and never flushed (for example after
the display server has been banned from the GPU). It also has the
side-effect of working correctly with fbcon, by disabling PSR whilst
fbcon is active.
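
In outline, the lifecycle looks roughly like this (a sketch only, not
literal driver code; the real call sites are spread across the hunks
below, and the helper here does not exist in the driver):

/*
 * Illustrative summary of the tracking introduced by this patch: it
 * just strings the new calls together to show the lifecycle of a
 * single frontbuffer write.
 */
static void frontbuffer_write_lifecycle(struct drm_i915_gem_object *obj,
					struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;

	/*
	 * 1. A CPU or GPU write lands on a pinned scanout object
	 *    (set-domain, sw-finish, a GPU write to the fb): PSR is
	 *    exited via intel_edp_psr_update() and, for GPU writes,
	 *    the seqno is stashed in ring->framebuffer_write_seqno.
	 */
	intel_mark_fb_busy(obj, ring);

	/*
	 * 2. When the write completes (the seqno retires, the write
	 *    domain is flushed, or the object is unpinned from the
	 *    display plane), the frontbuffer is marked idle again.
	 */
	intel_mark_fb_idle(dev);

	/*
	 * 3. intel_edp_psr_update() (re)schedules psr.work; the worker
	 *    re-enables PSR only once psr_framebuffer_busy() sees no
	 *    pending write to any scanout, otherwise it rechecks after
	 *    100ms.
	 */
}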

---
 drivers/gpu/drm/i915/i915_gem.c         |  24 +++++--
 drivers/gpu/drm/i915/intel_display.c    |  29 ++++++---
 drivers/gpu/drm/i915/intel_dp.c         | 111 +++++++++++++++++---------------
 drivers/gpu/drm/i915/intel_drv.h        |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h |   1 +
 5 files changed, 97 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef7183e219ed..a0f345eb515b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1636,8 +1636,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}
 
-	intel_edp_psr_exit(dev, true);
-
 	/* Try to flush the object off the GPU without holding the lock.
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
@@ -1683,8 +1681,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	intel_edp_psr_exit(dev, true);
-
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
@@ -2823,6 +2819,7 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	uint32_t seqno;
 
 	if (list_empty(&ring->request_list))
@@ -2877,6 +2874,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		ring->trace_irq_seqno = 0;
 	}
 
+	if (ring->framebuffer_write_seqno &&
+	    i915_seqno_passed(seqno, ring->framebuffer_write_seqno)) {
+		ring->framebuffer_write_seqno = 0;
+		intel_mark_fb_idle(dev_priv->dev);
+	}
+
 	WARN_ON(i915_verify_lists(ring->dev));
 }
 
@@ -3808,6 +3811,9 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
 					    old_write_domain);
+
+	if (obj->pin_display)
+		intel_mark_fb_idle(obj->base.dev);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
@@ -3829,6 +3835,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
 					    old_write_domain);
+
+	if (obj->pin_display)
+		intel_mark_fb_idle(obj->base.dev);
 }
 
 /**
@@ -3892,6 +3901,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	}
 
+	if (obj->pin_display && write)
+		intel_mark_fb_busy(obj, NULL);
+
 	return 0;
 }
 
@@ -4151,6 +4163,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_ggtt_unpin(obj);
 	obj->pin_display = is_pin_display(obj);
+
+	intel_mark_fb_idle(obj->base.dev);
 }
 
 int
@@ -4493,8 +4507,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	intel_edp_psr_exit(dev, true);
-
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cb83ecdc536b..e1c044d81108 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8790,6 +8790,9 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 	if (!dev_priv->lvds_downclock_avail)
 		return;
 
+	if (!i915.powersave)
+		return;
+
 	/*
 	 * Since this is called by a timer, we should never get here in
 	 * the manual case.
@@ -8811,7 +8814,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
 	}
-
 }
 
 void intel_mark_busy(struct drm_device *dev)
@@ -8853,17 +8855,12 @@ out:
 	intel_runtime_pm_put(dev_priv);
 }
 
-
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
 			struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_crtc *crtc;
-
-	intel_edp_psr_exit(dev, true);
-
-	if (!i915.powersave)
-		return;
+	bool found = false;
 
 	for_each_crtc(dev, crtc) {
 		if (!crtc->primary->fb)
@@ -8872,10 +8869,25 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
 		if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
 			continue;
 
+		found = true;
+
 		intel_increase_pllclock(crtc);
 		if (ring && intel_fbc_enabled(dev))
 			ring->fbc_dirty = true;
 	}
+
+	if (!found)
+		return;
+
+	intel_edp_psr_update(dev);
+
+	if (ring)
+		ring->framebuffer_write_seqno = intel_ring_get_seqno(ring);
+}
+
+void intel_mark_fb_idle(struct drm_device *dev)
+{
+	intel_edp_psr_update(dev);
 }
 
 static void intel_crtc_destroy(struct drm_crtc *crtc)
@@ -9528,9 +9540,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	/* Exit PSR early in page flip */
-	intel_edp_psr_exit(dev, true);
-
 	work->event = event;
 	work->crtc = crtc;
 	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51c0799ea83d..95b40d13766d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1748,6 +1748,32 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 		   EDP_PSR_ENABLE);
 }
 
+static bool psr_framebuffer_busy(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	for_each_crtc(dev, crtc) {
+		struct drm_i915_gem_object *obj;
+
+		if (!crtc->primary->fb)
+			continue;
+
+		obj = to_intel_framebuffer(crtc->primary->fb)->obj;
+		if (obj->base.write_domain)
+			return true;
+
+		if (obj->last_write_seqno &&
+		    i915_seqno_passed(obj->ring->get_seqno(obj->ring, true),
+				      obj->last_write_seqno))
+			obj->last_write_seqno = 0;
+
+		if (obj->last_write_seqno)
+			return true;
+	}
+
+	return false;
+}
+
 static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1760,22 +1786,13 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 
 	dev_priv->psr.source_ok = false;
 
-	if (!HAS_PSR(dev)) {
-		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		return false;
-	}
-
-	if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP ||
-				dig_port->port != PORT_A)) {
+	if (IS_HASWELL(dev) &&
+	    (intel_encoder->type != INTEL_OUTPUT_EDP ||
+	     dig_port->port != PORT_A)) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
 		return false;
 	}
 
-	if (!i915.enable_psr) {
-		DRM_DEBUG_KMS("PSR disable by flag\n");
-		return false;
-	}
-
 	crtc = dig_port->base.base.crtc;
 	if (crtc == NULL) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
@@ -1832,7 +1849,7 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (intel_edp_is_psr_enabled(dev))
+	if (dev_priv->psr.enabled)
 		return;
 
 	/* Enable PSR on the panel */
@@ -1887,39 +1904,7 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
 	dev_priv->psr.enabled = false;
 }
 
-void intel_edp_psr_update(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_PSR(dev))
-		return;
-
-	if (!dev_priv->psr.setup_done)
-		return;
-
-	intel_edp_psr_exit(dev, true);
-}
-
-void intel_edp_psr_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.work.work);
-	struct drm_device *dev = dev_priv->dev;
-	struct intel_encoder *encoder;
-	struct intel_dp *intel_dp = NULL;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
-		if (encoder->type == INTEL_OUTPUT_EDP) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
-
-			if (!intel_edp_psr_match_conditions(intel_dp))
-				intel_edp_psr_disable(intel_dp);
-			else
-				intel_edp_psr_do_enable(intel_dp);
-		}
-}
-
-void intel_edp_psr_inactivate(struct drm_device *dev)
+static void intel_edp_psr_inactivate(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_connector *connector;
@@ -1942,20 +1927,41 @@ void intel_edp_psr_inactivate(struct drm_device *dev)
 			intel_edp_set_psr_property(connector, 0);
 			dev_priv->psr.active = false;
 
-			I915_WRITE(EDP_PSR_CTL(dev), I915_READ(EDP_PSR_CTL(dev))
-				   & ~EDP_PSR_ENABLE);
+			I915_WRITE(EDP_PSR_CTL(dev),
+				   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 		}
 	}
 }
 
-void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back)
+static void intel_edp_psr_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), psr.work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_encoder *encoder;
+
+	if (dev_priv->psr.active)
+		return;
+
+	if (psr_framebuffer_busy(dev)) {
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(100));
+		return;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+		if (encoder->type == INTEL_OUTPUT_EDP)
+			intel_edp_psr_enable(enc_to_intel_dp(&encoder->base));
+}
+
+void intel_edp_psr_update(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (!HAS_PSR(dev))
 		return;
 
-	if (!dev_priv->psr.setup_done)
+	if (!dev_priv->psr.enabled)
 		return;
 
 	cancel_delayed_work_sync(&dev_priv->psr.work);
@@ -1963,9 +1969,8 @@ void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back)
 	if (dev_priv->psr.active)
 		intel_edp_psr_inactivate(dev);
 
-	if (schedule_back)
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
+	schedule_delayed_work(&dev_priv->psr.work,
+			      msecs_to_jiffies(psr_framebuffer_busy(dev) ? 100 : 10));
 }
 
 void intel_edp_psr_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9c8524016e4b..bc66ed0a4b51 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -731,6 +731,7 @@ int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
 			struct intel_engine_cs *ring);
+void intel_mark_fb_idle(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -842,7 +843,6 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp);
 void intel_edp_psr_disable(struct intel_dp *intel_dp);
 void intel_edp_psr_update(struct drm_device *dev);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back);
 void intel_edp_psr_init(struct drm_device *dev);
 
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 31c2aab8ad2c..01387ab1a4c0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -169,6 +169,7 @@ struct  intel_engine_cs {
 	 */
 	struct drm_i915_gem_request *preallocated_lazy_request;
 	u32 outstanding_lazy_seqno;
+	u32 framebuffer_write_seqno;
 	bool gpu_caches_dirty;
 	bool fbc_dirty;
 
-- 
2.0.0