[Intel-gfx] [PATCH 11/38] drm/i915: Make fb_tracking.lock a spinlock

Chris Wilson chris at chris-wilson.co.uk
Fri Jun 3 16:55:26 UTC 2016


We only need a very lightweight mechanism here, as the lock merely
coordinates updates to a bitfield.

Also double-check that the object is still pinned to the display plane
before processing the state change.

v2: Move the cheap unlikely tests into the caller
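
For reference (not part of the patch itself), a minimal standalone sketch
of the v2 pattern: the cheap, unlocked tests sit in a static inline
wrapper so most callers never take the spinlock, and only the out-of-line
slow path touches the bitfield under the lock. All names here
(foo_tracking, foo_invalidate, __foo_invalidate) are hypothetical, and it
assumes a kernel build context for <linux/spinlock.h>.

#include <linux/spinlock.h>

struct foo_tracking {
	spinlock_t lock;	/* protects busy_bits and flip_bits */
	unsigned int busy_bits;
	unsigned int flip_bits;
};

/* Out-of-line slow path: the only place the lock is taken. */
void __foo_invalidate(struct foo_tracking *t, unsigned int bits)
{
	spin_lock(&t->lock);
	t->busy_bits |= bits;
	t->flip_bits &= ~bits;
	spin_unlock(&t->lock);
}

/*
 * Inline fast path: bail out before taking the lock if there is
 * nothing to track or the object is no longer on a display plane.
 */
static inline void foo_invalidate(struct foo_tracking *t,
				  unsigned int bits, bool pinned)
{
	if (!bits || !pinned)
		return;

	__foo_invalidate(t, bits);
}

In the actual patch the equivalent wrappers live in intel_drv.h and check
obj->frontbuffer_bits and obj->pin_display before calling
__intel_fb_obj_invalidate() / __intel_fb_obj_flush().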

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h          |  2 +-
 drivers/gpu/drm/i915/i915_gem.c          |  2 +-
 drivers/gpu/drm/i915/intel_drv.h         | 29 ++++++++++++++---
 drivers/gpu/drm/i915/intel_frontbuffer.c | 54 ++++++++++++++------------------
 4 files changed, 51 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 861d132b2fe4..59846de3b33d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1650,7 +1650,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 53776a071ce7..522f379c8d44 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4180,7 +4180,7 @@ i915_gem_load_init(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	mutex_init(&dev_priv->fb_tracking.lock);
+	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a29618dc7e98..9410767c97da 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1106,8 +1106,6 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -1118,8 +1116,31 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin);
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+					   enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits || !obj->pin_display)
+		return;
+
+	__intel_fb_obj_invalidate(obj, origin);
+}
+
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin);
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+				      bool retire,
+				      enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits || !obj->pin_display)
+		return;
+
+	__intel_fb_obj_flush(obj, retire, origin);
+}
+
 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 			      uint64_t fb_modifier, uint32_t pixel_format);
 
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index ac85357010b4..a38ccfe4894a 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -76,24 +76,19 @@
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	if (origin == ORIGIN_CS) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
@@ -120,11 +115,11 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	if (!frontbuffer_bits)
+	if (frontbuffer_bits == 0)
 		return;
 
 	intel_edp_drrs_flush(dev, frontbuffer_bits);
@@ -142,8 +137,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
  * completed and frontbuffer caching can be started again. If @retire is true
  * then any delayed flushes will be unblocked.
  */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -151,21 +147,18 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
 		/* Filter out new bits since rendering started. */
 		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
 		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -185,11 +178,11 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
@@ -209,13 +202,14 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
@@ -234,10 +228,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
-- 
2.8.1


