[Intel-gfx] [PATCH 08/16] drm/i915: avoid the last 8MB of stolen on BDW/SKL

Paulo Zanoni <paulo.r.zanoni@intel.com>
Fri Aug 14 14:34:13 PDT 2015


The FBC hardware on these platforms has no visibility into the size of
the BIOS-reserved portion of stolen memory, so it always assumes the
maximum (8MB) is in use. Avoid that range when allocating the CFB.

This fixes a bunch of FIFO underruns that happen when the CFB ends up
in that memory range. On my machine, with 32MB of stolen memory, a
2560x1440 mode is enough to trigger them.
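
For a rough idea of the numbers involved (approximate; this assumes a
4 bytes-per-pixel plane and compression threshold 1):

    CFB size for 2560x1440 @ 32bpp: 2560 * 1440 * 4 bytes ~= 14MB
                                    (first, doubled attempt ~= 28MB)
    Range usable by BDW/SKL FBC:    32MB stolen - 8MB       =  24MB

The doubled first attempt crosses the 24MB mark whenever it fits at all,
and even the single ~14MB allocation can cross it once anything else is
already sitting in stolen memory.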

Testcase: igt/kms_frontbuffer_tracking/fbc-* (given the right setup)
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h        |  4 ++++
 drivers/gpu/drm/i915/i915_gem_gtt.h    |  1 +
 drivers/gpu/drm/i915/i915_gem_stolen.c | 26 +++++++++++++++++++-------
 drivers/gpu/drm/i915/intel_fbc.c       | 16 ++++++++++++++--
 4 files changed, 38 insertions(+), 9 deletions(-)
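
Not part of the patch itself, just to illustrate the new helper's
signature: a caller elsewhere in the driver that wanted to keep an
allocation below some limit could do something like the sketch below
(the 16MB limit and the surrounding variables are made up for the
example):

	struct drm_mm_node node;
	int err;

	/* hypothetical: keep this allocation in the first 16MB of stolen */
	err = i915_gem_stolen_insert_node_in_range(dev_priv, &node, size,
						   4096, 0, 16 * 1024 * 1024);
	if (err)
		return err;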

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e74a844..f44d101 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3159,6 +3159,10 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 				struct drm_mm_node *node, u64 size,
 				unsigned alignment);
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+					 struct drm_mm_node *node, u64 size,
+					 unsigned alignment, u64 start,
+					 u64 end);
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 				 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8275007..96ebb98 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -341,6 +341,7 @@ struct i915_gtt {
 	struct i915_address_space base;
 
 	size_t stolen_size;		/* Total size of stolen memory */
+	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a36cb95..824334d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,9 +42,9 @@
  * for is a boon.
  */
 
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
-				struct drm_mm_node *node, u64 size,
-				unsigned alignment)
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+					 struct drm_mm_node *node, u64 size,
+					 unsigned alignment, u64 start, u64 end)
 {
 	int ret;
 
@@ -52,13 +52,23 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 		return -ENODEV;
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
-				 DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
+					  alignment, start, end,
+					  DRM_MM_SEARCH_DEFAULT);
 	mutex_unlock(&dev_priv->mm.stolen_lock);
 
 	return ret;
 }
 
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment)
+{
+	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+					alignment, 0,
+					dev_priv->gtt.stolen_usable_size);
+}
+
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 				 struct drm_mm_node *node)
 {
@@ -352,9 +362,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
 		      dev_priv->gtt.stolen_size >> 10,
 		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
+	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+					   reserved_total;
+
 	/* Basic memrange allocator for stolen space */
-	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
-		    reserved_total);
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 28e569c..9fd7b74 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -570,6 +570,16 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 {
 	int compression_threshold = 1;
 	int ret;
+	u64 end;
+
+	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
+	 * reserved range size, so it always assumes the maximum (8mb) is used.
+	 * If we enable FBC using a CFB on that memory range we'll get FIFO
+	 * underruns, even if that range is not reserved by the BIOS. */
+	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+	else
+		end = dev_priv->gtt.stolen_usable_size;
 
 	/* HACK: This code depends on what we will do in *_enable_fbc. If that
 	 * code changes, this code needs to change as well.
@@ -579,7 +589,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	 */
 
 	/* Try to over-allocate to reduce reallocations and fragmentation. */
-	ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+						   4096, 0, end);
 	if (ret == 0)
 		return compression_threshold;
 
@@ -589,7 +600,8 @@ again:
 	    (fb_cpp == 2 && compression_threshold == 2))
 		return 0;
 
-	ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+						   4096, 0, end);
 	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
 		return 0;
 	} else if (ret) {
-- 
2.4.6