[Intel-gfx] [PATCH 4/4] drm/i915/gtt: Per ppgtt scratch page

Mika Kuoppala mika.kuoppala at linux.intel.com
Tue Jun 30 08:16:40 PDT 2015


Previously the scratch page that the individual ppgtt scratch structures
point to has been the one allocated during GGTT setup, so it has been
shared between all ppgtts.

To achieve full isolation between ppgtts also in this regard, allocate
a scratch page per ppgtt.

Cc: Michel Thierry <michel.thierry at intel.com>
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 94 +++++++++++++++++++++++++++++--------
 1 file changed, 74 insertions(+), 20 deletions(-)
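
The ownership model this change moves to can be sketched with a tiny
standalone program. struct scratch_page, struct address_space,
init_scratch() and free_scratch() below are simplified stand-ins, not
the real i915 structures or helpers; they only illustrate that each
address space now allocates and frees its own scratch page instead of
aliasing the one owned by the GGTT.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct scratch_page {
	void *vaddr;
};

struct address_space {
	struct scratch_page *scratch_page;	/* owned by this vm */
};

/* Each address space allocates its own scratch page ... */
static int init_scratch(struct address_space *vm)
{
	vm->scratch_page = calloc(1, sizeof(*vm->scratch_page));
	if (!vm->scratch_page)
		return -ENOMEM;

	vm->scratch_page->vaddr = calloc(1, 4096);
	if (!vm->scratch_page->vaddr) {
		free(vm->scratch_page);
		vm->scratch_page = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* ... and releases it on teardown, mirroring gen8/gen6_free_scratch(). */
static void free_scratch(struct address_space *vm)
{
	if (!vm->scratch_page)
		return;

	free(vm->scratch_page->vaddr);
	free(vm->scratch_page);
	vm->scratch_page = NULL;
}

int main(void)
{
	struct address_space ggtt = {0}, ppgtt = {0};

	/*
	 * Before: ppgtt.scratch_page = ggtt.scratch_page (shared).
	 * After: each vm initializes and tears down its own scratch state.
	 */
	if (init_scratch(&ggtt) || init_scratch(&ppgtt))
		return 1;

	printf("ggtt scratch %p, ppgtt scratch %p (distinct)\n",
	       (void *)ggtt.scratch_page, (void *)ppgtt.scratch_page);

	free_scratch(&ppgtt);
	free_scratch(&ggtt);
	return 0;
}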

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 402d6d3..b1a8fc4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -682,6 +682,42 @@ static void gen8_free_page_tables(struct drm_device *dev,
 	}
 }
 
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	vm->scratch_pd = alloc_pd(dev);
+	if (IS_ERR(vm->scratch_pd)) {
+		free_pt(dev, vm->scratch_pt);
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pd);
+	}
+
+	gen8_initialize_pt(vm, vm->scratch_pt);
+	gen8_initialize_pd(vm, vm->scratch_pd);
+
+	return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pd(dev, vm->scratch_pd);
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -697,8 +733,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
 	}
 
-	free_pd(vm->dev, vm->scratch_pd);
-	free_pt(vm->dev, vm->scratch_pt);
+	gen8_free_scratch(vm);
 }
 
 /**
@@ -985,16 +1020,11 @@ err_out:
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->base.scratch_pt))
-		return PTR_ERR(ppgtt->base.scratch_pt);
-
-	ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->base.scratch_pd))
-		return PTR_ERR(ppgtt->base.scratch_pd);
+	int ret;
 
-	gen8_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
-	gen8_initialize_pd(&ppgtt->base, ppgtt->base.scratch_pd);
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret)
+		return ret;
 
 	ppgtt->base.start = 0;
 	ppgtt->base.total = 1ULL << 32;
@@ -1410,6 +1440,33 @@ unwind_out:
 	return ret;
 }
 
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	gen6_initialize_pt(vm, vm->scratch_pt);
+
+	return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -1424,11 +1481,12 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 			free_pt(ppgtt->base.dev, pt);
 	}
 
-	free_pt(vm->dev, vm->scratch_pt);
+	gen6_free_scratch(vm);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
+	struct i915_address_space *vm = &ppgtt->base;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool retried = false;
@@ -1439,11 +1497,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
 	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
-	ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->base.scratch_pt))
-		return PTR_ERR(ppgtt->base.scratch_pt);
 
-	gen6_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
+	ret = gen6_init_scratch(vm);
+	if (ret)
+		return ret;
 
 alloc:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1474,7 +1531,7 @@ alloc:
 	return 0;
 
 err_out:
-	free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
+	gen6_free_scratch(vm);
 	return ret;
 }
 
@@ -1548,10 +1605,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	ppgtt->base.dev = dev;
-	ppgtt->base.scratch_page = dev_priv->gtt.base.scratch_page;
 
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
-- 
2.1.4


