[PATCH 5/6] drm/i915/gtt: Generalize alloc_pd

Mika Kuoppala <mika.kuoppala@linux.intel.com>
Tue Jun 4 15:05:00 UTC 2019


Allocate all page directory variants with alloc_pd and release them with
free_pd, removing the dedicated alloc_pdp()/free_pdp() helpers. free_pd()
now checks is_full_pdp() to decide whether there is a backing page to
clean up.

v2: zero the used counter
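
For illustration only (not part of the patch; the toy_* names below are
made up): a minimal, self-contained sketch of the pattern this change
converges on, where one alloc/free pair serves both the plain
entry-array variant and the variant with a backing page, and free
decides what to release by checking whether the backing page is
present, much as free_pd() now does via is_full_pdp().

#include <stdio.h>
#include <stdlib.h>

struct toy_pd {
	void *page;     /* backing page; NULL for the plain entry-array variant */
	void *entry[4]; /* child pointers */
};

static struct toy_pd *toy_alloc_pd(int with_page)
{
	struct toy_pd *pd = calloc(1, sizeof(*pd));

	if (!pd)
		return NULL;

	if (with_page) {
		pd->page = calloc(1, 4096);
		if (!pd->page) {
			free(pd);
			return NULL;
		}
	}

	return pd;
}

static void toy_free_pd(struct toy_pd *pd)
{
	/* Only the "full" variant owns a backing page to release. */
	if (pd->page)
		free(pd->page);

	free(pd);
}

int main(void)
{
	struct toy_pd *full = toy_alloc_pd(1);
	struct toy_pd *plain = toy_alloc_pd(0);

	if (!full || !plain)
		return 1;

	printf("full backed: %s, plain backed: %s\n",
	       full->page ? "yes" : "no", plain->page ? "yes" : "no");

	toy_free_pd(plain);
	toy_free_pd(full);
	return 0;
}

The point of the pattern is that callers no longer need to know which
variant they hold when freeing it.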

Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 98 ++++++++++++-----------------
 1 file changed, 41 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cd5d1a2e47e9..a839b1dbd2dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -706,10 +706,17 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 	return pd;
 }
 
+static inline bool is_full_pdp(const struct i915_page_directory * const pdp)
+{
+	return pdp->base.page;
+}
+
 static void free_pd(struct i915_address_space *vm,
 		    struct i915_page_directory *pd)
 {
-	cleanup_px(vm, pd);
+	if (likely(is_full_pdp(pd)))
+		cleanup_px(vm, pd);
+
 	kfree(pd);
 }
 
@@ -721,38 +728,12 @@ static void init_pd_with_page(struct i915_address_space *vm,
 	memset_p((void **)pd->entry, pt, 512);
 }
 
-static struct i915_page_directory *
-alloc_pdp(struct i915_address_space *vm)
-{
-	struct i915_page_directory *pdp;
-
-	pdp = __alloc_pd(i915_pdpes_per_pdp(vm));
-	if (!pdp)
-		return ERR_PTR(-ENOMEM);
-
-	if (i915_vm_is_4lvl(vm)) {
-		if (unlikely(setup_px(vm, pdp))) {
-			kfree(pdp);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-
-	return pdp;
-}
-
-static void free_pdp(struct i915_address_space *vm,
-		     struct i915_page_directory *pdp)
-{
-	if (i915_vm_is_4lvl(vm))
-		cleanup_px(vm, pdp);
-
-	kfree(pdp);
-}
-
 static void init_pd(struct i915_address_space *vm,
 		    struct i915_page_directory * const pd,
 		    struct i915_page_directory * const to)
 {
+	GEM_DEBUG_BUG_ON(!is_full_pdp(pd));
+
 	fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
 	memset_p((void **)pd->entry, to, 512);
 }
@@ -828,15 +809,14 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 	return !pd->used;
 }
 
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
-				struct i915_page_directory *pdp,
+static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
 				struct i915_page_directory *pd,
 				unsigned int pdpe)
 {
 	gen8_ppgtt_pdpe_t *vaddr;
 
 	pdp->entry[pdpe] = pd;
-	if (!i915_vm_is_4lvl(vm))
+	if (!is_full_pdp(pdp))
 		return;
 
 	vaddr = kmap_atomic_px(pdp);
@@ -860,7 +840,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
 			continue;
 
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
 		GEM_BUG_ON(!pdp->used);
 		pdp->used--;
 
@@ -911,7 +891,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 
 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
 
-		free_pdp(vm, pdp);
+		free_pd(vm, pdp);
 	}
 }
 
@@ -1213,7 +1193,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	}
 
 	if (i915_vm_is_4lvl(vm)) {
-		vm->scratch_pdp = alloc_pdp(vm);
+		vm->scratch_pdp = alloc_pd(vm);
 		if (IS_ERR(vm->scratch_pdp)) {
 			ret = PTR_ERR(vm->scratch_pdp);
 			goto free_pd;
@@ -1275,7 +1255,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 		return;
 
 	if (i915_vm_is_4lvl(vm))
-		free_pdp(vm, vm->scratch_pdp);
+		free_pd(vm, vm->scratch_pdp);
 	free_pd(vm, vm->scratch_pd);
 	free_pt(vm, vm->scratch_pt);
 	cleanup_scratch_page(vm);
@@ -1295,7 +1275,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
 		free_pd(vm, pdp->entry[i]);
 	}
 
-	free_pdp(vm, pdp);
+	free_pd(vm, pdp);
 }
 
 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
@@ -1386,7 +1366,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 			}
 
 			init_pd_with_page(vm, pd, vm->scratch_pt);
-			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+			gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
 			GEM_BUG_ON(pdp->used > i915_pdpes_per_pdp(vm));
 		}
 
@@ -1399,7 +1379,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 
 unwind_pd:
 	if (!pd->used) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
 		GEM_BUG_ON(!pdp->used);
 		pdp->used--;
 		free_pd(vm, pd);
@@ -1428,7 +1408,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		if (i915_pdp_entry(pml4, pml4e) == vm->scratch_pdp) {
-			pdp = alloc_pdp(vm);
+			pdp = alloc_pd(vm);
 			if (IS_ERR(pdp))
 				goto unwind;
 
@@ -1446,7 +1426,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 unwind_pdp:
 	if (!pdp->used) {
 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-		free_pdp(vm, pdp);
+		free_pd(vm, pdp);
 	}
 unwind:
 	gen8_ppgtt_clear_4lvl(vm, from, start - from);
@@ -1468,7 +1448,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 			goto unwind;
 
 		init_pd_with_page(vm, pd, vm->scratch_pt);
-		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
 		pdp->used++;
 	}
 
@@ -1478,7 +1458,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 unwind:
 	start -= from;
 	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
 		free_pd(vm, pd);
 	}
 	pdp->used = 0;
@@ -1512,7 +1492,7 @@ static void ppgtt_init(struct drm_i915_private *i915,
 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 {
 	struct i915_hw_ppgtt *ppgtt;
-	int err;
+	int ret;
 
 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
 	if (!ppgtt)
@@ -1534,17 +1514,21 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
 		ppgtt->vm.pt_kmap_wc = true;
 
-	err = gen8_init_scratch(&ppgtt->vm);
-	if (err)
+	ret = gen8_init_scratch(&ppgtt->vm);
+	if (ret)
 		goto err_free;
 
-	ppgtt->pd = alloc_pdp(&ppgtt->vm);
-	if (IS_ERR(ppgtt->pd)) {
-		err = PTR_ERR(ppgtt->pd);
-		goto err_scratch;
+	ppgtt->pd = __alloc_pd(i915_pdpes_per_pdp(&ppgtt->vm));
+	if (!ppgtt->pd) {
+		ret = -ENOMEM;
+		goto err_free_scratch;
 	}
 
 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		ret = setup_px(&ppgtt->vm, ppgtt->pd);
+		if (ret)
+			goto err_free_pdp;
+
 		init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
 
 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
@@ -1559,9 +1543,9 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 			 &ppgtt->vm.scratch_pd, GEN8_3LVL_PDPES);
 
 		if (intel_vgpu_active(i915)) {
-			err = gen8_preallocate_top_level_pdp(ppgtt);
-			if (err)
-				goto err_pdp;
+			ret = gen8_preallocate_top_level_pdp(ppgtt);
+			if (ret)
+				goto err_free_pdp;
 		}
 
 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1576,13 +1560,13 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 
 	return ppgtt;
 
-err_pdp:
-	free_pdp(&ppgtt->vm, ppgtt->pd);
-err_scratch:
+err_free_pdp:
+	free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_scratch:
 	gen8_free_scratch(&ppgtt->vm);
 err_free:
 	kfree(ppgtt);
-	return ERR_PTR(err);
+	return ERR_PTR(ret);
 }
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
-- 
2.17.1


