[Intel-gfx] [PATCH 4/8] drm/i915/gtt: Don't use temp bitmaps to unwind gen8_alloc_va_range
Michał Winiarski
michal.winiarski at intel.com
Mon Dec 12 11:44:13 UTC 2016
Rather than tracking newly allocated page tables and directories in
temporary bitmaps so that they can be freed on failure, we can unwind
by operating directly on VA ranges, reusing the range-based cleanup
functions (gen8_ppgtt_clear_pd/pdp/pml4) introduced with ppgtt
shrinking.
Cc: Arkadiusz Hiler <arkadiusz.hiler at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Michel Thierry <michel.thierry at intel.com>
Cc: Mika Kuoppala <mika.kuoppala at intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski at intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 82 ++++++++++++++-----------------------
1 file changed, 30 insertions(+), 52 deletions(-)
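A note for reviewers on the pattern used throughout this patch,
sketched as a standalone fragment (simplified from
gen8_ppgtt_alloc_pagetabs() below; the "don't reallocate" check and
the loop body are elided, so treat it as illustrative rather than
compilable in isolation): each allocation loop remembers the offset it
started from, and on failure hands the already-walked range back to
the range-based clear helper instead of replaying a temp bitmap.

	const uint64_t start_save = start;	/* remember where we began */

	gen8_for_each_pde(pt, pd, start, length, pde) {
		pt = alloc_pt(dev_priv);
		if (IS_ERR(pt)) {
			/*
			 * Everything in [start_save, start) was populated
			 * by earlier iterations, so a single range-based
			 * clear replaces the old temp-bitmap replay.
			 */
			gen8_ppgtt_clear_pd(vm, pd, start_save,
					    start - start_save);
			return PTR_ERR(pt);
		}
		/* ... initialize and insert pt ... */
	}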
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f760c3e..c6f0708 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1109,6 +1109,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 	struct drm_i915_private *dev_priv = vm->i915;
 	struct i915_page_table *pt;
 	uint32_t pde;
+	const uint64_t start_save = start;
 
 	gen8_for_each_pde(pt, pd, start, length, pde) {
 		/* Don't reallocate page tables */
@@ -1119,8 +1120,11 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 		}
 
 		pt = alloc_pt(dev_priv);
-		if (IS_ERR(pt))
-			goto unwind_out;
+		if (IS_ERR(pt)) {
+			gen8_ppgtt_clear_pd(vm, pd, start_save,
+					    start - start_save);
+			return PTR_ERR(pt);
+		}
 
 		gen8_initialize_pt(vm, pt);
 		pd->page_table[pde] = pt;
@@ -1129,12 +1133,6 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 	}
 
 	return 0;
-
-unwind_out:
-	for_each_set_bit(pde, new_pts, I915_PDES)
-		free_pt(dev_priv, pd->page_table[pde]);
-
-	return -ENOMEM;
 }
 
 /**
@@ -1171,6 +1169,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
 	struct i915_page_directory *pd;
 	uint32_t pdpe;
 	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
+	const uint64_t start_save = start;
 
 	WARN_ON(!bitmap_empty(new_pds, pdpes));
 
@@ -1179,8 +1178,11 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
 			continue;
 
 		pd = alloc_pd(dev_priv);
-		if (IS_ERR(pd))
-			goto unwind_out;
+		if (IS_ERR(pd)) {
+			gen8_ppgtt_clear_pdp(vm, pdp, start_save,
+					     start - start_save);
+			return PTR_ERR(pd);
+		}
 
 		gen8_initialize_pd(vm, pd);
 		pdp->page_directory[pdpe] = pd;
@@ -1189,12 +1191,6 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
 	}
 
 	return 0;
-
-unwind_out:
-	for_each_set_bit(pdpe, new_pds, pdpes)
-		free_pd(dev_priv, pdp->page_directory[pdpe]);
-
-	return -ENOMEM;
 }
 
 /**
@@ -1223,14 +1219,18 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
 	struct drm_i915_private *dev_priv = vm->i915;
 	struct i915_page_directory_pointer *pdp;
 	uint32_t pml4e;
+	const uint64_t start_save = start;
 
 	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		if (!test_bit(pml4e, pml4->used_pml4es)) {
 			pdp = alloc_pdp(dev_priv);
-			if (IS_ERR(pdp))
-				goto unwind_out;
+			if (IS_ERR(pdp)) {
+				gen8_ppgtt_clear_pml4(vm, pml4, start_save,
+						      start - start_save);
+				return PTR_ERR(pdp);
+			}
 
 			gen8_initialize_pdp(vm, pdp);
 			pml4->pdps[pml4e] = pdp;
@@ -1243,12 +1243,6 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
 	}
 
 	return 0;
-
-unwind_out:
-	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
-		free_pdp(dev_priv, pml4->pdps[pml4e]);
-
-	return -ENOMEM;
 }
 
 static void
@@ -1295,7 +1289,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	unsigned long *new_page_dirs, *new_page_tables;
-	struct drm_i915_private *dev_priv = vm->i915;
 	struct i915_page_directory *pd;
 	const uint64_t start_save = start;
 	const uint64_t length_save = length;
@@ -1328,8 +1321,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
 						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
-		if (ret)
-			goto err_out;
+		if (ret) {
+			gen8_ppgtt_clear_pdp(vm, pdp, start_save,
+					     start - start_save);
+			mark_tlbs_dirty(ppgtt);
+			return ret;
+		}
 	}
 
 	start = start_save;
@@ -1381,23 +1378,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
 	mark_tlbs_dirty(ppgtt);
 	return 0;
-
-err_out:
-	while (pdpe--) {
-		unsigned long temp;
-
-		for_each_set_bit(temp, new_page_tables + pdpe *
-				BITS_TO_LONGS(I915_PDES), I915_PDES)
-			free_pt(dev_priv,
-				pdp->page_directory[pdpe]->page_table[temp]);
-	}
-
-	for_each_set_bit(pdpe, new_page_dirs, pdpes)
-		free_pd(dev_priv, pdp->page_directory[pdpe]);
-
-	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
-	mark_tlbs_dirty(ppgtt);
-	return ret;
 }
 
 static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
@@ -1410,6 +1390,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 	struct i915_page_directory_pointer *pdp;
 	uint64_t pml4e;
 	int ret = 0;
+	const uint64_t start_save = start;
 
 	/* Do the pml4 allocations first, so we don't need to track the newly
 	 * allocated tables below the pdp */
@@ -1431,8 +1412,11 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 		WARN_ON(!pdp);
 
 		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
-		if (ret)
-			goto err_out;
+		if (ret) {
+			gen8_ppgtt_clear_pml4(vm, pml4, start_save,
+					      start - start_save);
+			return ret;
+		}
 
 		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
 	}
@@ -1441,12 +1425,6 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 		   GEN8_PML4ES_PER_PML4);
 
 	return 0;
-
-err_out:
-	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
-		gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
-
-	return ret;
 }
 
 static int gen8_alloc_va_range(struct i915_address_space *vm,
--
2.7.4