[PATCH 6/6] drm/i915/gtt: pde entry encoding is identical
Mika Kuoppala
mika.kuoppala at linux.intel.com
Mon Jun 3 12:04:10 UTC 2019
For all page directory entries, the pde encoding is
identical. Don't complicate call sites with different
versions of doing the same thing.
Signed-off-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 106 +++++++++-------------------
drivers/gpu/drm/i915/i915_gem_gtt.h | 3 -
2 files changed, 33 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 38fa0772cff4..5af824ca9df5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -210,10 +210,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
{
- gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ u64 pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE;
@@ -222,9 +222,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
return pde;
}
-#define gen8_pdpe_encode gen8_pde_encode
-#define gen8_pml4e_encode gen8_pde_encode
-
static u64 snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
@@ -703,22 +700,28 @@ static void free_pd(struct i915_address_space *vm,
kfree(pd);
}
-static void init_pd_with_page(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_table *pt)
-{
- fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
- memset_p((void **)pd->entry, pt, 512);
+#define init_pd(vm, pd, to) { \
+ fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \
+ memset_p((void **)(pd)->entry, (to), 512); \
}
-static void init_pd(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_directory * const to)
+static void __set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short pde,
+ void * const to,
+ const u64 encoded_entry)
{
- fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
- memset_p((void **)pd->entry, to, 512);
+ u64 *vaddr;
+
+ vaddr = kmap_atomic(pd->base.page);
+ vaddr[pde] = encoded_entry;
+ kunmap_atomic(vaddr);
+
+ pd->entry[pde] = to;
}
+#define set_pd_entry(pd, pde, to) \
+ __set_pd_entry((pd), (pde), (to), gen8_pde_encode(px_dma(to), I915_CACHE_LLC));
+
/*
* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
@@ -753,20 +756,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
return false;
}
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- struct i915_page_table *pt,
- unsigned int pde)
-{
- gen8_pde_t *vaddr;
-
- pd->entry[pde] = pt;
-
- vaddr = kmap_atomic_px(pd);
- vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
-}
-
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
struct i915_page_directory *pd,
u64 start, u64 length)
@@ -780,7 +769,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
continue;
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
+ set_pd_entry(pd, pde, vm->scratch_pt);
GEM_BUG_ON(!pd->used);
pd->used--;
@@ -790,22 +779,6 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
return !pd->used;
}
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
- struct i915_page_directory *pdp,
- struct i915_page_directory *pd,
- unsigned int pdpe)
-{
- gen8_ppgtt_pdpe_t *vaddr;
-
- pdp->entry[pdpe] = pd;
- if (!i915_vm_is_4lvl(vm))
- return;
-
- vaddr = kmap_atomic_px(pdp);
- vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
-}
-
/* Removes entries from a single page dir pointer, releasing it if it's empty.
* Caller can use the return value to update higher-level entries
*/
@@ -822,7 +795,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
continue;
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ set_pd_entry(pdp, pdpe, vm->scratch_pd);
GEM_BUG_ON(!pdp->used);
pdp->used--;
@@ -838,19 +811,6 @@ static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pd, start, length);
}
-static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
- struct i915_page_directory *pdp,
- unsigned int pml4e)
-{
- gen8_ppgtt_pml4e_t *vaddr;
-
- pml4->entry[pml4e] = pdp;
-
- vaddr = kmap_atomic_px(pml4);
- vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
-}
-
/* Removes entries from a single pml4.
* This is the top-level structure in 4-level page tables used on gen8+.
* Empty entries are always scratch pml4e.
@@ -871,7 +831,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
continue;
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+ set_pd_entry(pml4, pml4e, vm->scratch_pdp);
free_pd(vm, pdp);
}
@@ -1183,7 +1143,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
gen8_initialize_pt(vm, vm->scratch_pt);
- init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
+ init_pd(vm, vm->scratch_pd, vm->scratch_pt);
if (i915_vm_is_4lvl(vm))
init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
@@ -1315,7 +1275,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
gen8_initialize_pt(vm, pt);
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
+ set_pd_entry(pd, pde, pt);
GEM_BUG_ON(pd->used > I915_PDES);
}
@@ -1347,8 +1307,8 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
goto unwind;
}
- init_pd_with_page(vm, pd, vm->scratch_pt);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+ init_pd(vm, pd, vm->scratch_pt);
+ set_pd_entry(pdp, pdpe, pd);
GEM_BUG_ON(pdp->used > i915_pdpes_per_pdp(vm));
}
@@ -1361,7 +1321,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
unwind_pd:
if (!pd->used) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ set_pd_entry(pdp, pdpe, vm->scratch_pd);
GEM_BUG_ON(!pdp->used);
pdp->used--;
free_pd(vm, pd);
@@ -1395,7 +1355,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
goto unwind;
init_pd(vm, pdp, vm->scratch_pd);
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ set_pd_entry(pml4, pml4e, pdp);
}
ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
@@ -1407,7 +1367,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
unwind_pdp:
if (!pdp->used) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+ set_pd_entry(pml4, pml4e, vm->scratch_pdp);
free_pd(vm, pdp);
}
unwind:
@@ -1429,8 +1389,8 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
if (IS_ERR(pd))
goto unwind;
- init_pd_with_page(vm, pd, vm->scratch_pt);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+ init_pd(vm, pd, vm->scratch_pt);
+ set_pd_entry(pdp, pdpe, pd);
pdp->used++;
}
@@ -1440,7 +1400,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
unwind:
start -= from;
gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ set_pd_entry(pdp, pdpe, vm->scratch_pd);
free_pd(vm, pd);
}
pdp->used = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 59fccfeadae6..cac89d684b2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -67,9 +67,6 @@ struct i915_vma;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
-typedef u64 gen8_pde_t;
-typedef u64 gen8_ppgtt_pdpe_t;
-typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
--
2.17.1
More information about the Intel-gfx-trybot
mailing list