[Intel-gfx] [PATCH 20/20] drm/i915/gtt: One instance of scratch page table/directory
Mika Kuoppala
mika.kuoppala at linux.intel.com
Thu May 21 07:37:48 PDT 2015
As we already use a single scratch page shared by all ppgtt instances,
we can likewise share one scratch page table and one scratch page
directory across all ppgtt instances, saving two pages plus their
backing structs per ppgtt.
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 288 +++++++++++++++++++++++-------------
1 file changed, 184 insertions(+), 104 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6910996..6706081 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -349,7 +349,10 @@ static void kunmap_page_dma(struct drm_device *dev, void *vaddr, bool dirty)
}
#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr), true);
+#define kunmap_px(ppgtt, vaddr) \
+ kunmap_page_dma((ppgtt)->base.dev, (vaddr), true)
+#define kunmap_readonly_px(ppgtt, vaddr) \
+ kunmap_page_dma((ppgtt)->base.dev, (vaddr), false)
#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
@@ -403,16 +406,6 @@ static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
kfree(pt);
}
-static void gen8_setup_scratch_pt(struct i915_address_space *vm)
-{
- gen8_pte_t scratch_pte;
-
- scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
- I915_CACHE_LLC, true);
-
- fill_px(vm->dev, vm->scratch_pt, scratch_pte);
-}
-
static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
struct i915_page_table *pt;
@@ -481,6 +474,175 @@ free_pd:
return ERR_PTR(ret);
}
+static int alloc_scratch_page(struct i915_address_space *vm)
+{
+ struct i915_page_scratch *sp;
+ int ret;
+
+ WARN_ON(vm->scratch_page);
+
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (sp == NULL)
+ return -ENOMEM;
+
+ ret = __setup_page_dma(vm->dev, px_base(sp), GFP_DMA32);
+ if (ret) {
+ kfree(sp);
+ return ret;
+ }
+
+ fill_px(vm->dev, sp, ~0ULL);
+ set_pages_uc(px_page(sp), 1);
+
+ vm->scratch_page = sp;
+
+ return 0;
+}
+
+static void free_scratch_page(struct i915_address_space *vm)
+{
+ struct i915_page_scratch *sp = vm->scratch_page;
+
+ set_pages_wb(px_page(sp), 1);
+
+ cleanup_px(vm->dev, sp);
+ kfree(sp);
+
+ vm->scratch_page = NULL;
+}
+
+static void gen6_setup_scratch_pt(struct i915_address_space *vm)
+{
+ gen6_pte_t scratch_pte;
+
+ WARN_ON(px_dma(vm->scratch_page) == 0);
+
+ scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true, 0);
+
+ fill32_px(vm->dev, vm->scratch_pt, scratch_pte);
+}
+
+static void gen8_setup_scratch_pt(struct i915_address_space *vm)
+{
+ gen8_pte_t scratch_pte;
+
+ WARN_ON(px_dma(vm->scratch_page) == 0);
+
+ scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ I915_CACHE_LLC, true);
+
+ fill_px(vm->dev, vm->scratch_pt, scratch_pte);
+}
+
+static void gen8_setup_scratch_pd(struct i915_address_space *vm)
+{
+ gen8_pde_t scratch_pde;
+
+ WARN_ON(px_dma(vm->scratch_pt) == 0);
+
+ scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+ fill_px(vm->dev, vm->scratch_pd, scratch_pde);
+}
+
+static int setup_scratch_ggtt(struct i915_address_space *vm)
+{
+ int ret;
+
+ ret = alloc_scratch_page(vm);
+ if (ret)
+ return ret;
+
+ WARN_ON(vm->scratch_pt);
+
+ if (INTEL_INFO(vm->dev)->gen < 6)
+ return 0;
+
+ vm->scratch_pt = alloc_pt(vm->dev);
+ if (IS_ERR(vm->scratch_pt))
+ return PTR_ERR(vm->scratch_pt);
+
+ if (INTEL_INFO(vm->dev)->gen >= 8) {
+ gen8_setup_scratch_pt(vm);
+
+ WARN_ON(vm->scratch_pd);
+
+ vm->scratch_pd = alloc_pd(vm->dev);
+ if (IS_ERR(vm->scratch_pd)) {
+ ret = PTR_ERR(vm->scratch_pd);
+ goto err_pd;
+ }
+
+ gen8_setup_scratch_pd(vm);
+ } else {
+ gen6_setup_scratch_pt(vm);
+ }
+
+ return 0;
+
+err_pd:
+ free_pt(vm->dev, vm->scratch_pt);
+ return ret;
+}
+
+static int setup_scratch(struct i915_address_space *vm)
+{
+ struct i915_address_space *ggtt_vm = &to_i915(vm->dev)->gtt.base;
+
+ if (i915_is_ggtt(vm))
+ return setup_scratch_ggtt(vm);
+
+ vm->scratch_page = ggtt_vm->scratch_page;
+ vm->scratch_pt = ggtt_vm->scratch_pt;
+ vm->scratch_pd = ggtt_vm->scratch_pd;
+
+ return 0;
+}
+
+static void check_scratch_page(struct i915_address_space *vm)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ u32 i, *vaddr;
+
+ vaddr = kmap_px(vm->scratch_page);
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
+ if (vaddr[i] == 0xffffffff)
+ continue;
+
+ DRM_ERROR("%p scratch[%u] = 0x%08x\n", vm, i, vaddr[i]);
+ break;
+ }
+
+ kunmap_readonly_px(ppgtt, vaddr);
+}
+
+static void cleanup_scratch_ggtt(struct i915_address_space *vm)
+{
+ check_scratch_page(vm);
+ free_scratch_page(vm);
+
+ if (INTEL_INFO(vm->dev)->gen < 6)
+ return;
+
+ free_pt(vm->dev, vm->scratch_pt);
+
+ if (INTEL_INFO(vm->dev)->gen >= 8)
+ free_pd(vm->dev, vm->scratch_pd);
+}
+
+static void cleanup_scratch(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ cleanup_scratch_ggtt(vm);
+
+ vm->scratch_page = NULL;
+ vm->scratch_pt = NULL;
+ vm->scratch_pd = NULL;
+}
+
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring,
unsigned entry,
@@ -535,7 +697,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
- scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
+ scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch);
while (num_entries) {
@@ -619,15 +781,6 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
}
-static void gen8_setup_scratch_pd(struct i915_address_space *vm)
-{
- gen8_pde_t scratch_pde;
-
- scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
-
- fill_px(vm->dev, vm->scratch_pd, scratch_pde);
-}
-
static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
{
int i;
@@ -658,8 +811,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
}
- free_pd(vm->dev, vm->scratch_pd);
- free_pt(vm->dev, vm->scratch_pt);
+ cleanup_scratch(vm);
}
/**
@@ -947,17 +1099,6 @@ err_out:
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
- if (IS_ERR(ppgtt->base.scratch_pt))
- return PTR_ERR(ppgtt->base.scratch_pt);
-
- ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
- if (IS_ERR(ppgtt->base.scratch_pd))
- return PTR_ERR(ppgtt->base.scratch_pd);
-
- gen8_setup_scratch_pt(&ppgtt->base);
- gen8_setup_scratch_pd(&ppgtt->base);
-
ppgtt->base.start = 0;
ppgtt->base.total = 1ULL << 32;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
@@ -969,7 +1110,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->switch_mm = gen8_mm_switch;
- return 0;
+ return setup_scratch(&ppgtt->base);
}
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1021,7 +1162,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
seq_puts(m, "\n");
}
- kunmap_px(ppgtt, pt_vaddr);
+ kunmap_readonly_px(ppgtt, pt_vaddr);
}
}
@@ -1271,18 +1412,6 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
}
-static void gen6_setup_scratch_pt(struct i915_address_space *vm)
-{
- gen6_pte_t scratch_pte;
-
- WARN_ON(px_dma(vm->scratch_page) == 0);
-
- scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
- I915_CACHE_LLC, true, 0);
-
- fill32_px(vm->dev, vm->scratch_pt, scratch_pte);
-}
-
static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
@@ -1387,7 +1516,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
free_pt(ppgtt->base.dev, pt);
}
- free_pt(vm->dev, vm->scratch_pt);
+ cleanup_scratch(vm);
}
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
@@ -1402,11 +1531,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
- ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
- if (IS_ERR(ppgtt->base.scratch_pt))
- return PTR_ERR(ppgtt->base.scratch_pt);
- gen6_setup_scratch_pt(&ppgtt->base);
+ ret = setup_scratch(&ppgtt->base);
+ if (ret)
+ return ret;
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1437,7 +1565,7 @@ alloc:
return 0;
err_out:
- free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
+ cleanup_scratch(&ppgtt->base);
return ret;
}
@@ -1511,10 +1639,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
ppgtt->base.dev = dev;
- ppgtt->base.scratch_page = dev_priv->gtt.base.scratch_page;
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
@@ -2120,43 +2245,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
vm->cleanup(vm);
}
-static int alloc_scratch_page(struct i915_address_space *vm)
-{
- struct i915_page_scratch *sp;
- int ret;
-
- WARN_ON(vm->scratch_page);
-
- sp = kzalloc(sizeof(*sp), GFP_KERNEL);
- if (sp == NULL)
- return -ENOMEM;
-
- ret = __setup_page_dma(vm->dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
- if (ret) {
- kfree(sp);
- return ret;
- }
-
- fill_px(vm->dev, sp, ~0ULL);
- set_pages_uc(px_page(sp), 1);
-
- vm->scratch_page = sp;
-
- return 0;
-}
-
-static void free_scratch_page(struct i915_address_space *vm)
-{
- struct i915_page_scratch *sp = vm->scratch_page;
-
- set_pages_wb(px_page(sp), 1);
-
- cleanup_px(vm->dev, sp);
- kfree(sp);
-
- vm->scratch_page = NULL;
-}
-
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2240,7 +2328,6 @@ static int ggtt_probe_common(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_phys_addr;
- int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2262,14 +2349,7 @@ static int ggtt_probe_common(struct drm_device *dev,
return -ENOMEM;
}
- ret = alloc_scratch_page(&dev_priv->gtt.base);
- if (ret) {
- DRM_ERROR("Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(dev_priv->gtt.gsm);
- }
-
- return ret;
+ return setup_scratch(&dev_priv->gtt.base);
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2441,7 +2521,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
iounmap(gtt->gsm);
- free_scratch_page(vm);
+ cleanup_scratch(vm);
}
static int i915_gmch_probe(struct drm_device *dev,
--
1.9.1
More information about the Intel-gfx
mailing list