[Intel-gfx] [PATCH 40/68] drm/i915: Always dma map page directory allocations
Ben Widawsky
benjamin.widawsky at intel.com
Fri Aug 22 05:12:03 CEST 2014
Similar to the patch a few back in the series, we can always map and
unmap page directories when we do their allocation and teardown. Page
directory pages only exist on gen8+, so this should only affect behavior
on those platforms.
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------------
1 file changed, 19 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 205d5c6..094a82f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -303,21 +303,23 @@ err_out:
return ret;
}
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
{
+ i915_dma_unmap_single(pd, dev);
__free_page(pd->page);
kfree(pd);
}
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
if ((pd)->page) { \
- __free_pd_single(pd); \
+ __free_pd_single(pd, dev); \
} \
} while (0)
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
{
struct i915_pagedir *pd;
+ int ret;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
@@ -329,6 +331,13 @@ static struct i915_pagedir *alloc_pd_single(void)
return ERR_PTR(-ENOMEM);
}
+ ret = i915_dma_map_px_single(pd, dev);
+ if (ret) {
+ __free_page(pd->page);
+ kfree(pd);
+ return ERR_PTR(ret);
+ }
+
return pd;
}
@@ -493,30 +502,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_pages; i++) {
gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
- free_pd_single(ppgtt->pdp.pagedir[i]);
- }
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_device *dev = ppgtt->base.dev;
- int i, j;
-
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- /* TODO: In the future we'll support sparse mappings, so this
- * will have to change. */
- if (!ppgtt->pdp.pagedir[i]->daddr)
- continue;
-
- i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
- for (j = 0; j < I915_PDES_PER_PD; j++) {
- struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
- struct i915_pagetab *pt = pd->page_tables[j];
- dma_addr_t addr = pt->daddr;
- if (addr)
- i915_dma_unmap_single(pt, dev);
- }
+ free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
}
}
@@ -528,7 +514,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
list_del(&vm->global_link);
drm_mm_takedown(&vm->mm);
- gen8_ppgtt_dma_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
}
@@ -558,7 +543,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
int i;
for (i = 0; i < max_pdp; i++) {
- ppgtt->pdp.pagedir[i] = alloc_pd_single();
+ ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
if (IS_ERR(ppgtt->pdp.pagedir[i]))
goto unwind_out;
}
@@ -570,7 +555,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
unwind_out:
while (i--)
- free_pd_single(ppgtt->pdp.pagedir[i]);
+ free_pd_single(ppgtt->pdp.pagedir[i],
+ ppgtt->base.dev);
return -ENOMEM;
}
@@ -598,19 +584,6 @@ err_out:
return ret;
}
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int pdpe)
-{
- int ret;
-
- ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
- ppgtt->base.dev);
- if (ret)
- return ret;
-
- return 0;
-}
-
/**
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -636,16 +609,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
return ret;
/*
- * 2. Create DMA mappings for the page directories and page tables.
- */
- for (i = 0; i < max_pdp; i++) {
- ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
- if (ret)
- goto bail;
- }
-
- /*
- * 3. Map all the page directory entires to point to the page tables
+ * 2. Map all the page directory entries to point to the page tables
* we've allocated.
*
* For now, the PPGTT helper functions all require that the PDEs are
@@ -681,11 +645,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->num_pd_entries,
(ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
return 0;
-
-bail:
- gen8_ppgtt_dma_unmap_pages(ppgtt);
- gen8_ppgtt_free(ppgtt);
- return ret;
}
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1063,7 +1022,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_entries; i++)
free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
- free_pd_single(&ppgtt->pd);
+ free_pd_single(&ppgtt->pd, ppgtt->base.dev);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
--
2.0.4
More information about the Intel-gfx
mailing list