[Intel-gfx] [PATCH 33/68] drm/i915: construct page table abstractions
Ben Widawsky
benjamin.widawsky at intel.com
Fri Aug 22 05:11:56 CEST 2014
Thus far we've opted for complex code that is difficult to review. In
the future the code is only going to become more complex, so we take
the hit now and start to encapsulate things.

To ease the transition there is some wasted space in gen6/7. This will
be ameliorated shortly.
NOTE: The pun in the subject was intentional.
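For orientation, an illustrative sketch (not part of the patch) of how
the new abstractions nest on gen8: the PDPE bits of a GPU virtual
address select a page directory, the PDE bits select a page table
within it, and the remaining bits index entries in that table. The
shift and mask values below assume the legacy 32b PPGTT layout (4KiB
pages, 512 PTEs per page table, 512 PDEs per page directory, 4 PDPEs),
and the helper name is made up for the example:

static struct page *sketch_gen8_va_to_pt_page(struct i915_hw_ppgtt *ppgtt,
					      uint64_t va)
{
	const unsigned pdpe = (va >> 30) & 0x3;   /* index into pdp.pagedir[] */
	const unsigned pde  = (va >> 21) & 0x1ff; /* index into pd->page_tables[] */
	struct i915_pagedir *pd = &ppgtt->pdp.pagedir[pdpe];

	/* backing page of the page table covering this address */
	return pd->page_tables[pde].page;
}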
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
Conflicts:
drivers/gpu/drm/i915/i915_drv.h
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 174 ++++++++++++++++++------------------
drivers/gpu/drm/i915/i915_gem_gtt.h | 23 +++--
2 files changed, 104 insertions(+), 93 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5a62ef1..b832d01 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -270,7 +270,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+ struct i915_pagedir *pd = &ppgtt->pdp.pagedir[pdpe];
+ struct page *page_table = pd->page_tables[pde].page;
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PT)
@@ -314,8 +315,11 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
break;
- if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+ if (pt_vaddr == NULL) {
+ struct i915_pagedir *pd = &ppgtt->pdp.pagedir[pdpe];
+ struct page *page_table = pd->page_tables[pde].page;
+ pt_vaddr = kmap_atomic(page_table);
+ }
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -339,29 +343,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct page **pt_pages)
+static void gen8_free_page_tables(struct i915_pagedir *pd)
{
int i;
- if (pt_pages == NULL)
+ if (pd->page_tables == NULL)
return;
for (i = 0; i < I915_PDES_PER_PD; i++)
- if (pt_pages[i])
- __free_pages(pt_pages[i], 0);
+ if (pd->page_tables[i].page)
+ __free_page(pd->page_tables[i].page);
+}
+
+static void gen8_free_page_directories(struct i915_pagedir *pd)
+{
+ kfree(pd->page_tables);
+ __free_page(pd->page);
}
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
- kfree(ppgtt->gen8_pt_pages[i]);
+ gen8_free_page_tables(&ppgtt->pdp.pagedir[i]);
+ gen8_free_page_directories(&ppgtt->pdp.pagedir[i]);
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
-
- __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -399,86 +407,73 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
-static struct page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
- struct page **pt_pages;
int i;
- pt_pages = kcalloc(I915_PDES_PER_PD, sizeof(struct page *), GFP_KERNEL);
- if (!pt_pages)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < I915_PDES_PER_PD; i++) {
- pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!pt_pages[i])
- goto bail;
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ ppgtt->gen8_pt_dma_addr[i] = kcalloc(I915_PDES_PER_PD,
+ sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!ppgtt->gen8_pt_dma_addr[i])
+ return -ENOMEM;
}
- return pt_pages;
-
-bail:
- gen8_free_page_tables(pt_pages);
- kfree(pt_pages);
- return ERR_PTR(-ENOMEM);
+ return 0;
}
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
- struct page **pt_pages[GEN8_LEGACY_PDPES];
- int i, ret;
+ int i, j;
- for (i = 0; i < max_pdp; i++) {
- pt_pages[i] = __gen8_alloc_page_tables();
- if (IS_ERR(pt_pages[i])) {
- ret = PTR_ERR(pt_pages[i]);
- goto unwind_out;
+ for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ for (j = 0; j < I915_PDES_PER_PD; j++) {
+ struct i915_pagetab *pt = &ppgtt->pdp.pagedir[i].page_tables[j];
+ pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pt->page)
+ goto unwind_out;
}
}
- /* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
- * "atomic" - for cleanup purposes.
- */
- for (i = 0; i < max_pdp; i++)
- ppgtt->gen8_pt_pages[i] = pt_pages[i];
-
return 0;
unwind_out:
- while (i--) {
- gen8_free_page_tables(pt_pages[i]);
- kfree(pt_pages[i]);
- }
+ while (i--)
+ gen8_free_page_tables(&ppgtt->pdp.pagedir[i]);
- return ret;
+ return -ENOMEM;
}
-static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+ const int max_pdp)
{
int i;
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ppgtt->gen8_pt_dma_addr[i] = kcalloc(I915_PDES_PER_PD,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!ppgtt->gen8_pt_dma_addr[i])
- return -ENOMEM;
- }
+ for (i = 0; i < max_pdp; i++) {
+ struct i915_pagetab *pt;
+ pt = kcalloc(I915_PDES_PER_PD, sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ goto unwind_out;
- return 0;
-}
+ ppgtt->pdp.pagedir[i].page = alloc_page(GFP_KERNEL);
+ if (!ppgtt->pdp.pagedir[i].page)
+ goto unwind_out;
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
-{
- ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
- if (!ppgtt->pd_pages)
- return -ENOMEM;
+ ppgtt->pdp.pagedir[i].page_tables = pt;
+ }
- ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+ ppgtt->num_pd_pages = max_pdp;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
return 0;
+
+unwind_out:
+ while (i--) {
+ kfree(ppgtt->pdp.pagedir[i].page_tables);
+ __free_page(ppgtt->pdp.pagedir[i].page);
+ }
+
+ return -ENOMEM;
}
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
@@ -490,18 +485,19 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
- if (ret) {
- __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
- return ret;
- }
+ ret = gen8_ppgtt_allocate_page_tables(ppgtt);
+ if (ret)
+ goto err_out;
ppgtt->num_pd_entries = max_pdp * I915_PDES_PER_PD;
ret = gen8_ppgtt_allocate_dma(ppgtt);
- if (ret)
- gen8_ppgtt_free(ppgtt);
+ if (!ret)
+ return ret;
+ /* TODO: Check this for all cases */
+err_out:
+ gen8_ppgtt_free(ppgtt);
return ret;
}
@@ -512,7 +508,7 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
int ret;
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- &ppgtt->pd_pages[pdpe], 0,
+ ppgtt->pdp.pagedir[pdpe].page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
@@ -532,7 +528,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
struct page *p;
int ret;
- p = ppgtt->gen8_pt_pages[pdpe][pde];
+ p = ppgtt->pdp.pagedir[pdpe].page_tables[pde].page;
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
@@ -593,7 +589,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
*/
for (i = 0; i < max_pdp; i++) {
gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+ pd_vaddr = kmap_atomic(ppgtt->pdp.pagedir[i].page);
for (j = 0; j < I915_PDES_PER_PD; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
@@ -655,7 +651,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[pde].page);
for (pte = 0; pte < GEN6_PTES_PER_PT; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * GEN6_PTES_PER_PT) +
@@ -949,7 +945,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
if (last_pte > GEN6_PTES_PER_PT)
last_pte = GEN6_PTES_PER_PT;
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[pde].page);
for (i = pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
@@ -977,7 +973,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+ pt_vaddr = kmap_atomic(ppgtt->pd.page_tables[pde].page);
pt_vaddr[pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
@@ -1011,8 +1007,8 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
- __free_page(ppgtt->pt_pages[i]);
- kfree(ppgtt->pt_pages);
+ __free_page(ppgtt->pd.page_tables[i].page);
+ kfree(ppgtt->pd.page_tables);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1071,22 +1067,22 @@ alloc:
static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
+ struct i915_pagetab *pt;
int i;
- ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
- GFP_KERNEL);
-
- if (!ppgtt->pt_pages)
+ pt = kcalloc(ppgtt->num_pd_entries, sizeof(*pt), GFP_KERNEL);
+ if (!pt)
return -ENOMEM;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
- ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
- if (!ppgtt->pt_pages[i]) {
+ pt[i].page = alloc_page(GFP_KERNEL);
+ if (!pt[i].page) {
gen6_ppgtt_free(ppgtt);
return -ENOMEM;
}
}
+ ppgtt->pd.page_tables = pt;
return 0;
}
@@ -1121,9 +1117,11 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ struct page *page;
dma_addr_t pt_addr;
- pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+ page = ppgtt->pd.page_tables[i].page;
+ pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
@@ -1170,7 +1168,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES_PER_PT * PAGE_SIZE;
+ ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES_PER_PT * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
ppgtt->pd_offset =
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 465549f..ee1c317 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -257,6 +257,20 @@ struct i915_gtt {
unsigned long *mappable_end);
};
+struct i915_pagetab {
+ struct page *page;
+};
+
+struct i915_pagedir {
+ struct page *page; /* NULL for GEN6-GEN7 */
+ struct i915_pagetab *page_tables;
+};
+
+struct i915_pagedirpo {
+ /* struct page *page; */
+ struct i915_pagedir pagedir[GEN8_LEGACY_PDPES];
+};
+
struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
@@ -264,11 +278,6 @@ struct i915_hw_ppgtt {
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
- struct page **pt_pages;
- struct page **gen8_pt_pages[GEN8_LEGACY_PDPES];
- };
- struct page *pd_pages;
- union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
};
@@ -276,6 +285,10 @@ struct i915_hw_ppgtt {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
};
+ union {
+ struct i915_pagedirpo pdp;
+ struct i915_pagedir pd;
+ };
struct intel_context *ctx;
--
2.0.4