[Intel-gfx] [PATCH 02/12] drm/i915/bdw: Abstract PDP usage
Michel Thierry
michel.thierry at intel.com
Wed Mar 18 03:16:21 PDT 2015
On 3/3/2015 12:16 PM, akash goel wrote:
> On Fri, Feb 20, 2015 at 11:15 PM, Michel Thierry
> <michel.thierry at intel.com> wrote:
>> From: Ben Widawsky <benjamin.widawsky at intel.com>
>>
>> Up until now, ppgtt->pdp has always been the root of our page tables.
>> Legacy 32b addresses acted as if there were 1 PDP with 4 PDPEs.
>>
>> In preparation for 4-level page tables, we need to stop using ppgtt->pdp
>> directly unless we know it's what we want. The future structure will use
>> ppgtt->pml4 for the top level, and the pdp is just one of the entries
>> being pointed to by a pml4e.
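For reference, the hierarchy this is moving towards is roughly the
following. An illustrative sketch only: the names loosely follow the
patch and are not the exact i915 definitions.

	/* Legacy 32b root: a single pdp embedded in the ppgtt */
	struct i915_page_directory_pointer_entry {
		struct i915_page_directory_entry *page_directory[4]; /* 4 PDPEs */
		unsigned long used_pdpes[BITS_TO_LONGS(4)];
	};

	/* Future 48b root: a pml4 whose 512 entries each point to a pdp.
	 * Under 48b a pdp itself holds 512 PDPEs, which is why
	 * I915_PDPES_PER_PDP() takes the device. */
	struct i915_pml4 {
		struct i915_page_directory_pointer_entry *pdps[512];
	};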
>>
>> v2: Updated after dynamic page allocation changes.
>>
>> Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
>> Signed-off-by: Michel Thierry <michel.thierry at intel.com> (v2)
>> ---
>> drivers/gpu/drm/i915/i915_gem_gtt.c | 123 ++++++++++++++++++++----------------
>> 1 file changed, 70 insertions(+), 53 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> index 489f8db..d3ad517 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> @@ -560,6 +560,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
>> {
>> struct i915_hw_ppgtt *ppgtt =
>> container_of(vm, struct i915_hw_ppgtt, base);
>> + struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
>> gen8_gtt_pte_t *pt_vaddr, scratch_pte;
>> unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
>> unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
>> @@ -575,10 +576,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
>> struct i915_page_table_entry *pt;
>> struct page *page_table;
>>
>> - if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
>> + if (WARN_ON(!pdp->page_directory[pdpe]))
>> continue;
>>
>> - pd = ppgtt->pdp.page_directory[pdpe];
>> + pd = pdp->page_directory[pdpe];
>>
>> if (WARN_ON(!pd->page_tables[pde]))
>> continue;
>> @@ -620,6 +621,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
>> {
>> struct i915_hw_ppgtt *ppgtt =
>> container_of(vm, struct i915_hw_ppgtt, base);
>> + struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
>> gen8_gtt_pte_t *pt_vaddr;
>> unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
>> unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
>> @@ -630,7 +632,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
>>
>> for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
>> if (pt_vaddr == NULL) {
>> - struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
>> + struct i915_page_directory_entry *pd = pdp->page_directory[pdpe];
>> struct i915_page_table_entry *pt = pd->page_tables[pde];
>> struct page *page_table = pt->page;
>>
>> @@ -708,16 +710,17 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d
>> static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
>> {
>> struct pci_dev *hwdev = ppgtt->base.dev->pdev;
>> + struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
>> int i, j;
>>
>> - for_each_set_bit(i, ppgtt->pdp.used_pdpes,
>> + for_each_set_bit(i, pdp->used_pdpes,
>> I915_PDPES_PER_PDP(ppgtt->base.dev)) {
>> struct i915_page_directory_entry *pd;
>>
>> - if (WARN_ON(!ppgtt->pdp.page_directory[i]))
>> + if (WARN_ON(!pdp->page_directory[i]))
>> continue;
>>
>> - pd = ppgtt->pdp.page_directory[i];
>> + pd = pdp->page_directory[i];
>> if (!pd->daddr)
>> pci_unmap_page(hwdev, pd->daddr, PAGE_SIZE,
>> PCI_DMA_BIDIRECTIONAL);
>> @@ -743,15 +746,21 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
>> {
>> int i;
>>
>> - for_each_set_bit(i, ppgtt->pdp.used_pdpes,
>> - I915_PDPES_PER_PDP(ppgtt->base.dev)) {
>> - if (WARN_ON(!ppgtt->pdp.page_directory[i]))
>> - continue;
>> + if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
>> + for_each_set_bit(i, ppgtt->pdp.used_pdpes,
>> + I915_PDPES_PER_PDP(ppgtt->base.dev)) {
>> + if (WARN_ON(!ppgtt->pdp.page_directory[i]))
>> + continue;
>>
>> - gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
>> - unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
>> + gen8_free_page_tables(ppgtt->pdp.page_directory[i],
>> + ppgtt->base.dev);
>> + unmap_and_free_pd(ppgtt->pdp.page_directory[i],
>> + ppgtt->base.dev);
>> + }
>> + unmap_and_free_pdp(&ppgtt->pdp, ppgtt->base.dev);
>> + } else {
>> + BUG(); /* to be implemented later */
>> }
>> - unmap_and_free_pdp(&ppgtt->pdp, ppgtt->base.dev);
>> }
>>
>> static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>> @@ -765,7 +774,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>>
>> /**
>> * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
>> - * @ppgtt: Master ppgtt structure.
>> + * @vm: Master vm structure.
>> * @pd: Page directory for this address range.
>> * @start: Starting virtual address to begin allocations.
>> * @length Size of the allocations.
>> @@ -781,12 +790,13 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>> *
>> * Return: 0 if success; negative error code otherwise.
>> */
>> -static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
>> +static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
>> struct i915_page_directory_entry *pd,
>> uint64_t start,
>> uint64_t length,
>> unsigned long *new_pts)
>> {
>> + struct drm_device *dev = vm->dev;
>> struct i915_page_table_entry *pt;
>> uint64_t temp;
>> uint32_t pde;
>> @@ -799,7 +809,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
>> continue;
>> }
>>
>> - pt = alloc_pt_single(ppgtt->base.dev);
>> + pt = alloc_pt_single(dev);
>> if (IS_ERR(pt))
>> goto unwind_out;
>>
>> @@ -811,14 +821,14 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
>>
>> unwind_out:
>> for_each_set_bit(pde, new_pts, GEN8_PDES_PER_PAGE)
>> - unmap_and_free_pt(pd->page_tables[pde], ppgtt->base.dev);
>> + unmap_and_free_pt(pd->page_tables[pde], dev);
>>
>> return -ENOMEM;
>> }
>>
>> /**
>> * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
>> - * @ppgtt: Master ppgtt structure.
>> + * @vm: Master vm structure.
>> * @pdp: Page directory pointer for this address range.
>> * @start: Starting virtual address to begin allocations.
>> * @length Size of the allocations.
>> @@ -839,16 +849,17 @@ unwind_out:
>> *
>> * Return: 0 if success; negative error code otherwise.
>> */
>> -static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
>> +static int gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
>> struct i915_page_directory_pointer_entry *pdp,
>> uint64_t start,
>> uint64_t length,
>> unsigned long *new_pds)
>> {
>> + struct drm_device *dev = vm->dev;
>> struct i915_page_directory_entry *pd;
>> uint64_t temp;
>> uint32_t pdpe;
>> - size_t pdpes = I915_PDPES_PER_PDP(ppgtt->base.dev);
>> + size_t pdpes = I915_PDPES_PER_PDP(vm->dev);
>>
>> BUG_ON(!bitmap_empty(new_pds, pdpes));
>>
>> @@ -859,7 +870,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
>> if (pd)
>> continue;
>>
>> - pd = alloc_pd_single(ppgtt->base.dev);
>> + pd = alloc_pd_single(dev);
>> if (IS_ERR(pd))
>> goto unwind_out;
>>
>> @@ -871,7 +882,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
>>
>> unwind_out:
>> for_each_set_bit(pdpe, new_pds, pdpes)
>> - unmap_and_free_pd(pdp->page_directory[pdpe], ppgtt->base.dev);
>> + unmap_and_free_pd(pdp->page_directory[pdpe], dev);
>>
>> return -ENOMEM;
>> }
>> @@ -926,13 +937,13 @@ err_out:
>> return -ENOMEM;
>> }
>>
>> -static int gen8_alloc_va_range(struct i915_address_space *vm,
>> - uint64_t start,
>> - uint64_t length)
>> +static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
>> + struct i915_page_directory_pointer_entry *pdp,
>> + uint64_t start,
>> + uint64_t length)
>> {
>> - struct i915_hw_ppgtt *ppgtt =
>> - container_of(vm, struct i915_hw_ppgtt, base);
>> unsigned long *new_page_dirs, **new_page_tables;
>> + struct drm_device *dev = vm->dev;
>> struct i915_page_directory_entry *pd;
>> const uint64_t orig_start = start;
>> const uint64_t orig_length = length;
>> @@ -961,17 +972,15 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
>> return ret;
>>
>> /* Do the allocations first so we can easily bail out */
>> - ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
>> - new_page_dirs);
>> + ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length, new_page_dirs);
>> if (ret) {
>> free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
>> return ret;
>> }
>>
>> - /* For every page directory referenced, allocate page tables */
>> - gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
>> + gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
>> bitmap_zero(new_page_tables[pdpe], GEN8_PDES_PER_PAGE);
>> - ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
>> + ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
>> new_page_tables[pdpe]);
>> if (ret)
>> goto err_out;
>> @@ -980,10 +989,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
>> start = orig_start;
>> length = orig_length;
>>
>> - /* Allocations have completed successfully, so set the bitmaps, and do
>> - * the mappings. */
>> - gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
>> - gen8_ppgtt_pde_t *const page_directory = kmap_atomic(pd->page);
>> + gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
>> struct i915_page_table_entry *pt;
>> uint64_t pd_len = gen8_clamp_pd(start, length);
>> uint64_t pd_start = start;
>> @@ -1005,20 +1011,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
>>
>> /* Our pde is now pointing to the pagetable, pt */
>> set_bit(pde, pd->used_pdes);
>> -
>> - /* Map the PDE to the page table */
>> - __gen8_do_map_pt(page_directory + pde, pt, vm->dev);
>> -
>> - /* NB: We haven't yet mapped ptes to pages. At this
>> - * point we're still relying on insert_entries() */
>> }
>>
>> - if (!HAS_LLC(vm->dev))
>> - drm_clflush_virt_range(page_directory, PAGE_SIZE);
>> -
>> - kunmap_atomic(page_directory);
>> -
>> - set_bit(pdpe, ppgtt->pdp.used_pdpes);
>> + set_bit(pdpe, pdp->used_pdpes);
>> + gen8_map_pagetable_range(pd, start, length, dev);
>> }
>>
>> free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
>> @@ -1027,16 +1023,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
>> err_out:
>> while (pdpe--) {
>> for_each_set_bit(temp, new_page_tables[pdpe], GEN8_PDES_PER_PAGE)
>> - unmap_and_free_pt(pd->page_tables[temp], vm->dev);
>> + unmap_and_free_pt(pd->page_tables[temp], dev);
> Sorry, this review comment may not be completely pertinent to this very patch.
> In the while loop, 'pd' is not updated when the 'pdpe' value changes.
> The call to 'unmap_and_free_pt(pd->page_tables[temp], dev);' above
> should be replaced with
> 'unmap_and_free_pt(pdp->page_directory[pdpe]->page_tables[temp], dev);'
> That will pick up the right page directory.
Fixed in the patch that introduces this code (drm/i915/bdw: Dynamic page
table allocations).
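For reference, the corrected unwind there looks roughly like this (a
sketch reusing the variable names from the hunk above, not the exact
final code):

	err_out:
		while (pdpe--) {
			/* Re-derive the pd for each pdpe instead of reusing
			 * the stale 'pd' left over from the loop above. */
			for_each_set_bit(temp, new_page_tables[pdpe],
					 GEN8_PDES_PER_PAGE)
				unmap_and_free_pt(pdp->page_directory[pdpe]->page_tables[temp],
						  dev);
		}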
>> }
>>
>> for_each_set_bit(pdpe, new_page_dirs, pdpes)
>> - unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
>> + unmap_and_free_pd(pdp->page_directory[pdpe], dev);
>>
>> free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
>> return ret;
>> }
>>
>> +static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
>> + struct i915_pml4 *pml4,
>> + uint64_t start,
>> + uint64_t length)
>> +{
>> + BUG(); /* to be implemented later */
>> +}
>> +
>> +static int gen8_alloc_va_range(struct i915_address_space *vm,
>> + uint64_t start, uint64_t length)
>> +{
>> + struct i915_hw_ppgtt *ppgtt =
>> + container_of(vm, struct i915_hw_ppgtt, base);
>> +
>> + if (!USES_FULL_48BIT_PPGTT(vm->dev))
>> + return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
>> + else
>> + return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
>> +}
>> +
>> static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
>> {
>> unmap_and_free_pt(ppgtt->scratch_pd, ppgtt->base.dev);
>> @@ -1079,12 +1095,13 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>> {
>> struct drm_device *dev = ppgtt->base.dev;
>> struct drm_i915_private *dev_priv = dev->dev_private;
>> + struct i915_page_directory_pointer_entry *pdp = &ppgtt->pdp; /* FIXME: 48b */
>> struct i915_page_directory_entry *pd;
>> uint64_t temp, start = 0, size = dev_priv->gtt.base.total;
>> uint32_t pdpe;
>> int ret;
>>
>> - ret = gen8_ppgtt_init_common(ppgtt, dev_priv->gtt.base.total);
>> + ret = gen8_ppgtt_init_common(ppgtt, size);
>> if (ret)
>> return ret;
>>
>> @@ -1097,8 +1114,8 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>> return ret;
>> }
>>
>> - gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
>> - gen8_map_pagetable_range(pd, start, size, ppgtt->base.dev);
>> + gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
>> + gen8_map_pagetable_range(pd, start, size, dev);
> Sorry, again this comment may not be relevant to this patch.
> Is the explicit call to map the page tables really needed here?
> Prior to this there is already a call to gen8_alloc_va_range, which
> maps the page tables into the PDEs for the entire virtual range.
Thanks, it was a leftover from a previous rebase (before the aliasing and
full ppgtt inits were split). As you say, it isn't needed.
Fixed in the same patch as your previous comment (drm/i915/bdw: Dynamic
page table allocations).
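Roughly, the aliasing init then reduces to the following (a sketch with
the error paths trimmed, not the final code):

	static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
	{
		struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
		uint64_t start = 0, size = dev_priv->gtt.base.total;
		int ret;

		ret = gen8_ppgtt_init_common(ppgtt, size);
		if (ret)
			return ret;

		/* gen8_alloc_va_range() already maps each new page table
		 * into its PDE, so no gen8_map_pagetable_range() loop. */
		ret = gen8_alloc_va_range(&ppgtt->base, start, size);
		if (ret) {
			gen8_ppgtt_fini_common(ppgtt);
			return ret;
		}

		ppgtt->base.allocate_va_range = NULL;
		ppgtt->base.clear_range = gen8_ppgtt_clear_range;
		return 0;
	}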
>> ppgtt->base.allocate_va_range = NULL;
>> ppgtt->base.clear_range = gen8_ppgtt_clear_range;
>> --
>> 2.1.1
>>