[Intel-gfx] [PATCH 3/9] drm/i915/bdw: Split ppgtt initialization up
Imre Deak
imre.deak at intel.com
Thu Feb 20 14:10:58 CET 2014
On Wed, 2014-02-19 at 22:05 -0800, Ben Widawsky wrote:
> As with the cleanup split in an earlier patch, the code becomes much more
> readable, and easier to extend, if we extract helper functions for the
> various stages of init.
>
> Note that with this patch it becomes really simple, and tempting, to begin
> using the 'goto out' idiom with explicit free/fini semantics. I've kept
> the error path as similar as possible to the cleanup() function to make
> sure cleanup is as robust as possible.
>
> v2: Remove comment "NB:From here on, ppgtt->base.cleanup() should
> function properly"
> Update commit message to reflect above
>
> v3: Rebased on top of bugfixes found in the previous patch by Imre
> Moved number of pd pages assertion to the proper place (Imre)
>
> v4:
> Allocate dma address space for num_pd_pages, not num_pd_entries (Ben)
> Don't use gen8_pt_dma_addr after free on error path (Imre)
> With new fix from v4 of the previous patch.
>
> Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
Looks ok to me:
Reviewed-by: Imre Deak <imre.deak at intel.com>
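
For anyone skimming the thread, the net effect of the split is that the
body of gen8_ppgtt_init() reduces to roughly the sequence below. This is
a paraphrase of the patch that follows, with error handling elided, not
literal code from it:

	/* paraphrased outline of the refactored init */
	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);	/* 1. allocations */

	/* 2. DMA mappings for the page directories and their page tables */
	for (i = 0; i < max_pdp; i++) {
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++)
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
	}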
> ---
> drivers/gpu/drm/i915/i915_gem_gtt.c | 164 +++++++++++++++++++++++++-----------
> 1 file changed, 116 insertions(+), 48 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 7956659..0af3587 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -366,6 +366,113 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>  	gen8_ppgtt_free(ppgtt);
>  }
>
> +static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
> +					   const int max_pdp)
> +{
> +	struct page *pt_pages;
> +	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> +
> +	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
> +	if (!pt_pages)
> +		return -ENOMEM;
> +
> +	ppgtt->gen8_pt_pages = pt_pages;
> +	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
> +
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
> +{
> +	int i;
> +
> +	for (i = 0; i < ppgtt->num_pd_pages; i++) {
> +		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
> +						     sizeof(dma_addr_t),
> +						     GFP_KERNEL);
> +		if (!ppgtt->gen8_pt_dma_addr[i])
> +			return -ENOMEM;
> +	}
> +
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
> +						const int max_pdp)
> +{
> +	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
> +	if (!ppgtt->pd_pages)
> +		return -ENOMEM;
> +
> +	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
> +	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
> +
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
> +			    const int max_pdp)
> +{
> +	int ret;
> +
> +	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
> +	if (ret)
> +		return ret;
> +
> +	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
> +	if (ret) {
> +		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
> +		return ret;
> +	}
> +
> +	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
> +
> +	ret = gen8_ppgtt_allocate_dma(ppgtt);
> +	if (ret)
> +		gen8_ppgtt_free(ppgtt);
> +
> +	return ret;
> +}
> +
> +static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
> +					     const int pd)
> +{
> +	dma_addr_t pd_addr;
> +	int ret;
> +
> +	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
> +			       &ppgtt->pd_pages[pd], 0,
> +			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> +
> +	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
> +	if (ret)
> +		return ret;
> +
> +	ppgtt->pd_dma_addr[pd] = pd_addr;
> +
> +	return 0;
> +}
> +
> +static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
> +					const int pd,
> +					const int pt)
> +{
> +	dma_addr_t pt_addr;
> +	struct page *p;
> +	int ret;
> +
> +	p = &ppgtt->gen8_pt_pages[pd * GEN8_PDES_PER_PAGE + pt];
> +	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
> +			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> +	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
> +	if (ret)
> +		return ret;
> +
> +	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
> +
> +	return 0;
> +}
> +
>  /**
>   * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
>   * with a net effect resembling a 2-level page table in normal x86 terms. Each
> @@ -378,69 +485,30 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
>   */
>  static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
>  {
> -	struct page *pt_pages;
>  	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
> -	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> -	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
> +	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
>  	int i, j, ret;
>
>  	if (size % (1<<30))
>  		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
>
> -	/* 1. Do all our allocations for page directories and page tables */
> -	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
> -	if (!ppgtt->pd_pages)
> -		return -ENOMEM;
> -
> -	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
> -	if (!pt_pages) {
> -		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
> -		return -ENOMEM;
> -	}
> -
> -	ppgtt->gen8_pt_pages = pt_pages;
> -	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
> -	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
> -	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
> -	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
> -
> -	for (i = 0; i < max_pdp; i++) {
> -		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
> -						     sizeof(dma_addr_t),
> -						     GFP_KERNEL);
> -		if (!ppgtt->gen8_pt_dma_addr[i]) {
> -			ret = -ENOMEM;
> -			goto bail;
> -		}
> -	}
> +	/* 1. Do all our allocations for page directories and page tables. */
> +	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
> +	if (ret)
> +		return ret;
>
>  	/*
> -	 * 2. Create all the DMA mappings for the page directories and page
> -	 * tables
> +	 * 2. Create DMA mappings for the page directories and page tables.
>  	 */
>  	for (i = 0; i < max_pdp; i++) {
> -		dma_addr_t pd_addr, pt_addr;
> -
> -		/* Get the page directory mappings */
> -		pd_addr = pci_map_page(hwdev, &ppgtt->pd_pages[i], 0,
> -				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> -		ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
> +		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
>  		if (ret)
>  			goto bail;
>
> -		ppgtt->pd_dma_addr[i] = pd_addr;
> -
> -		/* And the page table mappings per page directory */
>  		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
> -			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
> -
> -			pt_addr = pci_map_page(hwdev, p, 0, PAGE_SIZE,
> -					       PCI_DMA_BIDIRECTIONAL);
> -			ret = pci_dma_mapping_error(hwdev, pt_addr);
> +			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
>  			if (ret)
>  				goto bail;
> -
> -			ppgtt->gen8_pt_dma_addr[i][j] = pt_addr;
>  		}
>  	}
>
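
A side note on the 1GB granularity in the hunk above: with 4KB pages and
8-byte entries, GEN8_PDES_PER_PAGE is 512, so the arithmetic works out as
follows (back-of-the-envelope, not taken from the patch itself):

	512 PTEs/page table * 4KB/page       = 2MB mapped per page table
	512 PDEs/directory  * 2MB/page table = 1GB mapped per PDP entry

which is why max_pdp = DIV_ROUND_UP(size, 1 << 30), and why the DRM_INFO
above warns about sizes not divisible by 1GB.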
> @@ -479,7 +547,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
>  			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
>  	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
>  			 ppgtt->num_pt_pages,
> -			 (ppgtt->num_pt_pages - num_pt_pages) +
> +			 (ppgtt->num_pt_pages - min_pt_pages) +
>  			 size % (1<<30));
>  	return 0;
>
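
One thing worth spelling out about gen8_ppgtt_alloc() above, since the
commit message mentions keeping the error path close to the cleanup()
function: the partial-failure handling is ordered around what has been
allocated so far. A commented sketch of that ordering (my reading of the
patch, not code from it):

	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
	if (ret) {
		/* Only pd_pages has been allocated at this point, so
		 * unwind it directly rather than using the full free path. */
		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
		return ret;
	}

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	/* With both page allocations and num_pd_entries in place, the
	 * common gen8_ppgtt_free() can handle any later failure. */
	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		gen8_ppgtt_free(ppgtt);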