[Intel-gfx] [PATCH 3/9] [v3] drm/i915/bdw: Split ppgtt initialization up
Imre Deak
imre.deak at intel.com
Thu Feb 20 00:00:21 CET 2014
On Wed, 2014-02-19 at 14:40 -0800, Ben Widawsky wrote:
> On Wed, Feb 19, 2014 at 11:50:51PM +0200, Imre Deak wrote:
> > On Wed, 2014-02-19 at 12:09 -0800, Ben Widawsky wrote:
> > > Like cleanup in an earlier patch, the code becomes much more readable,
> > > and easier to extend if we extract out helper functions for the various
> > > stages of init.
> > >
> > > Note that with this patch it becomes really simple, and tempting to begin
> > > using the 'goto out' idiom with explicit free/fini semantics. I've
> > > kept the error path as similar as possible to the cleanup() function to
> > > make sure cleanup is as robust as possible
> > >
> > > v2: Remove comment "NB:From here on, ppgtt->base.cleanup() should
> > > function properly"
> > > Update commit message to reflect above
> > >
> > > v3: Rebased on top of bugfixes found in the previous patch by Imre
> > > Moved number of pd pages assertion to the proper place (Imre)
> > >
> > > Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
> > > ---
> > > drivers/gpu/drm/i915/i915_gem_gtt.c | 172 ++++++++++++++++++++++++++----------
> > > 1 file changed, 124 insertions(+), 48 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > > index 03f586aa..03ef760 100644
> > > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > > @@ -366,6 +366,118 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
> > > gen8_ppgtt_free(ppgtt);
> > > }
> > >
> > > +static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
> > > + const int max_pdp)
> > > +{
> > > + struct page *pt_pages;
> > > + const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> > > +
> > > + pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
> > > + if (!pt_pages)
> > > + return -ENOMEM;
> > > +
> > > + ppgtt->gen8_pt_pages = pt_pages;
> > > + ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
> > > +{
> > > + int i;
> > > +
> > > + for (i = 0; i < ppgtt->num_pd_entries; i++) {
> > > + ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
> > > + sizeof(dma_addr_t),
> > > + GFP_KERNEL);
> > > + if (!ppgtt->gen8_pt_dma_addr[i]) {
> > > + kfree(ppgtt->gen8_pt_dma_addr);
> > > + while(i--)
> > > + kfree(ppgtt->gen8_pt_dma_addr[i]);
> >
> > Sorry for missing this in my review, but ppgtt->gen8_pt_dma_addr is
> > used after being freed. Also, after returning -ENOMEM we'll call
> > gen8_ppgtt_free(), which will try to kfree these a second time. I
> > think just doing "if (!ppgtt->gen8_pt_dma_addr[i]) return -ENOMEM;"
> > here would work. The kfree() in gen8_ppgtt_free() would also do the
> > right thing if ppgtt->gen8_pt_dma_addr[i] is NULL, since it wasn't
> > kcalloc()'d yet. With that fixed this patch looks ok to me, so:
> >
> > Reviewed-by: Imre Deak <imre.deak at intel.com>
> >
>
> i < ppgtt->num_pd_entries should be i < num_pd_pages. Otherwise, I think
> you're right.
Yep, I missed the important part completely :)
There is also one more note below that I forgot.
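
For reference, an untested sketch of what I mean, assuming the loop
bound is also fixed as you suggest and the error path just relies on
gen8_ppgtt_free():

static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
						     sizeof(dma_addr_t),
						     GFP_KERNEL);
		/* Leave the entries allocated so far in place: the caller
		 * falls back to gen8_ppgtt_free(), and kfree() of the
		 * still-NULL entries is a no-op. */
		if (!ppgtt->gen8_pt_dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}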
> > > +
> > > + return -ENOMEM;
> > > + }
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
> > > + const int max_pdp)
> > > +{
> > > + ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
> > > + if (!ppgtt->pd_pages)
> > > + return -ENOMEM;
> > > +
> > > + ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
> > > + BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
> > > + const int max_pdp)
> > > +{
> > > + int ret;
> > > +
> > > + ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
> > > + if (ret) {
> > > + __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
> > > + return ret;
> > > + }
> > > +
> > > + ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
> > > +
> > > + ret = gen8_ppgtt_allocate_dma(ppgtt);
> > > + if (ret)
> > > + gen8_ppgtt_free(ppgtt);
> > > +
> > > + return ret;
> > > +}
> > > +
> > > +static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
> > > + const int pd)
> > > +{
> > > + dma_addr_t pd_addr;
> > > + int ret;
> > > +
> > > + pd_addr = pci_map_page(ppgtt->base.dev->pdev,
> > > + &ppgtt->pd_pages[pd], 0,
> > > + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> > > +
> > > + ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + ppgtt->pd_dma_addr[pd] = pd_addr;
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
> > > + const int pd,
> > > + const int pt)
> > > +{
> > > + dma_addr_t pt_addr;
> > > + struct page *p;
> > > + int ret;
> > > +
> > > + p = &ppgtt->gen8_pt_pages[pd * GEN8_PDES_PER_PAGE + pt];
> > > + pt_addr = pci_map_page(ppgtt->base.dev->pdev,
> > > + p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> > > + ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
> > > +
> > > + return 0;
> > > +}
> > > +
> > > /**
> > > * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
> > > * with a net effect resembling a 2-level page table in normal x86 terms. Each
> > > @@ -378,73 +490,37 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
> > > */
> > > static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
> > > {
> > > - struct page *pt_pages;
> > > const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
> > > - const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> > > + const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
> > > struct pci_dev *hwdev = ppgtt->base.dev->pdev;
> > > int i, j, ret;
> > >
> > > if (size % (1<<30))
> > > DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
> > >
> > > - /* 1. Do all our allocations for page directories and page tables */
> > > - ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
> > > - if (!ppgtt->pd_pages)
> > > - return -ENOMEM;
> > > -
> > > - pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
> > > - if (!pt_pages) {
> > > - __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
> > > - return -ENOMEM;
> > > - }
> > > -
> > > - ppgtt->gen8_pt_pages = pt_pages;
> > > - ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
> > > - ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
> > > - ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
> > > - BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
> > > -
> > > - for (i = 0; i < max_pdp; i++) {
> > > - ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
> > > - sizeof(dma_addr_t),
> > > - GFP_KERNEL);
> > > - if (!ppgtt->gen8_pt_dma_addr[i]) {
> > > - ret = -ENOMEM;
> > > - goto bail;
> > > - }
> > > - }
> > > + /* 1. Do all our allocations for page directories and page tables. */
> > > + ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
> > > + if (ret)
> > > + return ret;
> > >
> > > /*
> > > - * 2. Create all the DMA mappings for the page directories and page
> > > - * tables
> > > + * 2. Create DMA mappings for the page directories and page tables.
> > > */
> > > for (i = 0; i < max_pdp; i++) {
> > > - dma_addr_t pd_addr, pt_addr;
> > > -
> > > - /* And the page directory mappings */
> > > - pd_addr = pci_map_page(hwdev, &ppgtt->pd_pages[i], 0,
> > > - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> > > - ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
> > > + ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
> > > if (ret)
> > > goto bail;
> > >
> > > - ppgtt->pd_dma_addr[i] = pd_addr;
> > > -
> > > - /* Get the page table mappings per page directory */
> > > for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
> > > - struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
> > > -
> > > - pt_addr = pci_map_page(hwdev, p, 0, PAGE_SIZE,
> > > - PCI_DMA_BIDIRECTIONAL);
> > > - ret = pci_dma_mapping_error(hwdev, pt_addr);
> > > + ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
> > > if (ret) {
> > > ppgtt->pd_dma_addr[i] = 0;
> > > - pci_unmap_page(hwdev, pd_addr, PAGE_SIZE,
> > > + pci_unmap_page(hwdev,
> > > + ppgtt->pd_dma_addr[i],
> > > + PAGE_SIZE,
> > > PCI_DMA_BIDIRECTIONAL);
> > > goto bail;
This should be just "if (ret) goto bail;", but it would anyway resolve
itself automatically after the fix in the same spot in 2/3 and this
patch being rebased on top of that, so I note it here only for reference.
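
That is, after the rebase the inner loop could look like this (untested
sketch, assuming the bail: path from 2/3 already unmaps any partially
set up page directory):

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}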
> > > }
> > > -
> > > - ppgtt->gen8_pt_dma_addr[i][j] = pt_addr;
> > > }
> > > }
> > >
> > > @@ -483,7 +559,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
> > > ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
> > > DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
> > > ppgtt->num_pt_pages,
> > > - (ppgtt->num_pt_pages - num_pt_pages) +
> > > + (ppgtt->num_pt_pages - min_pt_pages) +
> > > size % (1<<30));
> > > return 0;
> > >
> >
> >
>