[Intel-gfx] [PATCH 12/20] drm/i915/gtt: Introduce kmap|kunmap for dma page

Ville Syrjälä ville.syrjala at linux.intel.com
Thu May 21 08:19:19 PDT 2015


On Thu, May 21, 2015 at 05:37:40PM +0300, Mika Kuoppala wrote:
> As there is flushing involved after we have done a cpu
> write, add functions for mapping dma pages into cpu space,
> and macros to map any type of paging structure.
> 
> Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_gtt.c | 67 +++++++++++++++++++------------------
>  1 file changed, 35 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index a3ee710..3d94ad8 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -330,16 +330,32 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
>  	memset(p, 0, sizeof(*p));
>  }
>  
> +static void *kmap_page_dma(struct i915_page_dma *p)
> +{
> +	return kmap_atomic(p->page);
> +}
> +
> +static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
> +{
> +	if (!HAS_LLC(dev))
> +		drm_clflush_virt_range(vaddr, PAGE_SIZE);
> +
> +	kunmap_atomic(vaddr);
> +}

Ah there it is. But now it's being performed on VLV as well, which
doesn't need it.

Also, having something called kunmap_page_dma() do an explicit
clflush for no apparent reason is rather confusing. I think the name
should have _ppgtt_ in it to make it clear that this only applies to
ppgtt; otherwise someone is likely to use it for something else that
doesn't need the clflush. Or, at the very least, it needs a comment
to explain this stuff.
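
Something along these lines would already be less confusing (a
completely untested sketch; the _ppgtt_ name and the exact comment
wording are just suggestions):

static void kunmap_ppgtt_page_dma(struct drm_device *dev, void *vaddr)
{
	/*
	 * The GPU walks the ppgtt paging structures directly from
	 * memory, and on !LLC platforms those reads aren't guaranteed
	 * to snoop the CPU cache, so flush out the CPU writes before
	 * the GPU can see stale data.
	 */
	if (!HAS_LLC(dev))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

Note that this still flushes on VLV, so actually avoiding that would
need a more precise condition than !HAS_LLC().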

> +
> +#define kmap_px(px) kmap_page_dma(&(px)->base)
> +#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
> +
>  static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
>  			  const uint64_t val)
>  {
>  	int i;
> -	uint64_t * const vaddr = kmap_atomic(p->page);
> +	uint64_t * const vaddr = kmap_page_dma(p);
>  
>  	for (i = 0; i < 512; i++)
>  		vaddr[i] = val;
>  
> -	kunmap_atomic(vaddr);
> +	kunmap_page_dma(dev, vaddr);
>  }
>  
>  static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
> @@ -497,7 +513,6 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
>  	while (num_entries) {
>  		struct i915_page_directory *pd;
>  		struct i915_page_table *pt;
> -		struct page *page_table;
>  
>  		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
>  			continue;
> @@ -512,22 +527,18 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
>  		if (WARN_ON(!pt->base.page))
>  			continue;
>  
> -		page_table = pt->base.page;
> -
>  		last_pte = pte + num_entries;
>  		if (last_pte > GEN8_PTES)
>  			last_pte = GEN8_PTES;
>  
> -		pt_vaddr = kmap_atomic(page_table);
> +		pt_vaddr = kmap_px(pt);
>  
>  		for (i = pte; i < last_pte; i++) {
>  			pt_vaddr[i] = scratch_pte;
>  			num_entries--;
>  		}
>  
> -		if (!HAS_LLC(ppgtt->base.dev))
> -			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> -		kunmap_atomic(pt_vaddr);
> +		kunmap_px(ppgtt, pt_vaddr);
>  
>  		pte = 0;
>  		if (++pde == I915_PDES) {
> @@ -559,18 +570,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
>  		if (pt_vaddr == NULL) {
>  			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
>  			struct i915_page_table *pt = pd->page_table[pde];
> -			struct page *page_table = pt->base.page;
> -
> -			pt_vaddr = kmap_atomic(page_table);
> +			pt_vaddr = kmap_px(pt);
>  		}
>  
>  		pt_vaddr[pte] =
>  			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
>  					cache_level, true);
>  		if (++pte == GEN8_PTES) {
> -			if (!HAS_LLC(ppgtt->base.dev))
> -				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> -			kunmap_atomic(pt_vaddr);
> +			kunmap_px(ppgtt, pt_vaddr);
>  			pt_vaddr = NULL;
>  			if (++pde == I915_PDES) {
>  				pdpe++;
> @@ -579,11 +586,9 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
>  			pte = 0;
>  		}
>  	}
> -	if (pt_vaddr) {
> -		if (!HAS_LLC(ppgtt->base.dev))
> -			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> -		kunmap_atomic(pt_vaddr);
> -	}
> +
> +	if (pt_vaddr)
> +		kunmap_px(ppgtt, pt_vaddr);
>  }
>  
>  static void __gen8_do_map_pt(gen8_pde_t * const pde,
> @@ -862,7 +867,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
>  	/* Allocations have completed successfully, so set the bitmaps, and do
>  	 * the mappings. */
>  	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
> -		gen8_pde_t *const page_directory = kmap_atomic(pd->base.page);
> +		gen8_pde_t *const page_directory = kmap_px(pd);
>  		struct i915_page_table *pt;
>  		uint64_t pd_len = gen8_clamp_pd(start, length);
>  		uint64_t pd_start = start;
> @@ -892,10 +897,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
>  			 * point we're still relying on insert_entries() */
>  		}
>  
> -		if (!HAS_LLC(vm->dev))
> -			drm_clflush_virt_range(page_directory, PAGE_SIZE);
> -
> -		kunmap_atomic(page_directory);
> +		kunmap_px(ppgtt, page_directory);
>  
>  		set_bit(pdpe, ppgtt->pdp.used_pdpes);
>  	}
> @@ -977,7 +979,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  				   expected);
>  		seq_printf(m, "\tPDE: %x\n", pd_entry);
>  
> -		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->base.page);
> +		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
> +
>  		for (pte = 0; pte < GEN6_PTES; pte+=4) {
>  			unsigned long va =
>  				(pde * PAGE_SIZE * GEN6_PTES) +
> @@ -999,7 +1002,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
>  			}
>  			seq_puts(m, "\n");
>  		}
> -		kunmap_atomic(pt_vaddr);
> +		kunmap_px(ppgtt, pt_vaddr);
>  	}
>  }
>  
> @@ -1202,12 +1205,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
>  		if (last_pte > GEN6_PTES)
>  			last_pte = GEN6_PTES;
>  
> -		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
> +		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
>  
>  		for (i = first_pte; i < last_pte; i++)
>  			pt_vaddr[i] = scratch_pte;
>  
> -		kunmap_atomic(pt_vaddr);
> +		kunmap_px(ppgtt, pt_vaddr);
>  
>  		num_entries -= last_pte - first_pte;
>  		first_pte = 0;
> @@ -1231,21 +1234,21 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
>  	pt_vaddr = NULL;
>  	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
>  		if (pt_vaddr == NULL)
> -			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
> +			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
>  
>  		pt_vaddr[act_pte] =
>  			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
>  				       cache_level, true, flags);
>  
>  		if (++act_pte == GEN6_PTES) {
> -			kunmap_atomic(pt_vaddr);
> +			kunmap_px(ppgtt, pt_vaddr);
>  			pt_vaddr = NULL;
>  			act_pt++;
>  			act_pte = 0;
>  		}
>  	}
>  	if (pt_vaddr)
> -		kunmap_atomic(pt_vaddr);
> +		kunmap_px(ppgtt, pt_vaddr);
>  }
>  
>  static void gen6_initialize_pt(struct i915_address_space *vm,
> -- 
> 1.9.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Ville Syrjälä
Intel OTC

