[Intel-gfx] [PATCH 12/21] drm/i915/gtt: Introduce kmap|kunmap for dma page
Michel Thierry
michel.thierry at intel.com
Wed Jun 24 08:06:53 PDT 2015
On 6/11/2015 6:50 PM, Mika Kuoppala wrote:
> As there is flushing involved when we have done the cpu
> write, make functions for mapping for cpu space. Make macros
> to map any type of paging structure.
>
> v2: Make it clear that flushing kunmap is only for ppgtt (Ville)
> v3: Flushing fixed (Ville, Michel). Removed superfluous semicolon
>
> Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry at intel.com>
> Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
> ---
> drivers/gpu/drm/i915/i915_gem_gtt.c | 77 +++++++++++++++++++------------------
> 1 file changed, 40 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 60796b7..3ac8671 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -330,15 +330,16 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
> memset(p, 0, sizeof(*p));
> }
>
> -static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
> - const uint64_t val)
> +static void *kmap_page_dma(struct i915_page_dma *p)
> {
> - int i;
> - uint64_t * const vaddr = kmap_atomic(p->page);
> -
> - for (i = 0; i < 512; i++)
> - vaddr[i] = val;
> + return kmap_atomic(p->page);
> +}
>
> +/* We use the flushing unmap only with ppgtt structures:
> + * page directories, page tables and scratch pages.
> + */
> +static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
> +{
> /* There are only a few exceptions for gen >= 6: chv and bxt.
> * And we are not sure about the latter, so play safe for now.
> */
> @@ -348,6 +349,21 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
> kunmap_atomic(vaddr);
> }
>
> +#define kmap_px(px) kmap_page_dma(&(px)->base)
> +#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
> +
> +static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
> + const uint64_t val)
> +{
> + int i;
> + uint64_t * const vaddr = kmap_page_dma(p);
> +
> + for (i = 0; i < 512; i++)
> + vaddr[i] = val;
> +
> + kunmap_page_dma(dev, vaddr);
> +}
> +
> static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
> const uint32_t val32)
> {
> @@ -503,7 +519,6 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
> while (num_entries) {
> struct i915_page_directory *pd;
> struct i915_page_table *pt;
> - struct page *page_table;
>
> if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
> continue;
> @@ -518,22 +533,18 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
> if (WARN_ON(!pt->base.page))
> continue;
>
> - page_table = pt->base.page;
> -
> last_pte = pte + num_entries;
> if (last_pte > GEN8_PTES)
> last_pte = GEN8_PTES;
>
> - pt_vaddr = kmap_atomic(page_table);
> + pt_vaddr = kmap_px(pt);
>
> for (i = pte; i < last_pte; i++) {
> pt_vaddr[i] = scratch_pte;
> num_entries--;
> }
>
> - if (!HAS_LLC(ppgtt->base.dev))
> - drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> - kunmap_atomic(pt_vaddr);
> +		kunmap_px(ppgtt, pt_vaddr);
>
> pte = 0;
> if (++pde == I915_PDES) {
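
One thing worth spelling out here, because kunmap_page_dma() takes a void *
and so the compiler won't catch a mixup: kunmap_px() expects the vaddr
returned by the matching kmap_px(), not the paging structure itself.
Expanded by hand, the pair in this hunk is:

	pt_vaddr = kmap_px(pt);
	/* == kmap_page_dma(&pt->base) */

	kunmap_px(ppgtt, pt_vaddr);
	/* == kunmap_page_dma(ppgtt->base.dev, pt_vaddr) */

so the clflush in the unmap hits the page that was actually written.
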
> @@ -565,18 +576,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
> if (pt_vaddr == NULL) {
> struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
> struct i915_page_table *pt = pd->page_table[pde];
> - struct page *page_table = pt->base.page;
> -
> - pt_vaddr = kmap_atomic(page_table);
> + pt_vaddr = kmap_px(pt);
> }
>
> pt_vaddr[pte] =
> gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
> cache_level, true);
> if (++pte == GEN8_PTES) {
> - if (!HAS_LLC(ppgtt->base.dev))
> - drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> - kunmap_atomic(pt_vaddr);
> + kunmap_px(ppgtt, pt_vaddr);
> pt_vaddr = NULL;
> if (++pde == I915_PDES) {
> pdpe++;
> @@ -585,11 +592,9 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
> pte = 0;
> }
> }
> - if (pt_vaddr) {
> - if (!HAS_LLC(ppgtt->base.dev))
> - drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> - kunmap_atomic(pt_vaddr);
> - }
> +
> + if (pt_vaddr)
> + kunmap_px(ppgtt, pt_vaddr);
> }
>
> static void __gen8_do_map_pt(gen8_pde_t * const pde,
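
The insert path keeps the existing discipline: map a table lazily, unmap it
as soon as it fills (which is now also where the flush happens), and catch
the partially written table after the loop. A self-contained toy of just
that control flow, with made-up names and a printf standing in for the
flushing unmap:

#include <stdint.h>
#include <stdio.h>

#define PTES 4				/* stand-in for GEN8_PTES */

static uint64_t tables[2][PTES];	/* two fake page tables */

static uint64_t *map_table(int pde)
{
	return tables[pde];
}

static void unmap_table(uint64_t *vaddr)
{
	printf("flush+unmap %p\n", (void *)vaddr);
}

int main(void)
{
	uint64_t *vaddr = NULL;
	int pde = 0, pte = 0;
	int i;

	for (i = 0; i < 6; i++) {	/* six fake PTE writes */
		if (vaddr == NULL)	/* map a table only when needed */
			vaddr = map_table(pde);

		vaddr[pte] = 0x1000 + i;

		if (++pte == PTES) {	/* table full: flush and move on */
			unmap_table(vaddr);
			vaddr = NULL;
			pde++;
			pte = 0;
		}
	}

	if (vaddr)			/* flush the partially filled table */
		unmap_table(vaddr);

	return 0;
}
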
> @@ -869,7 +874,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
> /* Allocations have completed successfully, so set the bitmaps, and do
> * the mappings. */
> gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
> - gen8_pde_t *const page_directory = kmap_atomic(pd->base.page);
> + gen8_pde_t *const page_directory = kmap_px(pd);
> struct i915_page_table *pt;
> uint64_t pd_len = gen8_clamp_pd(start, length);
> uint64_t pd_start = start;
> @@ -899,10 +904,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
> * point we're still relying on insert_entries() */
> }
>
> - if (!HAS_LLC(vm->dev))
> - drm_clflush_virt_range(page_directory, PAGE_SIZE);
> -
> - kunmap_atomic(page_directory);
> + kunmap_px(ppgtt, page_directory);
>
> set_bit(pdpe, ppgtt->pdp.used_pdpes);
> }
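
And this hunk is the "any type of paging structure" half of the commit
message: kmap_px() works on the directory too because the macro only
dereferences the embedded base member. A standalone illustration of that
trick (every name below is invented for the example):

#include <stdio.h>

struct page_dma { int page; };
struct page_table { struct page_dma base; };
struct page_directory { struct page_dma base; };

/* The macro only cares that px has a base member of the right type. */
#define kmap_px(px) kmap_page_dma(&(px)->base)

static void *kmap_page_dma(struct page_dma *p)
{
	return &p->page;
}

int main(void)
{
	struct page_table pt = { { 1 } };
	struct page_directory pd = { { 2 } };

	/* one macro, two different paging structures */
	printf("%d %d\n", *(int *)kmap_px(&pt), *(int *)kmap_px(&pd));
	return 0;
}
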
> @@ -991,7 +993,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
> expected);
> seq_printf(m, "\tPDE: %x\n", pd_entry);
>
> - pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->base.page);
> + pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
> +
> for (pte = 0; pte < GEN6_PTES; pte+=4) {
> unsigned long va =
> (pde * PAGE_SIZE * GEN6_PTES) +
> @@ -1013,7 +1016,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
> }
> seq_puts(m, "\n");
> }
> - kunmap_atomic(pt_vaddr);
> + kunmap_px(ppgtt, pt_vaddr);
> }
> }
>
> @@ -1216,12 +1219,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
> if (last_pte > GEN6_PTES)
> last_pte = GEN6_PTES;
>
> - pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
> + pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
>
> for (i = first_pte; i < last_pte; i++)
> pt_vaddr[i] = scratch_pte;
>
> - kunmap_atomic(pt_vaddr);
> + kunmap_px(ppgtt, pt_vaddr);
>
> num_entries -= last_pte - first_pte;
> first_pte = 0;
> @@ -1245,21 +1248,21 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
> pt_vaddr = NULL;
> for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
> if (pt_vaddr == NULL)
> - pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
> + pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
>
> pt_vaddr[act_pte] =
> vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
> cache_level, true, flags);
>
> if (++act_pte == GEN6_PTES) {
> - kunmap_atomic(pt_vaddr);
> + kunmap_px(ppgtt, pt_vaddr);
> pt_vaddr = NULL;
> act_pt++;
> act_pte = 0;
> }
> }
> if (pt_vaddr)
> - kunmap_atomic(pt_vaddr);
> + kunmap_px(ppgtt, pt_vaddr);
> }
>
> static void gen6_initialize_pt(struct i915_address_space *vm,
>