[Intel-gfx] [PATCH 16/18] drm/i915/gtt: One instance of scratch page table/directory
Daniel Vetter
daniel at ffwll.ch
Fri Jun 26 02:10:23 PDT 2015
On Thu, Jun 25, 2015 at 06:35:18PM +0300, Mika Kuoppala wrote:
> +static int setup_scratch(struct i915_address_space *vm)
> +{
> + struct i915_address_space *ggtt_vm = &to_i915(vm->dev)->gtt.base;
> +
> + if (i915_is_ggtt(vm))
> + return setup_scratch_ggtt(vm);
> +
> + vm->scratch_page = ggtt_vm->scratch_page;
> + vm->scratch_pt = ggtt_vm->scratch_pt;
> + vm->scratch_pd = ggtt_vm->scratch_pd;
> +
> + return 0;
> +}
The point of a ppgtt is full isolation; sharing the scratch page destroys
that. Hence nack. If you want a bit of polish, renaming initialize_pd/pt
to initialize_scratch_pd/pt would make sense, though, I think.
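A rough sketch of that direction, nothing more: it assumes the
alloc_scratch_page/alloc_pt/alloc_pd/free_* helpers from this patch keep
their current signatures, and gen8_initialize_scratch_pt/pd here are just
the existing gen8 fill helpers renamed. Each ppgtt allocates its own
scratch page, pt and pd instead of aliasing the ggtt copies, so the
isolation stays intact:

/* Sketch only: the existing gen8_initialize_pd fill helper, renamed as
 * suggested; the body is unchanged from the code this patch removes. */
static void gen8_initialize_scratch_pd(struct i915_address_space *vm,
				       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->dev, pd, scratch_pde);
}

/* Sketch only: per-ppgtt scratch setup, nothing shared with the ggtt. */
static int gen8_setup_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = alloc_scratch_page(vm);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm->dev);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto err_page;
	}

	vm->scratch_pd = alloc_pd(vm->dev);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto err_pt;
	}

	gen8_initialize_scratch_pt(vm, vm->scratch_pt);
	gen8_initialize_scratch_pd(vm, vm->scratch_pd);

	return 0;

err_pt:
	free_pt(vm->dev, vm->scratch_pt);
err_page:
	free_scratch_page(vm);
	return ret;
}

The gen6 path would want the matching rename for gen6_initialize_pt.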
-Daniel
> +
> +static void check_scratch_page(struct i915_address_space *vm)
> +{
> + struct i915_hw_ppgtt *ppgtt =
> + container_of(vm, struct i915_hw_ppgtt, base);
> + int i;
> + u64 *vaddr;
> +
> + vaddr = kmap_px(vm->scratch_page);
> +
> + for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
> + if (vaddr[i] == SCRATCH_PAGE_MAGIC)
> + continue;
> +
> + DRM_ERROR("%p scratch[%d] = 0x%08llx\n", vm, i, vaddr[i]);
> + break;
> + }
> +
> + kunmap_px(ppgtt, vaddr);
> +}
> +
> +static void cleanup_scratch_ggtt(struct i915_address_space *vm)
> +{
> + check_scratch_page(vm);
> + free_scratch_page(vm);
> +
> + if (INTEL_INFO(vm->dev)->gen < 6)
> + return;
> +
> + free_pt(vm->dev, vm->scratch_pt);
> +
> + if (INTEL_INFO(vm->dev)->gen >= 8)
> + free_pd(vm->dev, vm->scratch_pd);
> +}
> +
> +static void cleanup_scratch(struct i915_address_space *vm)
> +{
> + if (i915_is_ggtt(vm))
> + cleanup_scratch_ggtt(vm);
> +
> + vm->scratch_page = NULL;
> + vm->scratch_pt = NULL;
> + vm->scratch_pd = NULL;
> +}
> +
> /* Broadwell Page Directory Pointer Descriptors */
> static int gen8_write_pdp(struct drm_i915_gem_request *req,
> unsigned entry,
> @@ -525,7 +686,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
> unsigned num_entries = length >> PAGE_SHIFT;
> unsigned last_pte, i;
>
> - scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
> + scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
> I915_CACHE_LLC, use_scratch);
>
> while (num_entries) {
> @@ -609,16 +770,6 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
> kunmap_px(ppgtt, pt_vaddr);
> }
>
> -static void gen8_initialize_pd(struct i915_address_space *vm,
> - struct i915_page_directory *pd)
> -{
> - gen8_pde_t scratch_pde;
> -
> - scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
> -
> - fill_px(vm->dev, pd, scratch_pde);
> -}
> -
> static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
> {
> int i;
> @@ -649,8 +800,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
> free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
> }
>
> - free_pd(vm->dev, vm->scratch_pd);
> - free_pt(vm->dev, vm->scratch_pt);
> + cleanup_scratch(vm);
> }
>
> /**
> @@ -937,16 +1087,7 @@ err_out:
> */
> static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
> {
> - ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
> - if (IS_ERR(ppgtt->base.scratch_pt))
> - return PTR_ERR(ppgtt->base.scratch_pt);
> -
> - ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
> - if (IS_ERR(ppgtt->base.scratch_pd))
> - return PTR_ERR(ppgtt->base.scratch_pd);
> -
> - gen8_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
> - gen8_initialize_pd(&ppgtt->base, ppgtt->base.scratch_pd);
> + int ret;
>
> ppgtt->base.start = 0;
> ppgtt->base.total = 1ULL << 32;
> @@ -966,6 +1107,10 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>
> ppgtt->switch_mm = gen8_mm_switch;
>
> + ret = setup_scratch(&ppgtt->base);
> + if (ret)
> + return ret;
> +
> return 0;
> }
>
> @@ -1272,19 +1417,6 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
> kunmap_px(ppgtt, pt_vaddr);
> }
>
> -static void gen6_initialize_pt(struct i915_address_space *vm,
> - struct i915_page_table *pt)
> -{
> - gen6_pte_t scratch_pte;
> -
> - WARN_ON(px_dma(vm->scratch_page) == 0);
> -
> - scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
> - I915_CACHE_LLC, true, 0);
> -
> - fill32_px(vm->dev, pt, scratch_pte);
> -}
> -
> static int gen6_alloc_va_range(struct i915_address_space *vm,
> uint64_t start_in, uint64_t length_in)
> {
> @@ -1389,7 +1521,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
> free_pt(ppgtt->base.dev, pt);
> }
>
> - free_pt(vm->dev, vm->scratch_pt);
> + cleanup_scratch(vm);
> }
>
> static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
> @@ -1404,11 +1536,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
> * size. We allocate at the top of the GTT to avoid fragmentation.
> */
> BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
> - ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
> - if (IS_ERR(ppgtt->base.scratch_pt))
> - return PTR_ERR(ppgtt->base.scratch_pt);
>
> - gen6_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
> + ret = setup_scratch(&ppgtt->base);
> + if (ret)
> + return ret;
>
> alloc:
> ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
> @@ -1439,7 +1570,7 @@ alloc:
> return 0;
>
> err_out:
> - free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
> + cleanup_scratch(&ppgtt->base);
> return ret;
> }
>
> @@ -1513,10 +1644,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>
> static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> ppgtt->base.dev = dev;
> - ppgtt->base.scratch_page = dev_priv->gtt.base.scratch_page;
>
> if (INTEL_INFO(dev)->gen < 8)
> return gen6_ppgtt_init(ppgtt);
> @@ -2124,45 +2252,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
> vm->cleanup(vm);
> }
>
> -#define SCRATCH_PAGE_MAGIC 0xffff00ffffff00ffULL
> -
> -static int alloc_scratch_page(struct i915_address_space *vm)
> -{
> - struct i915_page_scratch *sp;
> - int ret;
> -
> - WARN_ON(vm->scratch_page);
> -
> - sp = kzalloc(sizeof(*sp), GFP_KERNEL);
> - if (sp == NULL)
> - return -ENOMEM;
> -
> - ret = __setup_page_dma(vm->dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
> - if (ret) {
> - kfree(sp);
> - return ret;
> - }
> -
> - fill_px(vm->dev, sp, SCRATCH_PAGE_MAGIC);
> - set_pages_uc(px_page(sp), 1);
> -
> - vm->scratch_page = sp;
> -
> - return 0;
> -}
> -
> -static void free_scratch_page(struct i915_address_space *vm)
> -{
> - struct i915_page_scratch *sp = vm->scratch_page;
> -
> - set_pages_wb(px_page(sp), 1);
> -
> - cleanup_px(vm->dev, sp);
> - kfree(sp);
> -
> - vm->scratch_page = NULL;
> -}
> -
> static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
> {
> snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
> @@ -2246,7 +2335,6 @@ static int ggtt_probe_common(struct drm_device *dev,
> {
> struct drm_i915_private *dev_priv = dev->dev_private;
> phys_addr_t gtt_phys_addr;
> - int ret;
>
> /* For Modern GENs the PTEs and register space are split in the BAR */
> gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
> @@ -2268,14 +2356,7 @@ static int ggtt_probe_common(struct drm_device *dev,
> return -ENOMEM;
> }
>
> - ret = alloc_scratch_page(&dev_priv->gtt.base);
> - if (ret) {
> - DRM_ERROR("Scratch setup failed\n");
> - /* iounmap will also get called at remove, but meh */
> - iounmap(dev_priv->gtt.gsm);
> - }
> -
> - return ret;
> + return setup_scratch(&dev_priv->gtt.base);
> }
>
> /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
> @@ -2447,7 +2528,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
> struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
>
> iounmap(gtt->gsm);
> - free_scratch_page(vm);
> + cleanup_scratch(vm);
> }
>
> static int i915_gmch_probe(struct drm_device *dev,
> --
> 1.9.1
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch