[Intel-gfx] [PATCH v2 15/37] drm/i915/lmem: support CPU relocations
Chris Wilson
chris at chris-wilson.co.uk
Thu Jun 27 23:46:27 UTC 2019
Quoting Matthew Auld (2019-06-27 21:56:11)
> @@ -1020,16 +1022,23 @@ static void reloc_cache_reset(struct reloc_cache *cache)
> i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
> } else {
> wmb();
> - io_mapping_unmap_atomic((void __iomem *)vaddr);
> - if (cache->node.allocated) {
> - struct i915_ggtt *ggtt = cache_to_ggtt(cache);
> -
> - ggtt->vm.clear_range(&ggtt->vm,
> - cache->node.start,
> - cache->node.size);
> - drm_mm_remove_node(&cache->node);
> +
> + if (cache->is_lmem) {
> + io_mapping_unmap_atomic((void __iomem *)vaddr);
> + i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
> + cache->is_lmem = false;
> } else {
> - i915_vma_unpin((struct i915_vma *)cache->node.mm);
> + io_mapping_unmap_atomic((void __iomem *)vaddr);
The first step of each branch is the same io_mapping_unmap_atomic() call, so it can be hoisted above the if. What am I missing?
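i.e. something like (an untested sketch, just hoisting the common unmap
and flattening the nested else):

	wmb();
	io_mapping_unmap_atomic((void __iomem *)vaddr);

	if (cache->is_lmem) {
		/* lmem pages were pinned directly; drop that pin */
		i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
		cache->is_lmem = false;
	} else if (cache->node.allocated) {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		/* tear down the temporary GGTT PTEs and release the node */
		ggtt->vm.clear_range(&ggtt->vm,
				     cache->node.start,
				     cache->node.size);
		drm_mm_remove_node(&cache->node);
	} else {
		i915_vma_unpin((struct i915_vma *)cache->node.mm);
	}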
> + if (cache->node.allocated) {
> + struct i915_ggtt *ggtt = cache_to_ggtt(cache);
> +
> + ggtt->vm.clear_range(&ggtt->vm,
> + cache->node.start,
> + cache->node.size);
> + drm_mm_remove_node(&cache->node);
> + } else {
> + i915_vma_unpin((struct i915_vma *)cache->node.mm);
> + }
> }
> }
>
> @@ -1069,6 +1078,40 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
> return vaddr;
> }
>
> +static void *reloc_lmem(struct drm_i915_gem_object *obj,
> + struct reloc_cache *cache,
> + unsigned long page)
> +{
> + void *vaddr;
> + int err;
> +
> + GEM_BUG_ON(use_cpu_reloc(cache, obj));
> +
> + if (cache->vaddr) {
> + io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
> + } else {
> + i915_gem_object_lock(obj);
> + err = i915_gem_object_set_to_wc_domain(obj, true);
> + i915_gem_object_unlock(obj);
> + if (err)
> + return ERR_PTR(err);
> +
> + err = i915_gem_object_pin_pages(obj);
> + if (err)
> + return ERR_PTR(err);
> +
> + cache->node.mm = (void *)obj;
> + cache->is_lmem = true;
> + }
> +
> + vaddr = i915_gem_object_lmem_io_map_page(obj, page);
Secret atomic: nothing in the name i915_gem_object_lmem_io_map_page() says it returns an atomic mapping. Notice the asymmetric release, an explicit io_mapping_unmap_atomic() in reloc_cache_reset() for a map taken inside a helper.
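A hypothetical spelling that would make the pairing visible at the
callsite (the _atomic name here is my invention, not an existing
helper; it presumes the helper wraps an atomic wc mapping internally):

	/* '_atomic' in the name pairs it with io_mapping_unmap_atomic() */
	vaddr = i915_gem_object_lmem_io_map_atomic_page(obj, page);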
> + cache->vaddr = (unsigned long)vaddr;
> + cache->page = page;
> +
> + return vaddr;
> +}
> +
> static void *reloc_iomap(struct drm_i915_gem_object *obj,
> struct reloc_cache *cache,
> unsigned long page)
> @@ -1145,8 +1188,12 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
> vaddr = unmask_page(cache->vaddr);
> } else {
> vaddr = NULL;
> - if ((cache->vaddr & KMAP) == 0)
> - vaddr = reloc_iomap(obj, cache, page);
> + if ((cache->vaddr & KMAP) == 0) {
> + if (i915_gem_object_is_lmem(obj))
> + vaddr = reloc_lmem(obj, cache, page);
> + else
> + vaddr = reloc_iomap(obj, cache, page);
> + }
> if (!vaddr)
> vaddr = reloc_kmap(obj, cache, page);
> }
> --
> 2.20.1
>