[Intel-gfx] [RFC PATCH 18/42] drm/i915/lmem: support CPU relocations
Matthew Auld
matthew.auld at intel.com
Thu Feb 14 14:57:16 UTC 2019
We need to support doing relocations from the CPU when dealing with LMEM
objects, since the aperture based io mapping used by reloc_iomap() is not
an option for local memory. Instead, move the object to the WC domain, pin
its backing pages, and write the relocation through a per-page WC mapping
of the object's local memory.
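
The basic access pattern looks roughly like this (illustrative sketch
only, not part of the patch; 'offset' is a hypothetical byte offset into
the object, and the real logic lives in reloc_lmem() below):

    void *vaddr;

    /* map a single page of the object's backing store, write-combined */
    vaddr = i915_gem_object_lmem_io_map_page(obj, offset >> PAGE_SHIFT);

    /* poke in the relocation value directly with the CPU */
    *(u64 *)(vaddr + offset_in_page(offset)) = target_offset;

    io_mapping_unmap_atomic((void __force __iomem *)vaddr);
    wmb(); /* flush WC buffers before the GPU consumes the batch */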
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue at linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 67 ++++++++++++++++++----
1 file changed, 56 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea..390691128c1d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -265,6 +265,7 @@ struct i915_execbuffer {
 		bool has_llc : 1;
 		bool has_fence : 1;
 		bool needs_unfenced : 1;
+		bool is_lmem : 1;
 
 		struct i915_request *rq;
 		u32 *rq_cmd;
@@ -973,6 +974,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
 	cache->has_fence = cache->gen < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
+	cache->is_lmem = false;
 	cache->node.allocated = false;
 	cache->rq = NULL;
 	cache->rq_size = 0;
@@ -1027,16 +1029,23 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
 		wmb();
-		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
-			ggtt->vm.clear_range(&ggtt->vm,
-					     cache->node.start,
-					     cache->node.size);
-			drm_mm_remove_node(&cache->node);
+
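+		/* lmem: drop the per-page mapping and unpin, no GGTT node to clean up */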
+		if (cache->is_lmem) {
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
+			i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
+			cache->is_lmem = false;
 		} else {
-			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
+			if (cache->node.allocated) {
+				struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+				ggtt->vm.clear_range(&ggtt->vm,
+						     cache->node.start,
+						     cache->node.size);
+				drm_mm_remove_node(&cache->node);
+			} else {
+				i915_vma_unpin((struct i915_vma *)cache->node.mm);
+			}
 		}
 	}
 
@@ -1076,6 +1085,38 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	return vaddr;
 }
 
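+/*
+ * reloc_lmem - CPU relocation path for objects backed by local memory.
+ *
+ * Move the object to the WC domain and pin its pages for the lifetime of
+ * the reloc_cache, then hand back a WC mapping of just the page we need.
+ * The mapping is cached, so neighbouring relocations in the same page
+ * don't pay for a remap.
+ */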
+static void *reloc_lmem(struct drm_i915_gem_object *obj,
+			struct reloc_cache *cache,
+			unsigned long page)
+{
+	void *vaddr;
+	int err;
+
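+	/* objects eligible for the cached CPU (kmap) reloc path never get here */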
+	GEM_BUG_ON(use_cpu_reloc(cache, obj));
+
+	if (cache->vaddr) {
+		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+	} else {
+		err = i915_gem_object_set_to_wc_domain(obj, true);
+		if (err)
+			return ERR_PTR(err);
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			return ERR_PTR(err);
+
+		cache->node.mm = (void *)obj;
+		cache->is_lmem = true;
+	}
+
+	vaddr = i915_gem_object_lmem_io_map_page(obj, page);
+
+	cache->vaddr = (unsigned long)vaddr;
+	cache->page = page;
+
+	return vaddr;
+}
+
 static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 unsigned long page)
@@ -1150,8 +1191,12 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
 		vaddr = unmask_page(cache->vaddr);
 	} else {
 		vaddr = NULL;
-		if ((cache->vaddr & KMAP) == 0)
-			vaddr = reloc_iomap(obj, cache, page);
+		if ((cache->vaddr & KMAP) == 0) {
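+			/* lmem is mapped directly, the rest via the aperture */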
+			if (i915_gem_object_is_lmem(obj))
+				vaddr = reloc_lmem(obj, cache, page);
+			else
+				vaddr = reloc_iomap(obj, cache, page);
+		}
 		if (!vaddr)
 			vaddr = reloc_kmap(obj, cache, page);
 	}
--
2.20.1