[Intel-gfx] [PATCH 19/68] drm/i915: Wrap VMA binding
Ben Widawsky
benjamin.widawsky at intel.com
Fri Aug 22 05:11:42 CEST 2014
This will be useful for some upcoming patches that do more
platform-specific work. Having the binding in one central place makes
things a bit cleaner and easier.
NOTE: I didn't actually end up using this patch for the intended
purpose, but I thought it was a nice patch to keep around.
v2: s/i915_gem_bind_vma/i915_gem_vma_bind/
s/i915_gem_unbind_vma/i915_gem_vma_unbind/
(Chris)
v3: Missed one spot
v4: Don't change the trace events (Daniel)
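For readers skimming the diff: the change is purely mechanical. Callers
stop invoking the per-VMA function pointers directly and instead go
through a new pair of wrappers, giving one central place to hook
platform-specific work later. A rough sketch of the call-site change
(fragment only, names taken from the hunks below, not standalone code):

    /* before: call the address-space specific hook directly */
    vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);

    /* after: route through the central wrapper, which for now just
     * forwards to the same hook */
    i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);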
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 3 +++
drivers/gpu/drm/i915/i915_gem.c | 12 ++++++------
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5 +++--
drivers/gpu/drm/i915/i915_gem_gtt.c | 13 ++++++++++++-
5 files changed, 25 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 04c9e2c..d1750d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2461,6 +2461,9 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
+void i915_gem_vma_bind(struct i915_vma *vma, enum i915_cache_level,
+ unsigned flags);
+void i915_gem_vma_unbind(struct i915_vma *vma);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ad2205..5f66939 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3031,7 +3031,7 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
- vma->unbind_vma(vma);
+ i915_gem_vma_unbind(vma);
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
@@ -3585,8 +3585,8 @@ search_free:
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
trace_i915_vma_bind(vma, flags);
- vma->bind_vma(vma, obj->cache_level,
- flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+ i915_gem_vma_bind(vma, obj->cache_level,
+ flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
i915_gem_verify_gtt(dev);
return vma;
@@ -3797,8 +3797,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
- vma->bind_vma(vma, cache_level,
- obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+ i915_gem_vma_bind(vma, cache_level,
+ obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -4199,7 +4199,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
vma->pin_count++;
if (flags & PIN_MAPPABLE)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5c7d5d..51b517e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -781,7 +781,7 @@ static int do_switch_rcs(struct intel_engine_cs *ring,
if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1420aeb..884ec39 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -376,7 +376,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct i915_vma *vma =
list_first_entry(&target_i915_obj->vma_list,
typeof(*vma), vma_link);
- vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
}
/* Validate that the target is in a valid r/w GPU domain */
@@ -1392,7 +1393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* allocate space first */
struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
BUG_ON(!vma);
- vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, batch_obj->cache_level, GLOBAL_BIND);
}
if (flags & I915_DISPATCH_SECURE)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 254895d..2a75bce 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1343,7 +1343,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
* without telling our object about it. So we need to fake it.
*/
obj->has_global_gtt_mapping = 0;
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
}
@@ -2097,6 +2097,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
+void i915_gem_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ unsigned flags)
+{
+ vma->bind_vma(vma, cache_level, flags);
+}
+
+void i915_gem_vma_unbind(struct i915_vma *vma)
+{
+ vma->unbind_vma(vma);
+}
+
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
--
2.0.4