[Intel-gfx] [PATCH 5/8] drm/i915: Convert active API to VMA
Ben Widawsky
benjamin.widawsky at intel.com
Sat Aug 31 01:43:58 CEST 2013
From: Ben Widawsky <ben at bwidawsk.net>
Even though we track activity per object and not per VMA, the active_list is
based on the VM, so it makes the most sense to use VMAs in the APIs.
NOTE: Daniel intends to eventually rip out active/inactive LRUs, but for
now, leave them be.
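
For illustration only, a minimal user-space sketch of the pattern this patch
settles on: the per-VM bookkeeping (moving the VMA onto its VM's active list)
is folded into a VMA-based wrapper, and the object-level helper that stamps
the ring/seqno state becomes an internal detail behind it. The toy list
helpers and mock_* types below are simplified stand-ins, not the driver's
real structures.

#include <stdio.h>

/* Minimal stand-in for the kernel's intrusive list (illustration only). */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_move_tail(struct list_head *e, struct list_head *head)
{
	e->prev->next = e->next;		/* unlink from current list */
	e->next->prev = e->prev;
	e->prev = head->prev;			/* insert just before head, i.e. at the tail */
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* Simplified stand-ins for the structures touched by this patch. */
struct mock_ring { unsigned int seqno; };
struct mock_vm   { struct list_head active_list; };
struct mock_obj  { int active; unsigned int last_read_seqno; };
struct mock_vma  {
	struct mock_obj *obj;
	struct mock_vm *vm;
	struct list_head mm_list;	/* node on the VM's active/inactive list */
};

/* Object-level bookkeeping; with this patch it becomes a static helper. */
static void obj_move_to_active(struct mock_obj *obj, struct mock_ring *ring)
{
	obj->active = 1;
	obj->last_read_seqno = ring->seqno;
}

/* The new entry point: per-VM list handling plus the object-level helper. */
static void vma_move_to_active(struct mock_vma *vma, struct mock_ring *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	obj_move_to_active(vma->obj, ring);
}

int main(void)
{
	struct mock_ring ring = { .seqno = 42 };
	struct mock_vm ggtt;
	struct mock_obj obj = { 0, 0 };
	struct mock_vma vma = { &obj, &ggtt };

	list_init(&ggtt.active_list);
	list_init(&vma.mm_list);

	vma_move_to_active(&vma, &ring);	/* what execbuf/do_switch call now */

	printf("active=%d seqno=%u on_active_list=%d\n",
	       obj.active, obj.last_read_seqno,
	       ggtt.active_list.next == &vma.mm_list);
	return 0;
}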
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
Conflicts:
drivers/gpu/drm/i915/i915_gem_execbuffer.c
---
drivers/gpu/drm/i915/i915_drv.h | 5 ++---
drivers/gpu/drm/i915/i915_gem.c | 11 +++++++++--
drivers/gpu/drm/i915/i915_gem_context.c | 8 ++++----
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3 +--
4 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e8ade0..c9ed77a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1866,9 +1866,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a839bcb..8547b97 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1881,11 +1881,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
 {
-	struct drm_device *dev = obj->base.dev;
+	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 seqno = intel_ring_get_seqno(ring);
 
@@ -1920,6 +1920,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c..6a0f568 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -436,11 +436,11 @@ static int do_switch(struct i915_hw_context *to)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
+		struct drm_i915_private *dev_priv = ring->dev->dev_private;
+		struct i915_vma *vma =
+			i915_gem_obj_to_vma(from->obj, &dev_priv->gtt.base);
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f0a0bd7..928d37b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -869,8 +869,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		list_move_tail(&vma->mm_list, &vma->vm->active_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
--
1.8.4