[Intel-gfx] [PATCH 39/66] drm/i915: Move active to vma
Ben Widawsky
ben at bwidawsk.net
Fri Jun 28 01:30:40 CEST 2013
Probably need to squash whole thing, or just the inactive part, tbd...
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 14 ++++++------
drivers/gpu/drm/i915/i915_gem.c | 47 ++++++++++++++++++++++++-----------------
2 files changed, 35 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b3eb067..247a124 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -545,6 +545,13 @@ struct i915_vma {
/* Page aligned offset (helper for stolen) */
unsigned long deferred_offset;
+ /**
+ * This is set if the object is on the active lists (has pending
 * rendering and so a non-zero seqno), and is not set if it is on
+ * inactive (ready to be unbound) list.
+ */
+ unsigned int active:1;
+
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@@ -1245,13 +1252,6 @@ struct drm_i915_gem_object {
struct list_head exec_list;
/**
- * This is set if the object is on the active lists (has pending
- * rendering and so a non-zero seqno), and is not set if it is on
- * inactive (ready to be unbound) list.
- */
- unsigned int active:1;
-
- /**
* This is set if the object has been written to since last bound
* to the GTT
*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f448804..a3e8c26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -137,7 +137,13 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
/* NB: Not the same as !i915_gem_object_is_inactive */
bool i915_gem_object_is_active(struct drm_i915_gem_object *obj)
{
- return obj->active;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->active)
+ return true;
+
+ return false;
}
static inline bool
@@ -1889,14 +1895,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
BUG_ON(ring == NULL);
obj->ring = ring;
+ /* Move from whatever list we were on to the tail of execution. */
+ vma = i915_gem_obj_to_vma(obj, vm);
/* Add a reference if we're newly entering the active list. */
- if (!i915_gem_object_is_active(obj)) {
+ if (!vma->active) {
drm_gem_object_reference(&obj->base);
- obj->active = 1;
+ vma->active = 1;
}
- /* Move from whatever list we were on to the tail of execution. */
- vma = i915_gem_obj_to_vma(obj, vm);
list_move_tail(&vma->mm_list, &vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
@@ -1917,16 +1923,23 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *vm;
struct i915_vma *vma;
+ int i = 0;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
- BUG_ON(!i915_gem_object_is_active(obj));
- vma = i915_gem_obj_to_vma(obj, vm);
- list_move_tail(&vma->mm_list, &vm->inactive_list);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma || !vma->active)
+ continue;
+ list_move_tail(&vma->mm_list, &vm->inactive_list);
+ vma->active = 0;
+ i++;
+ }
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -1938,8 +1951,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
- obj->active = 0;
- drm_gem_object_unreference(&obj->base);
+ while (i--)
+ drm_gem_object_unreference(&obj->base);
WARN_ON(i915_verify_lists(dev));
}
@@ -2273,15 +2286,13 @@ static bool i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
- struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- i915_gem_object_move_to_inactive(obj, vm);
+ i915_gem_object_move_to_inactive(obj);
}
return ctx_banned;
@@ -2365,8 +2376,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
@@ -2376,8 +2385,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- i915_gem_object_move_to_inactive(obj, vm);
+ BUG_ON(!i915_gem_object_is_active(obj));
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely(ring->trace_irq_seqno &&
--
1.8.3.1
More information about the Intel-gfx
mailing list