[Intel-gfx] [PATCH 02/17] drm/i915: Defer active reference until required
Chris Wilson
chris at chris-wilson.co.uk
Mon Aug 22 08:03:35 UTC 2016
We only need the active reference to keep the object alive after the
handle has been deleted (so as to prevent a synchronous gem_close). Why
then pay the price of a kref on every execbuf when we can insert that
final active ref just in time for the handle deletion?
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_drv.h | 27 +++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem.c | 23 ++++++++++++++++++++++-
drivers/gpu/drm/i915/i915_gem_batch_pool.c | 2 +-
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 5 +----
drivers/gpu/drm/i915/i915_gem_render_state.c | 2 +-
drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +-
7 files changed, 54 insertions(+), 9 deletions(-)
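
Aside (not part of the patch): a minimal userspace sketch of the idea above, with invented names, a plain counter standing in for the kref, and locking ignored. Execbuf only sets a per-engine busy bit; the single extra reference is taken at handle close if the object is still busy, and dropped again when the last read retires.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES	4
#define ACTIVE_MASK	((1u << NUM_ENGINES) - 1)
#define ACTIVE_REF_BIT	(1u << NUM_ENGINES)

struct mock_obj {
	unsigned int flags;	/* per-engine busy bits + deferred-ref bit */
	unsigned int refcount;
};

static void obj_get(struct mock_obj *obj) { obj->refcount++; }

static void obj_put(struct mock_obj *obj)
{
	assert(obj->refcount > 0);
	if (--obj->refcount == 0)
		printf("object freed\n");
}

static bool obj_is_active(const struct mock_obj *obj)
{
	return obj->flags & ACTIVE_MASK;
}

/* execbuf: just mark the engine busy, no reference taken any more */
static void move_to_active(struct mock_obj *obj, int engine)
{
	obj->flags |= 1u << engine;
}

/* gem_close: take the one deferred reference if the GPU still owns us,
 * then drop the handle's own reference (simplified) */
static void handle_close(struct mock_obj *obj)
{
	if (obj_is_active(obj) && !(obj->flags & ACTIVE_REF_BIT)) {
		obj->flags |= ACTIVE_REF_BIT;
		obj_get(obj);
	}
	obj_put(obj);
}

/* retire of the last read: clear the busy bit and, once idle, drop the
 * deferred reference taken at close */
static void retire_read(struct mock_obj *obj, int engine)
{
	obj->flags &= ~(1u << engine);
	if (!obj_is_active(obj) && (obj->flags & ACTIVE_REF_BIT)) {
		obj->flags &= ~ACTIVE_REF_BIT;
		obj_put(obj);
	}
}

int main(void)
{
	struct mock_obj obj = { .flags = 0, .refcount = 1 };	/* handle ref */

	move_to_active(&obj, 0);	/* busy, refcount still 1 */
	handle_close(&obj);		/* 2 -> 1: kept alive by the GPU */
	assert(obj.refcount == 1);
	retire_read(&obj, 0);		/* idle: last ref dropped, freed */
	return 0;
}
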
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 24672d6e36c1..a85316be9116 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2175,6 +2175,13 @@ struct drm_i915_gem_object {
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
/**
+ * Have we taken a reference for the object for incomplete GPU
+ * activity?
+ */
+#define I915_BO_ACTIVE_REF_SHIFT (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
+#define I915_BO_ACTIVE_REF_BIT BIT(I915_BO_ACTIVE_REF_SHIFT)
+
+ /**
* This is set if the object has been written to since last bound
* to the GTT
*/
@@ -2344,6 +2351,26 @@ i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
}
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ACTIVE_REF_BIT;
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ obj->flags |= I915_BO_ACTIVE_REF_BIT;
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ obj->flags &= ~I915_BO_ACTIVE_REF_BIT;
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
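
A quick self-contained check of the bit layout the new helpers assume: the active-reference bit sits directly above the per-engine active bits, so setting or clearing it never disturbs the busy tracking. The constants below are hand-copied stand-ins for illustration only, not taken from the headers.

#include <assert.h>

#define NUM_ENGINES		5	/* assumed engine count */
#define ACTIVE_SHIFT		0	/* stand-in for I915_BO_ACTIVE_SHIFT */
#define ACTIVE_MASK		(((1u << NUM_ENGINES) - 1) << ACTIVE_SHIFT)
#define ACTIVE_REF_SHIFT	(ACTIVE_SHIFT + NUM_ENGINES)
#define ACTIVE_REF_BIT		(1u << ACTIVE_REF_SHIFT)

int main(void)
{
	unsigned int flags = ACTIVE_MASK;	/* busy on every engine */

	assert(!(ACTIVE_REF_BIT & ACTIVE_MASK));	/* no overlap */

	flags |= ACTIVE_REF_BIT;			/* set_active_reference */
	assert((flags & ACTIVE_MASK) == ACTIVE_MASK);

	flags &= ~ACTIVE_REF_BIT;			/* clear_active_reference */
	assert((flags & ACTIVE_MASK) == ACTIVE_MASK);

	return 0;
}
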
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 04607d4115d6..0a48c90979a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2444,7 +2444,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
list_move_tail(&obj->global_list,
&request->i915->mm.bound_list);
- i915_gem_object_put(obj);
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2675,6 +2678,12 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
+
+ if (i915_gem_object_is_active(obj) &&
+ !i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_set_active_reference(obj);
+ i915_gem_object_get(obj);
+ }
mutex_unlock(&obj->base.dev->struct_mutex);
}
@@ -4223,6 +4232,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+ if (!obj)
+ return;
+
+ GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+ if (i915_gem_object_is_active(obj))
+ i915_gem_object_set_active_reference(obj);
+ else
+ i915_gem_object_put(obj);
+}
+
int i915_gem_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
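
For reference, a userspace mock of how __i915_gem_object_release_unless_active() is meant to behave for a caller that is finished with its reference: if the GPU still owns the object, the caller's reference is converted into the single deferred active reference and handed over to retirement; otherwise it is dropped immediately. Names and types below are invented for the sketch.

#include <assert.h>
#include <stdbool.h>

struct mock_obj {
	bool active;		/* still busy on some engine? */
	bool active_ref;	/* deferred reference already taken? */
	unsigned int refcount;
};

static void obj_put(struct mock_obj *obj)
{
	assert(obj->refcount > 0);
	obj->refcount--;
}

static void release_unless_active(struct mock_obj *obj)
{
	if (!obj)
		return;

	/* mirrors the GEM_BUG_ON: the caller must not already own the
	 * deferred active reference */
	assert(!obj->active_ref);

	if (obj->active)
		obj->active_ref = true;	/* hand the reference to retire */
	else
		obj_put(obj);		/* idle: drop it now */
}

int main(void)
{
	struct mock_obj busy = { .active = true,  .refcount = 1 };
	struct mock_obj idle = { .active = false, .refcount = 1 };

	release_unless_active(&busy);
	assert(busy.refcount == 1 && busy.active_ref);	/* kept for retire */

	release_unless_active(&idle);
	assert(idle.refcount == 0);			/* dropped now */
	return 0;
}
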
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index ed989596d9a3..cb25cad3318c 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 35950ee46a1d..ab54f208a4a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring)
intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
}
put_pid(ctx->pid);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6baad503764d..8f9d5ad0cfd8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1280,15 +1280,12 @@ void i915_vma_move_to_active(struct i915_vma *vma,
obj->dirty = 1; /* be paranoid */
- /* Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
+ /* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
* such that subsequent callbacks are called first. Therefore we
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_object_is_active(obj))
- i915_gem_object_get(obj);
i915_gem_object_set_active(obj, idx);
i915_gem_active_set(&obj->last_read[idx], req);
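
To make the ordering argument in the comment above concrete: as the comment says, new callbacks go to the start of the list and later ones run first, so the callback registered first runs last. Registering the last_read tracker (which now drops the deferred object reference) before everything else therefore keeps the object alive for every other retirement callback. A tiny mock of that ordering, with invented names:

#include <stdio.h>

#define MAX_CB 8

struct retire_list {
	const char *cb[MAX_CB];
	int count;
};

/* new callbacks go to the start of the list */
static void add_retire_cb(struct retire_list *list, const char *name)
{
	for (int i = list->count; i > 0; i--)
		list->cb[i] = list->cb[i - 1];
	list->cb[0] = name;
	list->count++;
}

/* retirement walks from the head, so the last callback added runs first
 * and the first one added runs last */
static void retire(const struct retire_list *list)
{
	for (int i = 0; i < list->count; i++)
		printf("retire: %s\n", list->cb[i]);
}

int main(void)
{
	struct retire_list list = { .count = 0 };

	/* registered first => runs last */
	add_retire_cb(&list, "last_read (drops the deferred object ref)");
	add_retire_cb(&list, "last_write");
	add_retire_cb(&list, "last_fence");

	retire(&list);	/* last_fence, last_write, then last_read */
	return 0;
}
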
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 95b7e9afd5f8..055139343137 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -225,6 +225,6 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
err_unpin:
i915_vma_unpin(so.vma);
err_obj:
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cc5bcd14b6df..63ed3fb9c49e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2014,7 +2014,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void
intel_ring_free(struct intel_ring *ring)
{
- i915_vma_put(ring->vma);
+ __i915_gem_object_release_unless_active(ring->vma->obj);
list_del(&ring->link);
kfree(ring);
}
--
2.9.3