[PATCH 10/14] drm/i915/gt: Defer context release to after unlocking pin_mutex
Chris Wilson
chris at chris-wilson.co.uk
Tue Dec 31 17:32:03 UTC 2019
Atomically acquire the barriers we need to release under the pin_mutex
when unpinning the context, but defer the actual work until after
dropping the mutex, reducing the contention therein and hopefully
avoiding some work while busy.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_context.c | 10 ++++++++--
drivers/gpu/drm/i915/i915_active.c | 15 +++++++++++++--
drivers/gpu/drm/i915/i915_active.h | 5 +++++
3 files changed, 26 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index c25d2be2ae0f..c9df7853dede 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -122,6 +122,8 @@ int __intel_context_do_pin(struct intel_context *ce)
void intel_context_unpin(struct intel_context *ce)
{
+ struct llist_node *release = ERR_PTR(-1);
+
if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
return;
@@ -133,11 +135,15 @@ void intel_context_unpin(struct intel_context *ce)
CE_TRACE(ce, "retire\n");
ce->ops->unpin(ce);
+ release = __i915_active_take_barriers(&ce->active);
+ }
+ mutex_unlock(&ce->pin_mutex);
- intel_context_active_release(ce);
+ if (release != ERR_PTR(-1)) {
+ __i915_active_acquire_barriers(&ce->active, release);
+ i915_active_release(&ce->active);
}
- mutex_unlock(&ce->pin_mutex);
intel_context_put(ce);
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cfe09964622b..4336102595f5 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -671,7 +671,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
return err;
}
-void i915_active_acquire_barrier(struct i915_active *ref)
+void __i915_active_acquire_barriers(struct i915_active *ref,
+ struct llist_node *barriers)
{
struct llist_node *pos, *next;
unsigned long flags;
@@ -684,7 +685,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
* populated by i915_request_add_active_barriers() to point to the
* request that will eventually release them.
*/
- llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ llist_for_each_safe(pos, next, barriers) {
struct active_node *node = barrier_from_ll(pos);
struct intel_engine_cs *engine = barrier_to_engine(node);
struct rb_node **p, *parent;
@@ -714,6 +715,16 @@ void i915_active_acquire_barrier(struct i915_active *ref)
}
}
+struct llist_node *__i915_active_take_barriers(struct i915_active *ref)
+{
+ return take_preallocated_barriers(ref);
+}
+
+void i915_active_acquire_barrier(struct i915_active *ref)
+{
+ __i915_active_acquire_barriers(ref, __i915_active_take_barriers(ref));
+}
+
static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
return __active_fence_slot(&barrier_from_ll(node)->base);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b571f675c795..2358d2762879 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -202,7 +202,12 @@ static inline void i915_active_fini(struct i915_active *ref) { }
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine);
+
+struct llist_node *__i915_active_take_barriers(struct i915_active *ref);
+void __i915_active_acquire_barriers(struct i915_active *ref,
+ struct llist_node *barriers);
void i915_active_acquire_barrier(struct i915_active *ref);
+
void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
--
2.25.0.rc0
More information about the Intel-gfx-trybot
mailing list