[Intel-gfx] [PATCH 16/28] drm/i915: Unpeel awaits on a proxy fence
Chris Wilson
chris at chris-wilson.co.uk
Sun Jun 7 22:20:56 UTC 2020
If the real target for a proxy fence is known at the time we are
attaching our awaits, use the real target in preference to hooking up to
the proxy. If we use the real target instead, we can optimise the awaits,
e.g. if it is along the same engine, we can order the submission and avoid
the wait-for-completion.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
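A condensed sketch of the decision the await path makes after this patch
(the wrapper await_fence() below is illustrative only and not part of the
change; the functions it calls are the ones touched by the diff):

static int await_fence(struct i915_request *rq, struct dma_fence *fence)
{
	/* Unpeel: if the proxy has already been resolved, take the real fence. */
	fence = dma_fence_proxy_get_real(fence);

	if (dma_fence_is_i915(fence))
		/* Native fence: order the submission, e.g. along the same engine. */
		return i915_request_await_request(rq, to_request(fence));

	if (dma_fence_is_proxy(fence))
		/* Still unset: defer the await until the real fence is known. */
		return i915_request_await_proxy(rq, fence);

	/* External fence: fall back to a wait-for-completion. */
	return i915_request_await_external(rq, fence);
}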
drivers/gpu/drm/i915/i915_request.c | 155 ++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_scheduler.c | 41 +++++++
drivers/gpu/drm/i915/i915_scheduler.h | 3 +
3 files changed, 199 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 3bb7320249ae..f04f91b4d879 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -24,6 +24,7 @@
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-proxy.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
@@ -461,6 +462,7 @@ static bool fatal_error(int error)
case 0: /* not an error! */
case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ case -EDEADLK: /* cyclic fence lockup (await_proxy) */
return false;
default:
return true;
@@ -1241,6 +1243,136 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
return err;
}
+struct await_proxy {
+ struct wait_queue_entry base;
+ struct i915_request *request;
+ struct dma_fence *fence;
+ struct timer_list timer;
+ struct work_struct work;
+ int (*attach)(struct await_proxy *ap);
+ void *data;
+};
+
+static void await_proxy_work(struct work_struct *work)
+{
+ struct await_proxy *ap = container_of(work, typeof(*ap), work);
+ struct i915_request *rq = ap->request;
+
+ del_timer_sync(&ap->timer);
+
+ if (ap->fence) {
+ int err = 0;
+
+ /*
+ * If the fence is external, we impose a 10s timeout.
+ * However, if the fence is internal, we skip a timeout in
+ * the belief that all fences are in-order (DAG, no cycles)
+ * and we can enforce forward progress by resetting the GPU if
+ * necessary. A future fence, provided by userspace, can trivially
+ * generate a cycle in the dependency graph, and so cause
+ * that entire cycle to become deadlocked, with no forward
+ * progress being made at all and the driver being kept
+ * eternally awake.
+ */
+ if (dma_fence_is_i915(ap->fence) &&
+ !i915_sched_node_verify_dag(&rq->sched,
+ &to_request(ap->fence)->sched))
+ err = -EDEADLK;
+
+ if (!err) {
+ mutex_lock(&rq->context->timeline->mutex);
+ err = ap->attach(ap);
+ mutex_unlock(&rq->context->timeline->mutex);
+ }
+
+ /* Don't flag an error for co-dependent scheduling */
+ if (err == -EDEADLK) {
+ struct i915_sched_node *waiter =
+ &to_request(ap->fence)->sched;
+ struct i915_dependency *p;
+
+ for_each_waiter(p, rq) {
+ if (p->waiter == waiter &&
+ p->flags & I915_DEPENDENCY_WEAK) {
+ err = 0;
+ break;
+ }
+ }
+ }
+
+ if (err < 0)
+ i915_sw_fence_set_error_once(&rq->submit, err);
+ }
+
+ i915_sw_fence_complete(&rq->submit);
+
+ dma_fence_put(ap->fence);
+ kfree(ap);
+}
+
+static int
+await_proxy_wake(struct wait_queue_entry *entry,
+ unsigned int mode,
+ int flags,
+ void *fence)
+{
+ struct await_proxy *ap = container_of(entry, typeof(*ap), base);
+
+ ap->fence = dma_fence_get(fence);
+ schedule_work(&ap->work);
+
+ return 0;
+}
+
+static void
+await_proxy_timer(struct timer_list *t)
+{
+ struct await_proxy *ap = container_of(t, typeof(*ap), timer);
+
+ if (dma_fence_remove_proxy_listener(ap->base.private, &ap->base)) {
+ struct i915_request *rq = ap->request;
+
+ pr_notice("Asynchronous wait on unset proxy fence by %s:%s:%llx timed out\n",
+ rq->fence.ops->get_driver_name(&rq->fence),
+ rq->fence.ops->get_timeline_name(&rq->fence),
+ rq->fence.seqno);
+ i915_sw_fence_set_error_once(&rq->submit, -ETIMEDOUT);
+
+ schedule_work(&ap->work);
+ }
+}
+
+static int
+__i915_request_await_proxy(struct i915_request *rq,
+ struct dma_fence *fence,
+ unsigned long timeout,
+ int (*attach)(struct await_proxy *ap),
+ void *data)
+{
+ struct await_proxy *ap;
+
+ ap = kzalloc(sizeof(*ap), I915_FENCE_GFP);
+ if (!ap)
+ return -ENOMEM;
+
+ i915_sw_fence_await(&rq->submit);
+ mark_external(rq);
+
+ ap->base.private = fence;
+ ap->base.func = await_proxy_wake;
+ ap->request = rq;
+ INIT_WORK(&ap->work, await_proxy_work);
+ ap->attach = attach;
+ ap->data = data;
+
+ timer_setup(&ap->timer, await_proxy_timer, 0);
+ if (timeout)
+ mod_timer(&ap->timer, round_jiffies_up(jiffies + timeout));
+
+ dma_fence_add_proxy_listener(fence, &ap->base);
+ return 0;
+}
+
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence,
@@ -1339,6 +1471,24 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
}
+static int await_proxy(struct await_proxy *ap)
+{
+ return i915_request_await_dma_fence(ap->request, ap->fence);
+}
+
+static int
+i915_request_await_proxy(struct i915_request *rq, struct dma_fence *fence)
+{
+ /*
+ * Wait until we know the real fence so that we can optimise the
+ * inter-fence synchronisation.
+ */
+ return __i915_request_await_proxy(rq, fence,
+ i915_fence_context_timeout(rq->engine->i915,
+ fence->context),
+ await_proxy, NULL);
+}
+
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
@@ -1346,6 +1496,9 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
unsigned int nchild = 1;
int ret;
+ /* Unpeel the proxy fence if the real target is already known */
+ fence = dma_fence_proxy_get_real(fence);
+
/*
* Note that if the fence-array was created in signal-on-any mode,
* we should *not* decompose it into its individual fences. However,
@@ -1385,6 +1538,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
if (dma_fence_is_i915(fence))
ret = i915_request_await_request(rq, to_request(fence));
+ else if (dma_fence_is_proxy(fence))
+ ret = i915_request_await_proxy(rq, fence);
else
ret = i915_request_await_external(rq, fence);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c65..250832768279 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -469,6 +469,47 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
return 0;
}
+bool i915_sched_node_verify_dag(struct i915_sched_node *waiter,
+ struct i915_sched_node *signaler)
+{
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ bool result = false;
+ LIST_HEAD(dfs);
+
+ if (list_empty(&waiter->waiters_list))
+ return true;
+
+ spin_lock_irq(&schedule_lock);
+
+ stack.signaler = signaler;
+ list_add(&stack.dfs_link, &dfs);
+
+ list_for_each_entry(dep, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ if (node_signaled(node))
+ continue;
+
+ list_for_each_entry(p, &node->signalers_list, signal_link) {
+ if (p->signaler == waiter)
+ goto out;
+
+ if (list_empty(&p->dfs_link))
+ list_add_tail(&p->dfs_link, &dfs);
+ }
+ }
+
+ result = true;
+out:
+ list_for_each_entry_safe(dep, p, &dfs, dfs_link)
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ spin_unlock_irq(&schedule_lock);
+
+ return result;
+}
+
void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc569..13432add8929 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -28,6 +28,9 @@
void i915_sched_node_init(struct i915_sched_node *node);
void i915_sched_node_reinit(struct i915_sched_node *node);
+bool i915_sched_node_verify_dag(struct i915_sched_node *waiter,
+ struct i915_sched_node *signal);
+
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
struct i915_dependency *dep,
--
2.20.1