[PATCH 5/7] drm/i915: Add a struct dma_fence_work timeline
Thomas Hellström
thomas.hellstrom at linux.intel.com
Wed Oct 6 06:59:46 UTC 2021
The TTM managers and, possibly, the GTT address space managers will
need to be able to order fences for async operation.
Using dma_fence_is_later() for this requires that the fences we hand
them come from a single fence context and are ordered.
Introduce a struct dma_fence_work_timeline, and a function to attach
a struct dma_fence_work to such a timeline in such a way that all
fences previously attached to the timeline will have signaled when
the latest attached struct dma_fence_work signals.
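
For illustration, a minimal usage sketch (not part of this patch; the
timeline, ops, work-item and callback names are made up) of how a
caller would order work items on such a timeline:

	static struct dma_fence_work_timeline my_tl;

	static const struct dma_fence_work_timeline_ops my_tl_ops = {
		.name = "my-timeline",
		/* Optional .get()/.put() may pin a containing object. */
	};

	/* Once, at setup time. */
	dma_fence_work_timeline_init(&my_tl, "my-timeline", &my_tl_ops);

	/*
	 * For each work item, before committing it. The work item takes
	 * the timeline's fence context and the next seqno, and awaits
	 * the previously attached fence, so fences on the timeline
	 * signal in attach order and dma_fence_is_later() orders them.
	 */
	dma_fence_work_timeline_attach(&my_tl, work, &tl_cb);
	dma_fence_work_commit(work);

Here "work" is a struct dma_fence_work and "tl_cb" a struct
i915_sw_dma_fence_cb owned by the caller for the lifetime of the
await.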
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/i915/i915_sw_fence_work.c | 64 ++++++++++++++++++++++-
drivers/gpu/drm/i915/i915_sw_fence_work.h | 28 ++++++++++
2 files changed, 90 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index b8b5aecf6e4f..bb62898752b3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -5,6 +5,47 @@
*/
#include "i915_sw_fence_work.h"
+#include "i915_utils.h"
+
+void dma_fence_work_timeline_attach(struct dma_fence_work_timeline *tl,
+ struct dma_fence_work *f,
+ struct i915_sw_dma_fence_cb *tl_cb)
+{
+ struct dma_fence *await;
+
+ if (tl->ops->get)
+ tl->ops->get(tl);
+
+ spin_lock(&tl->lock);
+ await = tl->last_fence;
+ tl->last_fence = dma_fence_get(&f->dma);
+ f->dma.seqno = tl->seqno++;
+ f->dma.context = tl->context;
+ f->tl = tl;
+ spin_unlock(&tl->lock);
+
+ if (await) {
+ __i915_sw_fence_await_dma_fence(&f->chain, await, tl_cb);
+ dma_fence_put(await);
+ }
+}
+
+static void dma_fence_work_timeline_detach(struct dma_fence_work *f)
+{
+ struct dma_fence_work_timeline *tl = f->tl;
+ bool put = false;
+
+ spin_lock(&tl->lock);
+ if (tl->last_fence == &f->dma) {
+ put = true;
+ tl->last_fence = NULL;
+ }
+ spin_unlock(&tl->lock);
+ if (tl->ops->put)
+ tl->ops->put(tl);
+ if (put)
+ dma_fence_put(&f->dma);
+}
static void dma_fence_work_complete(struct dma_fence_work *f)
{
@@ -13,6 +54,9 @@ static void dma_fence_work_complete(struct dma_fence_work *f)
if (f->ops->release)
f->ops->release(f);
+ if (f->tl)
+ dma_fence_work_timeline_detach(f);
+
dma_fence_put(&f->dma);
}
@@ -57,14 +101,17 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
static const char *get_driver_name(struct dma_fence *fence)
{
- return "dma-fence";
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ return (f->tl && f->tl->ops->name) ? f->tl->ops->name : "dma-fence";
}
static const char *get_timeline_name(struct dma_fence *fence)
{
struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
- return f->ops->name ?: "work";
+ return (f->tl && f->tl->name) ? f->tl->name :
+ f->ops->name ?: "work";
}
static void fence_release(struct dma_fence *fence)
@@ -88,6 +135,7 @@ void dma_fence_work_init(struct dma_fence_work *f,
{
f->ops = ops;
f->error = 0;
+ f->tl = NULL;
spin_lock_init(&f->lock);
dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
i915_sw_fence_init(&f->chain, fence_notify);
@@ -101,3 +149,15 @@ int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
}
+
+void dma_fence_work_timeline_init(struct dma_fence_work_timeline *tl,
+ const char *name,
+ const struct dma_fence_work_timeline_ops *ops)
+{
+ tl->name = name;
+ spin_lock_init(&tl->lock);
+ tl->context = dma_fence_context_alloc(1);
+ tl->seqno = 0;
+ tl->last_fence = NULL;
+ tl->ops = ops;
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
index caa59fb5252b..77361666accb 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -14,6 +14,23 @@
#include "i915_sw_fence.h"
struct dma_fence_work;
+struct dma_fence_work_timeline;
+
+struct dma_fence_work_timeline_ops {
+ const char *name;
+ void (*put)(struct dma_fence_work_timeline *tl);
+ void (*get)(struct dma_fence_work_timeline *tl);
+};
+
+struct dma_fence_work_timeline {
+ const char *name;
+ /** Protects mutable members of the structure */
+ spinlock_t lock;
+ u64 context;
+ u64 seqno;
+ struct dma_fence *last_fence;
+ const struct dma_fence_work_timeline_ops *ops;
+};
struct dma_fence_work_ops {
const char *name;
@@ -30,6 +47,9 @@ struct dma_fence_work {
struct i915_sw_dma_fence_cb cb;
struct work_struct work;
+
+ struct dma_fence_work_timeline *tl;
+
const struct dma_fence_work_ops *ops;
};
@@ -65,4 +85,12 @@ static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
dma_fence_work_commit(f);
}
+void dma_fence_work_timeline_attach(struct dma_fence_work_timeline *tl,
+ struct dma_fence_work *f,
+ struct i915_sw_dma_fence_cb *tl_cb);
+
+void dma_fence_work_timeline_init(struct dma_fence_work_timeline *tl,
+ const char *name,
+ const struct dma_fence_work_timeline_ops *ops);
+
#endif /* I915_SW_FENCE_WORK_H */
--
2.31.1