[Intel-gfx] [PATCH 3/4] drm/i915: Extend i915_request_await_active to use all timelines
Chris Wilson
chris at chris-wilson.co.uk
Fri Feb 28 00:39:57 UTC 2020
Extend i915_request_await_active() so that it can asynchronously wait on
all the tracked timelines simultaneously, not just the exclusive fence.
The new behaviour is opt-in via an I915_ACTIVE_AWAIT_ALL flag; existing
callers pass 0 and keep waiting on the exclusive fence only.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
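A minimal usage sketch of the new flag (hypothetical callers, shown here
for review only and not part of the patch):

        /* wait only on the exclusive fence, as before */
        err = i915_request_await_active(rq, &vma->active, 0);

        /* also wait on every timeline tracked by the i915_active */
        err = i915_request_await_active(rq, &vma->active, I915_ACTIVE_AWAIT_ALL);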
 drivers/gpu/drm/i915/i915_active.c | 54 +++++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_active.h |  5 ++-
 drivers/gpu/drm/i915/i915_vma.c    |  2 +-
 3 files changed, 47 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 9308d134211f..39902df0cc11 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -515,23 +515,53 @@ int i915_active_wait(struct i915_active *ref)
 	return 0;
 }
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int await_active(struct i915_request *rq,
+                        struct i915_active_fence *active)
 {
-	int err = 0;
+	struct dma_fence *fence;
+
+	if (is_barrier(active))
+		return 0;
+
+	fence = i915_active_fence_get(active);
+	if (fence) {
+		int err;
+
+		err = i915_request_await_dma_fence(rq, fence);
+		dma_fence_put(fence);
+		if (err < 0)
+			return err;
+	}
 
+	return 0;
+}
+
+int i915_request_await_active(struct i915_request *rq,
+                              struct i915_active *ref,
+                              unsigned int flags)
+{
+	int err = 0;
+
+	/* We must always wait for the exclusive fence! */
 	if (rcu_access_pointer(ref->excl.fence)) {
-		struct dma_fence *fence;
-
-		rcu_read_lock();
-		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
-		rcu_read_unlock();
-		if (fence) {
-			err = i915_request_await_dma_fence(rq, fence);
-			dma_fence_put(fence);
-		}
+		err = await_active(rq, &ref->excl);
+		if (err)
+			return err;
 	}
 
-	/* In the future we may choose to await on all fences */
+	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
+
+		err = 0;
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			err = await_active(rq, &it->base);
+			if (err)
+				break;
+		}
+		i915_active_release(ref);
+		if (err)
+			return err;
+	}
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 7e438501333e..e3c13060c4c7 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -183,7 +183,10 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
 
 int i915_active_wait(struct i915_active *ref);
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_request_await_active(struct i915_request *rq,
+                              struct i915_active *ref,
+                              unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 298ca4316e65..ce23c452e6ac 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1174,7 +1174,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
 	/* Wait for the vma to be bound before we start! */
-	err = i915_request_await_active(rq, &vma->active);
+	err = i915_request_await_active(rq, &vma->active, 0);
 	if (err)
 		return err;
 
--
2.25.1