[Intel-gfx] [PATCH v2 09/14] drm/i915: Remove the preempted request from the execution queue
Chris Wilson
chris at chris-wilson.co.uk
Tue Feb 14 09:54:08 UTC 2017
After the request is cancelled, we need to remove it from the
global execution timeline and return it to the context timeline, the
inverse of i915_gem_request_submit().
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
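As a rough sketch of the intended use, assuming a hypothetical
preemption path (example_preempt_request() below is illustration only
and not part of this series; only i915_gem_request_unsubmit() is):

	static void example_preempt_request(struct drm_i915_gem_request *rq)
	{
		/* The engine must already be stopped and rq cancelled on
		 * the hardware, and rq must be the most recently submitted
		 * request on its engine (unwinding is strictly in reverse
		 * submission order).
		 */

		/* Inverse of i915_gem_request_submit(): clears
		 * rq->global_seqno, cancels any enabled signaling and moves
		 * rq from the engine timeline back to its per-context
		 * timeline. Takes the engine timeline lock with irqs
		 * disabled, so this is safe from irq context.
		 */
		i915_gem_request_unsubmit(rq);

		/* rq->global_seqno is now 0 until it is resubmitted */
	}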
drivers/gpu/drm/i915/i915_gem_request.c | 58 +++++++++++++++++++++++++++++++-
drivers/gpu/drm/i915/i915_gem_request.h | 3 ++
drivers/gpu/drm/i915/intel_breadcrumbs.c | 19 +++++++++--
3 files changed, 77 insertions(+), 3 deletions(-)
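A worked example of why waiters must restart (seqno values are made
up): a waiter goes to sleep on wait.seqno == 5; the scheduler preempts
the request, unsubmitting it (global_seqno -> 0), and later resubmits
it as global_seqno == 9. The sleeping waiter is still registered under
seqno 5, so on wakeup it must notice the change, drop its stale entry
and restart with the new seqno:

	waiter                                  scheduler
	------                                  ---------
	wait.seqno = 5, sleeps
	                                        i915_gem_request_unsubmit(rq);
	                                        /* rq->global_seqno = 0 */
	                                        ...
	                                        __i915_gem_request_submit(rq);
	                                        /* rq->global_seqno = 9 */
	wakes, sees i915_gem_request_global_seqno(rq) != wait.seqno
	  -> intel_engine_remove_wait(); goto restart

The breadcrumbs signaler needs the same care: a completed wait only
counts as a signal if the request still carries the seqno the wait was
enabled with (signal_valid() below), and the signaler now also sleeps
on request->execute so that it is woken when a preempted request is
assigned a new global_seqno.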
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5db4fd1eabcd..56ebb653406d 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -435,6 +435,55 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
+void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_timeline *timeline;
+
+ assert_spin_locked(&engine->timeline->lock);
+
+ /* Only unwind in reverse order, as required to keep the per-context
+ * list in seqno/ring order.
+ */
+ GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+ engine->timeline->seqno--;
+
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ request->global_seqno = 0;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_cancel_signaling(request);
+ spin_unlock(&request->lock);
+
+ /* Transfer back from the global per-engine timeline to per-context */
+ timeline = request->timeline;
+ GEM_BUG_ON(timeline == engine->timeline);
+
+ spin_lock(&timeline->lock);
+ list_move(&request->link, &timeline->requests);
+ spin_unlock(&timeline->lock);
+
+ /* We don't need to wake_up any waiters on request->execute; they
+ * will get woken by any other event or by us re-adding this request
+ * to the engine timeline (__i915_gem_request_submit()). The waiters
+ * should be quite adept at noticing that the request now has a
+ * different global_seqno from the one they went to sleep on.
+ */
+}
+
+void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_unsubmit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
@@ -1016,9 +1065,11 @@ long i915_wait_request(struct drm_i915_gem_request *req,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, i915_gem_request_global_seqno(req));
+ wait.tsk = current;
+restart:
reset_wait_queue(&req->execute, &exec);
+ wait.seqno = i915_gem_request_global_seqno(req);
if (!wait.seqno) {
do {
set_current_state(state);
@@ -1117,6 +1168,11 @@ long i915_wait_request(struct drm_i915_gem_request *req,
/* Only spin if we know the GPU is processing this request */
if (i915_spin_request(req, state, 2))
break;
+
+ if (i915_gem_request_global_seqno(req) != wait.seqno) {
+ intel_engine_remove_wait(req->engine, &wait);
+ goto restart;
+ }
}
intel_engine_remove_wait(req->engine, &wait);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index b81f6709905c..5f73d8c0a38a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -274,6 +274,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);
+void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 420bbaf1f1c0..39a0f87a32d0 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -431,7 +431,14 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
spin_unlock_irq(&b->lock);
}
-static bool signal_complete(struct drm_i915_gem_request *request)
+static bool signal_valid(const struct drm_i915_gem_request *request)
+{
+ u32 seqno = READ_ONCE(request->global_seqno);
+
+ return seqno == request->signaling.wait.seqno;
+}
+
+static bool signal_complete(const struct drm_i915_gem_request *request)
{
if (!request)
return false;
@@ -440,7 +447,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
* signalled that this wait is already completed.
*/
if (intel_wait_complete(&request->signaling.wait))
- return true;
+ return signal_valid(request);
/* Carefully check if the request is complete, giving time for the
* seqno to be visible or if the GPU hung.
@@ -520,13 +527,21 @@ static int intel_breadcrumbs_signaler(void *arg)
i915_gem_request_put(request);
} else {
+ DEFINE_WAIT(exec);
+
if (kthread_should_stop()) {
GEM_BUG_ON(request);
break;
}
+ if (request)
+ add_wait_queue(&request->execute, &exec);
+
schedule();
+ if (request)
+ remove_wait_queue(&request->execute, &exec);
+
if (kthread_should_park())
kthread_parkme();
}
--
2.11.0