[PATCH 12/22] drm/i915/gem: Re-evaluate execbuf throttling after wait
Chris Wilson
chris at chris-wilson.co.uk
Fri Dec 20 13:55:41 UTC 2019
Currently we evaluate the execbuf throttle request only once before
waiting on it. After the wait completes, reacquire the timeline lock,
retire the completed request, and loop back to re-evaluate whether we
still need to throttle before pinning the engine. To make the explicit
retire safe, introduce an I915_FENCE_FLAG_RETIRED flag (set in
i915_request_retire, queried via i915_request_is_dead) so a request is
never retired twice.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
.../gpu/drm/i915/gem/i915_gem_execbuffer.c | 36 +++++++++++--------
drivers/gpu/drm/i915/i915_request.c | 2 ++
drivers/gpu/drm/i915/i915_request.h | 12 +++++++
3 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 2da2b0589856..36923fc67bca 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2247,7 +2247,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
- struct i915_request *rq;
int err;
/*
@@ -2270,6 +2269,12 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
if (err)
return err;
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_unpin;
+ }
+
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2278,21 +2283,18 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
* until the timeline is idle, which in turn releases the wakeref
* taken on the engine, and the parent device.
*/
- tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto err_unpin;
- }
-
intel_context_enter(ce);
- rq = eb_throttle(ce);
- intel_context_timeline_unlock(tl);
-
- if (rq) {
- bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ do {
+ const bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ struct i915_request *rq;
long timeout;
+ rq = eb_throttle(ce);
+ intel_context_timeline_unlock(tl);
+ if (!rq)
+ break;
+
timeout = MAX_SCHEDULE_TIMEOUT;
if (nonblock)
timeout = 0;
@@ -2300,13 +2302,17 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
timeout = i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
timeout);
- i915_request_put(rq);
-
if (timeout < 0) {
+ i915_request_put(rq);
err = nonblock ? -EWOULDBLOCK : timeout;
goto err_exit;
}
- }
+
+ mutex_lock(&tl->mutex);
+ if (!i915_request_is_dead(rq))
+ i915_request_retire_upto(rq);
+ i915_request_put(rq);
+ } while (1);
eb->engine = ce->engine;
eb->context = ce;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 14a5a99284fa..7376767d79af 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -269,6 +269,8 @@ bool i915_request_retire(struct i915_request *rq)
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
__notify_execute_cb(rq);
}
+ GEM_BUG_ON(i915_request_is_dead(rq));
+ set_bit(I915_FENCE_FLAG_RETIRED, &rq->fence.flags);
GEM_BUG_ON(!list_empty(&rq->execute_cb));
spin_unlock_irq(&rq->lock);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 0e4fe3205ce7..9e90c2dc9262 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -76,6 +76,13 @@ enum {
* a request is on the various signal_list.
*/
I915_FENCE_FLAG_SIGNAL,
+
+ /*
+ * I915_FENCE_FLAG_RETIRED - this request has been retired
+ *
+ * Mark the request as having been retired; don't retire again!
+ */
+ I915_FENCE_FLAG_RETIRED,
};
/**
@@ -333,6 +340,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
+static inline bool i915_request_is_dead(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_RETIRED, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
--
2.24.1
More information about the Intel-gfx-trybot
mailing list