[Intel-gfx] [PATCH 46/70] drm/i915: Cache the reset_counter for the request
Chris Wilson
chris@chris-wilson.co.uk
Tue Apr 7 08:21:10 PDT 2015
Instead of querying the reset counter before every access to the ring,
query it the first time we touch the ring, and do a final compare when
submitting the request. For correctness, we then need to sanitize how
the reset_counter is incremented to prevent broken submission and
waiting across resets, in the process fixing the persistent -EIO we
still see today on failed waits.
v2: Rebase
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
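Notes (not part of the commit message): the scheme reduces to sampling the
counter once at request creation and comparing it again at submission. A
condensed sketch of those two ends, simplified from the i915_gem.c hunks
below with error paths omitted:

	/* Request creation (i915_gem_request_alloc): sample the global
	 * reset counter once and store it with the request.
	 */
	rq->reset_counter = i915_reset_counter(&dev_priv->gpu_error);

	/* Request submission (__i915_add_request): a mismatch means a
	 * reset ran in between and reinitialised the rings, so the
	 * half-built request is stale and must be rejected.
	 */
	if (request->reset_counter != i915_reset_counter(&dev_priv->gpu_error))
		return -EAGAIN;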
drivers/gpu/drm/i915/i915_drv.c | 32 +++++++-----
drivers/gpu/drm/i915/i915_drv.h | 29 +++++++----
drivers/gpu/drm/i915/i915_gem.c | 87 ++++++++++++++-------------------
drivers/gpu/drm/i915/i915_irq.c | 28 ++++-------
drivers/gpu/drm/i915/intel_display.c | 10 ++--
drivers/gpu/drm/i915/intel_lrc.c | 7 ---
drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ---
7 files changed, 87 insertions(+), 112 deletions(-)
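The wait side is relaxed in the same spirit: a counter mismatch no longer
escalates to -EAGAIN/-EIO. Condensed from the __i915_wait_request hunk
below:

	/* Inside the wait loop: the cached counter differing from the
	 * current value means a reset ran after this request was
	 * queued. Requests are not requeued over a reset, so the
	 * request is effectively complete; report success instead of
	 * the old -EAGAIN.
	 */
	if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
		ret = 0;
		break;
	}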
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1366e0ec4933..72b01323c549 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -827,6 +827,8 @@ int i915_resume_legacy(struct drm_device *dev)
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+ unsigned reset_counter;
bool simulated;
int ret;
@@ -836,17 +838,23 @@ int i915_reset(struct drm_device *dev)
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
+ reset_counter = atomic_inc_return(&error->reset_counter);
+ if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
+ atomic_set_mask(I915_WEDGED, &error->reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+ return -EIO;
+ }
i915_gem_reset(dev);
- simulated = dev_priv->gpu_error.stop_rings != 0;
+ simulated = error->stop_rings != 0;
ret = intel_gpu_reset(dev);
/* Also reset the gpu hangman. */
if (simulated) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->gpu_error.stop_rings = 0;
+ error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -859,8 +867,7 @@ int i915_reset(struct drm_device *dev)
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ goto error;
}
intel_overlay_reset(dev_priv);
@@ -879,20 +886,14 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- dev_priv->gpu_error.reload_in_reset = true;
-
ret = i915_gem_init_hw(dev);
-
- dev_priv->gpu_error.reload_in_reset = false;
-
- mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
- return ret;
+ goto error;
}
+ mutex_unlock(&dev->struct_mutex);
+
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
@@ -903,6 +904,11 @@ int i915_reset(struct drm_device *dev)
intel_enable_gt_powersave(dev);
return 0;
+
+error:
+ atomic_set_mask(I915_WEDGED, &error->reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 89839751237c..97f5d266b17c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1308,9 +1308,6 @@ struct i915_gpu_error {
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- bool reload_in_reset;
};
enum modeset_restore {
@@ -2072,6 +2069,7 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *ring;
+ unsigned reset_counter;
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -2767,24 +2765,38 @@ struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);
bool i915_gem_retire_requests(struct drm_device *dev);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
+int __must_check i915_gem_check_wedge(unsigned reset_counter,
bool interruptible);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+ return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+ return reset & I915_WEDGED;
+}
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
- return unlikely(atomic_read(&error->reset_counter)
- & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+ return __i915_reset_in_progress(i915_reset_counter(error));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) & I915_WEDGED;
+ return __i915_terminally_wedged(i915_reset_counter(error));
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+ return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -2816,7 +2828,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
#define i915_add_request(ring) \
__i915_add_request(ring, NULL, NULL)
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e90894545fa4..729c7fa02e12 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -100,14 +100,19 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->mm.object_stat_lock);
}
+static inline bool reset_complete(struct i915_gpu_error *error)
+{
+ unsigned reset_counter = i915_reset_counter(error);
+ return (!__i915_reset_in_progress(reset_counter) ||
+ __i915_terminally_wedged(reset_counter));
+}
+
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error) || \
- i915_terminally_wedged(error))
- if (EXIT_COND)
+ if (reset_complete(error))
return 0;
/*
@@ -116,17 +121,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
- EXIT_COND,
+ reset_complete(error),
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
+ } else {
+ return 0;
}
-#undef EXIT_COND
-
- return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -1127,26 +1131,18 @@ put_rpm:
}
int
-i915_gem_check_wedge(struct i915_gpu_error *error,
+i915_gem_check_wedge(unsigned reset_counter,
bool interruptible)
{
- if (i915_reset_in_progress(error)) {
+ if (__i915_reset_in_progress(reset_counter)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
/* Recovery complete, but the reset failed ... */
- if (i915_terminally_wedged(error))
+ if (__i915_terminally_wedged(reset_counter))
return -EIO;
-
- /*
- * Check if GPU Reset is in progress - we need intel_ring_begin
- * to work properly to reinit the hw state while the gpu is
- * still marked as reset-in-progress. Handle this with a flag.
- */
- if (!error->reload_in_reset)
- return -EAGAIN;
}
return 0;
@@ -1206,7 +1202,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *rq)
/**
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
- * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1221,7 +1216,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *rq)
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv)
@@ -1271,12 +1265,12 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
- /* ... but upgrade the -EAGAIN to an -EIO if the gpu
- * is truely gone. */
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret == 0)
- ret = -EAGAIN;
+ if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+ /* As we do not requeue the request over a GPU reset,
+ * if one does occur we know that the request is
+ * effectively complete.
+ */
+ ret = 0;
break;
}
@@ -1414,17 +1408,11 @@ i915_wait_request(struct drm_i915_gem_request *req)
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret)
- return ret;
-
ret = i915_gem_check_olr(req);
if (ret)
return ret;
- ret = __i915_wait_request(req,
- atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
+ ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;
@@ -1499,7 +1487,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *requests[I915_NUM_RINGS];
- unsigned reset_counter;
int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1508,12 +1495,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (!obj->active)
return 0;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
- if (ret)
- return ret;
-
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-
if (readonly) {
struct drm_i915_gem_request *rq;
@@ -1544,8 +1525,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], reset_counter, true,
- NULL, file_priv);
+ ret = __i915_wait_request(requests[i], true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
err:
@@ -2489,6 +2469,9 @@ int __i915_add_request(struct intel_engine_cs *ring,
if (WARN_ON(request == NULL))
return -ENOMEM;
+ if (request->reset_counter != i915_reset_counter(&dev_priv->gpu_error))
+ return -EAGAIN;
+
if (i915.enable_execlists) {
ringbuf = request->ctx->engine[ring->id].ringbuf;
} else
@@ -2640,18 +2623,24 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *rq;
int ret;
if (ring->outstanding_lazy_request)
return 0;
+ ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (rq == NULL)
return -ENOMEM;
kref_init(&rq->ref);
rq->i915 = dev_priv;
+ rq->reset_counter = reset_counter;
ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
if (ret) {
@@ -2999,11 +2988,9 @@ void i915_gem_close_object(struct drm_gem_object *gem,
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_request *req[I915_NUM_RINGS];
- unsigned reset_counter;
int i, n = 0;
int ret;
@@ -3037,7 +3024,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
for (i = 0; i < I915_NUM_RINGS; i++) {
if (obj->last_read_req[i] == NULL)
@@ -3050,7 +3036,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < n; i++) {
if (ret == 0)
- ret = __i915_wait_request(req[i], reset_counter, true,
+ ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
file->driver_priv);
i915_gem_request_unreference__unlocked(req[i]);
@@ -3088,7 +3074,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(rq,
- atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
@@ -4298,14 +4283,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request, *target = NULL;
- unsigned reset_counter;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ /* ABI: return -EIO if wedged */
+ ret = i915_gem_check_wedge(i915_reset_counter(&dev_priv->gpu_error),
+ false);
if (ret)
return ret;
@@ -4316,7 +4302,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
@@ -4324,7 +4309,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 46bcbff89760..47c9c02e6731 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2299,6 +2299,13 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
+static bool reset_pending(struct i915_gpu_error *error)
+{
+ unsigned reset_counter = i915_reset_counter(error);
+ return (__i915_reset_in_progress(reset_counter) &&
+ !__i915_terminally_wedged(reset_counter));
+}
+
/**
* i915_reset_and_wakeup - do process context error handling work
*
@@ -2308,7 +2315,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
static void i915_reset_and_wakeup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2326,7 +2332,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
- if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+ if (reset_pending(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
@@ -2354,25 +2360,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
- if (ret == 0) {
- /*
- * After all the gem state is reset, increment the reset
- * counter and wake up everyone waiting for the reset to
- * complete.
- *
- * Since unlock operations are a one-sided barrier only,
- * we need to insert a barrier here to order any seqno
- * updates before
- * the counter increment.
- */
- smp_mb__before_atomic();
- atomic_inc(&dev_priv->gpu_error.reset_counter);
-
+ if (ret == 0)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
- } else {
- atomic_set_mask(I915_WEDGED, &error->reset_counter);
- }
/*
* Note: The wake_up also serves as a memory barrier so that
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0c2bb2ce04fc..69db1c3b26a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3196,8 +3196,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool pending;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ if (intel_crtc->reset_counter != i915_reset_counter(&dev_priv->gpu_error))
return false;
spin_lock_irq(&dev->event_lock);
@@ -9689,8 +9688,7 @@ static bool page_flip_finished(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ if (crtc->reset_counter != i915_reset_counter(&dev_priv->gpu_error))
return true;
/*
@@ -10109,9 +10107,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
container_of(work, struct intel_mmio_flip, work);
if (mmio_flip->rq)
- WARN_ON(__i915_wait_request(mmio_flip->rq,
- mmio_flip->crtc->reset_counter,
- false, NULL,
+ WARN_ON(__i915_wait_request(mmio_flip->rq, false, NULL,
&mmio_flip->i915->rps.mmioflips));
intel_do_mmio_flip(mmio_flip->crtc);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 26f96999a4a9..fc57d4111e56 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -676,15 +676,8 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx, int num_dwords)
{
struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6b894ab9d0f2..b6b2e076fed4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2189,14 +2189,8 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
int intel_ring_begin(struct intel_engine_cs *ring,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
--
2.1.4