[Intel-gfx] [RFC 05/11] drm/i915: Extending i915_gem_check_wedge to check engine reset in progress
Tomas Elf
tomas.elf at intel.com
Mon Jun 8 10:03:23 PDT 2015
i915_gem_check_wedge now returns a positive result in three different cases:
1. Legacy: A hang has been detected and the recovery has been promoted to a full GPU reset.
2. Per-engine recovery:
a. A single engine reference can be passed to the function, in which
case only that engine will be checked. If that particular engine is
detected to be hung and is to be reset, this will yield a positive
result, but not if reset is in progress for any other engine.
b. No engine reference is passed to the function, in which case all
engines are checked for ongoing per-engine hang recovery.
Also, i915_wait_request was updated to take advantage of this new
functionality. This is important since the TDR hang recovery mechanism needs a
way to force waiting threads that hold the struct_mutex to give up the
struct_mutex and try again after the hang recovery has completed. If
i915_wait_request does not take per-engine hang recovery into account there is
no way for a waiting thread to know that a per-engine recovery is about to
happen and that it needs to back off.
Signed-off-by: Tomas Elf <tomas.elf at intel.com>
Signed-off-by: Arun Siluvery <arun.siluvery at intel.com>
Signed-off-by: Ian Lister <ian.lister at intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 3 +-
drivers/gpu/drm/i915/i915_gem.c | 79 ++++++++++++++++++++++++-------
drivers/gpu/drm/i915/intel_lrc.c | 3 +-
drivers/gpu/drm/i915/intel_ringbuffer.c | 3 +-
4 files changed, 68 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9cc5e8d..d092cb8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2807,7 +2807,8 @@ i915_gem_find_active_request(struct intel_engine_cs *ring);
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
+int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine,
bool interruptible);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c88e5c..2208b0f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -97,12 +97,38 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->mm.object_stat_lock);
}
+static inline int
+i915_engine_reset_in_progress(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ int ret = 0;
+
+ if (engine) {
+ ret = !!(atomic_read(&dev_priv->ring[engine->id].hangcheck.flags)
+ & I915_ENGINE_RESET_IN_PROGRESS);
+ } else {
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (atomic_read(&dev_priv->ring[i].hangcheck.flags)
+ & I915_ENGINE_RESET_IN_PROGRESS) {
+
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static int
-i915_gem_wait_for_error(struct i915_gpu_error *error)
+i915_gem_wait_for_error(struct drm_i915_private *dev_priv)
{
int ret;
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
#define EXIT_COND (!i915_reset_in_progress(error) || \
+ !i915_engine_reset_in_progress(dev_priv, NULL) || \
i915_terminally_wedged(error))
if (EXIT_COND)
return 0;
@@ -131,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ ret = i915_gem_wait_for_error(dev_priv);
if (ret)
return ret;
@@ -1128,10 +1154,15 @@ put_rpm:
}
int
-i915_gem_check_wedge(struct i915_gpu_error *error,
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine,
bool interruptible)
{
- if (i915_reset_in_progress(error)) {
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+
+ if (i915_reset_in_progress(error) ||
+ i915_engine_reset_in_progress(dev_priv, engine)) {
+
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
@@ -1213,6 +1244,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned long timeout_expire;
s64 before, now;
int ret;
+ int reset_in_progress = 0;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1239,11 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+ reset_in_progress =
+ i915_gem_check_wedge(ring->dev->dev_private, NULL, interruptible);
+
+ if ((reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) ||
+ reset_in_progress) {
+
/* ... but upgrade the -EAGAIN to an -EIO if the gpu
* is truely gone. */
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret == 0)
+ if (reset_in_progress)
+ ret = reset_in_progress;
+ else
ret = -EAGAIN;
break;
}
@@ -1327,7 +1365,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ ret = i915_gem_check_wedge(dev_priv, NULL, interruptible);
if (ret)
return ret;
@@ -1396,6 +1434,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned reset_counter;
int ret;
+ struct intel_engine_cs *ring = NULL;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
@@ -1404,7 +1443,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (!req)
return 0;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
+ ring = i915_gem_request_get_ring(req);
+
+ ret = i915_gem_check_wedge(dev_priv, ring, true);
if (ret)
return ret;
@@ -4089,11 +4130,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
unsigned reset_counter;
int ret;
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ ret = i915_gem_wait_for_error(dev_priv);
if (ret)
return ret;
@@ -4112,9 +4149,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+ if (target->ring) {
+ if (i915_gem_check_wedge(dev_priv, NULL, false))
+ return -EIO;
+
+ ret = __i915_wait_request(target, reset_counter, true, NULL,
+ NULL);
+
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work, 0);
+ }
i915_gem_request_unreference__unlocked(target);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index a4273ac..e9940cc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1103,7 +1103,8 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ ret = i915_gem_check_wedge(dev_priv,
+ ring,
dev_priv->mm.interruptible);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0fdf983..fc82942 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2259,7 +2259,8 @@ int intel_ring_begin(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ ret = i915_gem_check_wedge(dev_priv,
+ ring,
dev_priv->mm.interruptible);
if (ret)
return ret;
--
1.7.9.5
More information about the Intel-gfx
mailing list