[Intel-gfx] [PATCH 08/11] drm/i915: fixup i915_gem_evict_everything to actually evict everything
Daniel Vetter
daniel.vetter at ffwll.ch
Fri Jan 15 13:24:15 CET 2010
Due to our not-so-optimal write-domain tracking, i915_gem_flush on its
own is not sufficient to idle the gpu. When the gpu is actually busy we
most likely have a few bos with write_domain != 0 on the active_list
(why would the gpu be busy when not creating dirty bos?). After the
i915_wait_request these bos end up on the flushing_list, so a single
flush-then-wait cycle leaves them behind. I've left in the BUG_ON I
used to verify this theory.
Therefore beef up i915_gpu_idle to first wait for any outstanding
rendering and only then flush everything, and use this helper in
i915_gem_evict_everything.
This has the nice side effect of potentially fixing s/r related
corruptions: save for cases where the bios somehow flushed the gpu
caches for us, i915_gem_idle (the original user of i915_gpu_idle)
suffered from the same problem. Also move i915_gpu_idle around to
avoid forward declarations.
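To make the reordering concrete, here is a standalone userspace C
sketch (illustration only, not driver code; the list transitions are a
deliberate simplification of the commit message, and the helper names
merely mirror the driver's). It shows why flush-then-wait leaves dirty
bos stranded on the flushing_list, while wait-then-flush-then-wait
retires everything onto the inactive_list:

	#include <stdbool.h>
	#include <stdio.h>

	enum list_id { ACTIVE, FLUSHING, INACTIVE };

	struct bo {
		unsigned write_domain;	/* != 0: the gpu has dirtied this bo */
		enum list_id list;
	};

	/* Model of i915_wait_request: retiring an active bo that still has
	 * a pending write domain parks it on the flushing_list instead of
	 * the inactive_list. */
	static void wait_request(struct bo *bos, int n)
	{
		for (int i = 0; i < n; i++)
			if (bos[i].list == ACTIVE)
				bos[i].list = bos[i].write_domain ?
					      FLUSHING : INACTIVE;
	}

	/* Model of i915_gem_flush + wait: flushed bos on the flushing_list
	 * lose their write domain and retire onto the inactive_list. */
	static void flush_and_wait(struct bo *bos, int n)
	{
		for (int i = 0; i < n; i++)
			if (bos[i].list == FLUSHING) {
				bos[i].write_domain = 0;
				bos[i].list = INACTIVE;
			}
	}

	static bool gpu_idle(const struct bo *bos, int n)
	{
		for (int i = 0; i < n; i++)
			if (bos[i].list != INACTIVE)
				return false;
		return true;
	}

	int main(void)
	{
		/* A busy gpu: one clean, one dirty bo on the active_list. */
		struct bo bos[] = {
			{ .write_domain = 0, .list = ACTIVE },
			{ .write_domain = 1, .list = ACTIVE },
		};
		int n = sizeof(bos) / sizeof(bos[0]);

		/* Old ordering: flush first (the flushing_list is still
		 * empty, so this is a no-op here), then wait -- the wait
		 * moves the dirty bo onto the flushing_list. Not idle. */
		flush_and_wait(bos, n);
		wait_request(bos, n);
		printf("flush then wait -> idle: %s\n",
		       gpu_idle(bos, n) ? "yes" : "no");

		/* Patched ordering: the wait above already populated the
		 * flushing_list; a second flush+wait now empties it. */
		flush_and_wait(bos, n);
		printf("wait then flush+wait -> idle: %s\n",
		       gpu_idle(bos, n) ? "yes" : "no");
		return 0;
	}

In this model the BUG_ON added to i915_gem_evict_everything below
corresponds to asserting that the flushing_list is empty once
i915_gpu_idle has returned.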
Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
drivers/gpu/drm/i915/i915_gem.c | 69 +++++++++++++++++++++------------------
1 files changed, 37 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d8812ce..6bf4481 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2103,32 +2103,61 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
}
static int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
uint32_t seqno;
int ret;
- bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
- return -ENOSPC;
+ return 0;
- /* Flush everything (on to the inactive lists) and evict */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ /* Wait for any outstanding rendering to complete ... */
seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
if (seqno == 0)
return -ENOMEM;
ret = i915_wait_request(dev, seqno);
+ if (ret != 0)
+ return ret;
+
+ /* ... and flush everything onto the inactive list. */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ return i915_wait_request(dev, seqno);
+}
+
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
@@ -4455,30 +4484,6 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
return 0;
}
-static int
-i915_gpu_idle(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- bool lists_empty;
- uint32_t seqno;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- if (lists_empty)
- return 0;
-
- /* Flush everything onto the inactive list. */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
-
- return i915_wait_request(dev, seqno);
-}
-
int
i915_gem_idle(struct drm_device *dev)
{
--
1.6.6