[PATCH 06/10] drm/i915: Split idling from forcing context switch

Chris Wilson <chris@chris-wilson.co.uk>
Wed May 25 20:44:50 UTC 2016


We only need to force a switch to the kernel context placeholder
during eviction. All other callers of i915_gpu_idle() simply want to
wait until the existing work on the GPU has completed. Pull the forced
switch out into a switch_to_pinned_context() helper local to
i915_gem_evict.c, and rename i915_gpu_idle() to
i915_gem_wait_for_idle() (now taking drm_i915_private rather than
drm_device) to avoid any implication that the context is "parked"
first.

v2: Tweak the error message printed when the wait fails for the
Ironlake VT-d workaround.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
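Not part of the patch itself, only a review aid: with the split, a
caller that must be able to evict everything (eviction with do_idle)
first parks the GPU on the kernel context and then waits, while every
other caller now uses i915_gem_wait_for_idle() alone. The
evict_and_idle() name below is invented for this sketch (and assumes
struct_mutex is held); the helpers are the ones added/renamed by this
patch.

static int evict_and_idle(struct drm_i915_private *dev_priv)
{
        int ret;

        /* Legacy ringbuffer only: queue a switch to the kernel context
         * so that no user context is left pinned by the hardware
         * (a no-op under execlists, see switch_to_pinned_context()).
         */
        ret = switch_to_pinned_context(dev_priv);
        if (ret)
                return ret;

        /* Common to all callers: wait for outstanding requests. */
        ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                return ret;

        i915_gem_retire_requests(dev_priv);
        return 0;
}
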
 drivers/gpu/drm/i915/i915_debugfs.c      |  2 +-
 drivers/gpu/drm/i915/i915_drv.h          |  2 +-
 drivers/gpu/drm/i915/i915_gem.c          | 20 +++----------
 drivers/gpu/drm/i915/i915_gem_evict.c    | 51 +++++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/i915_gem_gtt.c      |  4 +--
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  2 +-
 6 files changed, 56 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ac7e5692496d..615cef736356 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4941,7 +4941,7 @@ i915_drop_caches_set(void *data, u64 val)
 		return ret;
 
 	if (val & DROP_ACTIVE) {
-		ret = i915_gpu_idle(dev);
+		ret = i915_gem_wait_for_idle(dev_priv);
 		if (ret)
 			goto unlock;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 19d0194c728f..b9d9a4205992 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3285,7 +3285,7 @@ int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_i915_gem_object *batch_obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 61812a53e56d..e94758850c77 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3420,29 +3420,17 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
 	return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	int ret;
 
+	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+
 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context == NULL)
 			continue;
 
-		if (!i915.enable_execlists) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_request_alloc(engine, NULL);
-			if (IS_ERR(req))
-				return PTR_ERR(req);
-
-			ret = i915_switch_context(req);
-			i915_add_request_no_flush(req);
-			if (ret)
-				return ret;
-		}
-
 		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
@@ -4703,7 +4691,7 @@ i915_gem_suspend(struct drm_device *dev)
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gem_wait_for_idle(dev_priv);
 	if (ret)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b144c3f5c650..5741b58d186c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,37 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	for_each_engine(engine, dev_priv) {
+		struct drm_i915_gem_request *req;
+		int ret;
+
+		if (engine->last_context == NULL)
+			continue;
+
+		if (engine->last_context == dev_priv->kernel_context)
+			continue;
+
+		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		ret = i915_switch_context(req);
+		i915_add_request_no_flush(req);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
@@ -150,11 +181,17 @@ none:
 
 	/* Only idle the GPU and repeat the search once */
 	if (pass++ == 0) {
-		ret = i915_gpu_idle(dev);
+		struct drm_i915_private *dev_priv = to_i915(dev);
+
+		ret = switch_to_pinned_context(dev_priv);
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(to_i915(dev));
+		ret = i915_gem_wait_for_idle(dev_priv);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(dev_priv);
 		goto search_again;
 	}
 
@@ -261,11 +298,17 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	trace_i915_gem_evict_vm(vm);
 
 	if (do_idle) {
-		ret = i915_gpu_idle(vm->dev);
+		struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+		ret = switch_to_pinned_context(dev_priv);
+		if (ret)
+			return ret;
+
+		ret = i915_gem_wait_for_idle(dev_priv);
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(to_i915(vm->dev));
+		i915_gem_retire_requests(dev_priv);
 
 		WARN_ON(!list_empty(&vm->active_list));
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 46684779d4d6..5860fb73c0e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2261,8 +2261,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 	if (unlikely(ggtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev)) {
-			DRM_ERROR("Couldn't idle GPU\n");
+		if (i915_gem_wait_for_idle(dev_priv)) {
+			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 538c30499848..886a8797566d 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -408,7 +408,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gpu_idle(dev_priv->dev);
+	ret = i915_gem_wait_for_idle(dev_priv);
 	if (ret)
 		goto out;
 
-- 
2.8.1
