[RFC PATCH 146/162] drm/i915/pm: suspend and restore ppgtt mapping

Matthew Auld matthew.auld at intel.com
Fri Nov 27 12:07:02 UTC 2020


From: Prathap Kumar Valsan <prathap.kumar.valsan at intel.com>

During suspend we will lose all page tables, since they are allocated
in LMEM. To make sure the contexts do not access the now-invalid page
tables after we resume, evict every vma bound to a vm during suspend.
This includes the kernel vm.

During resume, restore the page tables so that they point back at the
scratch page.

Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan at intel.com>
Signed-off-by: Venkata Ramana Nayana <venkata.ramana.nayana at intel.com>
Cc: CQ Tang <cq.tang at intel.com>
---
 drivers/gpu/drm/i915/gt/gen8_ppgtt.c  |  17 ++++
 drivers/gpu/drm/i915/gt/gen8_ppgtt.h  |   2 +
 drivers/gpu/drm/i915/gt/intel_ppgtt.c |   1 +
 drivers/gpu/drm/i915/i915_drv.c       | 102 +++++++++++++++++++++++---
 4 files changed, 113 insertions(+), 9 deletions(-)
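
[ Reviewer note, not part of the commit message: below is a condensed
  view of the suspend-side ordering this patch adds to
  i915_drm_suspend(), for ease of review.  The BLT window helpers are
  defined elsewhere in this series, and this is only a sketch of the
  hunk further down.

      if (HAS_LMEM(dev_priv)) {
              /* Swap LMEM object contents out to system memory. */
              ret = intel_dmem_evict_buffers(dev, true);
              if (ret)
                      return ret;

              /* Drop the BLT windows set up elsewhere in the series. */
              i915_teardown_blt_windows(dev_priv);

              /*
               * Unbind every vma, kernel vm included, before the
               * LMEM-backed page tables are lost.
               */
              ret = i915_gem_suspend_ppgtt_mappings(dev_priv);
      }

  On resume the order is reversed: gen8_restore_ppgtt_mappings() points
  each scratch level at the level below it and fills the top-level page
  directory with scratch, so any access through these vms lands in
  scratch until objects are rebound; only then are the BLT windows
  recreated and the swapped-out buffers copied back into LMEM. ]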

diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index b6fcebeef02a..704cab807e0b 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -775,3 +775,20 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
 	kfree(ppgtt);
 	return ERR_PTR(err);
 }
+
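+/*
+ * Point each scratch level at the level below and fill the top-level
+ * page directory with scratch: the VM is left empty but valid until
+ * its objects are rebound after resume.
+ */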
+void gen8_restore_ppgtt_mappings(struct i915_address_space *vm)
+{
+	const unsigned int count = gen8_pd_top_count(vm);
+	int i;
+
+	for (i = 1; i <= vm->top; i++)
+		fill_px(vm->scratch[i], vm->scratch[i - 1]->encode);
+
+	fill_page_dma(px_base(i915_vm_to_ppgtt(vm)->pd),
+		      vm->scratch[vm->top]->encode, count);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index 76a08b9c1f5c..3fa4b95aaabd 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -6,8 +6,10 @@
 #ifndef __GEN8_PPGTT_H__
 #define __GEN8_PPGTT_H__
 
+struct i915_address_space;
 struct intel_gt;
 
+void gen8_restore_ppgtt_mappings(struct i915_address_space *vm);
 struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 34a02643bb75..9b3eacd12a7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -9,6 +9,7 @@
 #include "intel_gtt.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_context.h"
 #include "gen6_ppgtt.h"
 #include "gen8_ppgtt.h"
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e8c4931fc818..7115f4db5043 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -64,6 +64,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_mman.h"
+#include "gt/gen8_ppgtt.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_rc6.h"
@@ -1136,13 +1137,13 @@ static int intel_dmem_evict_buffers(struct drm_device *dev, bool in_suspend)
 
 				mutex_unlock(&mem->objects.lock);
 
-				if (in_suspend)
-					i915_gem_object_unbind(obj, 0);
-
 				if (in_suspend) {
 					obj->swapto = NULL;
 					obj->evicted = false;
 					obj->do_swapping = true;
+
+					i915_gem_object_unbind(obj, 0);
+
 					ret = __i915_gem_object_put_pages(obj);
 					obj->do_swapping = false;
 					if (ret) {
@@ -1176,6 +1177,43 @@ static int intel_dmem_evict_buffers(struct drm_device *dev, bool in_suspend)
 	return ret;
 }
 
+static int i915_gem_suspend_ppgtt_mappings(struct drm_i915_private *i915)
+{
+	struct i915_gem_context *ctx, *cn;
+	int ret;
+
+	spin_lock(&i915->gem.contexts.lock);
+	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+		struct i915_address_space *vm;
+
+		if (!kref_get_unless_zero(&ctx->ref))
+			continue;
+		spin_unlock(&i915->gem.contexts.lock);
+
+		vm = i915_gem_context_get_vm_rcu(ctx);
+		mutex_lock(&vm->mutex);
+		ret = i915_gem_evict_vm(vm);
+		mutex_unlock(&vm->mutex);
+		if (ret) {
+			GEM_WARN_ON(ret);
+			i915_vm_put(vm);
+			i915_gem_context_put(ctx);
+			return ret;
+		}
+		i915_vm_put(vm);
+		spin_lock(&i915->gem.contexts.lock);
+		list_safe_reset_next(ctx, cn, link);
+		i915_gem_context_put(ctx);
+	}
+	spin_unlock(&i915->gem.contexts.lock);
+
+	mutex_lock(&i915->gt.vm->mutex);
+	ret = i915_gem_evict_vm(i915->gt.vm);
+	mutex_unlock(&i915->gt.vm->mutex);
+
+	return ret;
+}
+
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1213,9 +1251,17 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
-	ret = intel_dmem_evict_buffers(dev, true);
-	if (ret)
-		return ret;
+	if (HAS_LMEM(dev_priv))	{
+		ret = intel_dmem_evict_buffers(dev, true);
+		if (ret)
+			return ret;
+
+		i915_teardown_blt_windows(dev_priv);
+
+		ret = i915_gem_suspend_ppgtt_mappings(dev_priv);
+		if (ret)
+			return ret;
+	}
 
 	dev_priv->suspend_count++;
 
@@ -1306,6 +1352,36 @@ int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
 	return i915_drm_suspend_late(&i915->drm, false);
 }
 
+static void i915_gem_restore_ppgtt_mappings(struct drm_i915_private *i915)
+{
+	struct i915_gem_context *ctx, *cn;
+
+	spin_lock(&i915->gem.contexts.lock);
+
+	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+		struct i915_address_space *vm;
+
+		if (!kref_get_unless_zero(&ctx->ref))
+			continue;
+
+		spin_unlock(&i915->gem.contexts.lock);
+
+		vm = i915_gem_context_get_vm_rcu(ctx);
+		mutex_lock(&vm->mutex);
+		gen8_restore_ppgtt_mappings(vm);
+		mutex_unlock(&vm->mutex);
+		i915_vm_put(vm);
+		spin_lock(&i915->gem.contexts.lock);
+		list_safe_reset_next(ctx, cn, link);
+		i915_gem_context_put(ctx);
+	}
+	spin_unlock(&i915->gem.contexts.lock);
+
+	mutex_lock(&i915->gt.vm->mutex);
+	gen8_restore_ppgtt_mappings(i915->gt.vm);
+	mutex_unlock(&i915->gt.vm->mutex);
+}
+
 static int i915_drm_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1342,9 +1418,17 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	drm_mode_config_reset(dev);
 
-	ret = intel_dmem_evict_buffers(dev, false);
-	if (ret)
-		DRM_ERROR("i915_resume:i915_gem_object_pin_pages failed with err=%d\n", ret);
+	if (HAS_LMEM(dev_priv)) {
+		i915_gem_restore_ppgtt_mappings(dev_priv);
+
+		ret = i915_setup_blt_windows(dev_priv);
+		if (ret)
+			GEM_BUG_ON(ret);
+
+		ret = intel_dmem_evict_buffers(dev, false);
+		if (ret)
+			DRM_ERROR("i915_resume:i915_gem_object_pin_pages failed with err=%d\n", ret);
+	}
 
 	i915_gem_resume(dev_priv);
 
-- 
2.26.2


