[Intel-gfx] [PATCH 2/4] Revert "drm/i915/gem: Almagamate clflushes on suspend"
Michael Cheng
michael.cheng at intel.com
Sat Mar 19 19:42:25 UTC 2022
As we are making i915 more architecture-neutral, let's revert this commit
to the previous logic [1] to avoid using wbinvd_on_all_cpus.
[1]. ac05a22cd07a ("drm/i915/gem: Almagamate clflushes on suspend")
Suggested-by: Lucas De Marchi <lucas.demarchi at intel.com>
Signed-off-by: Michael Cheng <michael.cheng at intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_pm.c | 41 +++++++++++++++++---------
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 00359ec9d58b..3f20961bb59b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -13,13 +13,6 @@
#include "i915_driver.h"
#include "i915_drv.h"
-#if defined(CONFIG_X86)
-#include <asm/smp.h>
-#else
-#define wbinvd_on_all_cpus() \
- pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
-#endif
-
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -123,6 +116,13 @@ int i915_gem_backup_suspend(struct drm_i915_private *i915)
return ret;
}
+static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
+{
+ return list_first_entry_or_null(list,
+ struct drm_i915_gem_object,
+ mm.link);
+}
+
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -132,7 +132,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
NULL
}, **phase;
unsigned long flags;
- bool flush = false;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -158,15 +157,29 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
- list_for_each_entry(obj, *phase, mm.link) {
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
- __start_cpu_write(obj); /* presume auto-hibernate */
+ LIST_HEAD(keep);
+
+ while ((obj = first_mm_object(*phase))) {
+ list_move_tail(&obj->mm.link, &keep);
+
+ /* Beware the background _i915_gem_free_objects */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+
+ i915_gem_object_lock(obj, NULL);
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_gtt_domain(obj, false));
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
}
+
+ list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- if (flush)
- wbinvd_on_all_cpus();
}
int i915_gem_freeze(struct drm_i915_private *i915)
--
2.25.1
More information about the Intel-gfx
mailing list