[RFC PATCH 143/162] drm/i915: suspend/resume eviction
Matthew Auld
matthew.auld at intel.com
Fri Nov 27 12:06:59 UTC 2020
From: Venkata Ramana Nayana <venkata.ramana.nayana at intel.com>
As the initial phase of the implementation, when the system is idle,
copy the user objects from LMEM to SMEM during suspend and restore
them on resume. The present implementation uses memcpy-based
eviction for the swapout/swapin of the objects. To test the
functionality, suspend is initiated as part of an IGT application.
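For reference, the suspend cycle can be driven from user space in the
usual way by writing "mem" to /sys/power/state. A minimal sketch of
such a trigger (not the actual IGT code; assumes root and a kernel
with suspend-to-RAM support) would be:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Trigger a suspend-to-RAM cycle so the LMEM->SMEM eviction path runs. */
int main(void)
{
	int fd = open("/sys/power/state", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/power/state");
		return 1;
	}
	/* The system suspends here; the write returns after resume. */
	if (write(fd, "mem", 3) != 3) {
		perror("write /sys/power/state");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}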
Signed-off-by: Venkata Ramana Nayana <venkata.ramana.nayana at intel.com>
Cc: CQ Tang <cq.tang at intel.com>
---
.../gpu/drm/i915/gem/i915_gem_object_types.h | 3 +
drivers/gpu/drm/i915/i915_drv.c | 83 +++++++++++++++++++
2 files changed, 86 insertions(+)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e9f42d3137b3..331d113f7d5b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -322,6 +322,9 @@ struct drm_i915_gem_object {
*/
bool do_swapping;
struct drm_i915_gem_object *swapto;
+
+ /** mark evicted object during suspend */
+ bool evicted;
};
static inline struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 78b528e89486..e8c4931fc818 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1102,11 +1102,86 @@ static int i915_drm_prepare(struct drm_device *dev)
return 0;
}
+static int intel_dmem_evict_buffers(struct drm_device *dev, bool in_suspend)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mem;
+ int id, ret = 0;
+
+ /*
+ * FIXME: Presently using memcpy; will be replaced
+ * with the blitter once its issues are fixed.
+ */
+ i915->params.enable_eviction = 1;
+
+ for_each_memory_region(mem, i915, id) {
+ struct list_head still_in_list;
+ INIT_LIST_HEAD(&still_in_list);
+ if (mem->type == INTEL_MEMORY_LOCAL && mem->total) {
+ mutex_lock(&mem->objects.lock);
+ while ((obj = list_first_entry_or_null(&mem->objects.list,
+ typeof(*obj),
+ mm.region_link))) {
+
+ list_move_tail(&obj->mm.region_link, &still_in_list);
+
+ if (!i915_gem_object_has_pages(obj) && in_suspend)
+ continue;
+
+ /* Ignore previously evicted objects */
+ if (obj->swapto && in_suspend)
+ continue;
+
+ mutex_unlock(&mem->objects.lock);
+
+ if (in_suspend)
+ i915_gem_object_unbind(obj, 0);
+
+ if (in_suspend) {
+ obj->swapto = NULL;
+ obj->evicted = false;
+ obj->do_swapping = true;
+ ret = __i915_gem_object_put_pages(obj);
+ obj->do_swapping = false;
+ if (ret) {
+ /*
+ * FIXME: internal ctx objects still pinned
+ * returning as BUSY. Presently just evicting
+ * the user objects, will fix it later
+ */
+ obj->evicted = false;
+ ret = 0;
+ } else
+ obj->evicted = true;
+ } else {
+ if (obj->swapto && obj->evicted) {
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret) {
+ i915_gem_object_put(obj);
+ } else {
+ i915_gem_object_unpin_pages(obj);
+ obj->evicted = false;
+ }
+ }
+ }
+ mutex_lock(&mem->objects.lock);
+ }
+ list_splice_tail(&still_in_list, &mem->objects.list);
+ mutex_unlock(&mem->objects.lock);
+ }
+ }
+ i915->params.enable_eviction = 3;
+ return ret;
+}
+
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
+ int ret = 0;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1138,6 +1213,10 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+ ret = intel_dmem_evict_buffers(dev, true);
+ if (ret)
+ return ret;
+
dev_priv->suspend_count++;
intel_csr_ucode_suspend(dev_priv);
@@ -1263,6 +1342,10 @@ static int i915_drm_resume(struct drm_device *dev)
drm_mode_config_reset(dev);
+ ret = intel_dmem_evict_buffers(dev, false);
+ if (ret)
+ DRM_ERROR("i915_resume:i915_gem_object_pin_pages failed with err=%d\n", ret);
+
i915_gem_resume(dev_priv);
intel_modeset_init_hw(dev_priv);
--
2.26.2