[Intel-gfx] [PATCH 38/48] drm/i915: Clean up VMAs before freeing

Ben Widawsky benjamin.widawsky at intel.com
Fri Dec 6 23:12:11 CET 2013


From: Ben Widawsky <ben at bwidawsk.net>

It's quite common for an object to simply be on the inactive list (and
not unbound) when we want to free the context. This of course happens
with lazy unbinding. Put simply, this change is needed whenever we want
to free one VMA of an object that isn't fully unbound, for whatever
reason.

NOTE: The aliasing PPGTT is not a proper VM, so it needs special casing.

This addresses the fixup requirement mentioned in:
drm/i915: Better reset handling for contexts

In the flink and dmabuf cases, we can't assert that the object isn't
still active. To keep it more generic, just check the vma's link in the
object's vma list. If we wanted to do a better job, we could track the
last seqno (and active state) per VMA; it was decided not to do this in
the last iteration. Unfortunately this means the assertion can miss
real bugs when flink/dmabuf is used.
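
To illustrate, the check described above boils down to walking the VMAs
still active on the VM being torn down and warning when a VMA's link
into its object's vma list looks wrong; list_empty() and
list_is_singular() are the stock helpers from <linux/list.h>, and the
names mirror the hunk below. A condensed sketch:

	struct i915_vma *vma;

	/* Warn if an active VMA has been unlinked from its object's
	 * vma list, or is its object's only VMA (in which case nothing
	 * else can legitimately be keeping it active). With shared
	 * flink/dmabuf objects this cannot catch every misuse, as
	 * noted above.
	 */
	list_for_each_entry(vma, &vm->active_list, mm_list)
		if (WARN_ON(list_empty(&vma->vma_link) ||
			    list_is_singular(&vma->vma_link)))
			break;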

v2: Use the newly introduced i915_gem_evict_vm(). Note that handling
the aliasing PPGTT is special.
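
For context, ppgtt_release() in the hunk below is a kref release
callback, so the cleanup runs once the last reference to the PPGTT is
dropped. A hypothetical call site (the ppgtt_put() wrapper is
illustration only; kref_put() is the standard kernel API):

	#include <linux/kref.h>

	/* Drop a PPGTT reference; ppgtt_release() (see the hunk below)
	 * is invoked automatically when the refcount reaches zero.
	 */
	static inline void ppgtt_put(struct i915_hw_ppgtt *ppgtt)
	{
		kref_put(&ppgtt->ref, ppgtt_release);
	}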

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h | 48 ++++++++++++++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ff30f3f..fe13023 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2257,6 +2257,17 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);
 
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+					  struct i915_address_space *vm,
+					  int min_size,
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable,
+					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_everything(struct drm_device *dev);
+
 /* i915_gem_gtt.c */
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
@@ -2294,22 +2305,35 @@ static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 static inline void ppgtt_release(struct kref *kref)
 {
 	struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &ppgtt->base;
+
+	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+		ppgtt->base.cleanup(&ppgtt->base);
+		return;
+	}
+
+	/* Make sure vmas are unbound before we take down the drm_mm
+	 */
+	if (!list_empty(&vm->active_list)) {
+		struct i915_vma *vma;
+
+		list_for_each_entry(vma, &vm->active_list, mm_list)
+			if (WARN_ON(list_empty(&vma->vma_link) ||
+				    list_is_singular(&vma->vma_link)))
+				break;
+
+		i915_gem_evict_vm(&ppgtt->base, true);
+	} else {
+		i915_gem_retire_requests(dev);
+		i915_gem_evict_vm(&ppgtt->base, false);
+	}
 
 	ppgtt->base.cleanup(&ppgtt->base);
 }
 
-
-/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-					  struct i915_address_space *vm,
-					  int min_size,
-					  unsigned alignment,
-					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
-int i915_gem_evict_everything(struct drm_device *dev);
-
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
 int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
-- 
1.8.4.2