[Intel-gfx] [PATCH 086/190] drm/i915: Mark the context and address space as closed

Chris Wilson chris at chris-wilson.co.uk
Mon Jan 11 01:17:37 PST 2016


When the user closes the context, mark it and its dependent address
space as closed. As we use an asynchronous destruct method, this has
two purposes: first, it allows us to flag the closed context and detect
internal errors if we try to create any new objects for it (as it is
removed from the user's namespace, these should be internal bugs only);
and second, it allows us to immediately reap stale VMAs.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
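[Note for reviewers, not part of the patch: below is a minimal,
self-contained C sketch of the lifecycle this change introduces --
flag the context and its address space as closed, treat VMA creation
in a closed address space as an internal bug, and reap idle VMAs
immediately. The struct and helper names mirror the diff, but the
types are simplified stand-ins, not the driver's real i915 structures.]

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for i915_address_space: records whether new VMAs may be made. */
struct address_space {
	bool closed;
	int vma_count;
};

/* Stand-in for i915_vma: a closed VMA is reaped once it is idle. */
struct vma {
	struct address_space *vm;
	bool closed;
	bool active;	/* still referenced by an in-flight request */
};

/* Stand-in for intel_context owning a private address space. */
struct context {
	struct address_space vm;
	bool closed;
};

static struct vma *vma_create(struct address_space *vm)
{
	/* Mirrors the GEM_BUG_ON(vm->closed) added in __i915_gem_vma_create:
	 * creating a VMA in a closed address space is an internal bug. */
	assert(!vm->closed);

	struct vma *vma = calloc(1, sizeof(*vma));
	vma->vm = vm;
	vm->vma_count++;
	return vma;
}

static void vma_close(struct vma *vma)
{
	assert(!vma->closed);
	vma->closed = true;
	/* Asynchronous destruct: an active VMA is only flagged here and
	 * reaped later when its last request retires. */
	if (!vma->active) {
		vma->vm->vma_count--;
		free(vma);
	}
}

static void context_close(struct context *ctx)
{
	assert(!ctx->closed);
	ctx->closed = true;
	/* Closing the context closes its private address space, so stale
	 * VMAs can be reaped now rather than lingering until the final
	 * context unreference. */
	ctx->vm.closed = true;
}

int main(void)
{
	struct context ctx = { 0 };
	struct vma *vma = vma_create(&ctx.vm);

	vma_close(vma);		/* idle VMA: reaped immediately */
	context_close(&ctx);	/* further vma_create() calls would assert */

	printf("remaining vmas: %d\n", ctx.vm.vma_count);
	return 0;
}
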
 drivers/gpu/drm/i915/i915_drv.h         |  3 +++
 drivers/gpu/drm/i915/i915_gem.c         | 17 +++++++-------
 drivers/gpu/drm/i915/i915_gem_context.c | 40 +++++++++++++++++++++++++++++----
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  9 ++++++--
 drivers/gpu/drm/i915/i915_gem_gtt.h     |  9 ++++++++
 drivers/gpu/drm/i915/i915_gem_stolen.c  |  2 +-
 6 files changed, 65 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 262d1b247344..fc35a9b8d910 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -888,6 +888,8 @@ struct intel_context {
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
+
+	bool closed:1;
 };
 
 enum fb_op_origin {
@@ -2707,6 +2709,7 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
 
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f95cf39b7d2..16ee3bd7010e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2385,7 +2385,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	}
 }
 
-static void i915_vma_close(struct i915_vma *vma)
+void i915_vma_close(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->closed);
 	vma->closed = true;
@@ -2654,12 +2654,15 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 			return ret;
 	}
 
-	trace_i915_vma_unbind(vma);
-
-	vma->vm->unbind_vma(vma);
+	if (likely(!vma->vm->closed)) {
+		trace_i915_vma_unbind(vma);
+		vma->vm->unbind_vma(vma);
+	}
 	vma->bound = 0;
 
-	list_del_init(&vma->vm_link);
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
 	if (vma->is_ggtt) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
@@ -2670,8 +2673,6 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 		vma->ggtt_view.pages = NULL;
 	}
 
-	drm_mm_remove_node(&vma->node);
-
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
 	if (--obj->bind_count == 0)
@@ -2917,7 +2918,7 @@ search_free:
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->vm_link, &vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vm->inactive_list);
 	obj->bind_count++;
 
 	return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 310a770b7984..4583d8fe3585 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -153,6 +153,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
 	trace_i915_context_free(ctx);
+	GEM_BUG_ON(!ctx->closed);
 
 	if (i915.enable_execlists)
 		intel_lr_context_free(ctx);
@@ -209,6 +210,37 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	return obj;
 }
 
+static void i915_ppgtt_close(struct i915_address_space *vm)
+{
+	struct list_head *phases[] = {
+		&vm->active_list,
+		&vm->inactive_list,
+		&vm->unbound_list,
+		NULL,
+	}, **phase;
+
+	GEM_BUG_ON(i915_is_ggtt(vm));
+	GEM_BUG_ON(vm->closed);
+	vm->closed = true;
+
+	for (phase = phases; *phase; phase++) {
+		struct i915_vma *vma, *vn;
+
+		list_for_each_entry_safe(vma, vn, *phase, vm_link)
+			if (!vma->closed)
+				i915_vma_close(vma);
+	}
+}
+
+static void context_close(struct intel_context *ctx)
+{
+	GEM_BUG_ON(ctx->closed);
+	ctx->closed = true;
+	if (ctx->ppgtt)
+		i915_ppgtt_close(&ctx->ppgtt->base);
+	i915_gem_context_unreference(ctx);
+}
+
 static struct intel_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
@@ -256,7 +288,7 @@ __create_hw_context(struct drm_device *dev,
 	return ctx;
 
 err_out:
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	return ERR_PTR(ret);
 }
 
@@ -318,7 +350,7 @@ err_unpin:
 		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
 	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	return ERR_PTR(ret);
 }
 
@@ -474,7 +506,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 {
 	struct intel_context *ctx = p;
 
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	return 0;
 }
 
@@ -894,7 +926,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	}
 
 	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_unreference(ctx);
+	context_close(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ef093db6b8a6..ad26c9e331aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2130,6 +2130,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 	vm->dev = dev_priv->dev;
 	INIT_LIST_HEAD(&vm->active_list);
 	INIT_LIST_HEAD(&vm->inactive_list);
+	INIT_LIST_HEAD(&vm->unbound_list);
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
@@ -2214,9 +2215,10 @@ void  i915_ppgtt_release(struct kref *kref)
 
 	trace_i915_ppgtt_release(&ppgtt->base);
 
-	/* vmas should already be unbound */
+	/* vmas should already be unbound and destroyed */
 	WARN_ON(!list_empty(&ppgtt->base.active_list));
 	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+	WARN_ON(!list_empty(&ppgtt->base.unbound_list));
 
 	list_del(&ppgtt->base.global_link);
 	drm_mm_takedown(&ppgtt->base.mm);
@@ -3269,6 +3271,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int i;
 
+	GEM_BUG_ON(vm->closed);
+
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
 
@@ -3276,11 +3280,11 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vm_link);
 	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		init_request_active(&vma->last_read[i], i915_vma_retire);
+	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->is_ggtt = i915_is_ggtt(vm);
@@ -3327,6 +3331,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, ggtt, view);
 
+	GEM_BUG_ON(vma->closed);
 	return vma;
 
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d68d5fd02923..6346d1786d41 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -292,6 +292,8 @@ struct i915_address_space {
 	u64 start;		/* Start offset always 0 for dri2 */
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
+	bool closed;
+
 	struct i915_page_scratch *scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -320,6 +322,13 @@ struct i915_address_space {
 	 */
 	struct list_head inactive_list;
 
+	/**
+	 * List of vma that have been unbound.
+	 *
+	 * A reference is not held on the buffer while on this list.
+	 */
+	struct list_head unbound_list;
+
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
 				 enum i915_cache_level level,
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1c81a1470baf..c110563823bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -692,7 +692,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 	vma->bound |= GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
-	list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+	list_move_tail(&vma->vm_link, &ggtt->inactive_list);
 	obj->bind_count++;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-- 
2.7.0.rc3
