[Intel-gfx] [PATCH v4 16/38] drm/i915: Added tracking/locking of batch buffer objects

John.C.Harrison at Intel.com John.C.Harrison at Intel.com
Mon Jan 11 10:42:45 PST 2016


From: John Harrison <John.C.Harrison at Intel.com>

The scheduler needs to track interdependencies between batch buffers.
These are calculated by analysing the object lists of the buffers and
looking for common entries. The scheduler also needs to keep those
buffers locked long after the initial IOCTL call has returned to
userland.

v3: Updated to support read-read optimisation.

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 52 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_scheduler.c      | 33 +++++++++++++++++--
 2 files changed, 80 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 13c6217..3c84b97 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1384,7 +1384,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct i915_execbuffer_params *params = &qe.params;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 dispatch_flags;
-	int ret;
+	int ret, i;
 	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args))
@@ -1497,6 +1497,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+	qe.saved_objects = kzalloc(
+			sizeof(*qe.saved_objects) * args->buffer_count,
+			GFP_KERNEL);
+	if (!qe.saved_objects) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	/* Look up object handles */
 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
@@ -1617,7 +1625,30 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->args_DR1                = args->DR1;
 	params->args_DR4                = args->DR4;
 	params->batch_obj               = batch_obj;
-	params->ctx                     = ctx;
+
+	/*
+	 * Save away the list of objects used by this batch buffer for the
+	 * purpose of tracking inter-buffer dependencies.
+	 */
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		/*
+		 * NB: 'drm_gem_object_lookup()' increments the object's
+		 * reference count and so must be matched by a
+		 * 'drm_gem_object_unreference' call.
+		 */
+		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+							  exec[i].handle));
+		qe.saved_objects[i].obj       = obj;
+		qe.saved_objects[i].read_only = obj->base.pending_write_domain == 0;
+
+	}
+	qe.num_objs = i;
+
+	/* Lock and save the context object as well. */
+	i915_gem_context_reference(ctx);
+	params->ctx = ctx;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 	if (ret)
@@ -1648,6 +1679,23 @@ err:
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
+	if (qe.saved_objects) {
+		/* Need to release the objects: */
+		for (i = 0; i < qe.num_objs; i++) {
+			if (!qe.saved_objects[i].obj)
+				continue;
+
+			drm_gem_object_unreference(
+					&qe.saved_objects[i].obj->base);
+		}
+
+		kfree(qe.saved_objects);
+
+		/* Context too */
+		if (params->ctx)
+			i915_gem_context_unreference(params->ctx);
+	}
+
 	/*
 	 * If the request was created but not successfully submitted then it
 	 * must be freed again. If it was submitted then it is being tracked
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 3c282c6..067ef33 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -158,7 +158,23 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 		if (ret)
 			return ret;
 
-		/* Free everything that is owned by the QE structure: */
+		/* Need to release the objects: */
+		for (i = 0; i < qe->num_objs; i++) {
+			if (!qe->saved_objects[i].obj)
+				continue;
+
+			drm_gem_object_unreference(&qe->saved_objects[i].obj->base);
+		}
+
+		kfree(qe->saved_objects);
+		qe->saved_objects = NULL;
+		qe->num_objs = 0;
+
+		/* Free the context object too: */
+		if (qe->params.ctx)
+			i915_gem_context_unreference(qe->params.ctx);
+
+		/* And anything else owned by the QE structure: */
 		kfree(qe->params.cliprects);
 		if (qe->params.dispatch_flags & I915_DISPATCH_SECURE)
 			i915_gem_execbuff_release_batch_obj(qe->params.batch_obj);
@@ -407,7 +423,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
 	int                 flying = 0, queued = 0;
 	int                 ret = 0;
 	bool                do_submit;
-	uint32_t            min_seqno;
+	uint32_t            i, min_seqno;
 	struct list_head    remove;
 
 	if (list_empty(&scheduler->node_queue[ring->id]))
@@ -512,7 +528,18 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
 		if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
 			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
 
-		/* Free everything that is owned by the node: */
+		/* Release the locked buffers: */
+		for (i = 0; i < node->num_objs; i++) {
+			drm_gem_object_unreference(
+					    &node->saved_objects[i].obj->base);
+		}
+		kfree(node->saved_objects);
+
+		/* Context too: */
+		if (node->params.ctx)
+			i915_gem_context_unreference(node->params.ctx);
+
+		/* And anything else owned by the node: */
 		node->params.request->scheduler_qe = NULL;
 		i915_gem_request_unreference(node->params.request);
 		kfree(node->params.cliprects);
-- 
1.9.1



More information about the Intel-gfx mailing list