On 25/08/2016 10:08, Chris Wilson wrote:
<pre wrap="">We are about to specialize object synchronisation to enable nonblocking
execbuf submission. First we make a copy of the current object
synchronisation for execbuffer. The general i915_gem_object_sync() will
be removed following the removal of CS flips in the near future.
Signed-off-by: Chris Wilson <a class="moz-txt-link-rfc2396E" href="mailto:chris@chris-wilson.co.uk"><chris@chris-wilson.co.uk></a>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 64 +++++++++++++++++++++++++++++-
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 601156c353cc..b7cc158733ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1122,6 +1122,68 @@ static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
}
static int
+__eb_sync(struct drm_i915_gem_request *to,
+ struct drm_i915_gem_request *from)
+{
+ int idx, ret;
+
+ if (to->engine == from->engine)
+ return 0;
+
+ idx = intel_engine_sync_index(from->engine, to->engine);
+ if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+ return 0;
+
+ trace_i915_gem_ring_sync_to(to, from);
+ if (!i915.semaphores) {
+ ret = i915_wait_request(from, true, NULL, NO_WAITBOOST);
+ if (ret)
+ return ret;
+ } else {
+ ret = to->engine->semaphore.sync_to(to, from);
+ if (ret)
+ return ret;
+ }
+
+ from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+ return 0;
+}
+
+static int
+eb_sync(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_request *to,
+ bool write)
+{
+ struct i915_gem_active *active;
+ unsigned long active_mask;
+ int idx;
+
+ if (write) {
+ active_mask = i915_gem_object_get_active(obj);
+ active = obj->last_read;
+ } else {
+ active_mask = 1;
+ active = &obj->last_write;
+ }
+
+ for_each_active(active_mask, idx) {
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ request = i915_gem_active_peek(&active[idx],
+ &obj->base.dev->struct_mutex);
+ if (!request)
+ continue;
+
+ ret = __eb_sync(to, request);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
@@ -1133,7 +1195,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->flags & other_rings) {
- ret = i915_gem_object_sync(obj, req);
+ ret = eb_sync(obj, req, obj->base.pending_write_domain);
if (ret)
return ret;
}
</pre>
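
The part worth spelling out is the sync_seqno[] bookkeeping in
__eb_sync(): the inter-engine wait is skipped whenever the target engine
has already synchronised past from's seqno, and the bookmark only
advances after the wait (or semaphore emission) succeeds, so a failure
leaves it untouched and the wait is retried with the next request. The
read/write split in eb_sync() is the usual one: a write must wait for
every outstanding read and write, while a read only needs the last
write. Below is a minimal standalone sketch of the skip logic, using
made-up two-engine types rather than the driver's own:

/*
 * Toy model of the __eb_sync() seqno-skip optimisation above.
 * All types and names here are illustrative stand-ins, not i915's.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 2

struct engine {
	int id;
	/*
	 * sync_seqno[i]: the most recent of *this* engine's seqnos that
	 * engine i has already waited on; mirrors the patch's
	 * from->engine->semaphore.sync_seqno[idx].
	 */
	unsigned int sync_seqno[NUM_ENGINES];
};

struct request {
	struct engine *engine;
	unsigned int seqno;
};

/* Returns true if an inter-engine wait actually had to be emitted. */
static bool sync_to(struct request *to, struct request *from)
{
	int idx = to->engine->id;

	/* Same engine: the ring already orders the two requests. */
	if (to->engine == from->engine)
		return false;

	/* The target engine has already synchronised past this seqno. */
	if (from->seqno <= from->engine->sync_seqno[idx])
		return false;

	/*
	 * A real implementation emits a semaphore wait (or blocks) here;
	 * the bookmark is only advanced once that has succeeded.
	 */
	from->engine->sync_seqno[idx] = from->seqno;
	return true;
}

int main(void)
{
	struct engine rcs = { .id = 0 }, bcs = { .id = 1 };
	struct request r1 = { &bcs, 1 }, r2 = { &bcs, 2 };
	struct request batch = { &rcs, 3 };

	/* Waiting on bcs seqno 2 implies seqno 1 is also complete... */
	printf("sync to bcs#2: %s\n", sync_to(&batch, &r2) ? "wait" : "skip");
	/* ...so the second sync can be skipped entirely. */
	printf("sync to bcs#1: %s\n", sync_to(&batch, &r1) ? "wait" : "skip");
	return 0;
}

Seqnos on one engine complete in order, so waiting on a later seqno
subsumes every earlier one from that engine; that is what makes the
skip in the second call above safe.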

Reviewed-by: John Harrison <john.c.harrison@intel.com>