[Intel-gfx] [RFC 8/9] drm/i915: Connecting execbuf fences to the scheduler
John.C.Harrison at Intel.com
Wed Jan 13 09:57:34 PST 2016
From: John Harrison <John.C.Harrison at Intel.com>
The scheduler now supports sync framework fences being associated with
batch buffers. The execbuf IOCTL allows such fences to be passed in
from userland. This patch wires the two together so that the IOCTL no
longer needs to stall on an incoming fence immediately. Instead, the
stall is absorbed by the scheduler's scheduling algorithm: the batch
buffer simply sits in the scheduler's queue until its fence has
signalled or can safely be ignored.
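In outline, the execbuf path now only blocks up front when the
scheduler is bypassed; otherwise the fence is attached to the request
and left for the scheduler to test. A simplified sketch of the flow
(error handling trimmed; the real code is in the diff below):

    /* Scheduler bypassed: must wait here, before the mutex is taken. */
    if ((args->flags & I915_EXEC_WAIT_FENCE) &&
        (i915.scheduler_override & i915_so_direct_submit)) {
            ret = i915_early_fence_wait(ring, fd_fence_wait);
            if (ret < 0)
                    return ret;
    }

    ...

    /*
     * Take a reference on the incoming fence and attach it to the
     * request; with the scheduler enabled this is what defers the
     * batch until the fence has signalled.
     */
    if (args->flags & I915_EXEC_WAIT_FENCE)
            params->fence_wait = sync_fence_fdget(fd_fence_wait);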
v0.1: Updated a comment that would become incorrect after this patch.
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 25 +++++++++++++++++++++----
drivers/gpu/drm/i915/i915_scheduler.c | 3 +++
2 files changed, 24 insertions(+), 4 deletions(-)
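For reference, the scheduler side of the change is the second hunk:
when popping a node from the queue, the node only becomes eligible
once its incoming fence has signalled or can safely be ignored. A
rough sketch of that decision (i915_safe_to_ignore_fence is added
earlier in this series; the comments paraphrase my understanding of
its intent):

    /*
     * 'signalled' is true when the node has no wait fence or that
     * fence has already signalled (existing code above this hunk).
     * Even an unsignalled fence may be disregarded when waiting on it
     * is pointless, e.g. (presumably) because it will be satisfied by
     * work that is already ahead of this batch.
     */
    if (!signalled)
            signalled = i915_safe_to_ignore_fence(ring,
                                                  node->params.fence_wait);

    /*
     * A node that is still not signalled is skipped: the pop picks a
     * different candidate and the fenced batch stays in the queue,
     * which is where the execbuf IOCTL's stall has gone.
     */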
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 372922a..8232a02 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1412,9 +1412,9 @@ eb_get_batch(struct eb_vmas *eb)
}
/*
- * Do a synchronous wait on any incoming fence object (until the scheduler
- * arrives and implements asynchronous waits). NB: This must be called before
- * acquiring the driver mutex lock!
+ * Do a synchronous wait on any incoming fence object (e.g. in the case where
+ * the scheduler is disabled). NB: This must be called before acquiring the
+ * driver mutex lock!
*/
static int i915_early_fence_wait(struct intel_engine_cs *ring, int fence_fd)
{
@@ -1562,7 +1562,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/*
* Without a GPU scheduler, any fence waits must be done up front.
*/
- if (args->flags & I915_EXEC_WAIT_FENCE) {
+ if ((args->flags & I915_EXEC_WAIT_FENCE) &&
+ (i915.scheduler_override & i915_so_direct_submit))
+ {
ret = i915_early_fence_wait(ring, fd_fence_wait);
if (ret < 0)
return ret;
@@ -1754,6 +1756,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_context_reference(ctx);
params->ctx = ctx;
+ if (args->flags & I915_EXEC_WAIT_FENCE) {
+ if (fd_fence_wait < 0) {
+ DRM_ERROR("Wait fence for ring %d has invalid id %d\n",
+ (int) ring->id, fd_fence_wait);
+ } else {
+ params->fence_wait = sync_fence_fdget(fd_fence_wait);
+ if (params->fence_wait == NULL)
+ DRM_ERROR("Invalid wait fence %d\n",
+ fd_fence_wait);
+ }
+ }
+
if (args->flags & I915_EXEC_CREATE_FENCE) {
/*
* Caller has requested a sync fence.
@@ -1834,6 +1848,9 @@ err:
i915_gem_context_unreference(params->ctx);
}
+ if (params->fence_wait)
+ sync_fence_put(params->fence_wait);
+
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 4b3943a..5228df7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1280,6 +1280,9 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
else
signalled = true;
+ if (!signalled)
+ signalled = i915_safe_to_ignore_fence(ring, node->params.fence_wait);
+
has_local = false;
has_remote = false;
for (i = 0; i < node->num_deps; i++) {
--
1.9.1