[Mesa-dev] [PATCH 49/70] i965: Allow syncobjects to hook into the internal fence tracking
Chris Wilson
chris at chris-wilson.co.uk
Fri Aug 7 13:13:53 PDT 2015
Since we already use fences internally in brw_batch.c to track buffer
busyness, we can expose them directly to implement GL/DRI2 sync objects.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
src/mesa/drivers/dri/i965/brw_batch.c | 41 +++++++++++++-
src/mesa/drivers/dri/i965/brw_batch.h | 23 +++++++-
src/mesa/drivers/dri/i965/intel_syncobj.c | 92 +++++++++----------------------
3 files changed, 85 insertions(+), 71 deletions(-)
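
[Not part of the patch: a minimal usage sketch of how a sync-object style
caller is expected to drive the new fence API, mirroring the intel_syncobj.c
changes below. The example_* names are illustrative only and assume the
brw_batch.h declarations added by this patch; the perf_debug argument is
left NULL for brevity.]

/* Usage sketch only: assumes the brw_batch.h declarations added below.
 * The container_of() signal hookup mirrors intel_syncobj.c. */
#include <stdbool.h>
#include <stdint.h>
#include "brw_batch.h"

struct example_sync {
   struct brw_fence fence;
   bool signalled;
};

/* Invoked on request retirement once the GPU has passed the fence point. */
static void example_signal(struct brw_fence *fence)
{
   struct example_sync *sync = container_of(fence, sync, fence);
   sync->signalled = true;
}

static void example_insert(struct brw_batch *batch, struct example_sync *sync)
{
   /* Flushes the batch and hooks the fence onto the most recent request;
    * returns false when nothing is outstanding, in which case the fence
    * counts as already signalled. */
   sync->signalled = !brw_batch_insert_fence(batch, &sync->fence,
                                             example_signal, NULL);
}

static bool example_client_wait(struct example_sync *sync, uint64_t timeout_ns)
{
   if (sync->signalled)
      return true;

   if (timeout_ns > INT64_MAX) /* the kernel wait takes a signed timeout */
      timeout_ns = INT64_MAX;

   /* Non-blocking completion check (also retires finished requests) ... */
   if (!brw_fence_busy(&sync->fence, NULL))
      return true;

   /* ... or block for up to timeout_ns; 0 means the wait succeeded. */
   return brw_fence_wait(&sync->fence, timeout_ns, NULL) == 0;
}

static void example_delete(struct example_sync *sync)
{
   /* Unlink the fence from its request before freeing the container. */
   brw_fence_finish(&sync->fence);
}
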
diff --git a/src/mesa/drivers/dri/i965/brw_batch.c b/src/mesa/drivers/dri/i965/brw_batch.c
index d9386bb..d1f5828 100644
--- a/src/mesa/drivers/dri/i965/brw_batch.c
+++ b/src/mesa/drivers/dri/i965/brw_batch.c
@@ -191,7 +191,7 @@ static void __brw_request_retire(struct brw_request * const rq)
assert(!__brw_bo_busy(RQ_BO(tmp)) || batch->fini);
assert(RQ_BO(tmp)->exec == NULL);
- list_for_each_entry_safe(struct __brw_fence, fence, &tmp->fences, link) {
+ list_for_each_entry_safe(struct brw_fence, fence, &tmp->fences, link) {
struct brw_bo *bo;
assert(fence->rq == tmp);
@@ -667,7 +667,7 @@ static void __brw_batch_grow_exec(struct brw_batch *batch)
if (new_exec != batch->exec) {
struct list_head * const list = &batch->next_request->fences;
- list_for_each_entry_rev(struct __brw_fence, fence, list, link) {
+ list_for_each_entry_rev(struct brw_fence, fence, list, link) {
struct brw_bo *bo;
if (unlikely(fence->signal != (void *)READ_SIGNAL)) {
@@ -1075,7 +1075,7 @@ int brw_batch_flush(struct brw_batch *batch, struct perf_debug *perf)
}
skip:
- list_for_each_entry_rev(struct __brw_fence, fence, &rq->fences, link) {
+ list_for_each_entry_rev(struct brw_fence, fence, &rq->fences, link) {
struct brw_bo *bo;
if (unlikely(fence->signal != (void *)READ_SIGNAL)) {
@@ -1391,6 +1391,41 @@ struct brw_bo *brw_bo_create_from_name(struct brw_batch *batch,
return bo;
}
+bool brw_batch_insert_fence(struct brw_batch *batch,
+                            struct brw_fence *fence,
+                            void (*signal)(struct brw_fence *),
+                            struct perf_debug *perf)
+{
+   brw_batch_flush(batch, perf);
+
+   fence->rq = batch->requests[batch->ring].mru;
+   if (fence->rq == NULL)
+      return false;
+
+   fence->signal = signal ?: (void *)NO_SIGNAL;
+   list_addtail(&fence->link, &fence->rq->fences);
+   return true;
+}
+
+int brw_fence_wait(struct brw_fence *fence,
+                   int64_t timeout,
+                   struct perf_debug *perf)
+{
+   if (fence->rq == NULL)
+      return 0;
+
+   return __brw_bo_wait(RQ_BO(fence->rq), timeout, perf);
+}
+
+void brw_fence_finish(struct brw_fence *fence)
+{
+   if (fence->rq == NULL)
+      return;
+
+   list_del(&fence->link);
+   fence->rq = NULL;
+}
+
/*
* Write a portion of the *linear* buffer using the pointer provided.
*
diff --git a/src/mesa/drivers/dri/i965/brw_batch.h b/src/mesa/drivers/dri/i965/brw_batch.h
index fee4d80..da88bc2 100644
--- a/src/mesa/drivers/dri/i965/brw_batch.h
+++ b/src/mesa/drivers/dri/i965/brw_batch.h
@@ -65,16 +65,16 @@ enum brw_bo_domain { DOMAIN_NONE, DOMAIN_CPU, DOMAIN_GTT, DOMAIN_GPU };
* the GPU passes that point, the fence will be signalled. Or you can wait
* for a fence to complete.
*/
-struct __brw_fence {
+struct brw_fence {
struct brw_request *rq;
struct list_head link;
- void (*signal)(struct __brw_fence *);
+ void (*signal)(struct brw_fence *);
};
typedef struct brw_bo {
struct brw_batch *batch;
struct drm_i915_gem_exec_object2 *exec;
- struct __brw_fence read, write;
+ struct brw_fence read, write;
unsigned dirty : 1;
unsigned domain : 2;
@@ -341,6 +341,23 @@ static inline void brw_bo_put(struct brw_bo *bo)
__brw_bo_free(bo);
}
+bool
+brw_batch_insert_fence(struct brw_batch *batch,
+                       struct brw_fence *fence,
+                       void (*signal)(struct brw_fence *),
+                       struct perf_debug *perf);
+
+static inline bool
+brw_fence_busy(struct brw_fence *fence, struct perf_debug *perf)
+{
+   return __brw_request_busy(fence->rq, BUSY_FLUSH | BUSY_RETIRE, perf);
+}
+
+int brw_fence_wait(struct brw_fence *fence,
+                   int64_t timeout,
+                   struct perf_debug *perf);
+void brw_fence_finish(struct brw_fence *fence);
+
/* Control batch command insertion and submission to hw */
MUST_CHECK int brw_batch_begin(struct brw_batch *batch,
uint32_t estimated_bytes,
diff --git a/src/mesa/drivers/dri/i965/intel_syncobj.c b/src/mesa/drivers/dri/i965/intel_syncobj.c
index d55cf4b..b1ae15c 100644
--- a/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -43,80 +43,30 @@
#include "brw_context.h"
#include "intel_reg.h"
-struct brw_fence {
- /** The fence waits for completion of this batch. */
- brw_bo *batch_bo;
-
- bool signalled;
-};
-
struct intel_gl_sync_object {
struct gl_sync_object Base;
struct brw_fence fence;
};
-static void
-brw_fence_finish(struct brw_fence *fence)
-{
- brw_bo_put(fence->batch_bo);
-}
-
-static void
-brw_fence_insert(struct brw_context *brw, struct brw_fence *fence)
-{
- assert(!fence->batch_bo);
- assert(!fence->signalled);
-
- brw_mi_flush(brw, brw->batch.ring);
- fence->batch_bo = brw_bo_get(brw->batch.bo);
- brw_batch_flush(&brw->batch, PERF_DEBUG(brw, "SyncFence"));
-}
-
-static bool
-brw_fence_has_completed(struct brw_fence *fence)
-{
- if (fence->signalled)
- return true;
-
- if (brw_bo_busy(fence->batch_bo, BUSY_WRITE | BUSY_RETIRE, NULL)) {
- brw_bo_put(fence->batch_bo);
- fence->batch_bo = NULL;
- fence->signalled = true;
- return true;
- }
-
- return false;
-}
-
/**
* Return true if the function successfully signals or has already signalled.
* (This matches the behavior expected from __DRI2fence::client_wait_sync).
*/
static bool
-brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
- uint64_t timeout)
+brw_fence_client_wait(struct brw_context *brw,
+ struct brw_fence *fence,
+ uint64_t timeout,
+ struct perf_debug *perf)
{
- if (fence->signalled)
- return true;
-
- assert(fence->batch_bo);
-
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
- * immediately for timeouts <= 0. The best we can do is to clamp the
- * timeout to INT64_MAX. This limits the maximum timeout from 584 years to
- * 292 years - likely not a big deal.
+ * immediately for timeout == 0, and waits indefinitely if timeout is negative.
+ * The best we can do is to clamp the timeout to INT64_MAX. This limits
+ * the maximum timeout from 584 years to 292 years - likely not a big deal.
*/
if (timeout > INT64_MAX)
timeout = INT64_MAX;
- if (drm_intel_gem_bo_wait(fence->batch_bo->base, timeout) != 0)
- return false;
-
- fence->signalled = true;
- brw_bo_put(fence->batch_bo);
- fence->batch_bo = NULL;
-
- return true;
+ return brw_fence_wait(fence, timeout, perf) == 0;
}
static void
@@ -151,13 +101,23 @@ intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
}
static void
+__intel_fence_signal(struct brw_fence *fence)
+{
+ struct intel_gl_sync_object *sync = container_of(fence, sync, fence);
+
+ sync->Base.StatusFlag = 1;
+}
+
+static void
intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
GLenum condition, GLbitfield flags)
{
struct brw_context *brw = brw_context(ctx);
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
- brw_fence_insert(brw, &sync->fence);
+ s->StatusFlag = !brw_batch_insert_fence(&brw->batch,
+ &sync->fence, __intel_fence_signal,
+ PERF_DEBUG(brw, "FenceSync"));
}
static void
@@ -167,8 +127,8 @@ intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
struct brw_context *brw = brw_context(ctx);
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
- if (brw_fence_client_wait(brw, &sync->fence, timeout))
- s->StatusFlag = 1;
+ brw_fence_client_wait(brw, &sync->fence, timeout,
+ PERF_DEBUG(brw, "ClientWaitSync"));
}
static void
@@ -186,8 +146,8 @@ intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
{
struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
- if (brw_fence_has_completed(&sync->fence))
- s->StatusFlag = 1;
+ brw_fence_busy(&sync->fence,
+ PERF_DEBUG(brw_context(ctx), "CheckSync"));
}
void
@@ -211,7 +171,8 @@ intel_dri_create_fence(__DRIcontext *ctx)
if (!fence)
return NULL;
- brw_fence_insert(brw, fence);
+ brw_batch_insert_fence(&brw->batch, fence, NULL,
+ PERF_DEBUG(brw, "DRI2CreateFence"));
return fence;
}
@@ -232,7 +193,8 @@ intel_dri_client_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags
struct brw_context *brw = ctx->driverPrivate;
struct brw_fence *fence = driver_fence;
- return brw_fence_client_wait(brw, fence, timeout);
+ return brw_fence_client_wait(brw, fence, timeout,
+ PERF_DEBUG(brw, "DRI2ClientFenceWait"));
}
static void
--
2.5.0