[Intel-gfx] [PATCH 17/21] i965: Split intel_syncobject into vfuncs

Chris Wilson chris at chris-wilson.co.uk
Thu Aug 25 09:08:35 UTC 2016


Separate out the underlying fence implementation for the sync object so
that we can extend the internal seqno-based fence with an external fd
fence in the next few patches.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 src/mesa/drivers/dri/i965/intel_syncobj.c | 33 ++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/intel_syncobj.c b/src/mesa/drivers/dri/i965/intel_syncobj.c
index d5071f6..14fc4b8 100644
--- a/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -76,7 +76,16 @@ void brw_context_fini_fences(struct brw_context *brw)
    munmap(brw->fences.map, 4096);
 }
 
+struct brw_fence;
+
+struct brw_fence_ops {
+   bool (*check)(struct brw_fence *fence);
+   bool (*client_wait)(struct brw_fence *, uint64_t timeout);
+   void (*server_wait)(struct brw_fence *, struct brw_context *brw);
+};
+
 struct brw_fence {
+   const struct brw_fence_ops *ops;
    drm_intel_bo *batch;
    uint32_t *hw_seqno;
    uint32_t seqno;
@@ -102,6 +111,11 @@ static void seqno_insert(struct brw_fence *fence, struct brw_context *brw)
    fence->batch = brw->batch.bo;
 }
 
+static bool seqno_check(struct brw_fence *fence)
+{
+   return seqno_passed(fence);
+}
+
 static bool seqno_client_wait(struct brw_fence *fence, uint64_t timeout)
 {
    if (seqno_passed(fence))
@@ -132,6 +146,12 @@ static void seqno_server_wait(struct brw_fence *fence, struct brw_context *brw)
     */
 }
 
+static const struct brw_fence_ops seqno_ops = {
+   .check = seqno_check,
+   .client_wait = seqno_client_wait,
+   .server_wait = seqno_server_wait,
+};
+
 static void brw_fence_finish(struct brw_fence *fence)
 {
    if (fence->batch)
@@ -152,6 +172,7 @@ intel_gl_new_sync_object(struct gl_context *ctx, GLuint id)
    if (!sync)
       return NULL;
 
+   sync->fence.ops = &seqno_ops;
    return &sync->Base;
 }
 
@@ -171,6 +192,7 @@ intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
    struct brw_context *brw = brw_context(ctx);
    struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
+   assert(sync->fence.ops == &seqno_ops);
    seqno_insert(&sync->fence, brw);
 }
 
@@ -184,7 +206,7 @@ intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
    if (sync->fence.batch == brw->batch.bo)
       intel_batchbuffer_flush(brw);
 
-   if (seqno_client_wait(&sync->fence, timeout))
+   if (sync->fence.ops->client_wait(&sync->fence, timeout))
       s->StatusFlag = 1;
 }
 
@@ -195,7 +217,7 @@ intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
    struct brw_context *brw = brw_context(ctx);
    struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;
 
-   seqno_server_wait(&sync->fence, brw);
+   sync->fence.ops->server_wait(&sync->fence, brw);
 }
 
 static void
@@ -207,7 +229,7 @@ intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
    if (sync->fence.batch == brw->batch.bo)
       intel_batchbuffer_flush(brw);
 
-   if (seqno_passed(&sync->fence))
+   if (sync->fence.ops->check(&sync->fence))
       s->StatusFlag = 1;
 }
 
@@ -232,6 +254,7 @@ intel_dri_create_fence(__DRIcontext *ctx)
    if (!fence)
       return NULL;
 
+   fence->ops = &seqno_ops;
    seqno_insert(fence, brw);
 
    return fence;
@@ -258,7 +281,7 @@ intel_dri_client_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags
          intel_batchbuffer_flush(brw);
    }
 
-   return seqno_client_wait(fence, timeout);
+   return fence->ops->client_wait(fence, timeout);
 }
 
 static void
@@ -273,7 +296,7 @@ intel_dri_server_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags
    if (!fence)
       return;
 
-   seqno_server_wait(fence, brw);
+   fence->ops->server_wait(fence, brw);
 }
 
 const __DRI2fenceExtension intelFenceExtension = {
-- 
2.9.3



More information about the Intel-gfx mailing list