[Mesa-dev] [PATCH 13/13] rfc! i965/sync: Support EGL_ANDROID_native_fence_sync

Chad Versace chad.versace at intel.com
Sat Jul 9 00:01:03 UTC 2016


TODO: Neither the i915 kernel interface nor the libdrm interface is upstream
  yet. So small details may change between now and the final patch.
TODO: Agree on fd ownership rules between EGL and driver with Rob Clark.
TODO: Handle errors from ppoll().
TODO: Test it!
---
 src/mesa/drivers/dri/i965/intel_syncobj.c | 217 ++++++++++++++++++++++++++----
 1 file changed, 193 insertions(+), 24 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/intel_syncobj.c b/src/mesa/drivers/dri/i965/intel_syncobj.c
index 8132efa..77cc8bf 100644
--- a/src/mesa/drivers/dri/i965/intel_syncobj.c
+++ b/src/mesa/drivers/dri/i965/intel_syncobj.c
@@ -38,17 +38,36 @@
  * performance bottleneck, though.
  */
 
+#include <poll.h>
+
 #include "main/imports.h"
 
 #include "brw_context.h"
 #include "intel_batchbuffer.h"
 #include "intel_reg.h"
 
+enum brw_fence_type {
+   BRW_FENCE_TYPE_BATCH_WAIT,
+   BRW_FENCE_TYPE_SYNC_FD,
+};
+
 struct brw_fence {
-   /** The fence waits for completion of this batch. */
-   drm_intel_bo *batch_bo;
+   union {
+      /** The fence waits for completion of this batch. */
+      drm_intel_bo *batch_bo;
+
+      /**
+       * A kernel sync fd.  Requires I915_PARAM_HAS_EXEC_FENCE_FD. The uapi
+       * for generic sync fds arrived in Linux 4.7; i915 support arrived in
+       * Linux TODO.
+       */
+      int sync_fd;
+   };
+
+   enum brw_fence_type type;
 };
 
+
 struct intel_gl_sync_object {
    struct gl_sync_object Base;
    struct brw_fence fence;
@@ -57,50 +76,153 @@ struct intel_gl_sync_object {
 static void
 brw_fence_finish(struct brw_fence *fence)
 {
-   if (fence->batch_bo)
-      drm_intel_bo_unreference(fence->batch_bo);
+   switch (fence->type) {
+   case BRW_FENCE_TYPE_BATCH_WAIT:
+      if (fence->batch_bo)
+         drm_intel_bo_unreference(fence->batch_bo);
+      break;
+   case BRW_FENCE_TYPE_SYNC_FD:
+      /* EGL owns the fd, so don't close it. */
+      break;
+   }
+}
+
+static void
+brw_fence_init(struct brw_fence *fence, enum brw_fence_type type)
+{
+   fence->type = type;
+
+   switch (type) {
+   case BRW_FENCE_TYPE_BATCH_WAIT:
+      fence->batch_bo = NULL;
+      break;
+   case BRW_FENCE_TYPE_SYNC_FD:
+      fence->sync_fd = -1;
+      break;
+   }
 }
 
 static void
 brw_fence_insert(struct brw_context *brw, struct brw_fence *fence)
 {
-   assert(!fence->batch_bo);
-
    brw_emit_mi_flush(brw);
-   fence->batch_bo = brw->batch.bo;
-   drm_intel_bo_reference(fence->batch_bo);
-   intel_batchbuffer_flush(brw);
+
+   switch (fence->type) {
+   case BRW_FENCE_TYPE_BATCH_WAIT:
+      assert(fence->batch_bo == NULL);
+      fence->batch_bo = brw->batch.bo;
+      drm_intel_bo_reference(fence->batch_bo);
+      intel_batchbuffer_flush(brw);
+      break;
+   case BRW_FENCE_TYPE_SYNC_FD:
+      if (fence->sync_fd == -1) {
+         /* Create a new sync fd.
+          *
+          * From the EGL_ANDROID_native_fence_sync spec (v3):
+          *
+          *     When [...] an EGL native fence sync object is created with the
+          *     EGL_SYNC_NATIVE_FENCE_FD_ANDROID attribute set to
+          *     EGL_NO_NATIVE_FENCE_FD_ANDROID, eglCreateSyncKHR also inserts
+          *     a fence command into the command stream of the bound client
+          *     API's current context [...] The next Flush() operation [...]
+          *     causes a new native fence object to be created, and the
+          *     EGL_SYNC_NATIVE_FENCE_ANDROID attribute of the EGL native
+          *     fence object is set to a file descriptor that refers to the
+          *     new native fence object.
+          *
+          * We immediately flush the context. The spec permits but does not
+          * require an immediate flush.
+          */
+         intel_batchbuffer_flush_fence(brw, -1, &fence->sync_fd);
+      } else {
+         /* Insert an existing sync fd into the command stream.
+          *
+          * From the EGL_ANDROID_native_fence_sync spec (v3):
+          *
+          *    If the EGL_SYNC_NATIVE_FENCE_FD_ANDROID attribute is not
+          *    EGL_NO_NATIVE_FENCE_FD_ANDROID then the EGL_SYNC_CONDITION_KHR
+          *    attribute is set to EGL_SYNC_NATIVE_FENCE_SIGNALED_ANDROID and
+          *    the EGL_SYNC_STATUS_KHR attribute is set to reflect the signal
+          *    status of the native fence object.  Additionally, the EGL
+          *    implementation assumes ownership of the file descriptor, so the
+          *    caller must not use it after calling eglCreateSyncKHR.
+          */
+         intel_batchbuffer_flush_fence(brw, fence->sync_fd, NULL);
+      }
+      break;
+   }
 }
 
 /**
- * Return true if the function successfully signals or has already signalled.
+ * Return true if the fence signals or has already signalled.
  * (This matches the behavior expected from __DRI2fence::client_wait_sync).
  */
 static bool
 brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
                       uint64_t timeout)
 {
-   assert(fence->batch_bo);
+   switch (fence->type) {
+   case BRW_FENCE_TYPE_BATCH_WAIT:
+      assert(fence->batch_bo != NULL);
 
-   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
-    * immediately for timeouts <= 0.  The best we can do is to clamp the
-    * timeout to INT64_MAX.  This limits the maximum timeout from 584 years to
-    * 292 years - likely not a big deal.
-    */
-   if (timeout > INT64_MAX)
-      timeout = INT64_MAX;
+      /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
+       * immediately for timeouts <= 0.  The best we can do is to clamp the
+       * timeout to INT64_MAX.  This limits the maximum timeout from 584 years to
+       * 292 years - likely not a big deal.
+       */
+      if (timeout > INT64_MAX)
+         timeout = INT64_MAX;
 
-   return drm_intel_gem_bo_wait(fence->batch_bo, timeout) == 0;
+      return drm_intel_gem_bo_wait(fence->batch_bo, timeout) == 0;
+   case BRW_FENCE_TYPE_SYNC_FD: {
+      assert(fence->sync_fd != -1);
+
+      /* NOTE(review): ppoll waits forever only when given a NULL timespec
+       * pointer; a timespec with negative fields makes it fail with EINVAL.
+       * The timeout == -1 case below should pass NULL instead — confirm. */
+      if (timeout == __DRI2_FENCE_TIMEOUT_INFINITE) {
+         timeout = -1;
+      } else if (timeout > LONG_MAX) {
+         timeout = LONG_MAX;
+      }
+
+      struct pollfd pollfd = {
+         .fd = fence->sync_fd,
+         .events = POLLIN,
+      };
+
+      struct timespec timeout_ts = {
+         .tv_sec = timeout / 1000000000,
+         .tv_nsec = timeout % 1000000000,
+      };
+
+      if (ppoll(&pollfd, 1, &timeout_ts, NULL) != 1)
+         return false;
+
+      return (pollfd.revents & POLLIN) != 0;
+   }
+   }
+
+   assert(!"bad brw_fence_type");
+   return 0;
 }
 
 static void
 brw_fence_server_wait(struct brw_context *brw, struct brw_fence *fence)
 {
-   /* We have nothing to do for WaitSync.  Our GL command stream is sequential,
-    * so given that the sync object has already flushed the batchbuffer, any
-    * batchbuffers coming after this waitsync will naturally not occur until
-    * the previous one is done.
-    */
+   switch (fence->type) {
+   case BRW_FENCE_TYPE_BATCH_WAIT:
+      /* We have nothing to do for WaitSync.  Our GL command stream is sequential,
+       * so given that the sync object has already flushed the batchbuffer, any
+       * batchbuffers coming after this waitsync will naturally not occur until
+       * the previous one is done.
+       */
+      return;
+   case BRW_FENCE_TYPE_SYNC_FD:
+      if (fence->sync_fd != -1)
+         brw_fence_insert(brw, fence);
+      return;
+   }
 }
 
 static struct gl_sync_object *
@@ -112,6 +234,8 @@ intel_gl_new_sync_object(struct gl_context *ctx, GLuint id)
    if (!sync)
       return NULL;
 
+   brw_fence_init(&sync->fence, BRW_FENCE_TYPE_BATCH_WAIT);
+
    return &sync->Base;
 }
 
@@ -188,6 +312,7 @@ intel_dri_create_fence(__DRIcontext *ctx)
    if (!fence)
       return NULL;
 
+   brw_fence_init(fence, BRW_FENCE_TYPE_BATCH_WAIT);
    brw_fence_insert(brw, fence);
 
    return fence;
@@ -221,6 +346,47 @@ intel_dri_server_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags
    brw_fence_server_wait(brw, fence);
 }
 
+static unsigned
+intel_dri_get_capabilities(__DRIscreen *screen)
+{
+   struct intel_screen *intelScreen = screen->driverPrivate;
+   unsigned caps = 0;
+
+   if (intelScreen->has_fence_fd)
+      caps |= __DRI_FENCE_CAP_NATIVE_FD;
+
+   return caps;
+}
+
+static void *
+intel_dri_create_fence_fd(__DRIcontext *ctx, int fd)
+{
+   struct brw_context *brw = ctx->driverPrivate;
+   struct brw_fence *fence;
+
+   fence = calloc(1, sizeof(*fence));
+   if (!fence)
+      return NULL;
+
+   brw_fence_init(fence, BRW_FENCE_TYPE_SYNC_FD);
+   fence->sync_fd = fd;
+
+   if (fd == -1)
+      brw_fence_insert(brw, fence);
+
+   return fence;
+}
+
+static int
+intel_dri_get_fence_fd(__DRIscreen *screen, void *_fence)
+{
+   struct brw_fence *fence = _fence;
+
+   assert(fence->type == BRW_FENCE_TYPE_SYNC_FD);
+   return fence->sync_fd;
+}
+
+
 const __DRI2fenceExtension intelFenceExtension = {
    .base = { __DRI2_FENCE, 1 },
 
@@ -229,4 +395,7 @@ const __DRI2fenceExtension intelFenceExtension = {
    .client_wait_sync = intel_dri_client_wait_sync,
    .server_wait_sync = intel_dri_server_wait_sync,
    .get_fence_from_cl_event = NULL,
+   .get_capabilities = intel_dri_get_capabilities,
+   .create_fence_fd = intel_dri_create_fence_fd,
+   .get_fence_fd = intel_dri_get_fence_fd,
 };
-- 
2.9.0.rc2



More information about the mesa-dev mailing list