[Beignet] [PATCH 1/2] runtime: fix builtin-kernel related thread-safety bugs.

Zhigang Gong zhigang.gong at intel.com
Wed Jul 15 21:47:46 PDT 2015


This patch fixes two thread-safety bugs in the builtin-kernel
usage code path:

1. The builtin kernel array itself needs to be protected.
2. Each caller needs to get a private duplicate of the builtin
   kernel, rather than sharing the same kernel structure. A
   sketch of the resulting pattern follows this list.
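A minimal standalone sketch of the pattern the cl_context.c hunk
implements: take the lock, lazily build and cache the shared object,
release the lock, and hand each caller its own copy. This is not
Beignet code; kernel_t, kernel_create() and kernel_dup() here are
hypothetical stand-ins for the cl_kernel machinery.

  /* Lazy init of a shared cache slot, serialized by a mutex; every
   * caller gets a duplicate so later per-caller mutation (e.g. arg
   * setting) cannot race with other threads. */
  #include <pthread.h>
  #include <stdlib.h>
  #include <string.h>

  typedef struct { int refcnt; char name[32]; } kernel_t;

  static kernel_t *cache[16];
  static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

  static kernel_t *kernel_create(const char *name)
  {
    kernel_t *k = calloc(1, sizeof(*k));
    if (k) { k->refcnt = 1; strncpy(k->name, name, sizeof(k->name) - 1); }
    return k;
  }

  static kernel_t *kernel_dup(kernel_t *k)   /* deep copy per caller */
  {
    kernel_t *d;
    if (k == NULL) return NULL;
    d = malloc(sizeof(*d));
    if (d) { *d = *k; d->refcnt = 1; }
    return d;
  }

  kernel_t *get_static_kernel(int index, const char *name)
  {
    kernel_t *ker;
    pthread_mutex_lock(&cache_lock);        /* protect the cache array */
    if (cache[index] == NULL)
      cache[index] = kernel_create(name);   /* first caller builds it */
    ker = cache[index];
    pthread_mutex_unlock(&cache_lock);
    return kernel_dup(ker);                 /* caller owns the copy */
  }

Because each caller now owns its copy, it must also release it when
done; that is why the cl_mem.c hunks below add cl_kernel_delete(ker)
after every cl_command_queue_ND_range() call.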

Signed-off-by: Zhigang Gong <zhigang.gong at intel.com>
---
 src/cl_context.c | 22 +++++++++++++++-------
 src/cl_mem.c     | 12 +++++++++++-
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/src/cl_context.c b/src/cl_context.c
index 45064ad..5b9da23 100644
--- a/src/cl_context.c
+++ b/src/cl_context.c
@@ -325,16 +325,21 @@ cl_context_get_static_kernel_from_bin(cl_context ctx, cl_int index,
 {
   cl_int ret;
   cl_int binary_status = CL_SUCCESS;
-  if (!ctx->internal_prgs[index]) {
+  cl_kernel ker;
+  pthread_mutex_lock(&ctx->program_lock);
+  if (ctx->internal_prgs[index] == NULL) {
     ctx->internal_prgs[index] = cl_program_create_from_binary(ctx, 1, &ctx->device,
       &size, (const unsigned char **)&str_kernel, &binary_status, &ret);
 
-    if (!ctx->internal_prgs[index])
-      return NULL;
-
+    if (!ctx->internal_prgs[index]) {
+      ker = NULL;
+      goto unlock;
+    }
     ret = cl_program_build(ctx->internal_prgs[index], str_option);
-    if (ret != CL_SUCCESS)
-      return NULL;
+    if (ret != CL_SUCCESS) {
+      ker = NULL;
+      goto unlock;
+    }
 
     ctx->internal_prgs[index]->is_built = 1;
 
@@ -368,6 +373,9 @@ cl_context_get_static_kernel_from_bin(cl_context ctx, cl_int index,
       ctx->internel_kernels[index] = cl_kernel_dup(ctx->internal_prgs[index]->ker[0]);
     }
   }
+  ker = ctx->internel_kernels[index];
 
-  return ctx->internel_kernels[index];
+unlock:
+  pthread_mutex_unlock(&ctx->program_lock);
+  return cl_kernel_dup(ker);
 }
diff --git a/src/cl_mem.c b/src/cl_mem.c
index f6aa5b5..b5671bd 100644
--- a/src/cl_mem.c
+++ b/src/cl_mem.c
@@ -1203,6 +1203,7 @@ cl_mem_copy(cl_command_queue queue, cl_mem src_buf, cl_mem dst_buf,
     cl_kernel_set_arg(ker, 3, sizeof(int), &dw_dst_offset);
     cl_kernel_set_arg(ker, 4, sizeof(int), &cb);
     ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
+    cl_kernel_delete(ker);
     return ret;
   }
 
@@ -1243,6 +1244,7 @@ cl_mem_copy(cl_command_queue queue, cl_mem src_buf, cl_mem dst_buf,
     cl_kernel_set_arg(ker, 5, sizeof(int), &first_mask);
     cl_kernel_set_arg(ker, 6, sizeof(int), &last_mask);
     ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
+    cl_kernel_delete(ker);
     return ret;
   }
 
@@ -1272,6 +1274,7 @@ cl_mem_copy(cl_command_queue queue, cl_mem src_buf, cl_mem dst_buf,
     cl_kernel_set_arg(ker, 7, sizeof(int), &shift);
     cl_kernel_set_arg(ker, 8, sizeof(int), &dw_mask);
     ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
+    cl_kernel_delete(ker);
     return ret;
   }
 
@@ -1300,6 +1303,7 @@ cl_mem_copy(cl_command_queue queue, cl_mem src_buf, cl_mem dst_buf,
     cl_kernel_set_arg(ker, 8, sizeof(int), &dw_mask);
     cl_kernel_set_arg(ker, 9, sizeof(int), &src_less);
     ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
+    cl_kernel_delete(ker);
     return ret;
   }
 
@@ -1372,6 +1376,7 @@ cl_image_fill(cl_command_queue queue, const void * pattern, struct _cl_mem_image
   cl_kernel_set_arg(ker, 7, sizeof(cl_int), &origin[2]);
 
   ret = cl_command_queue_ND_range(queue, ker, 3, global_off, global_sz, local_sz);
+  cl_kernel_delete(ker);
   return ret;
 }
 
@@ -1474,6 +1479,7 @@ cl_mem_fill(cl_command_queue queue, const void * pattern, size_t pattern_size,
     cl_kernel_set_arg(ker, 4, pattern_size, pattern1);
 
   ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
+  cl_kernel_delete(ker);
   return ret;
 }
 
@@ -1546,7 +1552,7 @@ cl_mem_copy_buffer_rect(cl_command_queue queue, cl_mem src_buf, cl_mem dst_buf,
   cl_kernel_set_arg(ker, 10, sizeof(cl_int), &dst_slice_pitch);
 
   ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
-
+  cl_kernel_delete(ker);
   return ret;
 }
 
@@ -1696,6 +1702,8 @@ cl_mem_kernel_copy_image(cl_command_queue queue, struct _cl_mem_image* src_image
   ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
 
 fail:
+
+  cl_kernel_delete(ker);
   if (fixupDataType) {
     src_image->intel_fmt = savedIntelFmt;
     dst_image->intel_fmt = savedIntelFmt;
@@ -1797,6 +1805,7 @@ cl_mem_copy_image_to_buffer(cl_command_queue queue, struct _cl_mem_image* image,
 
 fail:
 
+  cl_kernel_delete(ker);
   image->intel_fmt = intel_fmt;
   image->bpp = bpp;
   image->w = w_saved;
@@ -1893,6 +1902,7 @@ cl_mem_copy_buffer_to_image(cl_command_queue queue, cl_mem buffer, struct _cl_me
   cl_kernel_set_arg(ker, 8, sizeof(cl_int), &kn_src_offset);
 
   ret = cl_command_queue_ND_range(queue, ker, 1, global_off, global_sz, local_sz);
+  cl_kernel_delete(ker);
 
   image->intel_fmt = intel_fmt;
   image->bpp = bpp;
-- 
1.9.1


