[Beignet] [PATCH 17/19] OCL20: handle device enqueue in runtime.

Yang Rong rong.r.yang at intel.com
Mon Nov 28 11:32:43 UTC 2016


There are some steps to handle device enqueue:
1. Allocate the device enqueue bo to store the device enqueue
information for the parent kernel. It must also convert all global
buffers to SVM buffers to make sure the child kernels see the same GPU
address.
2. When flushing the command, check whether there is a device enqueue
or not. If there is, it must wait for the kernel to finish and then
parse the device enqueue info.
3. Start the child ndrange according to the device enqueue info, with
the parent's global buffers as the exec info.

Because of non-uniform workgroup sizes, one enqueue API call may flush
several times, but device enqueue only needs to be handled once, so add
a flag to the function cl_command_queue_flush to indicate the last flush.

Signed-off-by: Yang Rong <rong.r.yang at intel.com>
Reviewed-by: Pan Xiuli <xiuli.pan at intel.com>
---
 src/CMakeLists.txt          |    2 +
 src/cl_api_kernel.c         |   97 +++-
 src/cl_command_queue.c      |   88 +---
 src/cl_command_queue.h      |   11 +-
 src/cl_command_queue_gen7.c |   15 +-
 src/cl_context.c            |   17 +
 src/cl_context.h            |    3 +
 src/cl_device_enqueue.c     |  198 +++++++
 src/cl_device_enqueue.h     |   31 ++
 src/cl_enqueue.c            |    7 +
 src/cl_enqueue.h            |    3 +
 src/cl_kernel.c             |    6 +
 src/cl_kernel.h             |    6 +-
 src/cl_mem.c                |   30 +-
 src/intel/intel_driver.c    | 1189 ++++++++++++++++++++++---------------------
 15 files changed, 1003 insertions(+), 700 deletions(-)
 create mode 100644 src/cl_device_enqueue.c
 create mode 100644 src/cl_device_enqueue.h

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 26cccea..b7fc13d 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -88,6 +88,8 @@ set(OPENCL_SRC
     cl_context.c
     cl_command_queue.c
     cl_command_queue.h
+    cl_device_enqueue.c
+    cl_device_enqueue.h
     cl_command_queue_gen7.c
     cl_command_queue_enqueue.c
     cl_driver.h
diff --git a/src/cl_api_kernel.c b/src/cl_api_kernel.c
index 70140b2..7812acf 100644
--- a/src/cl_api_kernel.c
+++ b/src/cl_api_kernel.c
@@ -160,35 +160,86 @@ clEnqueueNDRangeKernel(cl_command_queue command_queue,
       break;
     }
 
-    e = cl_event_create(command_queue->ctx, command_queue, num_events_in_wait_list,
-                        event_wait_list, CL_COMMAND_NDRANGE_KERNEL, &err);
-    if (err != CL_SUCCESS) {
-      break;
-    }
+    int i,j,k;
+    const size_t global_wk_sz_div[3] = {
+      fixed_global_sz[0] / fixed_local_sz[0] * fixed_local_sz[0],
+      fixed_global_sz[1] / fixed_local_sz[1] * fixed_local_sz[1],
+      fixed_global_sz[2] / fixed_local_sz[2] * fixed_local_sz[2]
+    };
+
+    const size_t global_wk_sz_rem[3] = {
+      fixed_global_sz[0] % fixed_local_sz[0],
+      fixed_global_sz[1] % fixed_local_sz[1],
+      fixed_global_sz[2] % fixed_local_sz[2]
+    };
+    cl_uint count;
+    count = global_wk_sz_rem[0] ? 2 : 1;
+    count *= global_wk_sz_rem[1] ? 2 : 1;
+    count *= global_wk_sz_rem[2] ? 2 : 1;
+
+    const size_t *global_wk_all[2] = {global_wk_sz_div, global_wk_sz_rem};
+    /* Go through the at most 8 cases and enqueue if there are work items left */
+    for (i = 0; i < 2;i++) {
+      for (j = 0; j < 2;j++) {
+        for (k = 0; k < 2; k++) {
+          size_t global_wk_sz_use[3] = {global_wk_all[k][0], global_wk_all[j][1], global_wk_all[i][2]};
+          size_t global_dim_off[3] = {
+            k * global_wk_sz_div[0] / fixed_local_sz[0],
+            j * global_wk_sz_div[1] / fixed_local_sz[1],
+            i * global_wk_sz_div[2] / fixed_local_sz[2]
+          };
+          size_t local_wk_sz_use[3] = {
+            k ? global_wk_sz_rem[0] : fixed_local_sz[0],
+            j ? global_wk_sz_rem[1] : fixed_local_sz[1],
+            i ? global_wk_sz_rem[2] : fixed_local_sz[2]
+          };
+          if (local_wk_sz_use[0] == 0 || local_wk_sz_use[1] == 0 || local_wk_sz_use[2] == 0)
+            continue;
+
+          e = cl_event_create(command_queue->ctx, command_queue, num_events_in_wait_list,
+                              event_wait_list, CL_COMMAND_NDRANGE_KERNEL, &err);
+          if (err != CL_SUCCESS) {
+            break;
+          }
 
-    /* Do device specific checks are enqueue the kernel */
-    err = cl_command_queue_ND_range(command_queue, kernel, e, work_dim,
-                                    fixed_global_off, fixed_global_sz, fixed_local_sz);
-    if (err != CL_SUCCESS) {
-      break;
-    }
+          /* Do device specific checks and enqueue the kernel */
+          err = cl_command_queue_ND_range(command_queue, kernel, e, work_dim,
+                                          fixed_global_off, global_dim_off, fixed_global_sz,
+                                          global_wk_sz_use, fixed_local_sz, local_wk_sz_use);
+          if (err != CL_SUCCESS) {
+            break;
+          }
+          e->exec_data.mid_event_of_enq = (count > 1);
+          count--;
+
+          /* We will flush the ndrange if there is no event dependency. Else we will add it
+             to the queue list. The Finish or Complete status is always set in the queue list. */
+          event_status = cl_event_is_ready(e);
+          if (event_status < CL_COMPLETE) { // Error happened, cancel.
+            err = CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST;
+            break;
+          } else if (event_status == CL_COMPLETE) {
+            err = cl_enqueue_handle(&e->exec_data, CL_SUBMITTED);
+            if (err != CL_SUCCESS) {
+              break;
+            }
 
-    /* We will flush the ndrange if no event depend. Else we will add it to queue list.
-       The finish or Complete status will always be done in queue list. */
-    event_status = cl_event_is_ready(e);
-    if (event_status < CL_COMPLETE) { // Error happend, cancel.
-      err = CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST;
-      break;
-    } else if (event_status == CL_COMPLETE) {
-      err = cl_enqueue_handle(&e->exec_data, CL_SUBMITTED);
+            e->status = CL_SUBMITTED;
+          }
+
+          cl_command_queue_enqueue_event(command_queue, e);
+
+          if (e->exec_data.mid_event_of_enq)
+            cl_event_delete(e);
+        }
+        if (err != CL_SUCCESS) {
+          break;
+        }
+      }
       if (err != CL_SUCCESS) {
         break;
       }
-
-      e->status = CL_SUBMITTED;
     }
-
-    cl_command_queue_enqueue_event(command_queue, e);
   } while (0);
 
   if (err == CL_SUCCESS && event) {
diff --git a/src/cl_command_queue.c b/src/cl_command_queue.c
index 5ca1d91..bc69f3f 100644
--- a/src/cl_command_queue.c
+++ b/src/cl_command_queue.c
@@ -183,23 +183,29 @@ cl_command_queue_bind_surface(cl_command_queue queue, cl_kernel k, cl_gpgpu gpgp
 }
 
 LOCAL cl_int
-cl_command_queue_bind_exec_info(cl_command_queue queue, cl_kernel k, cl_gpgpu gpgpu, uint32_t max_bti)
+cl_command_queue_bind_exec_info(cl_command_queue queue, cl_kernel k, cl_gpgpu gpgpu, uint32_t *max_bti)
 {
   uint32_t i;
-  size_t mem_offset, bti = max_bti;
-  cl_mem svm_mem;
+  size_t mem_offset, bti = *max_bti;
+  cl_mem mem;
+  int32_t offset = interp_kernel_get_curbe_size(k->opaque);
 
   for (i = 0; i < k->exec_info_n; i++) {
     void *ptr = k->exec_info[i];
-    if((svm_mem = cl_context_get_svm_from_ptr(k->program->ctx, ptr)) != NULL) {
-      mem_offset = (size_t)ptr - (size_t)svm_mem->host_ptr;
+    mem = cl_context_get_svm_from_ptr(k->program->ctx, ptr);
+    if(mem == NULL)
+      mem = cl_context_get_mem_from_ptr(k->program->ctx, ptr);
+
+    if (mem) {
+      mem_offset = (size_t)ptr - (size_t)mem->host_ptr;
       /* only need realloc in surface state, don't need realloc in curbe */
-      cl_gpgpu_bind_buf(gpgpu, svm_mem->bo, -1, svm_mem->offset + mem_offset, svm_mem->size, bti++);
+      cl_gpgpu_bind_buf(gpgpu, mem->bo, offset + i * sizeof(ptr), mem->offset + mem_offset, mem->size, bti++);
       if(bti == BTI_WORKAROUND_IMAGE_OFFSET)
-        bti = max_bti + BTI_WORKAROUND_IMAGE_OFFSET;
+        bti = *max_bti + BTI_WORKAROUND_IMAGE_OFFSET;
       assert(bti < BTI_MAX_ID);
     }
   }
+  *max_bti = bti;
 
   return CL_SUCCESS;
 }
@@ -219,69 +225,16 @@ cl_kernel_check_args(cl_kernel k)
 }
 
 LOCAL cl_int
-cl_command_queue_ND_range_wrap(cl_command_queue queue,
-                               cl_kernel ker,
-                               cl_event event,
-                               const uint32_t work_dim,
-                               const size_t *global_wk_off,
-                               const size_t *global_wk_sz,
-                               const size_t *local_wk_sz)
-{
-  /* Used for non uniform work group size */
-  cl_int err = CL_SUCCESS;
-  int i,j,k;
-  const size_t global_wk_sz_div[3] = {
-    global_wk_sz[0]/local_wk_sz[0]*local_wk_sz[0],
-    global_wk_sz[1]/local_wk_sz[1]*local_wk_sz[1],
-    global_wk_sz[2]/local_wk_sz[2]*local_wk_sz[2]
-  };
-
-  const size_t global_wk_sz_rem[3] = {
-    global_wk_sz[0]%local_wk_sz[0],
-    global_wk_sz[1]%local_wk_sz[1],
-    global_wk_sz[2]%local_wk_sz[2]
-  };
-
-  const size_t *global_wk_all[2] = {global_wk_sz_div, global_wk_sz_rem};
-  /* Go through the at most 8 cases and euque if there is work items left */
-  for(i = 0; i < 2;i++) {
-    for(j = 0; j < 2;j++) {
-      for(k = 0; k < 2; k++) {
-        size_t global_wk_sz_use[3] = {global_wk_all[k][0],global_wk_all[j][1],global_wk_all[i][2]};
-        size_t global_dim_off[3] = {
-          k * global_wk_sz_div[0] / local_wk_sz[0],
-          j * global_wk_sz_div[1] / local_wk_sz[1],
-          i * global_wk_sz_div[2] / local_wk_sz[2]
-        };
-        size_t local_wk_sz_use[3] = {
-          k ? global_wk_sz_rem[0] : local_wk_sz[0],
-          j ? global_wk_sz_rem[1] : local_wk_sz[1],
-          i ? global_wk_sz_rem[2] : local_wk_sz[2]
-        };
-        if(local_wk_sz_use[0] == 0 || local_wk_sz_use[1] == 0 || local_wk_sz_use[2] == 0) continue;
-        TRY (cl_command_queue_ND_range_gen7, queue, ker, event, work_dim, global_wk_off,global_dim_off, global_wk_sz,global_wk_sz_use,local_wk_sz, local_wk_sz_use);
-        /* TODO: need to handle events for multiple enqueue, now is a workaroud for uniform group size */
-        if(!(global_wk_sz_rem[0] == 0 && global_wk_sz_rem[1] == 0 && global_wk_sz_rem[2] == 0))
-          err = cl_command_queue_wait_flush(queue);
-      }
-      if(work_dim < 2)
-        break;
-    }
-    if(work_dim < 3)
-      break;
-  }
-error:
-  return err;
-}
-
-LOCAL cl_int
 cl_command_queue_ND_range(cl_command_queue queue,
                           cl_kernel k,
                           cl_event event,
                           const uint32_t work_dim,
                           const size_t *global_wk_off,
+                          const size_t *global_dim_off,
                           const size_t *global_wk_sz,
-                          const size_t *local_wk_sz)
+                          const size_t *global_wk_sz_use,
+                          const size_t *local_wk_sz,
+                          const size_t *local_wk_sz_use)
 {
   if(b_output_kernel_perf)
     time_start(queue->ctx, cl_kernel_get_name(k), queue);
@@ -294,8 +247,10 @@ cl_command_queue_ND_range(cl_command_queue queue,
 
   if (ver == 7 || ver == 75 || ver == 8 || ver == 9)
     //TRY (cl_command_queue_ND_range_gen7, queue, k, work_dim, global_wk_off, global_wk_sz, local_wk_sz);
-    TRY (cl_command_queue_ND_range_wrap, queue, k, event, work_dim,
-         global_wk_off, global_wk_sz, local_wk_sz);
+    TRY (cl_command_queue_ND_range_gen7, queue, k, event, work_dim,
+                                global_wk_off, global_dim_off, global_wk_sz,
+                                global_wk_sz_use, local_wk_sz, local_wk_sz_use);
+
   else
     FATAL ("Unknown Gen Device");
 
@@ -329,6 +284,7 @@ cl_command_queue_flush_gpgpu(cl_gpgpu gpgpu)
     interp_output_profiling(profiling_info, cl_gpgpu_map_profiling_buffer(gpgpu));
     cl_gpgpu_unmap_profiling_buffer(gpgpu);
   }
+
   return CL_SUCCESS;
 }
 
diff --git a/src/cl_command_queue.h b/src/cl_command_queue.h
index 9eb1b09..6a51455 100644
--- a/src/cl_command_queue.h
+++ b/src/cl_command_queue.h
@@ -75,9 +75,12 @@ extern cl_int cl_command_queue_ND_range(cl_command_queue queue,
                                         cl_kernel ker,
                                         cl_event event,
                                         const uint32_t work_dim,
-                                        const size_t *global_work_offset,
-                                        const size_t *global_work_size,
-                                        const size_t *local_work_size);
+                                        const size_t *global_wk_off,
+                                        const size_t *global_dim_off,
+                                        const size_t *global_wk_sz,
+                                        const size_t *global_wk_sz_use,
+                                        const size_t *local_wk_sz,
+                                        const size_t *local_wk_sz_use);
 
 /* The memory object where to report the performance */
 extern cl_int cl_command_queue_set_report_buffer(cl_command_queue, cl_mem);
@@ -92,7 +95,7 @@ extern cl_int cl_command_queue_bind_surface(cl_command_queue, cl_kernel, cl_gpgp
 extern cl_int cl_command_queue_bind_image(cl_command_queue, cl_kernel, cl_gpgpu, uint32_t *);
 
 /* Bind all exec info to bind table */
-extern cl_int cl_command_queue_bind_exec_info(cl_command_queue, cl_kernel, cl_gpgpu, uint32_t);
+extern cl_int cl_command_queue_bind_exec_info(cl_command_queue, cl_kernel, cl_gpgpu, uint32_t *);
 
 /* Insert a user event to command's wait_events */
 extern void cl_command_queue_insert_event(cl_command_queue, cl_event);
diff --git a/src/cl_command_queue_gen7.c b/src/cl_command_queue_gen7.c
index 4487360..84f2cd5 100644
--- a/src/cl_command_queue_gen7.c
+++ b/src/cl_command_queue_gen7.c
@@ -26,6 +26,7 @@
 #include "cl_event.h"
 #include "cl_utils.h"
 #include "cl_alloc.h"
+#include "cl_device_enqueue.h"
 
 #include <assert.h>
 #include <stdio.h>
@@ -369,7 +370,7 @@ cl_command_queue_ND_range_gen7(cl_command_queue queue,
   cl_gpgpu_kernel kernel;
   const uint32_t simd_sz = cl_kernel_get_simd_width(ker);
   size_t i, batch_sz = 0u, local_sz = 0u;
-  size_t cst_sz = ker->curbe_sz= interp_kernel_get_curbe_size(ker->opaque);
+  size_t cst_sz = interp_kernel_get_curbe_size(ker->opaque);
   int32_t scratch_sz = interp_kernel_get_scratch_size(ker->opaque);
   size_t thread_n = 0u;
   int printf_num = 0;
@@ -378,6 +379,13 @@ cl_command_queue_ND_range_gen7(cl_command_queue queue,
   void* printf_info = NULL;
   uint32_t max_bti = 0;
 
+  if (ker->exec_info_n > 0) {
+    cst_sz += ker->exec_info_n * sizeof(void *);
+    cst_sz = (cst_sz + 31) / 32 * 32;   //align to register size, hard code here.
+    ker->curbe = cl_realloc(ker->curbe, cst_sz);
+  }
+  ker->curbe_sz = cst_sz;
+
   /* Setup kernel */
   kernel.name = interp_kernel_get_name(ker->opaque);
   kernel.grf_blocks = 128;
@@ -436,7 +444,9 @@ cl_command_queue_ND_range_gen7(cl_command_queue queue,
   if(UNLIKELY(err = cl_command_queue_bind_image(queue, ker, gpgpu, &max_bti) != CL_SUCCESS))
     return err;
   /* Bind all exec infos */
-  cl_command_queue_bind_exec_info(queue, ker, gpgpu, max_bti);
+  cl_command_queue_bind_exec_info(queue, ker, gpgpu, &max_bti);
+  /* Bind device enqueue buffer */
+  cl_device_enqueue_bind_buffer(gpgpu, ker, &max_bti, &kernel);
   /* Bind all samplers */
   if (ker->vme)
     cl_gpgpu_bind_vme_state(gpgpu, ker->accel);
@@ -479,6 +489,7 @@ cl_command_queue_ND_range_gen7(cl_command_queue queue,
   /* Close the batch buffer and submit it */
   cl_gpgpu_batch_end(gpgpu, 0);
 
+  event->exec_data.queue = queue;
   event->exec_data.gpgpu = gpgpu;
   event->exec_data.type = EnqueueNDRangeKernel;
 
diff --git a/src/cl_context.c b/src/cl_context.c
index c2adf3f..322dce5 100644
--- a/src/cl_context.c
+++ b/src/cl_context.c
@@ -453,6 +453,7 @@ unlock:
   return cl_kernel_dup(ker);
 }
 
+
 cl_mem
 cl_context_get_svm_from_ptr(cl_context ctx, const void * p)
 {
@@ -470,3 +471,19 @@ cl_context_get_svm_from_ptr(cl_context ctx, const void * p)
   }
   return NULL;
 }
+
+cl_mem
+cl_context_get_mem_from_ptr(cl_context ctx, const void * p)
+{
+  struct list_head *pos;
+  cl_mem buf;
+
+  list_for_each (pos, (&ctx->mem_objects)) {
+    buf = (cl_mem)list_entry(pos, _cl_base_object, node);
+    if(buf->host_ptr == NULL) continue;
+    if((size_t)buf->host_ptr <= (size_t)p &&
+       (size_t)p < ((size_t)buf->host_ptr + buf->size))
+      return buf;
+  }
+  return NULL;
+}
diff --git a/src/cl_context.h b/src/cl_context.h
index caa57dc..16ce897 100644
--- a/src/cl_context.h
+++ b/src/cl_context.h
@@ -188,5 +188,8 @@ extern cl_kernel cl_context_get_static_kernel_from_bin(cl_context ctx, cl_int in
 
 /* Get the SVM from pointer, return NULL if pointer is not from SVM */
 extern cl_mem cl_context_get_svm_from_ptr(cl_context ctx, const void *p);
+/* Get the mem from pointer, return NULL if pointer is not from mem */
+extern cl_mem cl_context_get_mem_from_ptr(cl_context ctx, const void *p);
+
 #endif /* __CL_CONTEXT_H__ */
 
diff --git a/src/cl_device_enqueue.c b/src/cl_device_enqueue.c
new file mode 100644
index 0000000..18cd7e1
--- /dev/null
+++ b/src/cl_device_enqueue.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Rong Yang<rong.r.yang at intel.com>
+ */
+#include "cl_device_enqueue.h"
+#include "cl_mem.h"
+#include "cl_utils.h"
+#include "cl_context.h"
+#include "cl_program.h"
+#include "cl_alloc.h"
+#include "cl_kernel.h"
+#include "cl_command_queue.h"
+#include "cl_event.h"
+
+LOCAL cl_int
+cl_device_enqueue_fix_offset(cl_kernel ker) {
+  uint32_t i;
+  void *ptr;
+  cl_mem mem;
+  enum gbe_arg_type arg_type; /* kind of argument */
+  for (i = 0; i < ker->arg_n; ++i) {
+    arg_type = interp_kernel_get_arg_type(ker->opaque, i);
+    //HOW about image
+    if (!(arg_type == GBE_ARG_GLOBAL_PTR || arg_type == GBE_ARG_CONSTANT_PTR) || !ker->args[i].mem)
+      continue;
+
+    if(!ker->args[i].is_svm) {
+      mem = ker->args[i].mem;
+      ptr = cl_mem_map(mem, 0);
+      cl_buffer_set_softpin_offset(mem->bo, (size_t)ptr);
+      cl_buffer_set_bo_use_full_range(mem->bo, 1);
+      cl_buffer_disable_reuse(mem->bo);
+      mem->host_ptr = ptr;
+      cl_mem_unmap(mem);
+      ker->device_enqueue_infos[ker->device_enqueue_info_n++] = ptr;
+    } else {
+      ker->device_enqueue_infos[ker->device_enqueue_info_n++] = ker->args[i].mem->host_ptr;
+    }
+  }
+  return 0;
+}
+
+LOCAL cl_int
+cl_device_enqueue_bind_buffer(cl_gpgpu gpgpu, cl_kernel ker, uint32_t *max_bti, cl_gpgpu_kernel *kernel)
+{
+  int32_t value = GBE_CURBE_ENQUEUE_BUF_POINTER;
+  int32_t offset = interp_kernel_get_curbe_offset(ker->opaque, value, 0);
+  size_t buf_size = 32 * 1024 * 1024;  //fix 32M
+  cl_mem mem;
+
+  if(offset > 0) {
+    if(ker->useDeviceEnqueue == false) {
+      if(ker->device_enqueue_ptr == NULL)
+        ker->device_enqueue_ptr = cl_mem_svm_allocate(ker->program->ctx, 0, buf_size, 0);
+      if(ker->device_enqueue_infos == NULL)
+        ker->device_enqueue_infos = cl_calloc(ker->arg_n, sizeof(void *));
+      ker->device_enqueue_info_n = 0;
+      ker->useDeviceEnqueue = CL_TRUE;
+      cl_device_enqueue_fix_offset(ker);
+      cl_kernel_add_ref(ker);
+    }
+
+    mem = cl_context_get_svm_from_ptr(ker->program->ctx, ker->device_enqueue_ptr);
+    cl_gpgpu_bind_buf(gpgpu, mem->bo, offset, 0, buf_size, *max_bti);
+
+    cl_gpgpu_set_kernel(gpgpu, ker);
+  }
+  return 0;
+}
+
+typedef struct ndrange_info_t {
+  int type;
+  int global_work_size[3];
+  int local_work_size[3];
+  int global_work_offset[3];
+} ndrange_info_t;
+
+typedef struct Block_literal {
+  void *isa; // initialized to &_NSConcreteStackBlock or &_NSConcreteGlobalBlock
+  int flags;
+  int reserved;
+  int index;
+  struct Block_descriptor_1 {
+    unsigned long int slm_size;         // NULL
+    unsigned long int size;         // sizeof(struct Block_literal_1)
+    // optional helper functions
+    void *copy_helper;     // IFF (1<<25)
+    void *dispose_helper;             // IFF (1<<25)
+    // required ABI.2010.3.16
+    const char *signature;                         // IFF (1<<30)
+  } *descriptor;
+  // imported variables
+} Block_literal;
+
+LOCAL cl_int
+cl_device_enqueue_parse_result(cl_command_queue queue, cl_gpgpu gpgpu)
+{
+  cl_mem mem;
+  int size, type, dim, i;
+  const char * kernel_name;
+  cl_kernel child_ker;
+  cl_event evt = NULL;
+
+  cl_kernel ker = cl_gpgpu_get_kernel(gpgpu);
+  if(ker == NULL || ker->useDeviceEnqueue == CL_FALSE)
+    return 0;
+
+  void *buf = cl_gpgpu_ref_batch_buf(gpgpu);
+  //wait for the gpgpu's batch buf to finish; the gpgpu in the queue may not
+  //be the same as the param gpgpu, for example when flushing an event.
+  cl_gpgpu_sync(buf);
+  cl_gpgpu_unref_batch_buf(buf);
+
+  mem = cl_context_get_svm_from_ptr(ker->program->ctx, ker->device_enqueue_ptr);
+  if(mem == NULL) return -1;
+  char *ptr = (char *)cl_mem_map(mem, 0);
+
+  size =  *(int *)ptr;
+  ptr += 4;
+  while(size > 0) {
+    size_t fixed_global_off[] = {0,0,0};
+    size_t fixed_global_sz[] = {1,1,1};
+    size_t fixed_local_sz[] = {1,1,1};
+    ndrange_info_t* ndrange_info = (ndrange_info_t *)ptr;
+    size -= sizeof(ndrange_info_t);
+    ptr += sizeof(ndrange_info_t);
+
+    Block_literal *block = (Block_literal *)ptr;
+    size -=  block->descriptor->size;
+    ptr += block->descriptor->size;
+
+    type = ndrange_info->type;
+    dim = (type & 0xf0) >> 4;
+    type = type & 0xf;
+    for(i = 0; i <= dim; i++) {
+      fixed_global_sz[i] = ndrange_info->global_work_size[i];
+      if(type > 1)
+        fixed_local_sz[i] = ndrange_info->local_work_size[i];
+      if(type > 2)
+        fixed_global_off[i] = ndrange_info->global_work_offset[i];
+    }
+
+    int *slm_sizes = (int *)ptr;
+    int slm_size = block->descriptor->slm_size;
+    size -= slm_size;
+    ptr += slm_size;
+
+    kernel_name = interp_program_get_device_enqueue_kernel_name(ker->program->opaque, block->index);
+    child_ker = cl_program_create_kernel(ker->program, kernel_name, NULL);
+    cl_kernel_set_arg_svm_pointer(child_ker, 0, block);
+    int index = 1;
+    for(i=0; i<slm_size/sizeof(int); i++, index++) {
+      cl_kernel_set_arg(child_ker, index, slm_sizes[i], NULL);
+    }
+    cl_kernel_set_exec_info(child_ker, ker->device_enqueue_info_n * sizeof(void *),
+                            ker->device_enqueue_infos);
+
+    if (evt != NULL) {
+      clReleaseEvent(evt);
+      evt = NULL;
+    }
+    clEnqueueNDRangeKernel(queue, child_ker, dim + 1, fixed_global_off,
+                           fixed_global_sz, fixed_local_sz, 0, NULL, &evt);
+    cl_command_queue_flush_gpgpu(gpgpu);
+    cl_kernel_delete(child_ker);
+  }
+
+  if (evt != NULL) {
+    //Can't call clWaitForEvents here, it may cause dead lock.
+    //If evt->exec_data.gpgpu is NULL, evt has finished.
+    if (evt->exec_data.gpgpu) {
+      buf = cl_gpgpu_ref_batch_buf(evt->exec_data.gpgpu);
+      //wait for the gpgpu's batch buf to finish; the gpgpu in the queue may
+      //not be the same as the param gpgpu, for example when flushing an event.
+      cl_gpgpu_sync(buf);
+      cl_gpgpu_unref_batch_buf(buf);
+    }
+    clReleaseEvent(evt);
+    evt = NULL;
+  }
+  cl_mem_unmap_auto(mem);
+  cl_kernel_delete(ker);
+  return 0;
+}
diff --git a/src/cl_device_enqueue.h b/src/cl_device_enqueue.h
new file mode 100644
index 0000000..17fc6c7
--- /dev/null
+++ b/src/cl_device_enqueue.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Rong Yang<rong.r.yang at intel.com>
+ */
+
+#ifndef __CL_DEVICE_ENQUEUE_H__
+#define __CL_DEVICE_ENQUEUE_H__
+
+#include "cl_internals.h"
+#include "cl_driver.h"
+#include "CL/cl.h"
+#include <stdint.h>
+
+extern cl_int cl_device_enqueue_bind_buffer(cl_gpgpu gpgpu, cl_kernel ker,
+                                                     uint32_t *max_bti, cl_gpgpu_kernel *kernel);
+extern cl_int cl_device_enqueue_parse_result(cl_command_queue queue, cl_gpgpu gpgpu);
+#endif /* __CL_DEVICE_ENQUEUE_H__ */
diff --git a/src/cl_enqueue.c b/src/cl_enqueue.c
index fbcd7b6..bd4833f 100644
--- a/src/cl_enqueue.c
+++ b/src/cl_enqueue.c
@@ -24,6 +24,7 @@
 #include "cl_command_queue.h"
 #include "cl_utils.h"
 #include "cl_alloc.h"
+#include "cl_device_enqueue.h"
 #include <stdio.h>
 #include <string.h>
 #include <assert.h>
@@ -568,6 +569,12 @@ cl_enqueue_ndrange(enqueue_data *data, cl_int status)
 
   if (status == CL_SUBMITTED) {
     err = cl_command_queue_flush_gpgpu(data->gpgpu);
+    //if it is the last ndrange of a cl enqueue API call,
+    //check the device enqueue information.
+    if (data->mid_event_of_enq == 0) {
+      assert(data->queue);
+      cl_device_enqueue_parse_result(data->queue, data->gpgpu);
+    }
   } else if (status == CL_COMPLETE) {
     void *batch_buf = cl_gpgpu_ref_batch_buf(data->gpgpu);
     cl_gpgpu_sync(batch_buf);
diff --git a/src/cl_enqueue.h b/src/cl_enqueue.h
index 63a491c..50a54fc 100644
--- a/src/cl_enqueue.h
+++ b/src/cl_enqueue.h
@@ -78,6 +78,9 @@ typedef struct _enqueue_data {
                                  void *svm_pointers[],
                                  void *user_data);  /* pointer to pfn_free_func of clEnqueueSVMFree */
   cl_gpgpu gpgpu;
+  cl_bool mid_event_of_enq;  /* For a non-uniform ndrange, one enqueue has a sequence of
+                                events; the last event needs to parse the device enqueue
+                                information. 0 : last event; 1 : non-last event */
 } enqueue_data;
 
 /* Do real enqueue commands */
diff --git a/src/cl_kernel.c b/src/cl_kernel.c
index 49bbaf0..e8599db 100644
--- a/src/cl_kernel.c
+++ b/src/cl_kernel.c
@@ -74,7 +74,13 @@ cl_kernel_delete(cl_kernel k)
   if (k->exec_info)
     cl_free(k->exec_info);
 
+  if (k->device_enqueue_ptr)
+    cl_mem_svm_delete(k->program->ctx, k->device_enqueue_ptr);
+  if (k->device_enqueue_infos)
+    cl_free(k->device_enqueue_infos);
+
   CL_OBJECT_DESTROY_BASE(k);
+
   cl_free(k);
 }
 
diff --git a/src/cl_kernel.h b/src/cl_kernel.h
index 1e60a75..8acd82a 100644
--- a/src/cl_kernel.h
+++ b/src/cl_kernel.h
@@ -73,7 +73,11 @@ struct _cl_kernel {
 
   void* cmrt_kernel;          /* CmKernel* */
   uint32_t exec_info_n;       /* The kernel's exec info count */
-  void** exec_info;            /* The kernel's exec info */
+  void** exec_info;             /* The kernel's exec info */
+  cl_bool useDeviceEnqueue;     /* kernel use device enqueue */
+  void* device_enqueue_ptr;     /* device_enqueue buffer*/
+  uint32_t device_enqueue_info_n; /* count of parent kernel's arguments buffers, as child enqueues' exec info */
+  void** device_enqueue_infos;   /* parent kernel's arguments buffers, as child enqueues' exec info   */
 };
 
 #define CL_OBJECT_KERNEL_MAGIC 0x1234567890abedefLL
diff --git a/src/cl_mem.c b/src/cl_mem.c
index f856ba3..dce7ece 100644
--- a/src/cl_mem.c
+++ b/src/cl_mem.c
@@ -1529,7 +1529,8 @@ cl_mem_copy(cl_command_queue queue, cl_event event, cl_mem src_buf, cl_mem dst_b
     cl_kernel_set_arg(ker, 2, sizeof(cl_mem), &dst_buf);
     cl_kernel_set_arg(ker, 3, sizeof(int), &dw_dst_offset);
     cl_kernel_set_arg(ker, 4, sizeof(int), &cb);
-    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                    global_off, global_sz, global_sz, local_sz, local_sz);
     cl_kernel_delete(ker);
     return ret;
   }
@@ -1570,7 +1571,8 @@ cl_mem_copy(cl_command_queue queue, cl_event event, cl_mem src_buf, cl_mem dst_b
     cl_kernel_set_arg(ker, 4, sizeof(int), &dw_num);
     cl_kernel_set_arg(ker, 5, sizeof(int), &first_mask);
     cl_kernel_set_arg(ker, 6, sizeof(int), &last_mask);
-    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                    global_off, global_sz, global_sz, local_sz, local_sz);
     cl_kernel_delete(ker);
     return ret;
   }
@@ -1600,7 +1602,8 @@ cl_mem_copy(cl_command_queue queue, cl_event event, cl_mem src_buf, cl_mem dst_b
     cl_kernel_set_arg(ker, 6, sizeof(int), &last_mask);
     cl_kernel_set_arg(ker, 7, sizeof(int), &shift);
     cl_kernel_set_arg(ker, 8, sizeof(int), &dw_mask);
-    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                    global_off, global_sz, global_sz, local_sz, local_sz);
     cl_kernel_delete(ker);
     return ret;
   }
@@ -1632,7 +1635,8 @@ cl_mem_copy(cl_command_queue queue, cl_event event, cl_mem src_buf, cl_mem dst_b
     cl_kernel_set_arg(ker, 7, sizeof(int), &shift);
     cl_kernel_set_arg(ker, 8, sizeof(int), &dw_mask);
     cl_kernel_set_arg(ker, 9, sizeof(int), &src_less);
-    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+    ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                    global_off, global_sz, global_sz, local_sz, local_sz);
     cl_kernel_delete(ker);
     return ret;
   }
@@ -1724,7 +1728,8 @@ cl_image_fill(cl_command_queue queue, cl_event e, const void * pattern, struct _
   cl_kernel_set_arg(ker, 6, sizeof(cl_int), &origin[1]);
   cl_kernel_set_arg(ker, 7, sizeof(cl_int), &origin[2]);
 
-  ret = cl_command_queue_ND_range(queue, ker, e, 3, global_off, global_sz, local_sz);
+  ret = cl_command_queue_ND_range(queue, ker, e, 3, global_off,
+                                  global_off, global_sz, global_sz, local_sz, local_sz);
   cl_kernel_delete(ker);
   src_image->intel_fmt = savedIntelFmt;
   return ret;
@@ -1828,7 +1833,8 @@ cl_mem_fill(cl_command_queue queue, cl_event e, const void * pattern, size_t pat
   if (is_128)
     cl_kernel_set_arg(ker, 4, pattern_size, pattern1);
 
-  ret = cl_command_queue_ND_range(queue, ker, e, 1, global_off, global_sz, local_sz);
+  ret = cl_command_queue_ND_range(queue, ker, e, 1, global_off,
+                                  global_off, global_sz, global_sz, local_sz, local_sz);
   cl_kernel_delete(ker);
   return ret;
 }
@@ -1901,7 +1907,8 @@ cl_mem_copy_buffer_rect(cl_command_queue queue, cl_event event, cl_mem src_buf,
   cl_kernel_set_arg(ker, 9, sizeof(cl_int), &dst_row_pitch);
   cl_kernel_set_arg(ker, 10, sizeof(cl_int), &dst_slice_pitch);
 
-  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                  global_off, global_sz, global_sz, local_sz, local_sz);
   cl_kernel_delete(ker);
   return ret;
 }
@@ -2052,7 +2059,8 @@ cl_mem_kernel_copy_image(cl_command_queue queue, cl_event event, struct _cl_mem_
   cl_kernel_set_arg(ker, 9, sizeof(cl_int), &dst_origin[1]);
   cl_kernel_set_arg(ker, 10, sizeof(cl_int), &dst_origin[2]);
 
-  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                  global_off, global_sz, global_sz, local_sz, local_sz);
 
 fail:
 
@@ -2154,7 +2162,8 @@ cl_mem_copy_image_to_buffer(cl_command_queue queue, cl_event event, struct _cl_m
   cl_kernel_set_arg(ker, 7, sizeof(cl_int), &src_origin[2]);
   cl_kernel_set_arg(ker, 8, sizeof(cl_int), &kn_dst_offset);
 
-  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                  global_off, global_sz, global_sz, local_sz, local_sz);
 
 fail:
 
@@ -2254,7 +2263,8 @@ cl_mem_copy_buffer_to_image(cl_command_queue queue, cl_event event, cl_mem buffe
   cl_kernel_set_arg(ker, 7, sizeof(cl_int), &dst_origin[2]);
   cl_kernel_set_arg(ker, 8, sizeof(cl_int), &kn_src_offset);
 
-  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off, global_sz, local_sz);
+  ret = cl_command_queue_ND_range(queue, ker, event, 1, global_off,
+                                  global_off, global_sz, global_sz, local_sz, local_sz);
   cl_kernel_delete(ker);
 
   image->intel_fmt = intel_fmt;
diff --git a/src/intel/intel_driver.c b/src/intel/intel_driver.c
index 363e018..a8d554c 100644
--- a/src/intel/intel_driver.c
+++ b/src/intel/intel_driver.c
@@ -100,9 +100,9 @@ intel_driver_new(void)
 exit:
   return driver;
 error:
-  intel_driver_delete(driver);
-  driver = NULL;
-  goto exit;
+intel_driver_delete(driver);
+driver = NULL;
+goto exit;
 }
 
 /* just used for maximum relocation number in drm_intel */
@@ -112,383 +112,383 @@ error:
 static void
 intel_driver_aub_dump(intel_driver_t *driver)
 {
-  char *val;
-  val = getenv("OCL_DUMP_AUB");
-  if (!val)
-    return;
-  if (atoi(val) != 0) {
-    drm_intel_bufmgr_gem_set_aub_filename(driver->bufmgr,
-					  "beignet.aub");
-    drm_intel_bufmgr_gem_set_aub_dump(driver->bufmgr, 1);
-  }
+char *val;
+val = getenv("OCL_DUMP_AUB");
+if (!val)
+  return;
+if (atoi(val) != 0) {
+  drm_intel_bufmgr_gem_set_aub_filename(driver->bufmgr,
+          "beignet.aub");
+  drm_intel_bufmgr_gem_set_aub_dump(driver->bufmgr, 1);
+}
 }
 
 static int
 intel_driver_memman_init(intel_driver_t *driver)
 {
-  driver->bufmgr = drm_intel_bufmgr_gem_init(driver->fd, BATCH_SIZE);
-  if (!driver->bufmgr) return 0;
-  drm_intel_bufmgr_gem_enable_reuse(driver->bufmgr);
-  driver->device_id = drm_intel_bufmgr_gem_get_devid(driver->bufmgr);
-  intel_driver_aub_dump(driver);
-  return 1;
+driver->bufmgr = drm_intel_bufmgr_gem_init(driver->fd, BATCH_SIZE);
+if (!driver->bufmgr) return 0;
+drm_intel_bufmgr_gem_enable_reuse(driver->bufmgr);
+driver->device_id = drm_intel_bufmgr_gem_get_devid(driver->bufmgr);
+intel_driver_aub_dump(driver);
+return 1;
 }
 
 static void
 intel_driver_context_init(intel_driver_t *driver)
 {
-  driver->ctx = drm_intel_gem_context_create(driver->bufmgr);
-  assert(driver->ctx);
-  driver->null_bo = NULL;
+driver->ctx = drm_intel_gem_context_create(driver->bufmgr);
+assert(driver->ctx);
+driver->null_bo = NULL;
 #ifdef HAS_BO_SET_SOFTPIN
-  drm_intel_bo *bo = dri_bo_alloc(driver->bufmgr, "null_bo", 64*1024, 4096);
-  drm_intel_bo_set_softpin_offset(bo, 0);
-  // don't reuse it, that would make two bo trying to bind to same address,
-  // which is un-reasonable.
-  drm_intel_bo_disable_reuse(bo);
-  driver->null_bo = bo;
+drm_intel_bo *bo = dri_bo_alloc(driver->bufmgr, "null_bo", 64*1024, 4096);
+drm_intel_bo_set_softpin_offset(bo, 0);
+// don't reuse it, that would make two bo trying to bind to same address,
+// which is unreasonable.
+drm_intel_bo_disable_reuse(bo);
+driver->null_bo = bo;
 #endif
 }
 
 static void
 intel_driver_context_destroy(intel_driver_t *driver)
 {
-  if (driver->null_bo)
-    drm_intel_bo_unreference(driver->null_bo);
-  if(driver->ctx)
-    drm_intel_gem_context_destroy(driver->ctx);
-  driver->ctx = NULL;
+if (driver->null_bo)
+  drm_intel_bo_unreference(driver->null_bo);
+if(driver->ctx)
+  drm_intel_gem_context_destroy(driver->ctx);
+driver->ctx = NULL;
 }
 
 static int
 intel_driver_init(intel_driver_t *driver, int dev_fd)
 {
-  driver->fd = dev_fd;
-  driver->locked = 0;
-  pthread_mutex_init(&driver->ctxmutex, NULL);
+driver->fd = dev_fd;
+driver->locked = 0;
+pthread_mutex_init(&driver->ctxmutex, NULL);
 
-  if (!intel_driver_memman_init(driver)) return 0;
-  intel_driver_context_init(driver);
+if (!intel_driver_memman_init(driver)) return 0;
+intel_driver_context_init(driver);
 
 #if EMULATE_GEN
-  driver->gen_ver = EMULATE_GEN;
-  if (EMULATE_GEN == 75)
-    driver->device_id = PCI_CHIP_HASWELL_L;       /* we pick L for HSW */
-  else if (EMULATE_GEN == 7)
-    driver->device_id = PCI_CHIP_IVYBRIDGE_GT2; /* we pick GT2 for IVB */
-  else if (EMULATE_GEN == 6)
-    driver->device_id = PCI_CHIP_SANDYBRIDGE_GT2; /* we pick GT2 for SNB */
-  else
-    FATAL ("Unsupported Gen for emulation");
+driver->gen_ver = EMULATE_GEN;
+if (EMULATE_GEN == 75)
+  driver->device_id = PCI_CHIP_HASWELL_L;       /* we pick L for HSW */
+else if (EMULATE_GEN == 7)
+  driver->device_id = PCI_CHIP_IVYBRIDGE_GT2; /* we pick GT2 for IVB */
+else if (EMULATE_GEN == 6)
+  driver->device_id = PCI_CHIP_SANDYBRIDGE_GT2; /* we pick GT2 for SNB */
+else
+  FATAL ("Unsupported Gen for emulation");
 #else
-  if (IS_GEN9(driver->device_id))
-    driver->gen_ver = 9;
-  else if (IS_GEN8(driver->device_id))
-    driver->gen_ver = 8;
-  else if (IS_GEN75(driver->device_id))
-    driver->gen_ver = 75;
-  else if (IS_GEN7(driver->device_id))
-    driver->gen_ver = 7;
-  else if (IS_GEN6(driver->device_id))
-    driver->gen_ver = 6;
-  else if(IS_IGDNG(driver->device_id))
-    driver->gen_ver = 5;
-  else
-    driver->gen_ver = 4;
+if (IS_GEN9(driver->device_id))
+  driver->gen_ver = 9;
+else if (IS_GEN8(driver->device_id))
+  driver->gen_ver = 8;
+else if (IS_GEN75(driver->device_id))
+  driver->gen_ver = 75;
+else if (IS_GEN7(driver->device_id))
+  driver->gen_ver = 7;
+else if (IS_GEN6(driver->device_id))
+  driver->gen_ver = 6;
+else if(IS_IGDNG(driver->device_id))
+  driver->gen_ver = 5;
+else
+  driver->gen_ver = 4;
 #endif /* EMULATE_GEN */
-  return 1;
+return 1;
 }
 
 static cl_int
 intel_driver_open(intel_driver_t *intel, cl_context_prop props)
 {
-  int cardi;
+int cardi;
 #ifdef HAS_X11
-  char *driver_name;
+char *driver_name;
 #endif
-  if (props != NULL
-      && props->gl_type != CL_GL_NOSHARE
-      && props->gl_type != CL_GL_GLX_DISPLAY
-      && props->gl_type != CL_GL_EGL_DISPLAY) {
-    fprintf(stderr, "Unsupported gl share type %d.\n", props->gl_type);
-    return CL_INVALID_OPERATION;
-  }
+if (props != NULL
+    && props->gl_type != CL_GL_NOSHARE
+    && props->gl_type != CL_GL_GLX_DISPLAY
+    && props->gl_type != CL_GL_EGL_DISPLAY) {
+  fprintf(stderr, "Unsupported gl share type %d.\n", props->gl_type);
+  return CL_INVALID_OPERATION;
+}
 
 #ifdef HAS_X11
-  intel->x11_display = XOpenDisplay(NULL);
-
-  if(intel->x11_display) {
-    if((intel->dri_ctx = getDRI2State(intel->x11_display,
-                                     DefaultScreen(intel->x11_display),
-                                     &driver_name))) {
-      intel_driver_init_shared(intel, intel->dri_ctx);
-      Xfree(driver_name);
-    }
-    else
-      fprintf(stderr, "X server found. dri2 connection failed! \n");
+intel->x11_display = XOpenDisplay(NULL);
+
+if(intel->x11_display) {
+  if((intel->dri_ctx = getDRI2State(intel->x11_display,
+                                   DefaultScreen(intel->x11_display),
+                                   &driver_name))) {
+    intel_driver_init_shared(intel, intel->dri_ctx);
+    Xfree(driver_name);
   }
+  else
+    fprintf(stderr, "X server found. dri2 connection failed! \n");
+}
 #endif
 
-  if(!intel_driver_is_active(intel)) {
-    char card_name[20];
-    for(cardi = 0; cardi < 16; cardi++) {
-      sprintf(card_name, "/dev/dri/renderD%d", 128+cardi);
-      if (access(card_name, R_OK) != 0)
-        continue;
-      if(intel_driver_init_render(intel, card_name))
-        break;
-    }
+if(!intel_driver_is_active(intel)) {
+  char card_name[20];
+  for(cardi = 0; cardi < 16; cardi++) {
+    sprintf(card_name, "/dev/dri/renderD%d", 128+cardi);
+    if (access(card_name, R_OK) != 0)
+      continue;
+    if(intel_driver_init_render(intel, card_name))
+      break;
   }
+}
 
-  if(!intel_driver_is_active(intel)) {
-    char card_name[20];
-    for(cardi = 0; cardi < 16; cardi++) {
-      sprintf(card_name, "/dev/dri/card%d", cardi);
-      if (access(card_name, R_OK) != 0)
-        continue;
-      if(intel_driver_init_master(intel, card_name))
-        break;
-    }
+if(!intel_driver_is_active(intel)) {
+  char card_name[20];
+  for(cardi = 0; cardi < 16; cardi++) {
+    sprintf(card_name, "/dev/dri/card%d", cardi);
+    if (access(card_name, R_OK) != 0)
+      continue;
+    if(intel_driver_init_master(intel, card_name))
+      break;
   }
+}
 
-  if(!intel_driver_is_active(intel)) {
-    fprintf(stderr, "Device open failed, aborting...\n");
-    return CL_DEVICE_NOT_FOUND;
-  }
+if(!intel_driver_is_active(intel)) {
+  fprintf(stderr, "Device open failed, aborting...\n");
+  return CL_DEVICE_NOT_FOUND;
+}
 
 #ifdef HAS_GL_EGL
-  if (props && props->gl_type == CL_GL_EGL_DISPLAY) {
-    assert(props->egl_display);
-  }
+if (props && props->gl_type == CL_GL_EGL_DISPLAY) {
+  assert(props->egl_display);
+}
 #endif
-  return CL_SUCCESS;
+return CL_SUCCESS;
 }
 
 static void
 intel_driver_close(intel_driver_t *intel)
 {
-  //Due to the drm change about the test usrptr, we need to destroy the bufmgr
-  //befor the driver was closed, otherwise the test usrptr will not be freed.
-  if (intel->bufmgr)
-    drm_intel_bufmgr_destroy(intel->bufmgr);
+//Due to the drm change about the test usrptr, we need to destroy the bufmgr
+//before the driver was closed, otherwise the test usrptr will not be freed.
+if (intel->bufmgr)
+  drm_intel_bufmgr_destroy(intel->bufmgr);
 #ifdef HAS_X11
-  if(intel->dri_ctx) dri_state_release(intel->dri_ctx);
-  if(intel->x11_display) XCloseDisplay(intel->x11_display);
+if(intel->dri_ctx) dri_state_release(intel->dri_ctx);
+if(intel->x11_display) XCloseDisplay(intel->x11_display);
 #endif
-  if(intel->need_close) {
-    close(intel->fd);
-    intel->need_close = 0;
-  }
-  intel->dri_ctx = NULL;
-  intel->x11_display = NULL;
-  intel->fd = -1;
+if(intel->need_close) {
+  close(intel->fd);
+  intel->need_close = 0;
+}
+intel->dri_ctx = NULL;
+intel->x11_display = NULL;
+intel->fd = -1;
 }
 
 LOCAL int
 intel_driver_is_active(intel_driver_t *driver) {
-  return driver->fd >= 0;
+return driver->fd >= 0;
 }
 
 #ifdef HAS_X11
 LOCAL int 
 intel_driver_init_shared(intel_driver_t *driver, dri_state_t *state)
 {
-  int ret;
-  assert(state);
-  if(state->driConnectedFlag != DRI2)
-    return 0;
-  ret = intel_driver_init(driver, state->fd);
-  driver->need_close = 0;
-  return ret;
+int ret;
+assert(state);
+if(state->driConnectedFlag != DRI2)
+  return 0;
+ret = intel_driver_init(driver, state->fd);
+driver->need_close = 0;
+return ret;
 }
 #endif
 
 LOCAL int
 intel_driver_init_master(intel_driver_t *driver, const char* dev_name)
 {
-  int dev_fd, ret;
+int dev_fd, ret;
 
-  drm_client_t client;
+drm_client_t client;
 
-  // usually dev_name = "/dev/dri/card%d"
-  dev_fd = open(dev_name, O_RDWR);
-  if (dev_fd == -1) {
-    fprintf(stderr, "open(\"%s\", O_RDWR) failed: %s\n", dev_name, strerror(errno));
-    return 0;
-  }
+// usually dev_name = "/dev/dri/card%d"
+dev_fd = open(dev_name, O_RDWR);
+if (dev_fd == -1) {
+  fprintf(stderr, "open(\"%s\", O_RDWR) failed: %s\n", dev_name, strerror(errno));
+  return 0;
+}
 
-  // Check that we're authenticated
-  memset(&client, 0, sizeof(drm_client_t));
-  ret = ioctl(dev_fd, DRM_IOCTL_GET_CLIENT, &client);
-  if (ret == -1) {
-    fprintf(stderr, "ioctl(dev_fd, DRM_IOCTL_GET_CLIENT, &client) failed: %s\n", strerror(errno));
-    close(dev_fd);
-    return 0;
-  }
+// Check that we're authenticated
+memset(&client, 0, sizeof(drm_client_t));
+ret = ioctl(dev_fd, DRM_IOCTL_GET_CLIENT, &client);
+if (ret == -1) {
+  fprintf(stderr, "ioctl(dev_fd, DRM_IOCTL_GET_CLIENT, &client) failed: %s\n", strerror(errno));
+  close(dev_fd);
+  return 0;
+}
 
-  if (!client.auth) {
-    fprintf(stderr, "%s not authenticated\n", dev_name);
-    close(dev_fd);
-    return 0;
-  }
+if (!client.auth) {
+  fprintf(stderr, "%s not authenticated\n", dev_name);
+  close(dev_fd);
+  return 0;
+}
 
-  ret = intel_driver_init(driver, dev_fd);
-  driver->need_close = 1;
+ret = intel_driver_init(driver, dev_fd);
+driver->need_close = 1;
 
-  return ret;
+return ret;
 }
 
 LOCAL int
 intel_driver_init_render(intel_driver_t *driver, const char* dev_name)
 {
-  int dev_fd, ret;
+int dev_fd, ret;
 
-  dev_fd = open(dev_name, O_RDWR);
-  if (dev_fd == -1)
-    return 0;
+dev_fd = open(dev_name, O_RDWR);
+if (dev_fd == -1)
+  return 0;
 
-  ret = intel_driver_init(driver, dev_fd);
-  driver->need_close = 1;
+ret = intel_driver_init(driver, dev_fd);
+driver->need_close = 1;
 
-  return ret;
+return ret;
 }
 
 LOCAL int 
 intel_driver_terminate(intel_driver_t *driver)
 {
-  pthread_mutex_destroy(&driver->ctxmutex);
+pthread_mutex_destroy(&driver->ctxmutex);
 
-  if(driver->need_close) {
-    close(driver->fd);
-    driver->need_close = 0;
-  }
-  driver->fd = -1;
-  return 1;
+if(driver->need_close) {
+  close(driver->fd);
+  driver->need_close = 0;
+}
+driver->fd = -1;
+return 1;
 }
 
 LOCAL void
 intel_driver_lock_hardware(intel_driver_t *driver)
 {
 
-  PPTHREAD_MUTEX_LOCK(driver);
-  assert(!driver->locked);
-  driver->locked = 1;
+PPTHREAD_MUTEX_LOCK(driver);
+assert(!driver->locked);
+driver->locked = 1;
 }
 
 LOCAL void 
 intel_driver_unlock_hardware(intel_driver_t *driver)
 {
-  driver->locked = 0;
-  PPTHREAD_MUTEX_UNLOCK(driver);
+driver->locked = 0;
+PPTHREAD_MUTEX_UNLOCK(driver);
 }
 
 LOCAL dri_bo*
 intel_driver_share_buffer_from_name(intel_driver_t *driver, const char *sname, uint32_t name)
 {
-  dri_bo *bo = intel_bo_gem_create_from_name(driver->bufmgr,
-                                             sname,
-                                             name);
-  if (bo == NULL) {
-    fprintf(stderr, "intel_bo_gem_create_from_name create \"%s\" bo from name %d failed: %s\n", sname, name, strerror(errno));
-    return NULL;
-  }
-  return bo;
+dri_bo *bo = intel_bo_gem_create_from_name(driver->bufmgr,
+                                           sname,
+                                           name);
+if (bo == NULL) {
+  fprintf(stderr, "intel_bo_gem_create_from_name create \"%s\" bo from name %d failed: %s\n", sname, name, strerror(errno));
+  return NULL;
+}
+return bo;
 }
 
 LOCAL dri_bo*
 intel_driver_share_buffer_from_fd(intel_driver_t *driver, int fd, int size)
 {
-  dri_bo *bo = drm_intel_bo_gem_create_from_prime(driver->bufmgr,
-                                                  fd,
-                                                  size);
-  if (bo == NULL) {
-    fprintf(stderr, "drm_intel_bo_gem_create_from_prime create bo(size %d) from fd %d failed: %s\n", size, fd, strerror(errno));
-    return NULL;
-  }
-  return bo;
+dri_bo *bo = drm_intel_bo_gem_create_from_prime(driver->bufmgr,
+                                                fd,
+                                                size);
+if (bo == NULL) {
+  fprintf(stderr, "drm_intel_bo_gem_create_from_prime create bo(size %d) from fd %d failed: %s\n", size, fd, strerror(errno));
+  return NULL;
+}
+return bo;
 }
 
 LOCAL uint32_t
 intel_driver_shared_name(intel_driver_t *driver, dri_bo *bo)
 {
-  uint32_t name;
-  assert(bo);
-  dri_bo_flink(bo, &name);
-  return name;
+uint32_t name;
+assert(bo);
+dri_bo_flink(bo, &name);
+return name;
 }
 /* XXX a null props is ok? */
 static int
 intel_get_device_id(void)
 {
-  intel_driver_t *driver = NULL;
-  int intel_device_id;
-
-  driver = intel_driver_new();
-  assert(driver != NULL);
-  if(UNLIKELY(intel_driver_open(driver, NULL) != CL_SUCCESS)) return INVALID_CHIP_ID;
-  intel_device_id = driver->device_id;
-  intel_driver_context_destroy(driver);
-  intel_driver_close(driver);
-  intel_driver_terminate(driver);
-  intel_driver_delete(driver);
-
-  return intel_device_id;
+intel_driver_t *driver = NULL;
+int intel_device_id;
+
+driver = intel_driver_new();
+assert(driver != NULL);
+if(UNLIKELY(intel_driver_open(driver, NULL) != CL_SUCCESS)) return INVALID_CHIP_ID;
+intel_device_id = driver->device_id;
+intel_driver_context_destroy(driver);
+intel_driver_close(driver);
+intel_driver_terminate(driver);
+intel_driver_delete(driver);
+
+return intel_device_id;
 }
 
 extern void intel_gpgpu_delete_all(intel_driver_t *driver);
 static void
 cl_intel_driver_delete(intel_driver_t *driver)
 {
-  if (driver == NULL)
-    return;
-  intel_gpgpu_delete_all(driver);
-  intel_driver_context_destroy(driver);
-  intel_driver_close(driver);
-  intel_driver_terminate(driver);
-  intel_driver_delete(driver);
+if (driver == NULL)
+  return;
+intel_gpgpu_delete_all(driver);
+intel_driver_context_destroy(driver);
+intel_driver_close(driver);
+intel_driver_terminate(driver);
+intel_driver_delete(driver);
 }
 
 #include "cl_gbe_loader.h"
 static intel_driver_t*
 cl_intel_driver_new(cl_context_prop props)
 {
-  intel_driver_t *driver = NULL;
-  TRY_ALLOC_NO_ERR (driver, intel_driver_new());
-  if(UNLIKELY(intel_driver_open(driver, props) != CL_SUCCESS)) goto error;
+intel_driver_t *driver = NULL;
+TRY_ALLOC_NO_ERR (driver, intel_driver_new());
+if(UNLIKELY(intel_driver_open(driver, props) != CL_SUCCESS)) goto error;
 exit:
-  return driver;
+return driver;
 error:
-  cl_intel_driver_delete(driver);
-  driver = NULL;
-  goto exit;
+cl_intel_driver_delete(driver);
+driver = NULL;
+goto exit;
 }
 
 static drm_intel_bufmgr*
 intel_driver_get_bufmgr(intel_driver_t *drv)
 {
-  return drv->bufmgr;
+return drv->bufmgr;
 }
 
 static uint32_t
 intel_driver_get_ver(struct intel_driver *drv)
 {
-  return drv->gen_ver;
+return drv->gen_ver;
 }
 
 static void
 intel_driver_enlarge_stack_size(struct intel_driver *drv, int32_t *stack_size)
 {
-    if (drv->gen_ver == 75)
-      *stack_size = *stack_size * 4;
-    else if (drv->device_id == PCI_CHIP_BROXTON_1 || drv->device_id == PCI_CHIP_BROXTON_3 ||
-             IS_CHERRYVIEW(drv->device_id))
-      *stack_size = *stack_size * 2;
+  if (drv->gen_ver == 75)
+    *stack_size = *stack_size * 4;
+  else if (drv->device_id == PCI_CHIP_BROXTON_1 || drv->device_id == PCI_CHIP_BROXTON_3 ||
+           IS_CHERRYVIEW(drv->device_id))
+    *stack_size = *stack_size * 2;
 }
 
 static void
 intel_driver_set_atomic_flag(intel_driver_t *drv, int atomic_flag)
 {
-  drv->atomic_test_result = atomic_flag;
+drv->atomic_test_result = atomic_flag;
 }
 
 static size_t drm_intel_bo_get_size(drm_intel_bo *bo) { return bo->size; }
@@ -496,66 +496,66 @@ static void* drm_intel_bo_get_virtual(drm_intel_bo *bo) { return bo->virtual; }
 
 static int get_cl_tiling(uint32_t drm_tiling)
 {
-  switch(drm_tiling) {
-  case I915_TILING_X: return CL_TILE_X;
-  case I915_TILING_Y: return CL_TILE_Y;
-  case I915_TILING_NONE: return CL_NO_TILE;
-  default:
-    assert(0);
-  }
-  return CL_NO_TILE;
+switch(drm_tiling) {
+case I915_TILING_X: return CL_TILE_X;
+case I915_TILING_Y: return CL_TILE_Y;
+case I915_TILING_NONE: return CL_NO_TILE;
+default:
+  assert(0);
+}
+return CL_NO_TILE;
 }
 
 static uint32_t intel_buffer_get_tiling_align(cl_context ctx, uint32_t tiling_mode, uint32_t dim)
 {
-  uint32_t gen_ver = ((intel_driver_t *)ctx->drv)->gen_ver;
-  uint32_t ret = 0;
-
-  switch (tiling_mode) {
-  case CL_TILE_X:
-    if (dim == 0) { //tileX width in bytes
-      ret = 512;
-    } else if (dim == 1) { //tileX height in number of rows
+uint32_t gen_ver = ((intel_driver_t *)ctx->drv)->gen_ver;
+uint32_t ret = 0;
+
+switch (tiling_mode) {
+case CL_TILE_X:
+  if (dim == 0) { //tileX width in bytes
+    ret = 512;
+  } else if (dim == 1) { //tileX height in number of rows
+    ret = 8;
+  }  else if (dim == 2) { //height to calculate slice pitch
+    if (gen_ver == 9) //SKL same as tileY height
       ret = 8;
-    }  else if (dim == 2) { //height to calculate slice pitch
-      if (gen_ver == 9) //SKL same as tileY height
-        ret = 8;
-      else if (gen_ver == 8)  //IVB, HSW, BDW same as CL_NO_TILE vertical alignment
-        ret = 4;
-      else
-        ret = 2;
-    } else
-      assert(0);
-    break;
-
-  case CL_TILE_Y:
-    if (dim == 0) { //tileY width in bytes
-      ret = 128;
-    } else if (dim == 1) { //tileY height in number of rows
+    else if (gen_ver == 8)  //IVB, HSW, BDW same as CL_NO_TILE vertical alignment
+      ret = 4;
+    else
+      ret = 2;
+  } else
+    assert(0);
+  break;
+
+case CL_TILE_Y:
+  if (dim == 0) { //tileY width in bytes
+    ret = 128;
+  } else if (dim == 1) { //tileY height in number of rows
+    ret = 32;
+  } else if (dim == 2) { //height to calculate slice pitch
+    if (gen_ver == 9) //SKL same as tileY height
       ret = 32;
-    } else if (dim == 2) { //height to calculate slice pitch
-      if (gen_ver == 9) //SKL same as tileY height
-        ret = 32;
-      else if (gen_ver == 8) //IVB, HSW, BDW same as CL_NO_TILE vertical alignment
-        ret = 4;
-      else
-        ret = 2;
-    } else
-      assert(0);
-    break;
+    else if (gen_ver == 8) //IVB, HSW, BDW same as CL_NO_TILE vertical alignment
+      ret = 4;
+    else
+      ret = 2;
+  } else
+    assert(0);
+  break;
 
-  case CL_NO_TILE:
-    if (dim == 1 || dim == 2) { //vertical alignment
-      if (gen_ver == 8 || gen_ver == 9) //SKL 1D array need 4 alignment qpitch
-        ret = 4;
-      else
-        ret = 2;
-    } else
-      assert(0);
-    break;
-  }
+case CL_NO_TILE:
+  if (dim == 1 || dim == 2) { //vertical alignment
+    if (gen_ver == 8 || gen_ver == 9) //SKL 1D array need 4 alignment qpitch
+      ret = 4;
+    else
+      ret = 2;
+  } else
+    assert(0);
+  break;
+}
 
-  return ret;
+return ret;
 }
 
 #if defined(HAS_GL_EGL)
@@ -567,449 +567,450 @@ static PFNEGLEXPORTDMABUFIMAGEMESAPROC eglExportDMABUFImageMESA_func = NULL;
 static int
 get_required_egl_extensions(){
 
+if(eglExportDMABUFImageMESA_func == NULL){
+  eglExportDMABUFImageMESA_func =  (PFNEGLEXPORTDMABUFIMAGEMESAPROC) eglGetProcAddress("eglExportDMABUFImageMESA");
   if(eglExportDMABUFImageMESA_func == NULL){
-    eglExportDMABUFImageMESA_func =  (PFNEGLEXPORTDMABUFIMAGEMESAPROC) eglGetProcAddress("eglExportDMABUFImageMESA");
-    if(eglExportDMABUFImageMESA_func == NULL){
-      fprintf(stderr, "Failed to get EGL extension function eglExportDMABUFImageMESA\n");
-      return -1;
-    }
+    fprintf(stderr, "Failed to get EGL extension function eglExportDMABUFImageMESA\n");
+    return -1;
   }
-  return 0;
+}
+return 0;
 }
 
 
 static int cl_get_clformat_from_texture(GLint tex_format, cl_image_format * cl_format)
 {
-  cl_int ret = CL_SUCCESS;
-
-  switch (tex_format) {
-  case GL_RGBA8:
-  case GL_RGBA:
-  case GL_RGBA16:
-  case GL_RGBA8I:
-  case GL_RGBA16I:
-  case GL_RGBA32I:
-  case GL_RGBA8UI:
-  case GL_RGBA16UI:
-  case GL_RGBA32UI:
-  case GL_RGBA16F:
-  case GL_RGBA32F:
-    cl_format->image_channel_order = CL_RGBA;
-    break;
-  case GL_BGRA:
-    cl_format->image_channel_order = CL_BGRA;
-    break;
-  default:
-    ret = -1;
-    goto error;
-  }
+cl_int ret = CL_SUCCESS;
+
+switch (tex_format) {
+case GL_RGBA8:
+case GL_RGBA:
+case GL_RGBA16:
+case GL_RGBA8I:
+case GL_RGBA16I:
+case GL_RGBA32I:
+case GL_RGBA8UI:
+case GL_RGBA16UI:
+case GL_RGBA32UI:
+case GL_RGBA16F:
+case GL_RGBA32F:
+  cl_format->image_channel_order = CL_RGBA;
+  break;
+case GL_BGRA:
+  cl_format->image_channel_order = CL_BGRA;
+  break;
+default:
+  ret = -1;
+  goto error;
+}
 
-  switch (tex_format) {
-  case GL_RGBA8:
-  case GL_RGBA:
-  case GL_BGRA:
-    cl_format->image_channel_data_type = CL_UNORM_INT8;
-    break;
-  case GL_RGBA16:
-    cl_format->image_channel_data_type = CL_UNORM_INT16;
-    break;
-  case GL_RGBA8I:
-    cl_format->image_channel_data_type = CL_SIGNED_INT8;
-    break;
-  case GL_RGBA16I:
-    cl_format->image_channel_data_type = CL_SIGNED_INT16;
-    break;
-  case GL_RGBA32I:
-    cl_format->image_channel_data_type = CL_SIGNED_INT32;
-    break;
-  case GL_RGBA8UI:
-    cl_format->image_channel_data_type = CL_UNSIGNED_INT8;
-    break;
-  case GL_RGBA16UI:
-    cl_format->image_channel_data_type = CL_UNSIGNED_INT16;
-    break;
-  case GL_RGBA32UI:
-    cl_format->image_channel_data_type = CL_UNSIGNED_INT32;
-    break;
-  case GL_RGBA16F:
-    cl_format->image_channel_data_type = CL_HALF_FLOAT;
-    break;
-  case GL_RGBA32F:
-    cl_format->image_channel_order = CL_FLOAT;
-    break;
-  default:
-    ret = -1;
-    goto error;
-  }
+switch (tex_format) {
+case GL_RGBA8:
+case GL_RGBA:
+case GL_BGRA:
+  cl_format->image_channel_data_type = CL_UNORM_INT8;
+  break;
+case GL_RGBA16:
+  cl_format->image_channel_data_type = CL_UNORM_INT16;
+  break;
+case GL_RGBA8I:
+  cl_format->image_channel_data_type = CL_SIGNED_INT8;
+  break;
+case GL_RGBA16I:
+  cl_format->image_channel_data_type = CL_SIGNED_INT16;
+  break;
+case GL_RGBA32I:
+  cl_format->image_channel_data_type = CL_SIGNED_INT32;
+  break;
+case GL_RGBA8UI:
+  cl_format->image_channel_data_type = CL_UNSIGNED_INT8;
+  break;
+case GL_RGBA16UI:
+  cl_format->image_channel_data_type = CL_UNSIGNED_INT16;
+  break;
+case GL_RGBA32UI:
+  cl_format->image_channel_data_type = CL_UNSIGNED_INT32;
+  break;
+case GL_RGBA16F:
+  cl_format->image_channel_data_type = CL_HALF_FLOAT;
+  break;
+case GL_RGBA32F:
+  cl_format->image_channel_order = CL_FLOAT;
+  break;
+default:
+  ret = -1;
+  goto error;
+}
 
 error:
-  return ret;
+return ret;
 }
 
 static int
 get_mem_type_from_target(GLenum texture_target, cl_mem_object_type *type)
 {
-  switch(texture_target) {
-  case GL_TEXTURE_1D: *type = CL_MEM_OBJECT_IMAGE1D; break;
-  case GL_TEXTURE_2D: *type = CL_MEM_OBJECT_IMAGE2D; break;
-  case GL_TEXTURE_3D: *type = CL_MEM_OBJECT_IMAGE3D; break;
-  case GL_TEXTURE_1D_ARRAY: *type = CL_MEM_OBJECT_IMAGE1D_ARRAY; break;
-  case GL_TEXTURE_2D_ARRAY: *type = CL_MEM_OBJECT_IMAGE2D_ARRAY; break;
-  default:
-    return -1;
-  }
-  return CL_SUCCESS;
+switch(texture_target) {
+case GL_TEXTURE_1D: *type = CL_MEM_OBJECT_IMAGE1D; break;
+case GL_TEXTURE_2D: *type = CL_MEM_OBJECT_IMAGE2D; break;
+case GL_TEXTURE_3D: *type = CL_MEM_OBJECT_IMAGE3D; break;
+case GL_TEXTURE_1D_ARRAY: *type = CL_MEM_OBJECT_IMAGE1D_ARRAY; break;
+case GL_TEXTURE_2D_ARRAY: *type = CL_MEM_OBJECT_IMAGE2D_ARRAY; break;
+default:
+  return -1;
+}
+return CL_SUCCESS;
 }
 
 static cl_buffer
 intel_alloc_buffer_from_texture_egl(cl_context ctx, unsigned int target,
-                                    int miplevel, unsigned int texture,
-                                    struct _cl_mem_image *image)
+                                  int miplevel, unsigned int texture,
+                                  struct _cl_mem_image *image)
 {
-  drm_intel_bo *intel_bo = NULL;
-  struct _intel_cl_gl_share_image_info info;
-  unsigned int bpp, intel_fmt;
-  cl_image_format cl_format;
-  EGLBoolean ret;
-
-  EGLenum e_target;
-  //We just support GL_TEXTURE_2D because we can't query info like slice_pitch now.
-  if(target == GL_TEXTURE_2D)
-    e_target = EGL_GL_TEXTURE_2D;
-  else
-    return NULL;
-
-  if(get_required_egl_extensions() != 0)
-    return NULL;
-
-  EGLAttrib attrib_list[] = {EGL_GL_TEXTURE_LEVEL, miplevel,
-                            EGL_NONE};
-  EGLImage e_image = eglCreateImage(EGL_DISP(ctx), EGL_CTX(ctx), e_target,
-                                    (EGLClientBuffer)texture, &attrib_list[0]);
-  if(e_image == EGL_NO_IMAGE)
-    return NULL;
-
-  int fd, stride, offset;
-  ret = eglExportDMABUFImageMESA_func(EGL_DISP(ctx), e_image, &fd, &stride, &offset);
-  if(ret != EGL_TRUE){
-    eglDestroyImage(EGL_DISP(ctx), e_image);
-    return NULL;
-  }
-  info.fd = fd;
+drm_intel_bo *intel_bo = NULL;
+struct _intel_cl_gl_share_image_info info;
+unsigned int bpp, intel_fmt;
+cl_image_format cl_format;
+EGLBoolean ret;
+
+EGLenum e_target;
+//We only support GL_TEXTURE_2D because we can't query info like slice_pitch now.
+if(target == GL_TEXTURE_2D)
+  e_target = EGL_GL_TEXTURE_2D;
+else
+  return NULL;
 
-  /* The size argument just takes effect in intel_driver_share_buffer_from_fd when
-   * Linux kernel is older than 3.12, so it doesn't matter we set to 0 here.
-   */
-  int size = 0;
-  intel_bo = intel_driver_share_buffer_from_fd((intel_driver_t *)ctx->drv, fd, size);
+if(get_required_egl_extensions() != 0)
+  return NULL;
 
-  if (intel_bo == NULL) {
-    eglDestroyImage(EGL_DISP(ctx), e_image);
-    return NULL;
-  }
+EGLAttrib attrib_list[] = {EGL_GL_TEXTURE_LEVEL, miplevel,
+                          EGL_NONE};
+EGLImage e_image = eglCreateImage(EGL_DISP(ctx), EGL_CTX(ctx), e_target,
+                                  (EGLClientBuffer)texture, &attrib_list[0]);
+if(e_image == EGL_NO_IMAGE)
+  return NULL;
+
+int fd, stride, offset;
+ret = eglExportDMABUFImageMESA_func(EGL_DISP(ctx), e_image, &fd, &stride, &offset);
+if(ret != EGL_TRUE){
+  eglDestroyImage(EGL_DISP(ctx), e_image);
+  return NULL;
+}
+info.fd = fd;
 
-  GLint param_value;
-  glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_WIDTH, &param_value);
-  info.w = param_value;
-  glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_HEIGHT, &param_value);
-  info.h = param_value;
-  glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_DEPTH, &param_value);
-  info.depth = 1;
-  info.pitch = stride;
-  uint32_t tiling_mode, swizzle_mode;
-  drm_intel_bo_get_tiling(intel_bo, &tiling_mode, &swizzle_mode);
-  info.offset = offset;
-  info.tile_x = 0;
-  info.tile_y = 0;
-  glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_INTERNAL_FORMAT, &param_value);
-  info.gl_format = param_value;
-  info.row_pitch = stride;
-  info.slice_pitch = 0;
-
-  info.tiling = get_cl_tiling(tiling_mode);
-  if (cl_get_clformat_from_texture(info.gl_format, &cl_format) != 0)
-    goto error;
-
-  if (cl_image_byte_per_pixel(&cl_format, &bpp) != CL_SUCCESS)
-    goto error;
-  intel_fmt = cl_image_get_intel_format(&cl_format);
-  if (intel_fmt == INTEL_UNSUPPORTED_FORMAT)
-    goto error;
-  cl_mem_object_type image_type;
-  if (get_mem_type_from_target(target, &image_type) != 0)
-    goto error;
-
-  cl_mem_image_init(image, info.w, info.h,
-                    image_type, info.depth, cl_format,
-                    intel_fmt, bpp, info.row_pitch,
-                    info.slice_pitch, info.tiling,
-                    info.tile_x, info.tile_y, info.offset);
-
-  struct _cl_mem_gl_image *gl_image = (struct _cl_mem_gl_image*)image;
-  gl_image->fd = fd;
-  gl_image->egl_image = e_image;
-
-  return (cl_buffer) intel_bo;
+/* The size argument just takes effect in intel_driver_share_buffer_from_fd when
+ * Linux kernel is older than 3.12, so it doesn't matter we set to 0 here.
+ */
+int size = 0;
+intel_bo = intel_driver_share_buffer_from_fd((intel_driver_t *)ctx->drv, fd, size);
 
-error:
-  drm_intel_bo_unreference(intel_bo);
-  close(fd);
+if (intel_bo == NULL) {
   eglDestroyImage(EGL_DISP(ctx), e_image);
   return NULL;
 }
 
+GLint param_value;
+glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_WIDTH, &param_value);
+info.w = param_value;
+glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_HEIGHT, &param_value);
+info.h = param_value;
+glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_DEPTH, &param_value);
+info.depth = 1;
+info.pitch = stride;
+uint32_t tiling_mode, swizzle_mode;
+drm_intel_bo_get_tiling(intel_bo, &tiling_mode, &swizzle_mode);
+info.offset = offset;
+info.tile_x = 0;
+info.tile_y = 0;
+glGetTexLevelParameteriv(target, miplevel, GL_TEXTURE_INTERNAL_FORMAT, &param_value);
+info.gl_format = param_value;
+info.row_pitch = stride;
+info.slice_pitch = 0;
+
+info.tiling = get_cl_tiling(tiling_mode);
+if (cl_get_clformat_from_texture(info.gl_format, &cl_format) != 0)
+  goto error;
+
+if (cl_image_byte_per_pixel(&cl_format, &bpp) != CL_SUCCESS)
+  goto error;
+intel_fmt = cl_image_get_intel_format(&cl_format);
+if (intel_fmt == INTEL_UNSUPPORTED_FORMAT)
+  goto error;
+cl_mem_object_type image_type;
+if (get_mem_type_from_target(target, &image_type) != 0)
+  goto error;
+
+cl_mem_image_init(image, info.w, info.h,
+                  image_type, info.depth, cl_format,
+                  intel_fmt, bpp, info.row_pitch,
+                  info.slice_pitch, info.tiling,
+                  info.tile_x, info.tile_y, info.offset);
+
+struct _cl_mem_gl_image *gl_image = (struct _cl_mem_gl_image*)image;
+gl_image->fd = fd;
+gl_image->egl_image = e_image;
+
+return (cl_buffer) intel_bo;
+
+error:
+drm_intel_bo_unreference(intel_bo);
+close(fd);
+eglDestroyImage(EGL_DISP(ctx), e_image);
+return NULL;
+}
+
 static cl_buffer
 intel_alloc_buffer_from_texture(cl_context ctx, unsigned int target,
-                                int miplevel, unsigned int texture,
-                                struct _cl_mem_image *image)
+                              int miplevel, unsigned int texture,
+                              struct _cl_mem_image *image)
 {
 
-  if (IS_EGL_CONTEXT(ctx))
-    return intel_alloc_buffer_from_texture_egl(ctx, target, miplevel, texture, image);
+if (IS_EGL_CONTEXT(ctx))
+  return intel_alloc_buffer_from_texture_egl(ctx, target, miplevel, texture, image);
 
-  return NULL;
+return NULL;
 }
 
 static int
 intel_release_buffer_from_texture(cl_context ctx, struct _cl_mem_gl_image *gl_image)
 {
-  if (IS_EGL_CONTEXT(ctx)) {
-    close(gl_image->fd);
-    eglDestroyImage(EGL_DISP(ctx), gl_image->egl_image);
-    return CL_SUCCESS;
-  }
-  return -1;
+if (IS_EGL_CONTEXT(ctx)) {
+  close(gl_image->fd);
+  eglDestroyImage(EGL_DISP(ctx), gl_image->egl_image);
+  return CL_SUCCESS;
+}
+return -1;
 }
 #endif
 
 cl_buffer intel_share_buffer_from_libva(cl_context ctx,
-                                        unsigned int bo_name,
-                                        size_t *sz)
+                                      unsigned int bo_name,
+                                      size_t *sz)
 {
-  drm_intel_bo *intel_bo;
+drm_intel_bo *intel_bo;
 
-  intel_bo = intel_driver_share_buffer_from_name((intel_driver_t *)ctx->drv, "shared from libva", bo_name);
+intel_bo = intel_driver_share_buffer_from_name((intel_driver_t *)ctx->drv, "shared from libva", bo_name);
 
-  if (intel_bo == NULL)
-    return NULL;
+if (intel_bo == NULL)
+  return NULL;
 
-  if (sz)
-    *sz = intel_bo->size;
+if (sz)
+  *sz = intel_bo->size;
 
-  return (cl_buffer)intel_bo;
+return (cl_buffer)intel_bo;
 }
 
 cl_buffer intel_share_image_from_libva(cl_context ctx,
-                                       unsigned int bo_name,
-                                       struct _cl_mem_image *image)
+                                     unsigned int bo_name,
+                                     struct _cl_mem_image *image)
 {
-  drm_intel_bo *intel_bo;
-  uint32_t intel_tiling, intel_swizzle_mode;
+drm_intel_bo *intel_bo;
+uint32_t intel_tiling, intel_swizzle_mode;
 
-  intel_bo = intel_driver_share_buffer_from_name((intel_driver_t *)ctx->drv, "shared from libva", bo_name);
+intel_bo = intel_driver_share_buffer_from_name((intel_driver_t *)ctx->drv, "shared from libva", bo_name);
 
-  if (intel_bo == NULL)
-    return NULL;
+if (intel_bo == NULL)
+  return NULL;
 
-  drm_intel_bo_get_tiling(intel_bo, &intel_tiling, &intel_swizzle_mode);
-  image->tiling = get_cl_tiling(intel_tiling);
+drm_intel_bo_get_tiling(intel_bo, &intel_tiling, &intel_swizzle_mode);
+image->tiling = get_cl_tiling(intel_tiling);
 
-  return (cl_buffer)intel_bo;
+return (cl_buffer)intel_bo;
 }
 
 cl_buffer intel_share_buffer_from_fd(cl_context ctx,
-                                     int fd,
-                                     int buffer_size)
+                                   int fd,
+                                   int buffer_size)
 {
-  drm_intel_bo *intel_bo;
+drm_intel_bo *intel_bo;
 
-  intel_bo = intel_driver_share_buffer_from_fd((intel_driver_t *)ctx->drv, fd, buffer_size);
+intel_bo = intel_driver_share_buffer_from_fd((intel_driver_t *)ctx->drv, fd, buffer_size);
 
-  if (intel_bo == NULL)
-    return NULL;
+if (intel_bo == NULL)
+  return NULL;
 
-  return (cl_buffer)intel_bo;
+return (cl_buffer)intel_bo;
 }
 
 cl_buffer intel_share_image_from_fd(cl_context ctx,
-                                    int fd,
-                                    int image_size,
-                                    struct _cl_mem_image *image)
+                                  int fd,
+                                  int image_size,
+                                  struct _cl_mem_image *image)
 {
-  drm_intel_bo *intel_bo;
-  uint32_t intel_tiling, intel_swizzle_mode;
+drm_intel_bo *intel_bo;
+uint32_t intel_tiling, intel_swizzle_mode;
 
-  intel_bo = intel_driver_share_buffer_from_fd((intel_driver_t *)ctx->drv, fd, image_size);
+intel_bo = intel_driver_share_buffer_from_fd((intel_driver_t *)ctx->drv, fd, image_size);
 
-  if (intel_bo == NULL)
-    return NULL;
+if (intel_bo == NULL)
+  return NULL;
 
-  drm_intel_bo_get_tiling(intel_bo, &intel_tiling, &intel_swizzle_mode);
-  image->tiling = get_cl_tiling(intel_tiling);
+drm_intel_bo_get_tiling(intel_bo, &intel_tiling, &intel_swizzle_mode);
+image->tiling = get_cl_tiling(intel_tiling);
 
-  return (cl_buffer)intel_bo;
+return (cl_buffer)intel_bo;
 }
 
 static cl_buffer intel_buffer_alloc_userptr(cl_buffer_mgr bufmgr, const char* name, void *data,size_t size, unsigned long flags)
 {
 #ifdef HAS_USERPTR
-  drm_intel_bo *bo;
-  bo = drm_intel_bo_alloc_userptr((drm_intel_bufmgr *)bufmgr, name, data, I915_TILING_NONE, 0, size, flags);
-  /* Fallback to unsynchronized userptr allocation if kernel has no MMU notifier enabled. */
-  if (bo == NULL)
-    bo = drm_intel_bo_alloc_userptr((drm_intel_bufmgr *)bufmgr, name, data, I915_TILING_NONE, 0, size, flags | I915_USERPTR_UNSYNCHRONIZED);
-  return (cl_buffer)bo;
+drm_intel_bo *bo;
+bo = drm_intel_bo_alloc_userptr((drm_intel_bufmgr *)bufmgr, name, data, I915_TILING_NONE, 0, size, flags);
+/* Fallback to unsynchronized userptr allocation if kernel has no MMU notifier enabled. */
+if (bo == NULL)
+  bo = drm_intel_bo_alloc_userptr((drm_intel_bufmgr *)bufmgr, name, data, I915_TILING_NONE, 0, size, flags | I915_USERPTR_UNSYNCHRONIZED);
+return (cl_buffer)bo;
 #else
-  return NULL;
+return NULL;
 #endif
 }
 
 static int32_t get_intel_tiling(cl_int tiling, uint32_t *intel_tiling)
 {
-  switch (tiling) {
-    case CL_NO_TILE:
-      *intel_tiling = I915_TILING_NONE;
-      break;
-    case CL_TILE_X:
-      *intel_tiling = I915_TILING_X;
-      break;
-    case CL_TILE_Y:
-      *intel_tiling = I915_TILING_Y;
-      break;
-    default:
-      assert(0);
-      return -1;
-  }
-  return 0;
+switch (tiling) {
+  case CL_NO_TILE:
+    *intel_tiling = I915_TILING_NONE;
+    break;
+  case CL_TILE_X:
+    *intel_tiling = I915_TILING_X;
+    break;
+  case CL_TILE_Y:
+    *intel_tiling = I915_TILING_Y;
+    break;
+  default:
+    assert(0);
+    return -1;
+}
+return 0;
 }
 
 static int intel_buffer_set_tiling(cl_buffer bo,
-                                   cl_image_tiling_t tiling, size_t stride)
+                                 cl_image_tiling_t tiling, size_t stride)
 {
-  uint32_t intel_tiling;
-  int ret;
-  if (UNLIKELY((get_intel_tiling(tiling, &intel_tiling)) < 0))
-    return -1;
+uint32_t intel_tiling;
+int ret;
+if (UNLIKELY((get_intel_tiling(tiling, &intel_tiling)) < 0))
+  return -1;
 #ifndef NDEBUG
-  uint32_t required_tiling;
-  required_tiling = intel_tiling;
+uint32_t required_tiling;
+required_tiling = intel_tiling;
 #endif
-  ret = drm_intel_bo_set_tiling((drm_intel_bo*)bo, &intel_tiling, stride);
-  assert(intel_tiling == required_tiling);
-  return ret;
+ret = drm_intel_bo_set_tiling((drm_intel_bo*)bo, &intel_tiling, stride);
+assert(intel_tiling == required_tiling);
+return ret;
 }
 
 #define CHV_CONFIG_WARNING \
-        "Warning: can't get GPU's configurations, will use the minimal one. Please update your drm to 2.4.59+ and linux kernel to 4.0.0+.\n"
+      "Warning: can't get GPU's configurations, will use the minimal one. Please update your drm to 2.4.59+ and linux kernel to 4.0.0+.\n"
 static void
 intel_update_device_info(cl_device_id device)
 {
-  intel_driver_t *driver;
+intel_driver_t *driver;
 
-  driver = intel_driver_new();
-  assert(driver != NULL);
-  if (intel_driver_open(driver, NULL) != CL_SUCCESS) {
-    intel_driver_delete(driver);
-    return;
-  }
+driver = intel_driver_new();
+assert(driver != NULL);
+if (intel_driver_open(driver, NULL) != CL_SUCCESS) {
+  intel_driver_delete(driver);
+  return;
+}
 
 #ifdef HAS_USERPTR
-  const size_t sz = 4096;
-  void *host_ptr;
-
-  host_ptr = cl_aligned_malloc(sz, 4096);
-  if (host_ptr != NULL) {
-    cl_buffer bo = intel_buffer_alloc_userptr((cl_buffer_mgr)driver->bufmgr,
-      "CL memory object", host_ptr, sz, 0);
-    if (bo == NULL)
-      device->host_unified_memory = CL_FALSE;
-    else
-      drm_intel_bo_unreference((drm_intel_bo*)bo);
-    cl_free(host_ptr);
-  }
-  else
+const size_t sz = 4096;
+void *host_ptr;
+
+host_ptr = cl_aligned_malloc(sz, 4096);
+if (host_ptr != NULL) {
+  cl_buffer bo = intel_buffer_alloc_userptr((cl_buffer_mgr)driver->bufmgr,
+    "CL memory object", host_ptr, sz, 0);
+  if (bo == NULL)
     device->host_unified_memory = CL_FALSE;
+  else
+    drm_intel_bo_unreference((drm_intel_bo*)bo);
+  cl_free(host_ptr);
+}
+else
+  device->host_unified_memory = CL_FALSE;
 #endif
 
 #ifdef HAS_EU_TOTAL
-  unsigned int eu_total;
+unsigned int eu_total;
 
-  /* Prefer driver-queried max compute units if supported */
-  if (!drm_intel_get_eu_total(driver->fd, &eu_total))
-    device->max_compute_unit = eu_total;
-  else if (IS_CHERRYVIEW(device->device_id))
-    printf(CHV_CONFIG_WARNING);
+/* Prefer driver-queried max compute units if supported */
+if (!drm_intel_get_eu_total(driver->fd, &eu_total))
+  device->max_compute_unit = eu_total;
+else if (IS_CHERRYVIEW(device->device_id))
+  printf(CHV_CONFIG_WARNING);
 #else
-  if (IS_CHERRYVIEW(device->device_id)) {
+if (IS_CHERRYVIEW(device->device_id)) {
 #if defined(__ANDROID__)
-    device->max_compute_unit = 12;
+  device->max_compute_unit = 12;
 #else
-    printf(CHV_CONFIG_WARNING);
+  printf(CHV_CONFIG_WARNING);
 #endif
-  }
+}
 #endif
 
 #ifdef HAS_SUBSLICE_TOTAL
-  unsigned int subslice_total;
+unsigned int subslice_total;
 
-  /* Prefer driver-queried subslice count if supported */
-  if (!drm_intel_get_subslice_total(driver->fd, &subslice_total))
-    device->sub_slice_count = subslice_total;
-  else if (IS_CHERRYVIEW(device->device_id))
-    printf(CHV_CONFIG_WARNING);
+/* Prefer driver-queried subslice count if supported */
+if (!drm_intel_get_subslice_total(driver->fd, &subslice_total))
+  device->sub_slice_count = subslice_total;
+else if (IS_CHERRYVIEW(device->device_id))
+  printf(CHV_CONFIG_WARNING);
 #else
-  if (IS_CHERRYVIEW(device->device_id)) {
+if (IS_CHERRYVIEW(device->device_id)) {
 #if defined(__ANDROID__)
-    device->sub_slice_count = 2;
+  device->sub_slice_count = 2;
 #else
-    printf(CHV_CONFIG_WARNING);
+  printf(CHV_CONFIG_WARNING);
 #endif
-  }
+}
 #endif
 
 #ifdef HAS_POOLED_EU
-  /* BXT pooled eu, 3*6 to 2*9, like sub slice count is 2 */
-  int has_pooled_eu;
-  if((has_pooled_eu = drm_intel_get_pooled_eu(driver->fd)) > 0)
-    device->sub_slice_count = 2;
+/* BXT pooled EU: 3*6 is reconfigured to 2*9, so the sub slice count is 2. */
+int has_pooled_eu;
+if((has_pooled_eu = drm_intel_get_pooled_eu(driver->fd)) > 0)
+  device->sub_slice_count = 2;
 
 #ifdef HAS_MIN_EU_IN_POOL
-  int min_eu;
-  /* for fused down 2x6 devices, beignet don't support. */
-  if (has_pooled_eu > 0 && (min_eu = drm_intel_get_min_eu_in_pool(driver->fd)) > 0) {
-    assert(min_eu == 9); //don't support fuse down device.
-  }
+int min_eu;
+/* Beignet doesn't support fused-down 2x6 devices. */
+if (has_pooled_eu > 0 && (min_eu = drm_intel_get_min_eu_in_pool(driver->fd)) > 0) {
+  assert(min_eu == 9); // fused-down devices are not supported.
+}
 #endif //HAS_MIN_EU_IN_POOL
 #endif //HAS_POOLED_EU
-  //We should get the device memory dynamically, but the
-  //mapablce mem size usage is unknown. Just ignore it.
-  size_t total_mem,map_mem;
-  if(drm_intel_get_aperture_sizes(driver->fd,&map_mem,&total_mem) == 0)
-    device->global_mem_size = (cl_ulong)total_mem;
-
-  intel_driver_context_destroy(driver);
-  intel_driver_close(driver);
-  intel_driver_terminate(driver);
-  intel_driver_delete(driver);
+//We should get the device memory size dynamically, but the
+//mappable mem size usage is unknown. Just ignore it.
+size_t total_mem,map_mem;
+if(drm_intel_get_aperture_sizes(driver->fd,&map_mem,&total_mem) == 0)
+  device->global_mem_size = (cl_ulong)total_mem;
+
+intel_driver_context_destroy(driver);
+intel_driver_close(driver);
+intel_driver_terminate(driver);
+intel_driver_delete(driver);
 }
 
 LOCAL void
 intel_setup_callbacks(void)
 {
-  cl_driver_new = (cl_driver_new_cb *) cl_intel_driver_new;
-  cl_driver_delete = (cl_driver_delete_cb *) cl_intel_driver_delete;
-  cl_driver_get_ver = (cl_driver_get_ver_cb *) intel_driver_get_ver;
-  cl_driver_enlarge_stack_size = (cl_driver_enlarge_stack_size_cb *) intel_driver_enlarge_stack_size;
-  cl_driver_set_atomic_flag = (cl_driver_set_atomic_flag_cb *) intel_driver_set_atomic_flag;
-  cl_driver_get_bufmgr = (cl_driver_get_bufmgr_cb *) intel_driver_get_bufmgr;
-  cl_driver_get_device_id = (cl_driver_get_device_id_cb *) intel_get_device_id;
-  cl_driver_update_device_info = (cl_driver_update_device_info_cb *) intel_update_device_info;
-  cl_buffer_alloc = (cl_buffer_alloc_cb *) drm_intel_bo_alloc;
-  cl_buffer_alloc_userptr = (cl_buffer_alloc_userptr_cb*) intel_buffer_alloc_userptr;
+cl_driver_new = (cl_driver_new_cb *) cl_intel_driver_new;
+cl_driver_delete = (cl_driver_delete_cb *) cl_intel_driver_delete;
+cl_driver_get_ver = (cl_driver_get_ver_cb *) intel_driver_get_ver;
+cl_driver_enlarge_stack_size = (cl_driver_enlarge_stack_size_cb *) intel_driver_enlarge_stack_size;
+cl_driver_set_atomic_flag = (cl_driver_set_atomic_flag_cb *) intel_driver_set_atomic_flag;
+cl_driver_get_bufmgr = (cl_driver_get_bufmgr_cb *) intel_driver_get_bufmgr;
+cl_driver_get_device_id = (cl_driver_get_device_id_cb *) intel_get_device_id;
+cl_driver_update_device_info = (cl_driver_update_device_info_cb *) intel_update_device_info;
+cl_buffer_alloc = (cl_buffer_alloc_cb *) drm_intel_bo_alloc;
+cl_buffer_alloc_userptr = (cl_buffer_alloc_userptr_cb*) intel_buffer_alloc_userptr;
 #ifdef HAS_BO_SET_SOFTPIN
-  cl_buffer_set_softpin_offset = (cl_buffer_set_softpin_offset_cb *) drm_intel_bo_set_softpin_offset;
-  cl_buffer_set_bo_use_full_range = (cl_buffer_set_bo_use_full_range_cb *) drm_intel_bo_use_48b_address_range;
+cl_buffer_set_softpin_offset = (cl_buffer_set_softpin_offset_cb *) drm_intel_bo_set_softpin_offset;
+cl_buffer_set_bo_use_full_range = (cl_buffer_set_bo_use_full_range_cb *) drm_intel_bo_use_48b_address_range;
 #endif
+  cl_buffer_disable_reuse = (cl_buffer_disable_reuse_cb *) drm_intel_bo_disable_reuse;
   cl_buffer_set_tiling = (cl_buffer_set_tiling_cb *) intel_buffer_set_tiling;
 #if defined(HAS_GL_EGL)
   cl_buffer_alloc_from_texture = (cl_buffer_alloc_from_texture_cb *) intel_alloc_buffer_from_texture;
-- 
2.1.4



More information about the Beignet mailing list