[Beignet] [PATCH v2 1/5] Make use of write enable flag for mem bo map

Zhenyu Wang zhenyuw at linux.intel.com
Thu Oct 23 00:19:22 PDT 2014


Use the drm/intel optimization for mem bo mapping that distinguishes read-only
from writable maps, so read-only maps may have to wait less for the GPU.
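
For reference (not part of the patch), a minimal sketch of the libdrm call this
relies on, assuming cl_buffer_map() is backed by drm_intel_bo_map() in the drm
backend: its second argument is a write_enable flag, and a read-only map may
only need to wait for outstanding GPU writes rather than for all GPU access.

  #include <intel_bufmgr.h>   /* libdrm_intel */

  /* Sketch only: map a bo for CPU access, read-only or write-enabled. */
  static void *map_bo(drm_intel_bo *bo, int write_enable)
  {
    /* write_enable == 0 requests a read-only CPU map, which can stall
     * for a shorter time than a writable map. */
    if (drm_intel_bo_map(bo, write_enable) != 0)
      return NULL;
    return bo->virtual;
  }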

This also adds a 'map_flags' check in clEnqueueMapBuffer/clEnqueueMapImage so
the bo is only mapped write-enabled when the caller actually requests a write
mapping.
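
As an illustration of the caller side (host code, not part of the patch; queue,
buf and size are placeholders), a read-only mapping request like the following
should no longer force a write-enabled bo map:

  #include <CL/cl.h>

  /* Illustration only: queue, buf and size are assumed to exist. */
  cl_int err;
  void *p = clEnqueueMapBuffer(queue, buf, CL_TRUE, CL_MAP_READ,
                               0, size, 0, NULL, NULL, &err);
  /* ... host reads through p only ... */
  err = clEnqueueUnmapMemObject(queue, buf, p, 0, NULL, NULL);

Requesting CL_MAP_WRITE or CL_MAP_WRITE_INVALIDATE_REGION instead sets the new
write_map flag and keeps the writable mapping.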

clMapBufferIntel is left untouched for now, since changing it might break the
ABI/API.

v2: Fix write_map flag in clEnqueueMapBuffer/clEnqueueMapImage.

Signed-off-by: Zhenyu Wang <zhenyuw at linux.intel.com>
---
 src/cl_api.c           |  6 +++++-
 src/cl_command_queue.c |  2 +-
 src/cl_enqueue.c       | 18 +++++++++---------
 src/cl_enqueue.h       |  1 +
 src/cl_mem.c           | 18 +++++++++---------
 src/cl_mem.h           |  4 ++--
 6 files changed, 27 insertions(+), 22 deletions(-)

diff --git a/src/cl_api.c b/src/cl_api.c
index 8a2e999..05d3093 100644
--- a/src/cl_api.c
+++ b/src/cl_api.c
@@ -2653,6 +2653,8 @@ clEnqueueMapBuffer(cl_command_queue  command_queue,
   data->size        = size;
   data->ptr         = ptr;
   data->unsync_map  = 1;
+  if (map_flags & (CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION))
+    data->write_map = 1;
 
   if(handle_events(command_queue, num_events_in_wait_list, event_wait_list,
                    event, data, CL_COMMAND_MAP_BUFFER) == CL_ENQUEUE_EXECUTE_IMM) {
@@ -2735,6 +2737,8 @@ clEnqueueMapImage(cl_command_queue   command_queue,
   data->region[0]   = region[0];  data->region[1] = region[1];  data->region[2] = region[2];
   data->ptr         = ptr;
   data->unsync_map  = 1;
+  if (map_flags & (CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION))
+    data->write_map = 1;
 
   if(handle_events(command_queue, num_events_in_wait_list, event_wait_list,
                    event, data, CL_COMMAND_MAP_IMAGE) == CL_ENQUEUE_EXECUTE_IMM) {
@@ -3203,7 +3207,7 @@ clMapBufferIntel(cl_mem mem, cl_int *errcode_ret)
   void *ptr = NULL;
   cl_int err = CL_SUCCESS;
   CHECK_MEM (mem);
-  ptr = cl_mem_map(mem);
+  ptr = cl_mem_map(mem, 1);
 error:
   if (errcode_ret)
     *errcode_ret = err;
diff --git a/src/cl_command_queue.c b/src/cl_command_queue.c
index 48deba0..d07774f 100644
--- a/src/cl_command_queue.c
+++ b/src/cl_command_queue.c
@@ -336,7 +336,7 @@ cl_fulsim_read_all_surfaces(cl_command_queue queue, cl_kernel k)
     assert(mem->bo);
     chunk_n = cl_buffer_get_size(mem->bo) / chunk_sz;
     chunk_remainder = cl_buffer_get_size(mem->bo) % chunk_sz;
-    to = cl_mem_map(mem);
+    to = cl_mem_map(mem, 1);
     for (j = 0; j < chunk_n; ++j) {
       char name[256];
       sprintf(name, "dump%03i.bmp", curr);
diff --git a/src/cl_enqueue.c b/src/cl_enqueue.c
index af118ad..2e43122 100644
--- a/src/cl_enqueue.c
+++ b/src/cl_enqueue.c
@@ -38,7 +38,7 @@ cl_int cl_enqueue_read_buffer(enqueue_data* data)
   void* src_ptr;
   struct _cl_mem_buffer* buffer = (struct _cl_mem_buffer*)mem;
 
-  if (!(src_ptr = cl_mem_map_auto(data->mem_obj))) {
+  if (!(src_ptr = cl_mem_map_auto(data->mem_obj, 0))) {
     err = CL_MAP_FAILURE;
     goto error;
   }
@@ -66,7 +66,7 @@ cl_int cl_enqueue_read_buffer_rect(enqueue_data* data)
          mem->type == CL_MEM_SUBBUFFER_TYPE);
   struct _cl_mem_buffer* buffer = (struct _cl_mem_buffer*)mem;
 
-  if (!(src_ptr = cl_mem_map_auto(mem))) {
+  if (!(src_ptr = cl_mem_map_auto(mem, 0))) {
     err = CL_MAP_FAILURE;
     goto error;
   }
@@ -112,7 +112,7 @@ cl_int cl_enqueue_write_buffer(enqueue_data *data)
   struct _cl_mem_buffer* buffer = (struct _cl_mem_buffer*)mem;
   void* dst_ptr;
 
-  if (!(dst_ptr = cl_mem_map_auto(data->mem_obj))) {
+  if (!(dst_ptr = cl_mem_map_auto(data->mem_obj, 1))) {
     err = CL_MAP_FAILURE;
     goto error;
   }
@@ -140,7 +140,7 @@ cl_int cl_enqueue_write_buffer_rect(enqueue_data *data)
          mem->type == CL_MEM_SUBBUFFER_TYPE);
   struct _cl_mem_buffer* buffer = (struct _cl_mem_buffer*)mem;
 
-  if (!(dst_ptr = cl_mem_map_auto(mem))) {
+  if (!(dst_ptr = cl_mem_map_auto(mem, 1))) {
     err = CL_MAP_FAILURE;
     goto error;
   }
@@ -188,7 +188,7 @@ cl_int cl_enqueue_read_image(enqueue_data *data)
   const size_t* origin = data->origin;
   const size_t* region = data->region;
 
-  if (!(src_ptr = cl_mem_map_auto(mem))) {
+  if (!(src_ptr = cl_mem_map_auto(mem, 0))) {
     err = CL_MAP_FAILURE;
     goto error;
   }
@@ -231,7 +231,7 @@ cl_int cl_enqueue_write_image(enqueue_data *data)
   cl_mem mem = data->mem_obj;
   CHECK_IMAGE(mem, image);
 
-  if (!(dst_ptr = cl_mem_map_auto(mem))) {
+  if (!(dst_ptr = cl_mem_map_auto(mem, 1))) {
     err = CL_MAP_FAILURE;
     goto error;
   }
@@ -260,7 +260,7 @@ cl_int cl_enqueue_map_buffer(enqueue_data *data)
     //because using unsync map in clEnqueueMapBuffer, so force use map_gtt here
     ptr = cl_mem_map_gtt(mem);
   else
-    ptr = cl_mem_map_auto(mem);
+    ptr = cl_mem_map_auto(mem, data->write_map ? 1 : 0);
 
   if (ptr == NULL) {
     err = CL_MAP_FAILURE;
@@ -290,7 +290,7 @@ cl_int cl_enqueue_map_image(enqueue_data *data)
     //because using unsync map in clEnqueueMapBuffer, so force use map_gtt here
     ptr = cl_mem_map_gtt(mem);
   else
-    ptr = cl_mem_map_auto(mem);
+    ptr = cl_mem_map_auto(mem, data->write_map ? 1 : 0);
 
   if (ptr == NULL) {
     err = CL_MAP_FAILURE;
@@ -412,7 +412,7 @@ cl_int cl_enqueue_native_kernel(enqueue_data *data)
       const cl_mem buffer = mem_list[i];
       CHECK_MEM(buffer);
 
-      *((void **)args_mem_loc[i]) = cl_mem_map_auto(buffer);
+      *((void **)args_mem_loc[i]) = cl_mem_map_auto(buffer, 0);
   }
   data->user_func(data->ptr);
 
diff --git a/src/cl_enqueue.h b/src/cl_enqueue.h
index a9b3601..b179f78 100644
--- a/src/cl_enqueue.h
+++ b/src/cl_enqueue.h
@@ -65,6 +65,7 @@ typedef struct _enqueue_data {
   void *            ptr;              /* Ptr for write and return value */
   const cl_mem*     mem_list;         /* mem_list of clEnqueueNativeKernel */
   uint8_t           unsync_map;       /* Indicate the clEnqueueMapBuffer/Image is unsync map */
+  uint8_t           write_map;        /* Indicate if the clEnqueueMapBuffer is write enable */
   void (*user_func)(void *);          /* pointer to a host-callable user function */
 } enqueue_data;
 
diff --git a/src/cl_mem.c b/src/cl_mem.c
index 59265a3..16bd613 100644
--- a/src/cl_mem.c
+++ b/src/cl_mem.c
@@ -571,8 +571,8 @@ void
 cl_mem_copy_image_to_image(const size_t *dst_origin,const size_t *src_origin, const size_t *region,
                            const struct _cl_mem_image *dst_image, const struct _cl_mem_image *src_image)
 {
-  char* dst= cl_mem_map_auto((cl_mem)dst_image);
-  char* src= cl_mem_map_auto((cl_mem)src_image);
+  char* dst= cl_mem_map_auto((cl_mem)dst_image, 1);
+  char* src= cl_mem_map_auto((cl_mem)src_image, 0);
   size_t dst_offset = dst_image->bpp * dst_origin[0] + dst_image->row_pitch * dst_origin[1] + dst_image->slice_pitch * dst_origin[2];
   size_t src_offset = src_image->bpp * src_origin[0] + src_image->row_pitch * src_origin[1] + src_image->slice_pitch * src_origin[2];
   dst= (char*)dst+ dst_offset;
@@ -601,7 +601,7 @@ cl_mem_copy_image(struct _cl_mem_image *image,
 		  size_t slice_pitch,
 		  void* host_ptr)
 {
-  char* dst_ptr = cl_mem_map_auto((cl_mem)image);
+  char* dst_ptr = cl_mem_map_auto((cl_mem)image, 1);
   size_t origin[3] = {0, 0, 0};
   size_t region[3] = {image->w, image->h, image->depth};
 
@@ -907,8 +907,8 @@ _cl_mem_new_image_from_buffer(cl_context ctx,
                     mem_buffer->base.size / bpp, 0, 0, 0, 0, NULL, errcode_ret);
   if (image == NULL)
     return NULL;
-  void *src = cl_mem_map(buffer);
-  void *dst = cl_mem_map(image);
+  void *src = cl_mem_map(buffer, 0);
+  void *dst = cl_mem_map(image, 1);
   //
   // FIXME, we could use copy buffer to image to do this on GPU latter.
   // currently the copy buffer to image function doesn't support 1D image.
@@ -1748,9 +1748,9 @@ cl_mem_copy_buffer_to_image(cl_command_queue queue, cl_mem buffer, struct _cl_me
 
 
 LOCAL void*
-cl_mem_map(cl_mem mem)
+cl_mem_map(cl_mem mem, int write)
 {
-  cl_buffer_map(mem->bo, 1);
+  cl_buffer_map(mem->bo, write);
   assert(cl_buffer_get_virtual(mem->bo));
   return cl_buffer_get_virtual(mem->bo);
 }
@@ -1787,12 +1787,12 @@ cl_mem_unmap_gtt(cl_mem mem)
 }
 
 LOCAL void*
-cl_mem_map_auto(cl_mem mem)
+cl_mem_map_auto(cl_mem mem, int write)
 {
   if (IS_IMAGE(mem) && cl_mem_image(mem)->tiling != CL_NO_TILE)
     return cl_mem_map_gtt(mem);
   else
-    return cl_mem_map(mem);
+    return cl_mem_map(mem, write);
 }
 
 LOCAL cl_int
diff --git a/src/cl_mem.h b/src/cl_mem.h
index 0ccbb5d..95c5f05 100644
--- a/src/cl_mem.h
+++ b/src/cl_mem.h
@@ -232,7 +232,7 @@ extern cl_int cl_mem_copy_buffer_to_image(cl_command_queue, cl_mem, struct _cl_m
                                           const size_t, const size_t *, const size_t *);
 
 /* Directly map a memory object */
-extern void *cl_mem_map(cl_mem);
+extern void *cl_mem_map(cl_mem, int);
 
 /* Unmap a memory object */
 extern cl_int cl_mem_unmap(cl_mem);
@@ -247,7 +247,7 @@ extern void *cl_mem_map_gtt_unsync(cl_mem);
 extern cl_int cl_mem_unmap_gtt(cl_mem);
 
 /* Directly map a memory object - tiled images are mapped in GTT mode */
-extern void *cl_mem_map_auto(cl_mem);
+extern void *cl_mem_map_auto(cl_mem, int);
 
 /* Unmap a memory object - tiled images are unmapped in GTT mode */
 extern cl_int cl_mem_unmap_auto(cl_mem);
-- 
2.1.1


