[Intel-gfx] [PATCH v2 1/2] i965: Cleanup MapBufferRange

Ben Widawsky ben at bwidawsk.net
Mon Sep 26 03:35:31 CEST 2011


Clean up intel_bufferobj_map_range(): validate the access flags up front,
and always use a BO for the range mapping instead of a temporary malloc'd
system buffer, tracking with needs_flush_at_unmap whether the range still
has to be blitted back at unmap time. I've not seen any regressions, but
this hasn't been tested on < Gen6 yet.

Cc: Chad Versace <chad at chad-versace.us>
Cc: Eric Anholt <eric at anholt.net>
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 src/mesa/drivers/dri/intel/intel_buffer_objects.c |  114 ++++++++++-----------
 src/mesa/drivers/dri/intel/intel_buffer_objects.h |    4 +-
 2 files changed, 55 insertions(+), 63 deletions(-)
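
For reference, the early checks added below enforce the GL_ARB_map_buffer_range
access rules: GL_MAP_FLUSH_EXPLICIT_BIT is only valid together with
GL_MAP_WRITE_BIT, and GL_MAP_READ_BIT may not be combined with either
invalidate bit. Here is a minimal standalone sketch of the same rules; the
helper name is made up for illustration and is not part of the patch, and the
GL_MAP_* bits are assumed to come from the usual GL headers:

#include <stdbool.h>
#include <GL/gl.h>
#include <GL/glext.h>

/* Hypothetical helper mirroring the early checks this patch adds to
 * intel_bufferobj_map_range().
 */
static bool
map_range_access_is_valid(GLbitfield access)
{
   /* Flushing a range explicitly only makes sense for a writable mapping. */
   if ((access & GL_MAP_FLUSH_EXPLICIT_BIT) && !(access & GL_MAP_WRITE_BIT))
      return false;

   /* Reading from a range whose contents were just invalidated is an error. */
   if ((access & GL_MAP_READ_BIT) &&
       (access & (GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)))
      return false;

   return true;
}

With these combinations rejected up front, the GL_MAP_INVALIDATE_RANGE_BIT
path further down can assume a write-only mapping and map the temporary BO
through the GTT unconditionally.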

diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
index 4df2d76..78e3468 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -291,6 +291,19 @@ intel_bufferobj_map_range(struct gl_context * ctx,
 
    assert(intel_obj);
 
+   /* Catch invalid access combinations early to make the real logic simpler */
+   if ((access & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_WRITE_BIT)) ==
+       GL_MAP_FLUSH_EXPLICIT_BIT)
+      goto error_out;
+
+   if ((access & (GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_READ_BIT)) ==
+       (GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_READ_BIT))
+      goto error_out;
+
+   if ((access & (GL_MAP_INVALIDATE_BUFFER_BIT | GL_MAP_READ_BIT)) ==
+       (GL_MAP_INVALIDATE_BUFFER_BIT | GL_MAP_READ_BIT))
+      goto error_out;
+
    /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
     * internally uses our functions directly.
     */
@@ -314,6 +327,9 @@ intel_bufferobj_map_range(struct gl_context * ctx,
       intel_obj->sys_buffer = NULL;
    }
 
+   if (intel_obj->buffer == NULL)
+      goto error_out;
+
    /* If the mapping is synchronized with other GL operations, flush
     * the batchbuffer so that GEM knows about the buffer access for later
     * syncing.
@@ -322,11 +338,6 @@ intel_bufferobj_map_range(struct gl_context * ctx,
        drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
       intel_flush(ctx);
 
-   if (intel_obj->buffer == NULL) {
-      obj->Pointer = NULL;
-      return NULL;
-   }
-
    /* If the user doesn't care about existing buffer contents and mapping
     * would cause us to block, then throw out the old buffer.
     */
@@ -344,36 +355,30 @@ intel_bufferobj_map_range(struct gl_context * ctx,
     */
    if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
        drm_intel_bo_busy(intel_obj->buffer)) {
-      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
-	 intel_obj->range_map_buffer = malloc(length);
-	 obj->Pointer = intel_obj->range_map_buffer;
-      } else {
-	 intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
-						      "range map",
-						      length, 64);
-	 if (!(access & GL_MAP_READ_BIT)) {
-	    drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
-	    intel_obj->mapped_gtt = GL_TRUE;
-	 } else {
-	    drm_intel_bo_map(intel_obj->range_map_bo,
-			     (access & GL_MAP_WRITE_BIT) != 0);
-	    intel_obj->mapped_gtt = GL_FALSE;
-	 }
-	 obj->Pointer = intel_obj->range_map_bo->virtual;
-      }
-      return obj->Pointer;
-   }
-
-   if (!(access & GL_MAP_READ_BIT)) {
+      intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
+						   "range map",
+						   length, 64);
+      drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+      intel_obj->mapped_gtt = true;
+      obj->Pointer = intel_obj->range_map_bo->virtual;
+   } else if (!(access & GL_MAP_READ_BIT)) { /* Write only */
       drm_intel_gem_bo_map_gtt(intel_obj->buffer);
-      intel_obj->mapped_gtt = GL_TRUE;
-   } else {
+      intel_obj->mapped_gtt = true;
+      obj->Pointer = intel_obj->buffer->virtual + offset;
+   } else { /* R/W or RO */
       drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
-      intel_obj->mapped_gtt = GL_FALSE;
+      intel_obj->mapped_gtt = false;
+      obj->Pointer = intel_obj->buffer->virtual + offset;
    }
 
-   obj->Pointer = intel_obj->buffer->virtual + offset;
+   if (!(access & GL_MAP_FLUSH_EXPLICIT_BIT))
+      intel_obj->needs_flush_at_unmap = true;
+
    return obj->Pointer;
+
+error_out:
+   obj->Pointer = NULL;
+   return NULL;
 }
 
 /* Ideally we'd use a BO to avoid taking up cache space for the temporary
@@ -388,27 +393,22 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
 {
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-   drm_intel_bo *temp_bo;
 
-   /* Unless we're in the range map using a temporary system buffer,
-    * there's no work to do.
-    */
-   if (intel_obj->range_map_buffer == NULL)
-      return;
+   assert(intel_obj->needs_flush_at_unmap == false);
 
    if (length == 0)
       return;
 
-   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
-
-   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
-
-   intel_emit_linear_blit(intel,
-			  intel_obj->buffer, obj->Offset + offset,
-			  temp_bo, 0,
-			  length);
+   if (!intel_obj->range_map_bo) {
+      intel_batchbuffer_emit_mi_flush(intel);
+   } else {
+      intel_emit_linear_blit(intel,
+			     intel_obj->buffer, obj->Offset + offset,
+			     intel_obj->range_map_bo, 0,
+			     length);
 
-   drm_intel_bo_unreference(temp_bo);
+      drm_intel_bo_unreference(intel_obj->range_map_bo);
+   }
 }
 
 
@@ -425,15 +425,6 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
    assert(obj->Pointer);
    if (intel_obj->sys_buffer != NULL) {
       /* always keep the mapping around. */
-   } else if (intel_obj->range_map_buffer != NULL) {
-      /* Since we've emitted some blits to buffers that will (likely) be used
-       * in rendering operations in other cache domains in this batch, emit a
-       * flush.  Once again, we wish for a domain tracker in libdrm to cover
-       * usage inside of a batchbuffer.
-       */
-      intel_batchbuffer_emit_mi_flush(intel);
-      free(intel_obj->range_map_buffer);
-      intel_obj->range_map_buffer = NULL;
    } else if (intel_obj->range_map_bo != NULL) {
       if (intel_obj->mapped_gtt) {
 	 drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
@@ -441,10 +432,13 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 	 drm_intel_bo_unmap(intel_obj->range_map_bo);
       }
 
-      intel_emit_linear_blit(intel,
-			     intel_obj->buffer, obj->Offset,
-			     intel_obj->range_map_bo, 0,
-			     obj->Length);
+      if (intel_obj->needs_flush_at_unmap) {
+	 intel_emit_linear_blit(intel,
+				intel_obj->buffer, obj->Offset,
+				intel_obj->range_map_bo, 0,
+				obj->Length);
+	 drm_intel_bo_unreference(intel_obj->range_map_bo);
+      }
 
       /* Since we've emitted some blits to buffers that will (likely) be used
        * in rendering operations in other cache domains in this batch, emit a
@@ -452,8 +446,6 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
        * usage inside of a batchbuffer.
        */
       intel_batchbuffer_emit_mi_flush(intel);
-
-      drm_intel_bo_unreference(intel_obj->range_map_bo);
       intel_obj->range_map_bo = NULL;
    } else if (intel_obj->buffer != NULL) {
       if (intel_obj->mapped_gtt) {
@@ -462,6 +454,8 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 	 drm_intel_bo_unmap(intel_obj->buffer);
       }
    }
+
+   intel_obj->needs_flush_at_unmap = false;
    obj->Pointer = NULL;
    obj->Offset = 0;
    obj->Length = 0;
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.h b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
index e74d061..24a1636 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
@@ -47,9 +47,7 @@ struct intel_buffer_object
    void *sys_buffer;
 
    drm_intel_bo *range_map_bo;
-   void *range_map_buffer;
-   unsigned int range_map_offset;
-   GLsizei range_map_size;
+   bool needs_flush_at_unmap;
 
    GLboolean mapped_gtt;
    GLboolean source;
-- 
1.7.6.4