[Mesa-dev] [PATCH v2 1/2] i965: Cleanup MapBufferRange

Ben Widawsky ben at bwidawsk.net
Fri Sep 30 20:59:39 PDT 2011


Clean the code up, and always use a BO when creating a new buffer. I've
not seen any regressions but haven't yet tried this on < Gen6.

Cc: Chad Versace <chad at chad-versace.us>
Cc: Eric Anholt <eric at anholt.net>
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 src/mesa/drivers/dri/intel/intel_buffer_objects.c |  104 +++++++++------------
 src/mesa/drivers/dri/intel/intel_buffer_objects.h |    4 +-
 2 files changed, 44 insertions(+), 64 deletions(-)

diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
index c52afbd..fdbffe3 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -314,6 +314,9 @@ intel_bufferobj_map_range(struct gl_context * ctx,
       intel_obj->sys_buffer = NULL;
    }
 
+   if (intel_obj->buffer == NULL)
+      goto error_out;
+
    /* If the mapping is synchronized with other GL operations, flush
     * the batchbuffer so that GEM knows about the buffer access for later
     * syncing.
@@ -322,11 +325,6 @@ intel_bufferobj_map_range(struct gl_context * ctx,
        drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
       intel_flush(ctx);
 
-   if (intel_obj->buffer == NULL) {
-      obj->Pointer = NULL;
-      return NULL;
-   }
-
    /* If the user doesn't care about existing buffer contents and mapping
     * would cause us to block, then throw out the old buffer.
     */
@@ -344,36 +342,30 @@ intel_bufferobj_map_range(struct gl_context * ctx,
     */
    if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
        drm_intel_bo_busy(intel_obj->buffer)) {
-      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
-	 intel_obj->range_map_buffer = malloc(length);
-	 obj->Pointer = intel_obj->range_map_buffer;
-      } else {
-	 intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
-						      "range map",
-						      length, 64);
-	 if (!(access & GL_MAP_READ_BIT)) {
-	    drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
-	    intel_obj->mapped_gtt = GL_TRUE;
-	 } else {
-	    drm_intel_bo_map(intel_obj->range_map_bo,
-			     (access & GL_MAP_WRITE_BIT) != 0);
-	    intel_obj->mapped_gtt = GL_FALSE;
-	 }
-	 obj->Pointer = intel_obj->range_map_bo->virtual;
-      }
-      return obj->Pointer;
-   }
-
-   if (!(access & GL_MAP_READ_BIT)) {
+      intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
+						   "range map",
+						   length, 64);
+      drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+      intel_obj->mapped_gtt = true;
+      obj->Pointer = intel_obj->range_map_bo->virtual;
+   } else if (!(access & GL_MAP_READ_BIT)) { /* Write only */
       drm_intel_gem_bo_map_gtt(intel_obj->buffer);
-      intel_obj->mapped_gtt = GL_TRUE;
-   } else {
+      intel_obj->mapped_gtt = true;
+      obj->Pointer = intel_obj->buffer->virtual + offset;
+   } else { /* R/W or RO */
       drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
-      intel_obj->mapped_gtt = GL_FALSE;
+      intel_obj->mapped_gtt = false;
+      obj->Pointer = intel_obj->buffer->virtual + offset;
    }
 
-   obj->Pointer = intel_obj->buffer->virtual + offset;
+   if (!(access & GL_MAP_FLUSH_EXPLICIT_BIT))
+      intel_obj->needs_flush_at_unmap = true;
+
    return obj->Pointer;
+
+error_out:
+   obj->Pointer = NULL;
+   return NULL;
 }
 
 /* Ideally we'd use a BO to avoid taking up cache space for the temporary
@@ -388,27 +380,23 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
 {
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-   drm_intel_bo *temp_bo;
 
-   /* Unless we're in the range map using a temporary system buffer,
-    * there's no work to do.
-    */
-   if (intel_obj->range_map_buffer == NULL)
-      return;
+   if (intel_obj->needs_flush_at_unmap)
+      return;
 
    if (length == 0)
       return;
 
-   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
-
-   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
-
-   intel_emit_linear_blit(intel,
-			  intel_obj->buffer, obj->Offset + offset,
-			  temp_bo, 0,
-			  length);
+   if (!intel_obj->range_map_bo) {
+      intel_batchbuffer_emit_mi_flush(intel);
+   } else {
+      intel_emit_linear_blit(intel,
+			     intel_obj->buffer, obj->Offset + offset,
+			     intel_obj->range_map_bo, 0,
+			     length);
 
-   drm_intel_bo_unreference(temp_bo);
+      drm_intel_bo_unreference(intel_obj->range_map_bo);
+   }
 }
 
 
@@ -425,15 +413,6 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
    assert(obj->Pointer);
    if (intel_obj->sys_buffer != NULL) {
       /* always keep the mapping around. */
-   } else if (intel_obj->range_map_buffer != NULL) {
-      /* Since we've emitted some blits to buffers that will (likely) be used
-       * in rendering operations in other cache domains in this batch, emit a
-       * flush.  Once again, we wish for a domain tracker in libdrm to cover
-       * usage inside of a batchbuffer.
-       */
-      intel_batchbuffer_emit_mi_flush(intel);
-      free(intel_obj->range_map_buffer);
-      intel_obj->range_map_buffer = NULL;
    } else if (intel_obj->range_map_bo != NULL) {
       if (intel_obj->mapped_gtt) {
 	 drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
@@ -441,19 +420,20 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 	 drm_intel_bo_unmap(intel_obj->range_map_bo);
       }
 
-      intel_emit_linear_blit(intel,
-			     intel_obj->buffer, obj->Offset,
-			     intel_obj->range_map_bo, 0,
-			     obj->Length);
+      if (intel_obj->needs_flush_at_unmap) {
+	 intel_emit_linear_blit(intel,
+				intel_obj->buffer, obj->Offset,
+				intel_obj->range_map_bo, 0,
+				obj->Length);
+	 drm_intel_bo_unreference(intel_obj->range_map_bo);
+	 intel_batchbuffer_emit_mi_flush(intel);
+      }
 
       /* Since we've emitted some blits to buffers that will (likely) be used
        * in rendering operations in other cache domains in this batch, emit a
        * flush.  Once again, we wish for a domain tracker in libdrm to cover
        * usage inside of a batchbuffer.
        */
-      intel_batchbuffer_emit_mi_flush(intel);
-
-      drm_intel_bo_unreference(intel_obj->range_map_bo);
       intel_obj->range_map_bo = NULL;
    } else if (intel_obj->buffer != NULL) {
       if (intel_obj->mapped_gtt) {
@@ -462,6 +442,8 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 	 drm_intel_bo_unmap(intel_obj->buffer);
       }
    }
+
+   intel_obj->needs_flush_at_unmap = false;
    obj->Pointer = NULL;
    obj->Offset = 0;
    obj->Length = 0;
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.h b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
index e74d061..24a1636 100644
--- a/src/mesa/drivers/dri/intel/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
@@ -47,9 +47,7 @@ struct intel_buffer_object
    void *sys_buffer;
 
    drm_intel_bo *range_map_bo;
-   void *range_map_buffer;
-   unsigned int range_map_offset;
-   GLsizei range_map_size;
+   bool needs_flush_at_unmap;
 
    GLboolean mapped_gtt;
    GLboolean source;
-- 
1.7.6.4



More information about the mesa-dev mailing list