[Mesa-dev] [PATCH] nicer-no-wrap-patch

Kevin Rogovin kevin.rogovin at intel.com
Mon Nov 11 23:24:35 PST 2013


feedback integrated

whitespace cleanup
---
 src/mesa/drivers/dri/i965/brw_context.h       | 85 +++++++++++++++++++++++++++
 src/mesa/drivers/dri/i965/brw_draw.c          |  4 +-
 src/mesa/drivers/dri/i965/brw_state_batch.c   | 15 +++++
 src/mesa/drivers/dri/i965/intel_batchbuffer.h |  6 ++
 4 files changed, 108 insertions(+), 2 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index 8b1cbb3..638e570 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -1028,8 +1028,37 @@ struct brw_context
    uint32_t reset_count;
 
    struct intel_batchbuffer batch;
+
+   /**
+    * Debug-only: flag to indicate that the batch buffer
+    * should not happen
+    * 
+    * While no_batch_wrap is true, the batch buffer must not
+    * be flushed. Use the functions begin_no_batch_wrap() and
+    * end_no_batch_wrap() to mark the start and end points
+    * that the batch buffer must not be flushed.
+    */
    bool no_batch_wrap;
 
+   /**
+    * Debug-only: expected maximum number of bytes added to
+    * batch buffer
+    *
+    * If \ref no_batch_wrap is true, specifies the number
+    * of bytes that are expected before \ref no_batch_wrap
+    * is set to false.
+    */
+   int max_expected_batch_size_during_no_batch_wrap;
+
+   /**
+    * Debug-only: current number of bytes added to batch buffer
+    *
+    * Records the number of bytes consumed so far
+    * in the batch buffer since the last time \ref
+    * no_batch_wrap was set to true.
+    */
+   int number_bytes_consumed_during_no_batch_wrap;
+
    struct {
       drm_intel_bo *bo;
       GLuint offset;
@@ -1450,6 +1479,62 @@ is_power_of_two(uint32_t value)
    return (value & (value - 1)) == 0;
 }
 
+/**
+ * Effective only in debug builds: begin a region in which the batch buffer must not be flushed
+ *
+ * Function to mark the start of a sequence of commands and state 
+ * added to the batch buffer that must not be partitioned by
+ * a flush. 
+ * Requirements: 
+ * - no_batch_wrap is false
+ *
+ * Output/side effects:
+ * - no_batch_wrap set to true
+ * - max_expected_batch_size_during_no_batch_wrap set
+ * - number_bytes_consumed_during_no_batch_wrap reset to 0
+ *
+ * \param brw context pointer
+ * \param pmax_expected_batch_size expected maximum number of bytes to
+ *                                 be consumed in the batch buffer
+ */ 
+static INLINE void
+begin_no_batch_wrap(struct brw_context *brw, int pmax_expected_batch_size)
+{
+#ifdef DEBUG
+   assert(!brw->no_batch_wrap);
+   brw->no_batch_wrap=true;
+   brw->max_expected_batch_size_during_no_batch_wrap=pmax_expected_batch_size;
+   brw->number_bytes_consumed_during_no_batch_wrap=0;
+#else
+   (void)brw;
+   (void)pmax_expected_batch_size;
+#endif
+}
+
+/**
+ * Effective only in debug builds: end a region in which the batch buffer must not be flushed
+ *
+ * Function to mark the end of a sequence of commands and state 
+ * added to the batch buffer that must not be partitioned by
+ * a flush. 
+ * Requirements:
+ * - no_batch_wrap is true
+ *
+ * Output/side effects:
+ * - no_batch_wrap set to false
+ */
+static INLINE void
+end_no_batch_wrap(struct brw_context *brw)
+{
+#ifdef DEBUG
+   assert(brw->no_batch_wrap);
+   brw->no_batch_wrap=false;
+#else
+   (void)brw;
+#endif
+}
+
+
 /*======================================================================
  * brw_vtbl.c
  */
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 7b33b76..12f0ffe 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -416,14 +416,14 @@ retry:
        * *_set_prim or intel_batchbuffer_flush(), which only impacts
        * brw->state.dirty.brw.
        */
+      begin_no_batch_wrap(brw, estimated_max_prim_size);
       if (brw->state.dirty.brw) {
-	 brw->no_batch_wrap = true;
 	 brw_upload_state(brw);
       }
 
       brw_emit_prim(brw, &prims[i], brw->primitive);
+      end_no_batch_wrap(brw);
 
-      brw->no_batch_wrap = false;
 
       if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
 	 if (!fail_next) {
diff --git a/src/mesa/drivers/dri/i965/brw_state_batch.c b/src/mesa/drivers/dri/i965/brw_state_batch.c
index c71d2f3..60d3c2d 100644
--- a/src/mesa/drivers/dri/i965/brw_state_batch.c
+++ b/src/mesa/drivers/dri/i965/brw_state_batch.c
@@ -127,6 +127,21 @@ brw_state_batch(struct brw_context *brw,
    assert(size < batch->bo->size);
    offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
 
+#ifdef DEBUG
+   if(brw->no_batch_wrap) {
+      /* although the request is for size bytes, the consumption 
+       * can be greater because of alignment, thus we use the 
+       * differences of the new-to-be offset with the old 
+       * offset value.
+       */
+      brw->number_bytes_consumed_during_no_batch_wrap += 
+         (batch->state_batch_offset-offset);
+
+      assert(brw->number_bytes_consumed_during_no_batch_wrap <= 
+             brw->max_expected_batch_size_during_no_batch_wrap);
+   }
+#endif
+
    /* If allocating from the top would wrap below the batchbuffer, or
     * if the batch's used space (plus the reserved pad) collides with our
     * space, then flush and try again.
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index d46f48e..e7650f9 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -111,7 +111,13 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
 
 #ifdef DEBUG
    assert(sz < BATCH_SZ - BATCH_RESERVED);
+   if(brw->no_batch_wrap) {
+      brw->number_bytes_consumed_during_no_batch_wrap += sz;
+      assert(brw->number_bytes_consumed_during_no_batch_wrap <= 
+             brw->max_expected_batch_size_during_no_batch_wrap);
+   }
 #endif
+
    if (intel_batchbuffer_space(brw) < sz)
       intel_batchbuffer_flush(brw);
 }
-- 
1.8.1.2



More information about the mesa-dev mailing list