Mesa (master): svga: Ensure DMA commands are serialized when the unsynchronized flag is unset.

Jose Fonseca jrfonseca at kemper.freedesktop.org
Tue Mar 15 15:47:41 UTC 2011


Module: Mesa
Branch: master
Commit: ef33c82bfde788733aabc5dcc9647416c31ae254
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=ef33c82bfde788733aabc5dcc9647416c31ae254

Author: José Fonseca <jfonseca at vmware.com>
Date:   Fri Mar 11 14:00:25 2011 +0000

svga: Ensure DMA commands are serialized when the unsynchronized flag is unset.

---

 src/gallium/drivers/svga/svga_resource_buffer.c    |  105 ++++++++++++++++++--
 .../drivers/svga/svga_resource_buffer_upload.c     |   23 +++--
 .../drivers/svga/svga_resource_buffer_upload.h     |    4 +
 3 files changed, 113 insertions(+), 19 deletions(-)

diff --git a/src/gallium/drivers/svga/svga_resource_buffer.c b/src/gallium/drivers/svga/svga_resource_buffer.c
index b0e6d96..ae854a8 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer.c
@@ -51,18 +51,105 @@ svga_buffer_needs_hw_storage(unsigned usage)
 }
 
 
+/**
+ * Map a range of a buffer.
+ *
+ * Unlike texture DMAs (which are written immediately to the command buffer and
+ * therefore inherently serialized with other context operations), for buffers
+ * we try to coalesce multiple range mappings (i.e., multiple calls to this
+ * function) into a single DMA command, for better efficiency in command
+ * processing.  This means we need to exercise extra care here to ensure that
+ * the end result is exactly the same as if one DMA was used for every mapped
+ * range.
+ */
 static void *
-svga_buffer_map_range( struct pipe_screen *screen,
+svga_buffer_map_range( struct pipe_context *pipe,
                        struct pipe_resource *buf,
                        unsigned offset,
 		       unsigned length,
                        unsigned usage )
 {
-   struct svga_screen *ss = svga_screen(screen); 
+   struct svga_context *svga = svga_context(pipe);
+   struct svga_screen *ss = svga_screen(pipe->screen);
    struct svga_winsys_screen *sws = ss->sws;
    struct svga_buffer *sbuf = svga_buffer( buf );
    void *map;
 
+   if (usage & PIPE_TRANSFER_WRITE) {
+      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+         /*
+          * Finish writing any pending DMA commands, and tell the host to discard
+          * the buffer contents on the next DMA operation.
+          */
+
+         if (sbuf->dma.pending) {
+            svga_buffer_upload_flush(svga, sbuf);
+
+            /*
+             * Instead of flushing the context command buffer, simply discard
+             * the current hwbuf, and start a new one.
+             */
+
+            svga_buffer_destroy_hw_storage(ss, sbuf);
+         }
+
+         sbuf->map.num_ranges = 0;
+         sbuf->dma.flags.discard = TRUE;
+      }
+
+      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+         if (!sbuf->map.num_ranges) {
+            /*
+             * No pending ranges to upload so far, so we can tell the host to
+             * not synchronize on the next DMA command.
+             */
+
+            sbuf->dma.flags.unsynchronized = TRUE;
+         }
+      } else {
+         /*
+          * Synchronizing, so finish writing any pending DMA command, and
+          * ensure the next DMA will be done in order.
+          */
+
+         if (sbuf->dma.pending) {
+            svga_buffer_upload_flush(svga, sbuf);
+
+            if (sbuf->hwbuf) {
+               /*
+                * We have a pending DMA upload from a hardware buffer, therefore
+                * we need to ensure that the host finishes processing that DMA
+                * command before the state tracker can start overwriting the
+                * hardware buffer.
+                *
+                * XXX: This could be avoided by tying the hardware buffer to
+                * the transfer (just as done with textures), which would allow
+                * overlapping DMA commands to be queued on the same context
+                * buffer. However, due to the likelihood of software vertex
+                * processing, it is more convenient to hold on to the hardware
+                * buffer, so the contents can be quickly accessed from the CPU
+                * without having to do a DMA download from the host.
+                */
+
+               if (usage & PIPE_TRANSFER_DONTBLOCK) {
+                  /*
+                   * Flushing the command buffer here will most likely cause
+                   * the map of the hwbuf below to block, so preemptively
+                   * return NULL here if DONTBLOCK is set to prevent unnecessary
+                   * command buffer flushes.
+                   */
+
+                  return NULL;
+               }
+
+               svga_context_flush(svga, NULL);
+            }
+         }
+
+         sbuf->dma.flags.unsynchronized = FALSE;
+      }
+   }
+
    if (!sbuf->swbuf && !sbuf->hwbuf) {
       if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
          /*
@@ -108,12 +195,12 @@ svga_buffer_map_range( struct pipe_screen *screen,
 
 
 static void 
-svga_buffer_flush_mapped_range( struct pipe_screen *screen,
+svga_buffer_flush_mapped_range( struct pipe_context *pipe,
                                 struct pipe_resource *buf,
                                 unsigned offset, unsigned length)
 {
    struct svga_buffer *sbuf = svga_buffer( buf );
-   struct svga_screen *ss = svga_screen(screen);
+   struct svga_screen *ss = svga_screen(pipe->screen);
    
    pipe_mutex_lock(ss->swc_mutex);
    assert(sbuf->map.writing);
@@ -125,10 +212,10 @@ svga_buffer_flush_mapped_range( struct pipe_screen *screen,
 }
 
 static void 
-svga_buffer_unmap( struct pipe_screen *screen,
+svga_buffer_unmap( struct pipe_context *pipe,
                    struct pipe_resource *buf)
 {
-   struct svga_screen *ss = svga_screen(screen); 
+   struct svga_screen *ss = svga_screen(pipe->screen);
    struct svga_winsys_screen *sws = ss->sws;
    struct svga_buffer *sbuf = svga_buffer( buf );
    
@@ -192,7 +279,7 @@ static void *
 svga_buffer_transfer_map( struct pipe_context *pipe,
 			  struct pipe_transfer *transfer )
 {
-   uint8_t *map = svga_buffer_map_range( pipe->screen,
+   uint8_t *map = svga_buffer_map_range( pipe,
 					 transfer->resource,
 					 transfer->box.x,
 					 transfer->box.width,
@@ -215,7 +302,7 @@ static void svga_buffer_transfer_flush_region( struct pipe_context *pipe,
 {
    assert(box->x + box->width <= transfer->box.width);
 
-   svga_buffer_flush_mapped_range(pipe->screen,
+   svga_buffer_flush_mapped_range(pipe,
 				  transfer->resource,
 				  transfer->box.x + box->x,
 				  box->width);
@@ -224,7 +311,7 @@ static void svga_buffer_transfer_flush_region( struct pipe_context *pipe,
 static void svga_buffer_transfer_unmap( struct pipe_context *pipe,
 			    struct pipe_transfer *transfer )
 {
-   svga_buffer_unmap(pipe->screen,
+   svga_buffer_unmap(pipe,
 		     transfer->resource);
 }
 
diff --git a/src/gallium/drivers/svga/svga_resource_buffer_upload.c b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
index b7d5460..0bfa8a1 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer_upload.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
@@ -252,7 +252,7 @@ svga_buffer_upload_command(struct svga_context *svga,
  * Patch up the upload DMA command reserved by svga_buffer_upload_command
  * with the final ranges.
  */
-static void
+void
 svga_buffer_upload_flush(struct svga_context *svga,
                          struct svga_buffer *sbuf)
 {
@@ -260,6 +260,10 @@ svga_buffer_upload_flush(struct svga_context *svga,
    unsigned i;
    struct pipe_resource *dummy;
 
+   if (!sbuf->dma.pending) {
+      return;
+   }
+
    assert(sbuf->handle); 
    assert(sbuf->hwbuf);
    assert(sbuf->map.num_ranges);
@@ -296,6 +300,8 @@ svga_buffer_upload_flush(struct svga_context *svga,
    sbuf->head.next = sbuf->head.prev = NULL; 
 #endif
    sbuf->dma.pending = FALSE;
+   sbuf->dma.flags.discard = FALSE;
+   sbuf->dma.flags.unsynchronized = FALSE;
 
    sbuf->dma.svga = NULL;
    sbuf->dma.boxes = NULL;
@@ -306,7 +312,6 @@ svga_buffer_upload_flush(struct svga_context *svga,
 }
 
 
-
 /**
  * Note a dirty range.
  *
@@ -337,12 +342,6 @@ svga_buffer_add_range(struct svga_buffer *sbuf,
 
    /*
     * Try to grow one of the ranges.
-    *
-    * Note that it is not this function task to care about overlapping ranges,
-    * as the GMR was already given so it is too late to do anything. Situations
-    * where overlapping ranges may pose a problem should be detected via
-    * pipe_context::is_resource_referenced and the context that refers to the
-    * buffer should be flushed.
     */
 
    for(i = 0; i < sbuf->map.num_ranges; ++i) {
@@ -357,6 +356,11 @@ svga_buffer_add_range(struct svga_buffer *sbuf,
       if (dist <= 0) {
          /*
           * Ranges are contiguous or overlapping -- extend this one and return.
+          *
+          * Note that it is not this function's task to prevent overlapping
+          * ranges, as the GMR was already given so it is too late to do
+          * anything.  If the ranges overlap here it must surely be because
+          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
           */
 
          sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
@@ -380,8 +384,7 @@ svga_buffer_add_range(struct svga_buffer *sbuf,
     * pending DMA upload and start clean.
     */
 
-   if(sbuf->dma.pending)
-      svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
+   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
 
    assert(!sbuf->dma.pending);
    assert(!sbuf->dma.svga);
diff --git a/src/gallium/drivers/svga/svga_resource_buffer_upload.h b/src/gallium/drivers/svga/svga_resource_buffer_upload.h
index 11df306..13d8f3e 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer_upload.h
+++ b/src/gallium/drivers/svga/svga_resource_buffer_upload.h
@@ -28,6 +28,10 @@
 
 
 void
+svga_buffer_upload_flush(struct svga_context *svga,
+                         struct svga_buffer *sbuf);
+
+void
 svga_buffer_add_range(struct svga_buffer *sbuf,
                       unsigned start,
                       unsigned end);
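
The flag handling this patch adds to svga_buffer_map_range() boils down to a small
state machine over the buffer's pending-DMA state. The sketch below is a simplified
model of that logic, not driver code: the struct and the XFER_* constants are
stand-ins for the real svga_buffer fields and PIPE_TRANSFER_* flags, and the actual
flush/destroy calls are reduced to comments.

/*
 * Simplified model of the write-mapping logic above (not driver code).
 */

#include <stdbool.h>

enum {
   XFER_WRITE                  = 1 << 0,  /* ~ PIPE_TRANSFER_WRITE */
   XFER_DISCARD_WHOLE_RESOURCE = 1 << 1,  /* ~ PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE */
   XFER_UNSYNCHRONIZED         = 1 << 2,  /* ~ PIPE_TRANSFER_UNSYNCHRONIZED */
   XFER_DONTBLOCK              = 1 << 3   /* ~ PIPE_TRANSFER_DONTBLOCK */
};

struct model_buffer {
   unsigned num_ranges;          /* dirty ranges not yet uploaded */
   bool dma_pending;             /* a DMA command is reserved in the command buffer */
   bool has_hwbuf;               /* a hardware buffer currently backs the resource */
   bool flag_discard;            /* host may discard old contents on the next DMA */
   bool flag_unsynchronized;     /* next DMA need not be ordered with prior commands */
};

/*
 * Returns false when the map must fail early, mirroring the "return NULL"
 * taken under DONTBLOCK in the patch.
 */
static bool
prepare_write_map(struct model_buffer *buf, unsigned usage)
{
   if (!(usage & XFER_WRITE))
      return true;

   if (usage & XFER_DISCARD_WHOLE_RESOURCE) {
      if (buf->dma_pending) {
         buf->dma_pending = false;     /* svga_buffer_upload_flush() */
         buf->has_hwbuf = false;       /* svga_buffer_destroy_hw_storage() */
      }
      buf->num_ranges = 0;
      buf->flag_discard = true;
   }

   if (usage & XFER_UNSYNCHRONIZED) {
      /* Skipping host synchronization is only safe with nothing queued. */
      if (buf->num_ranges == 0)
         buf->flag_unsynchronized = true;
   } else {
      if (buf->dma_pending) {
         buf->dma_pending = false;     /* svga_buffer_upload_flush() */
         if (buf->has_hwbuf) {
            if (usage & XFER_DONTBLOCK)
               return false;           /* a context flush here would block the caller */
            /* svga_context_flush() would go here, so the host finishes the
             * pending DMA before the CPU overwrites the hardware buffer. */
         }
      }
      buf->flag_unsynchronized = false;
   }

   return true;
}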

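Similarly, the dirty-range bookkeeping in svga_buffer_add_range() that the new
comment refers to amounts to interval coalescing: a new range is merged into an
existing one whenever the two touch or overlap, and only a full range table forces
the pending DMA to be flushed. The following is a self-contained sketch of that
idea; MAX_RANGES and the helper names are illustrative, not the driver's own.

#include <stdbool.h>

#define MAX_RANGES 32            /* illustrative; not the driver's actual limit */

struct range { unsigned start, end; };   /* half-open [start, end) */

struct range_set {
   struct range ranges[MAX_RANGES];
   unsigned num_ranges;
};

static unsigned umin(unsigned a, unsigned b) { return a < b ? a : b; }
static unsigned umax(unsigned a, unsigned b) { return a > b ? a : b; }

/*
 * Returns true if the range was merged or appended; false means the table
 * is full and the caller must flush the pending DMA and start clean.
 */
static bool
add_range(struct range_set *set, unsigned start, unsigned end)
{
   unsigned i;

   for (i = 0; i < set->num_ranges; ++i) {
      /*
       * Gap between the new range and range i; <= 0 means the two are
       * contiguous or overlapping, so range i can simply be extended.
       */
      int dist = (int)umax(set->ranges[i].start, start) -
                 (int)umin(set->ranges[i].end, end);

      if (dist <= 0) {
         set->ranges[i].start = umin(set->ranges[i].start, start);
         set->ranges[i].end   = umax(set->ranges[i].end, end);
         return true;
      }
   }

   if (set->num_ranges < MAX_RANGES) {
      set->ranges[set->num_ranges].start = start;
      set->ranges[set->num_ranges].end   = end;
      set->num_ranges++;
      return true;
   }

   return false;
}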


