[Mesa-dev] [PATCH] i965: Discard bo->map_count

Chris Wilson chris at chris-wilson.co.uk
Tue Jun 20 10:57:34 UTC 2017


Supposedly we were keeping a reference count of the users of a mapping
so that we could use valgrind to detect access to the map outside of
the section where the mapping is valid. However, we only incremented
the counter when first creating the cached mapping, yet decremented it
on every unmap, so bo->map_count never reflected the actual number of
users and the debugging code was completely useless.
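
For illustration only, a minimal self-contained sketch of that broken
accounting (fake_bo, fake_mmap() and fake_noaccess() are hypothetical
stand-ins, not the driver code):

   struct fake_bo {
      void *map_cpu;
      int map_count;
   };

   static char fake_pages[4096];
   static void *fake_mmap(struct fake_bo *bo)     { (void)bo; return fake_pages; }
   static void  fake_noaccess(struct fake_bo *bo) { (void)bo; }

   /* The counter is bumped only when the cached mapping is first created... */
   static void *map_cpu(struct fake_bo *bo)
   {
      if (!bo->map_cpu) {
         bo->map_cpu = fake_mmap(bo);
         bo->map_count++;          /* the only increment, ever */
      }
      return bo->map_cpu;          /* later callers just reuse the cache */
   }

   /* ...but every unmap decrements, so the count hits zero after the
    * first user's unmap -- even if others still hold the mapping -- and
    * all later unmaps see a zero count and do nothing. */
   static void unmap_cpu(struct fake_bo *bo)
   {
      if (bo->map_count <= 0)
         return;
      if (--bo->map_count == 0)
         fake_noaccess(bo);
   }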

In removing the unusable debugging code, we can also drop the bufmgr
mutex taken around mapping the buffer for the first time and replace it
with a single atomic cmpxchg that publishes the cached pointer once we
have retrieved the mmap.
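
For reference, the lock-free publish pattern used below looks roughly
like this when written with C11 atomics instead of Mesa's
p_atomic_cmpxchg() (fake_bo and create_mapping() are hypothetical
stand-ins; the real code maps the GEM object, not anonymous memory):

   #include <stdatomic.h>
   #include <stddef.h>
   #include <sys/mman.h>

   struct fake_bo {
      void *_Atomic map_cpu;
      size_t size;
   };

   /* Stands in for the DRM_IOCTL_I915_GEM_MMAP path; anonymous memory
    * keeps the sketch self-contained. */
   static void *create_mapping(struct fake_bo *bo)
   {
      void *map = mmap(NULL, bo->size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return map == MAP_FAILED ? NULL : map;
   }

   /* Every racing thread builds its own mapping, but only one
    * compare-and-swap from NULL succeeds; each loser unmaps its
    * duplicate and adopts the pointer the winner published. */
   static void *map_cpu(struct fake_bo *bo)
   {
      void *map = atomic_load(&bo->map_cpu);
      if (!map) {
         map = create_mapping(bo);
         if (!map)
            return NULL;

         void *expected = NULL;
         if (!atomic_compare_exchange_strong(&bo->map_cpu, &expected, map)) {
            munmap(map, bo->size);  /* somebody else won the race */
            map = expected;         /* expected was updated to their pointer */
         }
      }
      return map;
   }

In the patch itself, p_atomic_cmpxchg() returns the previous value, so a
non-NULL return means another thread already published a mapping and the
freshly created one is simply unmapped again.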

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Kenneth Graunke <kenneth at whitecape.org>
Cc: Matt Turner <mattst88 at gmail.com>
Cc: Jason Ekstrand <jason.ekstrand at intel.com>
---
 src/mesa/drivers/dri/i965/brw_bufmgr.c | 94 +++++++---------------------------
 src/mesa/drivers/dri/i965/brw_bufmgr.h |  3 +-
 2 files changed, 19 insertions(+), 78 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c
index 6cb91f9a23..2156a2087c 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
@@ -546,21 +546,6 @@ bo_free(struct brw_bo *bo)
    free(bo);
 }
 
-static void
-bo_mark_mmaps_incoherent(struct brw_bo *bo)
-{
-#if HAVE_VALGRIND
-   if (bo->map_cpu)
-      VALGRIND_MAKE_MEM_NOACCESS(bo->map_cpu, bo->size);
-
-   if (bo->map_wc)
-      VALGRIND_MAKE_MEM_NOACCESS(bo->map_wc, bo->size);
-
-   if (bo->map_gtt)
-      VALGRIND_MAKE_MEM_NOACCESS(bo->map_gtt, bo->size);
-#endif
-}
-
 /** Frees all cached buffers significantly older than @time. */
 static void
 cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
@@ -594,13 +579,6 @@ bo_unreference_final(struct brw_bo *bo, time_t time)
 
    DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
 
-   /* Clear any left-over mappings */
-   if (bo->map_count) {
-      DBG("bo freed with non-zero map-count %d\n", bo->map_count);
-      bo->map_count = 0;
-      bo_mark_mmaps_incoherent(bo);
-   }
-
    bucket = bucket_for_size(bufmgr, bo->size);
    /* Put the buffer into our internal cache for reuse if we can. */
    if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
@@ -672,28 +650,29 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
 {
    struct brw_bufmgr *bufmgr = bo->bufmgr;
 
-   pthread_mutex_lock(&bufmgr->lock);
-
    if (!bo->map_cpu) {
       struct drm_i915_gem_mmap mmap_arg;
+      void *map;
 
-      DBG("brw_bo_map_cpu: %d (%s), map_count=%d\n",
-          bo->gem_handle, bo->name, bo->map_count);
+      DBG("brw_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
 
       memclear(mmap_arg);
       mmap_arg.handle = bo->gem_handle;
       mmap_arg.size = bo->size;
       int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
       if (ret != 0) {
-         ret = -errno;
          DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
              __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
-         pthread_mutex_unlock(&bufmgr->lock);
          return NULL;
       }
-      bo->map_count++;
+
       VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
-      bo->map_cpu = (void *) (uintptr_t) mmap_arg.addr_ptr;
+      map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+
+      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
+         VG(VALGRIND_FREELIKE_BLOCK(map, 0));
+         drm_munmap(map, bo->size);
+      }
    }
    DBG("brw_bo_map_cpu: %d (%s) -> %p\n", bo->gem_handle, bo->name,
        bo->map_cpu);
@@ -703,10 +682,6 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
                  flags & MAP_WRITE ? I915_GEM_DOMAIN_CPU : 0);
    }
 
-   bo_mark_mmaps_incoherent(bo);
-   VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_cpu, bo->size));
-   pthread_mutex_unlock(&bufmgr->lock);
-
    return bo->map_cpu;
 }
 
@@ -715,14 +690,12 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
 {
    struct brw_bufmgr *bufmgr = bo->bufmgr;
 
-   pthread_mutex_lock(&bufmgr->lock);
-
    /* Get a mapping of the buffer if we haven't before. */
    if (bo->map_gtt == NULL) {
       struct drm_i915_gem_mmap_gtt mmap_arg;
+      void *map;
 
-      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
-          bo->gem_handle, bo->name, bo->map_count);
+      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
 
       memclear(mmap_arg);
       mmap_arg.handle = bo->gem_handle;
@@ -732,35 +705,29 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
       if (ret != 0) {
          DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
              __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
-         pthread_mutex_unlock(&bufmgr->lock);
          return NULL;
       }
 
       /* and mmap it */
-      bo->map_gtt = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
-                             MAP_SHARED, bufmgr->fd, mmap_arg.offset);
-      if (bo->map_gtt == MAP_FAILED) {
-         bo->map_gtt = NULL;
+      map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+                     MAP_SHARED, bufmgr->fd, mmap_arg.offset);
+      if (map == MAP_FAILED) {
          DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
              __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
-         pthread_mutex_unlock(&bufmgr->lock);
          return NULL;
       }
-      bo->map_count++;
+
+      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map))
+         drm_munmap(map, bo->size);
    }
 
-   DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
-       bo->map_gtt);
+   DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_gtt);
 
    if (!(flags & MAP_ASYNC) || !bufmgr->has_llc) {
       set_domain(brw, "GTT mapping", bo,
                  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
    }
 
-   bo_mark_mmaps_incoherent(bo);
-   VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_gtt, bo->size));
-   pthread_mutex_unlock(&bufmgr->lock);
-
    return bo->map_gtt;
 }
 
@@ -791,31 +758,6 @@ brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
 }
 
 int
-brw_bo_unmap(struct brw_bo *bo)
-{
-   struct brw_bufmgr *bufmgr = bo->bufmgr;
-   int ret = 0;
-
-   pthread_mutex_lock(&bufmgr->lock);
-
-   if (bo->map_count <= 0) {
-      DBG("attempted to unmap an unmapped bo\n");
-      pthread_mutex_unlock(&bufmgr->lock);
-      /* Preserve the old behaviour of just treating this as a
-       * no-op rather than reporting the error.
-       */
-      return 0;
-   }
-
-   if (--bo->map_count == 0) {
-      bo_mark_mmaps_incoherent(bo);
-   }
-   pthread_mutex_unlock(&bufmgr->lock);
-
-   return ret;
-}
-
-int
 brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
                uint64_t size, const void *data)
 {
diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h b/src/mesa/drivers/dri/i965/brw_bufmgr.h
index dd3a37040a..78659fc166 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.h
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h
@@ -119,7 +119,6 @@ struct brw_bo {
    void *map_gtt;
    /** WC CPU address for the buffer, saved across map/unmap cycles */
    void *map_wc;
-   int map_count;
 
    /** BO cache list */
    struct list_head head;
@@ -218,7 +217,7 @@ MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned
  * Reduces the refcount on the userspace mapping of the buffer
  * object.
  */
-int brw_bo_unmap(struct brw_bo *bo);
+static inline int brw_bo_unmap(struct brw_bo *bo) { return 0; }
 
 /** Write data into an object. */
 int brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
-- 
2.11.0


