[Mesa-dev] [PATCH 6/7] gallium/util: replace pipe_mutex_unlock() with mtx_unlock()

Timothy Arceri <tarceri at itsqueeze.com>
Sun Mar 5 01:32:06 UTC 2017


pipe_mutex_unlock() was made unnecessary by commit fd33a6bcd7f12.

Replaced using:
find ./src -type f -exec sed -i -- \
's:pipe_mutex_unlock(\([^)]*\)):mtx_unlock(\&\1):g' {} \;
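For example (taken from the pb_buffer_fenced.c hunk below), the script rewrites
a call such as:

   pipe_mutex_unlock(fenced_mgr->mutex);

into:

   mtx_unlock(&fenced_mgr->mutex);
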
---
 src/gallium/auxiliary/hud/hud_cpufreq.c            |   6 +-
 src/gallium/auxiliary/hud/hud_diskstat.c           |   8 +-
 src/gallium/auxiliary/hud/hud_nic.c                |   6 +-
 src/gallium/auxiliary/hud/hud_sensors_temp.c       |   6 +-
 src/gallium/auxiliary/os/os_thread.h               |   9 +-
 .../auxiliary/pipebuffer/pb_buffer_fenced.c        |  24 +-
 src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c |  14 +-
 src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c    |  10 +-
 src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c  |  12 +-
 src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c  |   6 +-
 src/gallium/auxiliary/pipebuffer/pb_cache.c        |  10 +-
 src/gallium/auxiliary/pipebuffer/pb_slab.c         |   8 +-
 src/gallium/auxiliary/rtasm/rtasm_execmem.c        |   4 +-
 src/gallium/auxiliary/util/u_debug_flush.c         |  12 +-
 src/gallium/auxiliary/util/u_debug_memory.c        |   6 +-
 src/gallium/auxiliary/util/u_debug_refcnt.c        |   4 +-
 src/gallium/auxiliary/util/u_debug_symbol.c        |   2 +-
 src/gallium/auxiliary/util/u_queue.c               |  20 +-
 src/gallium/auxiliary/util/u_range.h               |   2 +-
 src/gallium/auxiliary/util/u_ringbuffer.c          |   4 +-
 src/gallium/drivers/ddebug/dd_context.c            |   2 +-
 src/gallium/drivers/ddebug/dd_draw.c               |   6 +-
 src/gallium/drivers/freedreno/freedreno_batch.c    |   8 +-
 .../drivers/freedreno/freedreno_batch_cache.c      |  14 +-
 src/gallium/drivers/freedreno/freedreno_context.h  |   2 +-
 src/gallium/drivers/freedreno/freedreno_draw.c     |   4 +-
 src/gallium/drivers/freedreno/freedreno_resource.c |   2 +-
 src/gallium/drivers/llvmpipe/lp_fence.c            |   4 +-
 src/gallium/drivers/llvmpipe/lp_scene.c            |   2 +-
 src/gallium/drivers/llvmpipe/lp_setup.c            |   2 +-
 src/gallium/drivers/nouveau/nv50/nv50_surface.c    |   2 +-
 src/gallium/drivers/nouveau/nvc0/nvc0_surface.c    |   2 +-
 src/gallium/drivers/r300/r300_blit.c               |   2 +-
 src/gallium/drivers/r300/r300_texture.c            |   2 +-
 src/gallium/drivers/radeon/r600_gpu_load.c         |   2 +-
 src/gallium/drivers/radeon/r600_pipe_common.c      |   2 +-
 src/gallium/drivers/radeon/r600_texture.c          |   4 +-
 src/gallium/drivers/radeonsi/si_shader.c           |   4 +-
 src/gallium/drivers/radeonsi/si_state_shaders.c    |  22 +-
 src/gallium/drivers/rbug/rbug_context.c            | 128 +++---
 src/gallium/drivers/rbug/rbug_core.c               |  94 ++---
 src/gallium/drivers/rbug/rbug_screen.h             |   4 +-
 src/gallium/drivers/svga/svga_resource_buffer.c    |   4 +-
 .../drivers/svga/svga_resource_buffer_upload.c     |   4 +-
 src/gallium/drivers/svga/svga_sampler_view.c       |   6 +-
 src/gallium/drivers/svga/svga_screen_cache.c       |  10 +-
 src/gallium/drivers/trace/tr_dump.c                |  10 +-
 src/gallium/drivers/vc4/vc4_bufmgr.c               |  12 +-
 src/gallium/drivers/vc4/vc4_bufmgr.h               |   2 +-
 src/gallium/state_trackers/dri/dri2.c              |   4 +-
 src/gallium/state_trackers/glx/xlib/xm_api.c       |   8 +-
 src/gallium/state_trackers/nine/nine_lock.c        | 440 ++++++++++-----------
 src/gallium/state_trackers/nine/nine_queue.c       |   8 +-
 src/gallium/state_trackers/nine/nine_state.c       |  16 +-
 src/gallium/state_trackers/omx/entrypoint.c        |   6 +-
 src/gallium/state_trackers/va/buffer.c             |  30 +-
 src/gallium/state_trackers/va/config.c             |   8 +-
 src/gallium/state_trackers/va/context.c            |   8 +-
 src/gallium/state_trackers/va/image.c              |  40 +-
 src/gallium/state_trackers/va/picture.c            |  14 +-
 src/gallium/state_trackers/va/subpicture.c         |  30 +-
 src/gallium/state_trackers/va/surface.c            |  28 +-
 src/gallium/state_trackers/vdpau/bitmap.c          |   8 +-
 src/gallium/state_trackers/vdpau/decode.c          |  18 +-
 src/gallium/state_trackers/vdpau/htab.c            |  10 +-
 src/gallium/state_trackers/vdpau/mixer.c           |  26 +-
 src/gallium/state_trackers/vdpau/output.c          |  36 +-
 src/gallium/state_trackers/vdpau/presentation.c    |  22 +-
 src/gallium/state_trackers/vdpau/query.c           |  26 +-
 src/gallium/state_trackers/vdpau/surface.c         |  30 +-
 .../targets/haiku-softpipe/GalliumContext.cpp      |   2 +-
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c          |  10 +-
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c          |   6 +-
 src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c      |  14 +-
 .../winsys/etnaviv/drm/etnaviv_drm_winsys.c        |   4 +-
 .../winsys/freedreno/drm/freedreno_drm_winsys.c    |   4 +-
 .../winsys/nouveau/drm/nouveau_drm_winsys.c        |  10 +-
 src/gallium/winsys/radeon/drm/radeon_drm_bo.c      |  54 +--
 src/gallium/winsys/radeon/drm/radeon_drm_cs.c      |   2 +-
 src/gallium/winsys/radeon/drm/radeon_drm_winsys.c  |  22 +-
 .../winsys/svga/drm/pb_buffer_simple_fenced.c      |  24 +-
 src/gallium/winsys/svga/drm/vmw_context.c          |   2 +-
 src/gallium/winsys/svga/drm/vmw_fence.c            |   8 +-
 src/gallium/winsys/svga/drm/vmw_surface.c          |   4 +-
 src/gallium/winsys/virgl/drm/virgl_drm_winsys.c    |  22 +-
 .../winsys/virgl/vtest/virgl_vtest_winsys.c        |   8 +-
 86 files changed, 772 insertions(+), 775 deletions(-)

diff --git a/src/gallium/auxiliary/hud/hud_cpufreq.c b/src/gallium/auxiliary/hud/hud_cpufreq.c
index bc77e5a..abb930d 100644
--- a/src/gallium/auxiliary/hud/hud_cpufreq.c
+++ b/src/gallium/auxiliary/hud/hud_cpufreq.c
@@ -184,31 +184,31 @@ int
 hud_get_num_cpufreq(bool displayhelp)
 {
    struct dirent *dp;
    struct stat stat_buf;
    char fn[128];
    int cpu_index;
 
    /* Return the number of CPU metrics we support. */
    mtx_lock(&gcpufreq_mutex);
    if (gcpufreq_count) {
-      pipe_mutex_unlock(gcpufreq_mutex);
+      mtx_unlock(&gcpufreq_mutex);
       return gcpufreq_count;
    }
 
    /* Scan /sys/devices.../cpu, for every object type we support, create
     * and persist an object to represent its different metrics.
     */
    list_inithead(&gcpufreq_list);
    DIR *dir = opendir("/sys/devices/system/cpu");
    if (!dir) {
-      pipe_mutex_unlock(gcpufreq_mutex);
+      mtx_unlock(&gcpufreq_mutex);
       return 0;
    }
 
    while ((dp = readdir(dir)) != NULL) {
 
       /* Avoid 'lo' and '..' and '.' */
       if (strlen(dp->d_name) <= 2)
          continue;
 
       if (sscanf(dp->d_name, "cpu%d\n", &cpu_index) != 1)
@@ -240,15 +240,15 @@ hud_get_num_cpufreq(bool displayhelp)
          char line[128];
          snprintf(line, sizeof(line), "    cpufreq-%s-%s",
                  cfi->mode == CPUFREQ_MINIMUM ? "min" :
                  cfi->mode == CPUFREQ_CURRENT ? "cur" :
                  cfi->mode == CPUFREQ_MAXIMUM ? "max" : "undefined", cfi->name);
 
          puts(line);
       }
    }
 
-   pipe_mutex_unlock(gcpufreq_mutex);
+   mtx_unlock(&gcpufreq_mutex);
    return gcpufreq_count;
 }
 
 #endif /* HAVE_GALLIUM_EXTRA_HUD */
diff --git a/src/gallium/auxiliary/hud/hud_diskstat.c b/src/gallium/auxiliary/hud/hud_diskstat.c
index 940758a..df86abd 100644
--- a/src/gallium/auxiliary/hud/hud_diskstat.c
+++ b/src/gallium/auxiliary/hud/hud_diskstat.c
@@ -241,31 +241,31 @@ add_object(const char *basename, const char *name, int objmode)
 int
 hud_get_num_disks(bool displayhelp)
 {
    struct dirent *dp;
    struct stat stat_buf;
    char name[64];
 
    /* Return the number of block devices and partitions. */
    mtx_lock(&gdiskstat_mutex);
    if (gdiskstat_count) {
-      pipe_mutex_unlock(gdiskstat_mutex);
+      mtx_unlock(&gdiskstat_mutex);
       return gdiskstat_count;
    }
 
    /* Scan /sys/block, for every object type we support, create and
     * persist an object to represent its different statistics.
     */
    list_inithead(&gdiskstat_list);
    DIR *dir = opendir("/sys/block/");
    if (!dir) {
-      pipe_mutex_unlock(gdiskstat_mutex);
+      mtx_unlock(&gdiskstat_mutex);
       return 0;
    }
 
    while ((dp = readdir(dir)) != NULL) {
 
       /* Avoid 'lo' and '..' and '.' */
       if (strlen(dp->d_name) <= 2)
          continue;
 
       char basename[256];
@@ -278,21 +278,21 @@ hud_get_num_disks(bool displayhelp)
          continue;              /* Not a regular file */
 
       /* Add a physical block device with R/W stats */
       add_object(basename, dp->d_name, DISKSTAT_RD);
       add_object(basename, dp->d_name, DISKSTAT_WR);
 
       /* Add any partitions */
       struct dirent *dpart;
       DIR *pdir = opendir(basename);
       if (!pdir) {
-         pipe_mutex_unlock(gdiskstat_mutex);
+         mtx_unlock(&gdiskstat_mutex);
          closedir(dir);
          return 0;
       }
 
       while ((dpart = readdir(pdir)) != NULL) {
          /* Avoid 'lo' and '..' and '.' */
          if (strlen(dpart->d_name) <= 2)
             continue;
 
          char p[64];
@@ -313,16 +313,16 @@ hud_get_num_disks(bool displayhelp)
    if (displayhelp) {
       list_for_each_entry(struct diskstat_info, dsi, &gdiskstat_list, list) {
          char line[32];
          snprintf(line, sizeof(line), "    diskstat-%s-%s",
                  dsi->mode == DISKSTAT_RD ? "rd" :
                  dsi->mode == DISKSTAT_WR ? "wr" : "undefined", dsi->name);
 
          puts(line);
       }
    }
-   pipe_mutex_unlock(gdiskstat_mutex);
+   mtx_unlock(&gdiskstat_mutex);
 
    return gdiskstat_count;
 }
 
 #endif /* HAVE_GALLIUM_EXTRA_HUD */
diff --git a/src/gallium/auxiliary/hud/hud_nic.c b/src/gallium/auxiliary/hud/hud_nic.c
index ab74436..835f92e 100644
--- a/src/gallium/auxiliary/hud/hud_nic.c
+++ b/src/gallium/auxiliary/hud/hud_nic.c
@@ -326,31 +326,31 @@ int
 hud_get_num_nics(bool displayhelp)
 {
    struct dirent *dp;
    struct stat stat_buf;
    struct nic_info *nic;
    char name[64];
 
    /* Return the number if network interfaces. */
    mtx_lock(&gnic_mutex);
    if (gnic_count) {
-      pipe_mutex_unlock(gnic_mutex);
+      mtx_unlock(&gnic_mutex);
       return gnic_count;
    }
 
    /* Scan /sys/block, for every object type we support, create and
     * persist an object to represent its different statistics.
     */
    list_inithead(&gnic_list);
    DIR *dir = opendir("/sys/class/net/");
    if (!dir) {
-      pipe_mutex_unlock(gnic_mutex);
+      mtx_unlock(&gnic_mutex);
       return 0;
    }
 
    while ((dp = readdir(dir)) != NULL) {
 
       /* Avoid 'lo' and '..' and '.' */
       if (strlen(dp->d_name) <= 2)
          continue;
 
       char basename[256];
@@ -412,15 +412,15 @@ hud_get_num_nics(bool displayhelp)
       char line[64];
       snprintf(line, sizeof(line), "    nic-%s-%s",
               nic->mode == NIC_DIRECTION_RX ? "rx" :
               nic->mode == NIC_DIRECTION_TX ? "tx" :
               nic->mode == NIC_RSSI_DBM ? "rssi" : "undefined", nic->name);
 
       puts(line);
 
    }
 
-   pipe_mutex_unlock(gnic_mutex);
+   mtx_unlock(&gnic_mutex);
    return gnic_count;
 }
 
 #endif /* HAVE_GALLIUM_EXTRA_HUD */
diff --git a/src/gallium/auxiliary/hud/hud_sensors_temp.c b/src/gallium/auxiliary/hud/hud_sensors_temp.c
index 06d2590..29ee257 100644
--- a/src/gallium/auxiliary/hud/hud_sensors_temp.c
+++ b/src/gallium/auxiliary/hud/hud_sensors_temp.c
@@ -319,27 +319,27 @@ build_sensor_list(void)
   * \param  displayhelp  true if the list of detected devices should be
                          displayed on the console.
   * \return  number of detected lmsensor devices.
   */
 int
 hud_get_num_sensors(bool displayhelp)
 {
    /* Return the number of sensors detected. */
    mtx_lock(&gsensor_temp_mutex);
    if (gsensors_temp_count) {
-      pipe_mutex_unlock(gsensor_temp_mutex);
+      mtx_unlock(&gsensor_temp_mutex);
       return gsensors_temp_count;
    }
 
    int ret = sensors_init(NULL);
    if (ret) {
-      pipe_mutex_unlock(gsensor_temp_mutex);
+      mtx_unlock(&gsensor_temp_mutex);
       return 0;
    }
 
    list_inithead(&gsensors_temp_list);
 
    /* Scan /sys/block, for every object type we support, create and
     * persist an object to represent its different statistics.
     */
    build_sensor_list();
 
@@ -361,15 +361,15 @@ hud_get_num_sensors(bool displayhelp)
             break;
          case SENSORS_POWER_CURRENT:
             snprintf(line, sizeof(line), "    sensors_pow_cu-%s", sti->name);
             break;
          }
 
          puts(line);
       }
    }
 
-   pipe_mutex_unlock(gsensor_temp_mutex);
+   mtx_unlock(&gsensor_temp_mutex);
    return gsensors_temp_count;
 }
 
 #endif /* HAVE_LIBSENSORS */
diff --git a/src/gallium/auxiliary/os/os_thread.h b/src/gallium/auxiliary/os/os_thread.h
index 5b75965..a429f4e 100644
--- a/src/gallium/auxiliary/os/os_thread.h
+++ b/src/gallium/auxiliary/os/os_thread.h
@@ -101,23 +101,20 @@ static inline int pipe_thread_is_self( pipe_thread thread )
 {
 #if defined(HAVE_PTHREAD)
 #  if defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) && \
       (__GLIBC__ >= 3 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
    return pthread_equal(pthread_self(), thread);
 #  endif
 #endif
    return 0;
 }
 
-#define pipe_mutex_unlock(mutex) \
-   (void) mtx_unlock(&(mutex))
-
 #define pipe_mutex_assert_locked(mutex) \
    __pipe_mutex_assert_locked(&(mutex))
 
 static inline void
 __pipe_mutex_assert_locked(mtx_t *mutex)
 {
 #ifdef DEBUG
    /* NOTE: this would not work for recursive mutexes, but
     * mtx_t doesn't support those
     */
@@ -195,21 +192,21 @@ static inline void pipe_barrier_wait(pipe_barrier *barrier)
 
       do {
          cnd_wait(&barrier->condvar, &barrier->mutex);
       } while (sequence == barrier->sequence);
    } else {
       barrier->waiters = 0;
       barrier->sequence++;
       cnd_broadcast(&barrier->condvar);
    }
 
-   pipe_mutex_unlock(barrier->mutex);
+   mtx_unlock(&barrier->mutex);
 }
 
 
 #endif
 
 
 /*
  * Semaphores
  */
 
@@ -236,33 +233,33 @@ pipe_semaphore_destroy(pipe_semaphore *sema)
    cnd_destroy(&sema->cond);
 }
 
 /** Signal/increment semaphore counter */
 static inline void
 pipe_semaphore_signal(pipe_semaphore *sema)
 {
    mtx_lock(&sema->mutex);
    sema->counter++;
    cnd_signal(&sema->cond);
-   pipe_mutex_unlock(sema->mutex);
+   mtx_unlock(&sema->mutex);
 }
 
 /** Wait for semaphore counter to be greater than zero */
 static inline void
 pipe_semaphore_wait(pipe_semaphore *sema)
 {
    mtx_lock(&sema->mutex);
    while (sema->counter <= 0) {
       cnd_wait(&sema->cond, &sema->mutex);
    }
    sema->counter--;
-   pipe_mutex_unlock(sema->mutex);
+   mtx_unlock(&sema->mutex);
 }
 
 
 
 /*
  * Thread-specific data.
  */
 
 typedef struct {
    tss_t key;
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index b8b4483..7421741 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -341,21 +341,21 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
    assert(pipe_is_referenced(&fenced_buf->base.reference));
    assert(fenced_buf->fence);
 
    if (fenced_buf->fence) {
       struct pipe_fence_handle *fence = NULL;
       int finished;
       boolean proceed;
 
       ops->fence_reference(ops, &fence, fenced_buf->fence);
 
-      pipe_mutex_unlock(fenced_mgr->mutex);
+      mtx_unlock(&fenced_mgr->mutex);
 
       finished = ops->fence_finish(ops, fenced_buf->fence, 0);
 
       mtx_lock(&fenced_mgr->mutex);
 
       assert(pipe_is_referenced(&fenced_buf->base.reference));
 
       /* Only proceed if the fence object didn't change in the meanwhile.
        * Otherwise assume the work has been already carried out by another
        * thread that re-aquired the lock before us.
@@ -649,21 +649,21 @@ fenced_buffer_destroy(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
 
    assert(!pipe_is_referenced(&fenced_buf->base.reference));
 
    mtx_lock(&fenced_mgr->mutex);
 
    fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static void *
 fenced_buffer_map(struct pb_buffer *buf,
                   unsigned flags, void *flush_ctx)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    struct pb_fence_ops *ops = fenced_mgr->ops;
@@ -702,21 +702,21 @@ fenced_buffer_map(struct pb_buffer *buf,
       assert(fenced_buf->data);
       map = fenced_buf->data;
    }
 
    if (map) {
       ++fenced_buf->mapcount;
       fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
    }
 
  done:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    return map;
 }
 
 
 static void
 fenced_buffer_unmap(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -725,21 +725,21 @@ fenced_buffer_unmap(struct pb_buffer *buf)
 
    assert(fenced_buf->mapcount);
    if (fenced_buf->mapcount) {
       if (fenced_buf->buffer)
          pb_unmap(fenced_buf->buffer);
       --fenced_buf->mapcount;
       if (!fenced_buf->mapcount)
          fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static enum pipe_error
 fenced_buffer_validate(struct pb_buffer *buf,
                        struct pb_validate *vl,
                        unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -795,21 +795,21 @@ fenced_buffer_validate(struct pb_buffer *buf,
    }
 
    ret = pb_validate(fenced_buf->buffer, vl, flags);
    if (ret != PIPE_OK)
       goto done;
 
    fenced_buf->vl = vl;
    fenced_buf->validation_flags |= flags;
 
  done:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    return ret;
 }
 
 
 static void
 fenced_buffer_fence(struct pb_buffer *buf,
                     struct pipe_fence_handle *fence)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
@@ -834,21 +834,21 @@ fenced_buffer_fence(struct pb_buffer *buf,
          fenced_buf->flags |= fenced_buf->validation_flags;
          fenced_buffer_add_locked(fenced_mgr, fenced_buf);
       }
 
       pb_fence(fenced_buf->buffer, fence);
 
       fenced_buf->vl = NULL;
       fenced_buf->validation_flags = 0;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static void
 fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -861,21 +861,21 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
    assert(fenced_buf->vl);
    assert(fenced_buf->buffer);
 
    if (fenced_buf->buffer) {
       pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
    } else {
       *base_buf = buf;
       *offset = 0;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static const struct pb_vtbl
 fenced_buffer_vtbl = {
    fenced_buffer_destroy,
    fenced_buffer_map,
    fenced_buffer_unmap,
    fenced_buffer_validate,
    fenced_buffer_fence,
@@ -934,71 +934,71 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
 
    /* Give up. */
    if (ret != PIPE_OK) {
       goto no_storage;
    }
 
    assert(fenced_buf->buffer || fenced_buf->data);
 
    LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
    ++fenced_mgr->num_unfenced;
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    return &fenced_buf->base;
 
  no_storage:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
    FREE(fenced_buf);
  no_buffer:
    return NULL;
 }
 
 
 static void
 fenced_bufmgr_flush(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
 
    mtx_lock(&fenced_mgr->mutex);
    while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
       ;
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    assert(fenced_mgr->provider->flush);
    if (fenced_mgr->provider->flush)
       fenced_mgr->provider->flush(fenced_mgr->provider);
 }
 
 
 static void
 fenced_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
 
    mtx_lock(&fenced_mgr->mutex);
 
    /* Wait on outstanding fences. */
    while (fenced_mgr->num_fenced) {
-      pipe_mutex_unlock(fenced_mgr->mutex);
+      mtx_unlock(&fenced_mgr->mutex);
 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
       sched_yield();
 #endif
       mtx_lock(&fenced_mgr->mutex);
       while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
          ;
    }
 
 #ifdef DEBUG
    /* assert(!fenced_mgr->num_unfenced); */
 #endif
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
    mtx_destroy(&fenced_mgr->mutex);
 
    if (fenced_mgr->provider)
       fenced_mgr->provider->destroy(fenced_mgr->provider);
 
    fenced_mgr->ops->destroy(fenced_mgr->ops);
 
    FREE(fenced_mgr);
 }
 
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 717ab9e..df233c3 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -231,21 +231,21 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
 {
    struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
    struct pb_debug_manager *mgr = buf->mgr;
    
    assert(!pipe_is_referenced(&buf->base.reference));
    
    pb_debug_buffer_check(buf);
 
    mtx_lock(&mgr->mutex);
    LIST_DEL(&buf->head);
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
 
    mtx_destroy(&buf->mutex);
    
    pb_reference(&buf->buffer, NULL);
    FREE(buf);
 }
 
 
 static void *
 pb_debug_buffer_map(struct pb_buffer *_buf, 
@@ -256,36 +256,36 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
    
    pb_debug_buffer_check(buf);
 
    map = pb_map(buf->buffer, flags, flush_ctx);
    if (!map)
       return NULL;
    
    mtx_lock(&buf->mutex);
    ++buf->map_count;
    debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
-   pipe_mutex_unlock(buf->mutex);
+   mtx_unlock(&buf->mutex);
    
    return (uint8_t *)map + buf->underflow_size;
 }
 
 
 static void
 pb_debug_buffer_unmap(struct pb_buffer *_buf)
 {
    struct pb_debug_buffer *buf = pb_debug_buffer(_buf);   
    
    mtx_lock(&buf->mutex);
    assert(buf->map_count);
    if(buf->map_count)
       --buf->map_count;
-   pipe_mutex_unlock(buf->mutex);
+   mtx_unlock(&buf->mutex);
    
    pb_unmap(buf->buffer);
    
    pb_debug_buffer_check(buf);
 }
 
 
 static void
 pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
                                 struct pb_buffer **base_buf,
@@ -303,21 +303,21 @@ pb_debug_buffer_validate(struct pb_buffer *_buf,
                          unsigned flags)
 {
    struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
    
    mtx_lock(&buf->mutex);
    if(buf->map_count) {
       debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
       debug_printf("last map backtrace is\n");
       debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
    }
-   pipe_mutex_unlock(buf->mutex);
+   mtx_unlock(&buf->mutex);
 
    pb_debug_buffer_check(buf);
 
    return pb_validate(buf->buffer, vl, flags);
 }
 
 
 static void
 pb_debug_buffer_fence(struct pb_buffer *_buf, 
                       struct pipe_fence_handle *fence)
@@ -385,21 +385,21 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
    buf->buffer = mgr->provider->create_buffer(mgr->provider, 
                                               real_size, 
                                               &real_desc);
    if(!buf->buffer) {
       FREE(buf);
 #if 0
       mtx_lock(&mgr->mutex);
       debug_printf("%s: failed to create buffer\n", __FUNCTION__);
       if(!LIST_IS_EMPTY(&mgr->list))
          pb_debug_manager_dump_locked(mgr);
-      pipe_mutex_unlock(mgr->mutex);
+      mtx_unlock(&mgr->mutex);
 #endif
       return NULL;
    }
    
    assert(pipe_is_referenced(&buf->buffer->reference));
    assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
    assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
    assert(buf->buffer->size >= real_size);
    
    pipe_reference_init(&buf->base.reference, 1);
@@ -414,21 +414,21 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
    buf->overflow_size = buf->buffer->size - buf->underflow_size - size;
    
    debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);
 
    pb_debug_buffer_fill(buf);
    
    (void) mtx_init(&buf->mutex, mtx_plain);
    
    mtx_lock(&mgr->mutex);
    LIST_ADDTAIL(&buf->head, &mgr->list);
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
 
    return &buf->base;
 }
 
 
 static void
 pb_debug_manager_flush(struct pb_manager *_mgr)
 {
    struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
    assert(mgr->provider->flush);
@@ -440,21 +440,21 @@ pb_debug_manager_flush(struct pb_manager *_mgr)
 static void
 pb_debug_manager_destroy(struct pb_manager *_mgr)
 {
    struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
    
    mtx_lock(&mgr->mutex);
    if(!LIST_IS_EMPTY(&mgr->list)) {
       debug_printf("%s: unfreed buffers\n", __FUNCTION__);
       pb_debug_manager_dump_locked(mgr);
    }
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
    
    mtx_destroy(&mgr->mutex);
    mgr->provider->destroy(mgr->provider);
    FREE(mgr);
 }
 
 
 struct pb_manager *
 pb_debug_manager_create(struct pb_manager *provider, 
                         pb_size underflow_size, pb_size overflow_size) 
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
index 657b5f3..818cadd 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -95,21 +95,21 @@ static void
 mm_buffer_destroy(struct pb_buffer *buf)
 {
    struct mm_buffer *mm_buf = mm_buffer(buf);
    struct mm_pb_manager *mm = mm_buf->mgr;
    
    assert(!pipe_is_referenced(&mm_buf->base.reference));
    
    mtx_lock(&mm->mutex);
    u_mmFreeMem(mm_buf->block);
    FREE(mm_buf);
-   pipe_mutex_unlock(mm->mutex);
+   mtx_unlock(&mm->mutex);
 }
 
 
 static void *
 mm_buffer_map(struct pb_buffer *buf,
               unsigned flags,
               void *flush_ctx)
 {
    struct mm_buffer *mm_buf = mm_buffer(buf);
    struct mm_pb_manager *mm = mm_buf->mgr;
@@ -181,49 +181,49 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
 
    /* We don't handle alignments larger then the one initially setup */
    assert(pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2));
    if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
       return NULL;
    
    mtx_lock(&mm->mutex);
 
    mm_buf = CALLOC_STRUCT(mm_buffer);
    if (!mm_buf) {
-      pipe_mutex_unlock(mm->mutex);
+      mtx_unlock(&mm->mutex);
       return NULL;
    }
 
    pipe_reference_init(&mm_buf->base.reference, 1);
    mm_buf->base.alignment = desc->alignment;
    mm_buf->base.usage = desc->usage;
    mm_buf->base.size = size;
    
    mm_buf->base.vtbl = &mm_buffer_vtbl;
    
    mm_buf->mgr = mm;
    
    mm_buf->block = u_mmAllocMem(mm->heap, (int)size, (int)mm->align2, 0);
    if(!mm_buf->block) {
 #if 0
       debug_printf("warning: heap full\n");
       mmDumpMemInfo(mm->heap);
 #endif
       FREE(mm_buf);
-      pipe_mutex_unlock(mm->mutex);
+      mtx_unlock(&mm->mutex);
       return NULL;
    }
    
    /* Some sanity checks */
    assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size);
    assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size);
    
-   pipe_mutex_unlock(mm->mutex);
+   mtx_unlock(&mm->mutex);
    return SUPER(mm_buf);
 }
 
 
 static void
 mm_bufmgr_flush(struct pb_manager *mgr)
 {
    /* No-op */
 }
 
@@ -233,21 +233,21 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct mm_pb_manager *mm = mm_pb_manager(mgr);
    
    mtx_lock(&mm->mutex);
 
    u_mmDestroy(mm->heap);
    
    pb_unmap(mm->buffer);
    pb_reference(&mm->buffer, NULL);
    
-   pipe_mutex_unlock(mm->mutex);
+   mtx_unlock(&mm->mutex);
    
    FREE(mgr);
 }
 
 
 struct pb_manager *
 mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, 
                              pb_size size, pb_size align2) 
 {
    struct mm_pb_manager *mm;
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
index 83a5568..31087ae 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -106,36 +106,36 @@ static void
 pool_buffer_destroy(struct pb_buffer *buf)
 {
    struct pool_buffer *pool_buf = pool_buffer(buf);
    struct pool_pb_manager *pool = pool_buf->mgr;
    
    assert(!pipe_is_referenced(&pool_buf->base.reference));
 
    mtx_lock(&pool->mutex);
    LIST_ADD(&pool_buf->head, &pool->free);
    pool->numFree++;
-   pipe_mutex_unlock(pool->mutex);
+   mtx_unlock(&pool->mutex);
 }
 
 
 static void *
 pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
 {
    struct pool_buffer *pool_buf = pool_buffer(buf);
    struct pool_pb_manager *pool = pool_buf->mgr;
    void *map;
 
    /* XXX: it will be necessary to remap here to propagate flush_ctx */
 
    mtx_lock(&pool->mutex);
    map = (unsigned char *) pool->map + pool_buf->start;
-   pipe_mutex_unlock(pool->mutex);
+   mtx_unlock(&pool->mutex);
    return map;
 }
 
 
 static void
 pool_buffer_unmap(struct pb_buffer *buf)
 {
    /* No-op */
 }
 
@@ -192,37 +192,37 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
    struct pool_pb_manager *pool = pool_pb_manager(mgr);
    struct pool_buffer *pool_buf;
    struct list_head *item;
 
    assert(size == pool->bufSize);
    assert(pool->bufAlign % desc->alignment == 0);
    
    mtx_lock(&pool->mutex);
 
    if (pool->numFree == 0) {
-      pipe_mutex_unlock(pool->mutex);
+      mtx_unlock(&pool->mutex);
       debug_printf("warning: out of fixed size buffer objects\n");
       return NULL;
    }
 
    item = pool->free.next;
 
    if (item == &pool->free) {
-      pipe_mutex_unlock(pool->mutex);
+      mtx_unlock(&pool->mutex);
       debug_printf("error: fixed size buffer pool corruption\n");
       return NULL;
    }
 
    LIST_DEL(item);
    --pool->numFree;
 
-   pipe_mutex_unlock(pool->mutex);
+   mtx_unlock(&pool->mutex);
    
    pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
    assert(!pipe_is_referenced(&pool_buf->base.reference));
    pipe_reference_init(&pool_buf->base.reference, 1);
    pool_buf->base.alignment = desc->alignment;
    pool_buf->base.usage = desc->usage;
    
    return SUPER(pool_buf);
 }
 
@@ -238,21 +238,21 @@ static void
 pool_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct pool_pb_manager *pool = pool_pb_manager(mgr);
    mtx_lock(&pool->mutex);
 
    FREE(pool->bufs);
    
    pb_unmap(pool->buffer);
    pb_reference(&pool->buffer, NULL);
    
-   pipe_mutex_unlock(pool->mutex);
+   mtx_unlock(&pool->mutex);
    
    FREE(mgr);
 }
 
 
 struct pb_manager *
 pool_bufmgr_create(struct pb_manager *provider, 
                    pb_size numBufs, 
                    pb_size bufSize,
                    const struct pb_desc *desc) 
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
index 32e6646..a89236e 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -214,21 +214,21 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
 
    /* If the slab becomes totally empty, free it */
    if (slab->numFree == slab->numBuffers) {
       list = &slab->head;
       LIST_DELINIT(list);
       pb_reference(&slab->bo, NULL);
       FREE(slab->buffers);
       FREE(slab);
    }
 
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
 }
 
 
 static void *
 pb_slab_buffer_map(struct pb_buffer *_buf, 
                    unsigned flags,
                    void *flush_ctx)
 {
    struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
 
@@ -395,37 +395,37 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
    assert(pb_check_usage(desc->usage, mgr->desc.usage));
    if(!pb_check_usage(desc->usage, mgr->desc.usage))
       return NULL;
 
    mtx_lock(&mgr->mutex);
    
    /* Create a new slab, if we run out of partial slabs */
    if (mgr->slabs.next == &mgr->slabs) {
       (void) pb_slab_create(mgr);
       if (mgr->slabs.next == &mgr->slabs) {
-	 pipe_mutex_unlock(mgr->mutex);
+	 mtx_unlock(&mgr->mutex);
 	 return NULL;
       }
    }
    
    /* Allocate the buffer from a partial (or just created) slab */
    list = mgr->slabs.next;
    slab = LIST_ENTRY(struct pb_slab, list, head);
    
    /* If totally full remove from the partial slab list */
    if (--slab->numFree == 0)
       LIST_DELINIT(list);
 
    list = slab->freeBuffers.next;
    LIST_DELINIT(list);
 
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
    buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
    
    pipe_reference_init(&buf->base.reference, 1);
    buf->base.alignment = desc->alignment;
    buf->base.usage = desc->usage;
    
    return &buf->base;
 }
 
 
diff --git a/src/gallium/auxiliary/pipebuffer/pb_cache.c b/src/gallium/auxiliary/pipebuffer/pb_cache.c
index 4a72cb5..9b75ff0 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_cache.c
@@ -91,30 +91,30 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
 
    mtx_lock(&mgr->mutex);
    assert(!pipe_is_referenced(&buf->reference));
 
    for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
       release_expired_buffers_locked(&mgr->buckets[i]);
 
    /* Directly release any buffer that exceeds the limit. */
    if (mgr->cache_size + buf->size > mgr->max_cache_size) {
       mgr->destroy_buffer(buf);
-      pipe_mutex_unlock(mgr->mutex);
+      mtx_unlock(&mgr->mutex);
       return;
    }
 
    entry->start = os_time_get();
    entry->end = entry->start + mgr->usecs;
    LIST_ADDTAIL(&entry->head, cache);
    ++mgr->num_buffers;
    mgr->cache_size += buf->size;
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
 }
 
 /**
  * \return 1   if compatible and can be reclaimed
  *         0   if incompatible
  *        -1   if compatible and can't be reclaimed
  */
 static int
 pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
                           pb_size size, unsigned alignment, unsigned usage)
@@ -201,27 +201,27 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
       }
    }
 
    /* found a compatible buffer, return it */
    if (entry) {
       struct pb_buffer *buf = entry->buffer;
 
       mgr->cache_size -= buf->size;
       LIST_DEL(&entry->head);
       --mgr->num_buffers;
-      pipe_mutex_unlock(mgr->mutex);
+      mtx_unlock(&mgr->mutex);
       /* Increase refcount */
       pipe_reference_init(&buf->reference, 1);
       return buf;
    }
 
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
    return NULL;
 }
 
 /**
  * Empty the cache. Useful when there is not enough memory.
  */
 void
 pb_cache_release_all_buffers(struct pb_cache *mgr)
 {
    struct list_head *curr, *next;
@@ -234,21 +234,21 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
 
       curr = cache->next;
       next = curr->next;
       while (curr != cache) {
          buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
          destroy_buffer_locked(buf);
          curr = next;
          next = curr->next;
       }
    }
-   pipe_mutex_unlock(mgr->mutex);
+   mtx_unlock(&mgr->mutex);
 }
 
 void
 pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                     struct pb_buffer *buf, unsigned bucket_index)
 {
    memset(entry, 0, sizeof(*entry));
    entry->buffer = buf;
    entry->mgr = mgr;
    entry->bucket_index = bucket_index;
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
index 4a1b269..2702297 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -128,64 +128,64 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
    }
 
    if (LIST_IS_EMPTY(&group->slabs)) {
       /* Drop the mutex temporarily to prevent a deadlock where the allocation
        * calls back into slab functions (most likely to happen for
        * pb_slab_reclaim if memory is low).
        *
        * There's a chance that racing threads will end up allocating multiple
        * slabs for the same group, but that doesn't hurt correctness.
        */
-      pipe_mutex_unlock(slabs->mutex);
+      mtx_unlock(&slabs->mutex);
       slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
       if (!slab)
          return NULL;
       mtx_lock(&slabs->mutex);
 
       LIST_ADD(&slab->head, &group->slabs);
    }
 
    entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
    LIST_DEL(&entry->head);
    slab->num_free--;
 
-   pipe_mutex_unlock(slabs->mutex);
+   mtx_unlock(&slabs->mutex);
 
    return entry;
 }
 
 /* Free the given slab entry.
  *
  * The entry may still be in use e.g. by in-flight command submissions. The
  * can_reclaim callback function will be called to determine whether the entry
  * can be handed out again by pb_slab_alloc.
  */
 void
 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
 {
    mtx_lock(&slabs->mutex);
    LIST_ADDTAIL(&entry->head, &slabs->reclaim);
-   pipe_mutex_unlock(slabs->mutex);
+   mtx_unlock(&slabs->mutex);
 }
 
 /* Check if any of the entries handed to pb_slab_free are ready to be re-used.
  *
  * This may end up freeing some slabs and is therefore useful to try to reclaim
  * some no longer used memory. However, calling this function is not strictly
  * required since pb_slab_alloc will eventually do the same thing.
  */
 void
 pb_slabs_reclaim(struct pb_slabs *slabs)
 {
    mtx_lock(&slabs->mutex);
    pb_slabs_reclaim_locked(slabs);
-   pipe_mutex_unlock(slabs->mutex);
+   mtx_unlock(&slabs->mutex);
 }
 
 /* Initialize the slabs manager.
  *
  * The minimum and maximum size of slab entries are 2^min_order and
  * 2^max_order, respectively.
  *
  * priv will be passed to the given callback functions.
  */
 bool
diff --git a/src/gallium/auxiliary/rtasm/rtasm_execmem.c b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
index a1c3de9..fcb67a6 100644
--- a/src/gallium/auxiliary/rtasm/rtasm_execmem.c
+++ b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
@@ -99,39 +99,39 @@ rtasm_exec_malloc(size_t size)
       size = (size + 31) & ~31;  /* next multiple of 32 bytes */
       block = u_mmAllocMem( exec_heap, size, 5, 0 ); /* 5 -> 32-byte alignment */
    }
 
    if (block)
       addr = exec_mem + block->ofs;
    else 
       debug_printf("rtasm_exec_malloc failed\n");
 
 bail:
-   pipe_mutex_unlock(exec_mutex);
+   mtx_unlock(&exec_mutex);
    
    return addr;
 }
 
  
 void 
 rtasm_exec_free(void *addr)
 {
    mtx_lock(&exec_mutex);
 
    if (exec_heap) {
       struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
    
       if (block)
 	 u_mmFreeMem(block);
    }
 
-   pipe_mutex_unlock(exec_mutex);
+   mtx_unlock(&exec_mutex);
 }
 
 
 #elif defined(PIPE_OS_WINDOWS)
 
 
 /*
  * Avoid Data Execution Prevention.
  */
 
diff --git a/src/gallium/auxiliary/util/u_debug_flush.c b/src/gallium/auxiliary/util/u_debug_flush.c
index dde21f9..06d0cfa 100644
--- a/src/gallium/auxiliary/util/u_debug_flush.c
+++ b/src/gallium/auxiliary/util/u_debug_flush.c
@@ -160,21 +160,21 @@ debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
 
    fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
                                            debug_flush_pointer_compare);
 
    if (!fctx->ref_hash)
       goto out_no_ref_hash;
 
    fctx->bt_depth = bt_depth;
    mtx_lock(&list_mutex);
    list_addtail(&fctx->head, &ctx_list);
-   pipe_mutex_unlock(list_mutex);
+   mtx_unlock(&list_mutex);
 
    return fctx;
 
  out_no_ref_hash:
    FREE(fctx);
 out_no_ctx:
    debug_printf("Debug flush context creation failed.\n");
    debug_printf("Debug flush checking for this context will be incomplete.\n");
    return NULL;
 }
@@ -220,78 +220,78 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
       debug_flush_alert("Recursive map detected.", "Map",
                         2, fbuf->bt_depth, TRUE, TRUE, NULL);
       debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
                         FALSE, fbuf->map_frame);
    } else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
               !fbuf->supports_unsync) {
       fbuf->mapped_sync = mapped_sync = TRUE;
    }
    fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
    fbuf->mapped = TRUE;
-   pipe_mutex_unlock(fbuf->mutex);
+   mtx_unlock(&fbuf->mutex);
 
    if (mapped_sync) {
       struct debug_flush_ctx *fctx;
 
       mtx_lock(&list_mutex);
       LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
          struct debug_flush_item *item =
             util_hash_table_get(fctx->ref_hash, fbuf);
 
          if (item && fctx->catch_map_of_referenced) {
             debug_flush_alert("Already referenced map detected.",
                               "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
             debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                               FALSE, FALSE, item->ref_frame);
          }
       }
-      pipe_mutex_unlock(list_mutex);
+      mtx_unlock(&list_mutex);
    }
 }
 
 void
 debug_flush_unmap(struct debug_flush_buf *fbuf)
 {
    if (!fbuf)
       return;
 
    mtx_lock(&fbuf->mutex);
    if (!fbuf->mapped)
       debug_flush_alert("Unmap not previously mapped detected.", "Map",
                         2, fbuf->bt_depth, FALSE, TRUE, NULL);
 
    fbuf->mapped_sync = FALSE;
    fbuf->mapped = FALSE;
    FREE(fbuf->map_frame);
    fbuf->map_frame = NULL;
-   pipe_mutex_unlock(fbuf->mutex);
+   mtx_unlock(&fbuf->mutex);
 }
 
 void
 debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                          struct debug_flush_buf *fbuf)
 {
    struct debug_flush_item *item;
 
    if (!fctx || !fbuf)
       return;
 
    item = util_hash_table_get(fctx->ref_hash, fbuf);
 
    mtx_lock(&fbuf->mutex);
    if (fbuf->mapped_sync) {
       debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                         2, fctx->bt_depth, TRUE, TRUE, NULL);
       debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                         FALSE, fbuf->map_frame);
    }
-   pipe_mutex_unlock(fbuf->mutex);
+   mtx_unlock(&fbuf->mutex);
 
    if (!item) {
       item = CALLOC_STRUCT(debug_flush_item);
       if (item) {
          debug_flush_buf_reference(&item->fbuf, fbuf);
          item->bt_depth = fctx->bt_depth;
          item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
          if (util_hash_table_set(fctx->ref_hash, fbuf, item) != PIPE_OK) {
             debug_flush_item_destroy(item);
             goto out_no_item;
@@ -321,21 +321,21 @@ debug_flush_might_flush_cb(void *key, void *value, void *data)
                  "%s referenced mapped buffer detected.", reason);
 
    mtx_lock(&fbuf->mutex);
    if (fbuf->mapped_sync) {
       debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
       debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
                         fbuf->map_frame);
       debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
                         FALSE, item->ref_frame);
    }
-   pipe_mutex_unlock(fbuf->mutex);
+   mtx_unlock(&fbuf->mutex);
 
    return PIPE_OK;
 }
 
 void
 debug_flush_might_flush(struct debug_flush_ctx *fctx)
 {
    if (!fctx)
       return;
 
diff --git a/src/gallium/auxiliary/util/u_debug_memory.c b/src/gallium/auxiliary/util/u_debug_memory.c
index d5b0d91..1ba553c 100644
--- a/src/gallium/auxiliary/util/u_debug_memory.c
+++ b/src/gallium/auxiliary/util/u_debug_memory.c
@@ -148,21 +148,21 @@ debug_malloc(const char *file, unsigned line, const char *function,
 
 #if DEBUG_MEMORY_STACK
    debug_backtrace_capture(hdr->backtrace, 0, DEBUG_MEMORY_STACK);
 #endif
 
    ftr = footer_from_header(hdr);
    ftr->magic = DEBUG_MEMORY_MAGIC;
    
    mtx_lock(&list_mutex);
    LIST_ADDTAIL(&hdr->head, &list);
-   pipe_mutex_unlock(list_mutex);
+   mtx_unlock(&list_mutex);
    
    return data_from_header(hdr);
 }
 
 void
 debug_free(const char *file, unsigned line, const char *function,
            void *ptr) 
 {
    struct debug_memory_header *hdr;
    struct debug_memory_footer *ftr;
@@ -193,21 +193,21 @@ debug_free(const char *file, unsigned line, const char *function,
    /* Mark the block as freed but don't really free it */
    hdr->freed = TRUE;
    /* Save file/line where freed */
    hdr->file = file;
    hdr->line = line;
    /* set freed memory to special value */
    memset(ptr, DEBUG_FREED_BYTE, hdr->size);
 #else
    mtx_lock(&list_mutex);
    LIST_DEL(&hdr->head);
-   pipe_mutex_unlock(list_mutex);
+   mtx_unlock(&list_mutex);
    hdr->magic = 0;
    ftr->magic = 0;
    
    os_free(hdr);
 #endif
 }
 
 void *
 debug_calloc(const char *file, unsigned line, const char *function,
              size_t count, size_t size )
@@ -268,21 +268,21 @@ debug_realloc(const char *file, unsigned line, const char *function,
    new_hdr->tag = 0;
 #if DEBUG_FREED_MEMORY
    new_hdr->freed = FALSE;
 #endif
    
    new_ftr = footer_from_header(new_hdr);
    new_ftr->magic = DEBUG_MEMORY_MAGIC;
    
    mtx_lock(&list_mutex);
    LIST_REPLACE(&old_hdr->head, &new_hdr->head);
-   pipe_mutex_unlock(list_mutex);
+   mtx_unlock(&list_mutex);
 
    /* copy data */
    new_ptr = data_from_header(new_hdr);
    memcpy( new_ptr, old_ptr, old_size < new_size ? old_size : new_size );
 
    /* free old */
    old_hdr->magic = 0;
    old_ftr->magic = 0;
    os_free(old_hdr);
 
diff --git a/src/gallium/auxiliary/util/u_debug_refcnt.c b/src/gallium/auxiliary/util/u_debug_refcnt.c
index 1db1787..cb01582 100644
--- a/src/gallium/auxiliary/util/u_debug_refcnt.c
+++ b/src/gallium/auxiliary/util/u_debug_refcnt.c
@@ -105,37 +105,37 @@ debug_serial(void *p, unsigned *pserial)
        */
       serial = ++serials_last;
       if (!serial) {
          debug_error("More than 2^32 objects detected, aborting.\n");
          os_abort();
       }
 
       util_hash_table_set(serials_hash, p, (void *) (uintptr_t) serial);
       found = FALSE;
    }
-   pipe_mutex_unlock(serials_mutex);
+   mtx_unlock(&serials_mutex);
 
    *pserial = serial;
 
    return found;
 }
 
 
 /**
  * Free the serial number for the given pointer.
  */
 static void
 debug_serial_delete(void *p)
 {
    mtx_lock(&serials_mutex);
    util_hash_table_remove(serials_hash, p);
-   pipe_mutex_unlock(serials_mutex);
+   mtx_unlock(&serials_mutex);
 }
 
 
 #define STACK_LEN 64
 
 static void
 dump_stack(const char *symbols[STACK_LEN])
 {
    unsigned i;
    for (i = 0; i < STACK_LEN; ++i) {
diff --git a/src/gallium/auxiliary/util/u_debug_symbol.c b/src/gallium/auxiliary/util/u_debug_symbol.c
index de320b3..f0b0629 100644
--- a/src/gallium/auxiliary/util/u_debug_symbol.c
+++ b/src/gallium/auxiliary/util/u_debug_symbol.c
@@ -306,13 +306,13 @@ debug_symbol_name_cached(const void *addr)
       symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
    name = util_hash_table_get(symbols_hash, (void*)addr);
    if(!name)
    {
       char buf[1024];
       debug_symbol_name(addr, buf, sizeof(buf));
       name = strdup(buf);
 
       util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
    }
-   pipe_mutex_unlock(symbols_mutex);
+   mtx_unlock(&symbols_mutex);
    return name;
 }
diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index 2926d8c..2525230 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -45,75 +45,75 @@ static mtx_t exit_mutex = _MTX_INITIALIZER_NP;
 static void
 atexit_handler(void)
 {
    struct util_queue *iter;
 
    mtx_lock(&exit_mutex);
    /* Wait for all queues to assert idle. */
    LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
       util_queue_killall_and_wait(iter);
    }
-   pipe_mutex_unlock(exit_mutex);
+   mtx_unlock(&exit_mutex);
 }
 
 static void
 global_init(void)
 {
    LIST_INITHEAD(&queue_list);
    atexit(atexit_handler);
 }
 
 static void
 add_to_atexit_list(struct util_queue *queue)
 {
    call_once(&atexit_once_flag, global_init);
 
    mtx_lock(&exit_mutex);
    LIST_ADD(&queue->head, &queue_list);
-   pipe_mutex_unlock(exit_mutex);
+   mtx_unlock(&exit_mutex);
 }
 
 static void
 remove_from_atexit_list(struct util_queue *queue)
 {
    struct util_queue *iter, *tmp;
 
    mtx_lock(&exit_mutex);
    LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
       if (iter == queue) {
          LIST_DEL(&iter->head);
          break;
       }
    }
-   pipe_mutex_unlock(exit_mutex);
+   mtx_unlock(&exit_mutex);
 }
 
 /****************************************************************************
  * util_queue_fence
  */
 
 static void
 util_queue_fence_signal(struct util_queue_fence *fence)
 {
    mtx_lock(&fence->mutex);
    fence->signalled = true;
    cnd_broadcast(&fence->cond);
-   pipe_mutex_unlock(fence->mutex);
+   mtx_unlock(&fence->mutex);
 }
 
 void
 util_queue_fence_wait(struct util_queue_fence *fence)
 {
    mtx_lock(&fence->mutex);
    while (!fence->signalled)
       cnd_wait(&fence->cond, &fence->mutex);
-   pipe_mutex_unlock(fence->mutex);
+   mtx_unlock(&fence->mutex);
 }
 
 void
 util_queue_fence_init(struct util_queue_fence *fence)
 {
    memset(fence, 0, sizeof(*fence));
    (void) mtx_init(&fence->mutex, mtx_plain);
    cnd_init(&fence->cond);
    fence->signalled = true;
 }
@@ -152,50 +152,50 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
       struct util_queue_job job;
 
       mtx_lock(&queue->lock);
       assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
 
       /* wait if the queue is empty */
       while (!queue->kill_threads && queue->num_queued == 0)
          cnd_wait(&queue->has_queued_cond, &queue->lock);
 
       if (queue->kill_threads) {
-         pipe_mutex_unlock(queue->lock);
+         mtx_unlock(&queue->lock);
          break;
       }
 
       job = queue->jobs[queue->read_idx];
       memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
       queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
 
       queue->num_queued--;
       cnd_signal(&queue->has_space_cond);
-      pipe_mutex_unlock(queue->lock);
+      mtx_unlock(&queue->lock);
 
       if (job.job) {
          job.execute(job.job, thread_index);
          util_queue_fence_signal(job.fence);
          if (job.cleanup)
             job.cleanup(job.job, thread_index);
       }
    }
 
    /* signal remaining jobs before terminating */
    mtx_lock(&queue->lock);
    while (queue->jobs[queue->read_idx].job) {
       util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
 
       queue->jobs[queue->read_idx].job = NULL;
       queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
    }
    queue->num_queued = 0; /* reset this when exiting the thread */
-   pipe_mutex_unlock(queue->lock);
+   mtx_unlock(&queue->lock);
    return 0;
 }
 
 bool
 util_queue_init(struct util_queue *queue,
                 const char *name,
                 unsigned max_jobs,
                 unsigned num_threads)
 {
    unsigned i;
@@ -261,21 +261,21 @@ fail:
 
 static void
 util_queue_killall_and_wait(struct util_queue *queue)
 {
    unsigned i;
 
    /* Signal all threads to terminate. */
    mtx_lock(&queue->lock);
    queue->kill_threads = 1;
    cnd_broadcast(&queue->has_queued_cond);
-   pipe_mutex_unlock(queue->lock);
+   mtx_unlock(&queue->lock);
 
    for (i = 0; i < queue->num_threads; i++)
       pipe_thread_wait(queue->threads[i]);
    queue->num_threads = 0;
 }
 
 void
 util_queue_destroy(struct util_queue *queue)
 {
    util_queue_killall_and_wait(queue);
@@ -310,21 +310,21 @@ util_queue_add_job(struct util_queue *queue,
    ptr = &queue->jobs[queue->write_idx];
    assert(ptr->job == NULL);
    ptr->job = job;
    ptr->fence = fence;
    ptr->execute = execute;
    ptr->cleanup = cleanup;
    queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
 
    queue->num_queued++;
    cnd_signal(&queue->has_queued_cond);
-   pipe_mutex_unlock(queue->lock);
+   mtx_unlock(&queue->lock);
 }
 
 int64_t
 util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
 {
    /* Allow some flexibility by not raising an error. */
    if (thread_index >= queue->num_threads)
       return 0;
 
    return pipe_thread_get_time_nano(queue->threads[thread_index]);
diff --git a/src/gallium/auxiliary/util/u_range.h b/src/gallium/auxiliary/util/u_range.h
index a09dc9a..7d3fc61 100644
--- a/src/gallium/auxiliary/util/u_range.h
+++ b/src/gallium/auxiliary/util/u_range.h
@@ -55,21 +55,21 @@ util_range_set_empty(struct util_range *range)
 }
 
 /* This is like a union of two sets. */
 static inline void
 util_range_add(struct util_range *range, unsigned start, unsigned end)
 {
    if (start < range->start || end > range->end) {
       mtx_lock(&range->write_mutex);
       range->start = MIN2(start, range->start);
       range->end = MAX2(end, range->end);
-      pipe_mutex_unlock(range->write_mutex);
+      mtx_unlock(&range->write_mutex);
    }
 }
 
 static inline boolean
 util_ranges_intersect(struct util_range *range, unsigned start, unsigned end)
 {
    return MAX2(start, range->start) < MIN2(end, range->end);
 }
 
 
diff --git a/src/gallium/auxiliary/util/u_ringbuffer.c b/src/gallium/auxiliary/util/u_ringbuffer.c
index 6a83d30..fd51f26 100644
--- a/src/gallium/auxiliary/util/u_ringbuffer.c
+++ b/src/gallium/auxiliary/util/u_ringbuffer.c
@@ -96,21 +96,21 @@ void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
        * something, but probably not an array of packet structs:
        */
       ring->buf[ring->head] = packet[i];
       ring->head++;
       ring->head &= ring->mask;
    }
 
    /* Signal change:
     */
    cnd_signal(&ring->change);
-   pipe_mutex_unlock(ring->mutex);
+   mtx_unlock(&ring->mutex);
 }
 
 enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
                                          struct util_packet *packet,
                                          unsigned max_dwords,
                                          boolean wait )
 {
    const struct util_packet *ring_packet;
    unsigned i;
    int ret = PIPE_OK;
@@ -148,13 +148,13 @@ enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
    for (i = 0; i < ring_packet->dwords; i++) {
       packet[i] = ring->buf[ring->tail];
       ring->tail++;
       ring->tail &= ring->mask;
    }
 
 out:
    /* Signal change:
     */
    cnd_signal(&ring->change);
-   pipe_mutex_unlock(ring->mutex);
+   mtx_unlock(&ring->mutex);
    return ret;
 }
diff --git a/src/gallium/drivers/ddebug/dd_context.c b/src/gallium/drivers/ddebug/dd_context.c
index 109d642..a52975d 100644
--- a/src/gallium/drivers/ddebug/dd_context.c
+++ b/src/gallium/drivers/ddebug/dd_context.c
@@ -589,21 +589,21 @@ dd_context_set_stream_output_targets(struct pipe_context *_pipe,
 
 static void
 dd_context_destroy(struct pipe_context *_pipe)
 {
    struct dd_context *dctx = dd_context(_pipe);
    struct pipe_context *pipe = dctx->pipe;
 
    if (dctx->thread) {
       mtx_lock(&dctx->mutex);
       dctx->kill_thread = 1;
-      pipe_mutex_unlock(dctx->mutex);
+      mtx_unlock(&dctx->mutex);
       pipe_thread_wait(dctx->thread);
       mtx_destroy(&dctx->mutex);
       assert(!dctx->records);
    }
 
    if (dctx->fence) {
       pipe->transfer_unmap(pipe, dctx->fence_transfer);
       pipe_resource_reference(&dctx->fence, NULL);
    }
    pipe->destroy(pipe);
diff --git a/src/gallium/drivers/ddebug/dd_draw.c b/src/gallium/drivers/ddebug/dd_draw.c
index 17b404a..59afde8 100644
--- a/src/gallium/drivers/ddebug/dd_draw.c
+++ b/src/gallium/drivers/ddebug/dd_draw.c
@@ -935,30 +935,30 @@ PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
                record = &(*record)->next;
 
             dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
             dd_kill_process();
          }
 
          record = &(*record)->next;
       }
 
       /* Unlock and sleep before starting all over again. */
-      pipe_mutex_unlock(dctx->mutex);
+      mtx_unlock(&dctx->mutex);
       os_time_sleep(10000); /* 10 ms */
       mtx_lock(&dctx->mutex);
    }
 
    /* Thread termination. */
    while (dctx->records)
       dd_free_record(&dctx->records);
 
-   pipe_mutex_unlock(dctx->mutex);
+   mtx_unlock(&dctx->mutex);
    return 0;
 }
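
The hang-detect thread above holds dctx->mutex while scanning records and drops it only for the 10 ms sleep. A reduced sketch of that unlock/sleep/relock polling loop (illustrative names; it assumes C11 thrd_sleep() rather than os_time_sleep()):

#include <stdbool.h>
#include <threads.h>
#include <time.h>

struct monitor {
   mtx_t mutex;
   bool kill_thread;   /* set by the owner, read under the mutex */
};

static int monitor_thread(void *arg)
{
   struct monitor *m = arg;
   const struct timespec ten_ms = { .tv_nsec = 10 * 1000 * 1000 };

   mtx_lock(&m->mutex);
   while (!m->kill_thread) {
      /* ... inspect shared state while the mutex is held ... */
      mtx_unlock(&m->mutex);        /* don't hold the lock across the sleep */
      thrd_sleep(&ten_ms, NULL);    /* 10 ms */
      mtx_lock(&m->mutex);
   }
   mtx_unlock(&m->mutex);
   return 0;
}
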
 
 static char *
 dd_get_driver_shader_log(struct dd_context *dctx)
 {
 #if defined(PIPE_OS_LINUX)
    FILE *f;
    char *buf;
    int written_bytes;
@@ -1037,21 +1037,21 @@ dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
    memset(&record->call, 0, sizeof(record->call));
    dd_copy_call(&record->call, call);
 
    dd_init_copy_of_draw_state(&record->draw_state);
    dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
 
    /* Add the record to the list. */
    mtx_lock(&dctx->mutex);
    record->next = dctx->records;
    dctx->records = record;
-   pipe_mutex_unlock(dctx->mutex);
+   mtx_unlock(&dctx->mutex);
 }
 
 static void
 dd_context_flush(struct pipe_context *_pipe,
                  struct pipe_fence_handle **fence, unsigned flags)
 {
    struct dd_context *dctx = dd_context(_pipe);
    struct pipe_context *pipe = dctx->pipe;
 
    switch (dd_screen(dctx->base.screen)->mode) {
diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index f08b7b3..5cd6a69 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -165,21 +165,21 @@ batch_reset_resources_locked(struct fd_batch *batch)
 		if (rsc->write_batch == batch)
 			fd_batch_reference_locked(&rsc->write_batch, NULL);
 	}
 }
 
 static void
 batch_reset_resources(struct fd_batch *batch)
 {
 	mtx_lock(&batch->ctx->screen->lock);
 	batch_reset_resources_locked(batch);
-	pipe_mutex_unlock(batch->ctx->screen->lock);
+	mtx_unlock(&batch->ctx->screen->lock);
 }
 
 static void
 batch_reset(struct fd_batch *batch)
 {
 	DBG("%p", batch);
 
 	fd_batch_sync(batch);
 
 	batch_flush_reset_dependencies(batch, false);
@@ -198,21 +198,21 @@ fd_batch_reset(struct fd_batch *batch)
 
 void
 __fd_batch_destroy(struct fd_batch *batch)
 {
 	DBG("%p", batch);
 
 	util_copy_framebuffer_state(&batch->framebuffer, NULL);
 
 	mtx_lock(&batch->ctx->screen->lock);
 	fd_bc_invalidate_batch(batch, true);
-	pipe_mutex_unlock(batch->ctx->screen->lock);
+	mtx_unlock(&batch->ctx->screen->lock);
 
 	batch_fini(batch);
 
 	batch_reset_resources(batch);
 	debug_assert(batch->resources->entries == 0);
 	_mesa_set_destroy(batch->resources, NULL);
 
 	batch_flush_reset_dependencies(batch, false);
 	debug_assert(batch->dependents_mask == 0);
 
@@ -282,21 +282,21 @@ batch_flush(struct fd_batch *batch)
 		batch_reset_resources(batch);
 	}
 
 	debug_assert(batch->reference.count > 0);
 
 	if (batch == batch->ctx->batch) {
 		batch_reset(batch);
 	} else {
 		mtx_lock(&batch->ctx->screen->lock);
 		fd_bc_invalidate_batch(batch, false);
-		pipe_mutex_unlock(batch->ctx->screen->lock);
+		mtx_unlock(&batch->ctx->screen->lock);
 	}
 }
 
 /* NOTE: could drop the last ref to batch */
 void
 fd_batch_flush(struct fd_batch *batch, bool sync)
 {
 	/* NOTE: we need to hold an extra ref across the body of flush,
 	 * since the last ref to this batch could be dropped when cleaning
 	 * up used_resources
@@ -330,21 +330,21 @@ static void
 batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 {
 	if (batch->dependents_mask & (1 << dep->idx))
 		return;
 
 	/* if the new dependency already depends on us, we need to flush
 	 * to avoid a loop in the dependency graph.
 	 */
 	if (batch_depends_on(dep, batch)) {
 		DBG("%p: flush forced on %p!", batch, dep);
-		pipe_mutex_unlock(batch->ctx->screen->lock);
+		mtx_unlock(&batch->ctx->screen->lock);
 		fd_batch_flush(dep, false);
 		mtx_lock(&batch->ctx->screen->lock);
 	} else {
 		struct fd_batch *other = NULL;
 		fd_batch_reference_locked(&other, dep);
 		batch->dependents_mask |= (1 << dep->idx);
 		DBG("%p: added dependency on %p", batch, dep);
 	}
 }
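
batch_add_dep() above temporarily drops the screen lock around fd_batch_flush() because the flush path re-acquires the same non-recursive lock. The shape of that pattern, reduced to a self-contained sketch (hypothetical names):

#include <stdbool.h>
#include <threads.h>

struct screen { mtx_t lock; };
struct batch  { bool needs_flush; };

static void flush_batch(struct screen *screen, struct batch *b)
{
   mtx_lock(&screen->lock);     /* the flush path takes the same lock */
   b->needs_flush = false;
   mtx_unlock(&screen->lock);
}

/* Caller holds screen->lock on entry and again on return. */
static void add_dep_locked(struct screen *screen, struct batch *dep)
{
   if (dep->needs_flush) {
      mtx_unlock(&screen->lock);   /* drop it: flush_batch() locks internally */
      flush_batch(screen, dep);
      mtx_lock(&screen->lock);     /* re-acquire before touching shared state */
   }
}
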
 
diff --git a/src/gallium/drivers/freedreno/freedreno_batch_cache.c b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
index 5a881bf..9fea7d6 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch_cache.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
@@ -129,50 +129,50 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
 {
 	struct hash_entry *entry;
 	struct fd_batch *last_batch = NULL;
 
 	mtx_lock(&ctx->screen->lock);
 
 	hash_table_foreach(cache->ht, entry) {
 		struct fd_batch *batch = NULL;
 		fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
 		if (batch->ctx == ctx) {
-			pipe_mutex_unlock(ctx->screen->lock);
+			mtx_unlock(&ctx->screen->lock);
 			fd_batch_reference(&last_batch, batch);
 			fd_batch_flush(batch, false);
 			mtx_lock(&ctx->screen->lock);
 		}
 		fd_batch_reference_locked(&batch, NULL);
 	}
 
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 
 	if (last_batch) {
 		fd_batch_sync(last_batch);
 		fd_batch_reference(&last_batch, NULL);
 	}
 }
 
 void
 fd_bc_invalidate_context(struct fd_context *ctx)
 {
 	struct fd_batch_cache *cache = &ctx->screen->batch_cache;
 	struct fd_batch *batch;
 
 	mtx_lock(&ctx->screen->lock);
 
 	foreach_batch(batch, cache, cache->batch_mask) {
 		if (batch->ctx == ctx)
 			fd_batch_reference_locked(&batch, NULL);
 	}
 
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 }
 
 void
 fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
 {
 	if (!batch)
 		return;
 
 	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
 	struct key *key = (struct key *)batch->key;
@@ -217,21 +217,21 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
 		rsc->batch_mask = 0;
 
 		fd_batch_reference_locked(&rsc->write_batch, NULL);
 	}
 
 	foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
 		fd_bc_invalidate_batch(batch, false);
 
 	rsc->bc_batch_mask = 0;
 
-	pipe_mutex_unlock(screen->lock);
+	mtx_unlock(&screen->lock);
 }
 
 struct fd_batch *
 fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 {
 	struct fd_batch *batch;
 	uint32_t idx;
 
 	mtx_lock(&ctx->screen->lock);
 
@@ -256,21 +256,21 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 			if ((cache->batches[i] == ctx->batch) ||
 					!cache->batches[i]->needs_flush)
 				continue;
 			if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
 				fd_batch_reference_locked(&flush_batch, cache->batches[i]);
 		}
 
 		/* we can drop lock temporarily here, since we hold a ref,
 		 * flush_batch won't disappear under us.
 		 */
-		pipe_mutex_unlock(ctx->screen->lock);
+		mtx_unlock(&ctx->screen->lock);
 		DBG("%p: too many batches!  flush forced!", flush_batch);
 		fd_batch_flush(flush_batch, true);
 		mtx_lock(&ctx->screen->lock);
 
 		/* While the resources get cleaned up automatically, the flush_batch
 		 * doesn't get removed from the dependencies of other batches, so
 		 * it won't be unref'd and will remain in the table.
 		 *
 		 * TODO maybe keep a bitmask of batches that depend on me, to make
 		 * this easier:
@@ -296,21 +296,21 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
 		goto out;
 
 	batch->seqno = cache->cnt++;
 	batch->idx = idx;
 	cache->batch_mask |= (1 << idx);
 
 	debug_assert(cache->batches[idx] == NULL);
 	cache->batches[idx] = batch;
 
 out:
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 
 	return batch;
 }
 
 static struct fd_batch *
 batch_from_key(struct fd_batch_cache *cache, struct key *key,
 		struct fd_context *ctx)
 {
 	struct fd_batch *batch = NULL;
 	uint32_t hash = key_hash(key);
@@ -342,21 +342,21 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
 
 	_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
 	batch->key = key;
 	batch->hash = hash;
 
 	for (unsigned idx = 0; idx < key->num_surfs; idx++) {
 		struct fd_resource *rsc = fd_resource(key->surf[idx].texture);
 		rsc->bc_batch_mask = (1 << batch->idx);
 	}
 
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 
 	return batch;
 }
 
 static void
 key_surf(struct key *key, unsigned idx, unsigned pos, struct pipe_surface *psurf)
 {
 	key->surf[idx].texture = psurf->texture;
 	key->surf[idx].u = psurf->u;
 	key->surf[idx].pos = pos;
diff --git a/src/gallium/drivers/freedreno/freedreno_context.h b/src/gallium/drivers/freedreno/freedreno_context.h
index d65f19a..cb33b8c 100644
--- a/src/gallium/drivers/freedreno/freedreno_context.h
+++ b/src/gallium/drivers/freedreno/freedreno_context.h
@@ -315,21 +315,21 @@ fd_context_assert_locked(struct fd_context *ctx)
 
 static inline void
 fd_context_lock(struct fd_context *ctx)
 {
 	mtx_lock(&ctx->screen->lock);
 }
 
 static inline void
 fd_context_unlock(struct fd_context *ctx)
 {
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 }
 
 static inline struct pipe_scissor_state *
 fd_context_get_scissor(struct fd_context *ctx)
 {
 	if (ctx->rasterizer && ctx->rasterizer->scissor)
 		return &ctx->scissor;
 	return &ctx->disabled_scissor;
 }
 
diff --git a/src/gallium/drivers/freedreno/freedreno_draw.c b/src/gallium/drivers/freedreno/freedreno_draw.c
index b98faca..edeb88b 100644
--- a/src/gallium/drivers/freedreno/freedreno_draw.c
+++ b/src/gallium/drivers/freedreno/freedreno_draw.c
@@ -165,21 +165,21 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 	foreach_bit(i, ctx->fragtex.valid_textures)
 		resource_read(batch, ctx->fragtex.textures[i]->texture);
 
 	/* Mark streamout buffers as being written.. */
 	for (i = 0; i < ctx->streamout.num_targets; i++)
 		if (ctx->streamout.targets[i])
 			resource_written(batch, ctx->streamout.targets[i]->buffer);
 
 	resource_written(batch, batch->query_buf);
 
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 
 	batch->num_draws++;
 
 	prims = u_reduced_prims_for_vertices(info->mode, info->count);
 
 	ctx->stats.draw_calls++;
 
 	/* TODO prims_emitted should be clipped when the stream-out buffer is
 	 * not large enough.  See max_tf_vtx().. probably need to move that
 	 * into common code.  Although a bit more annoying since a2xx doesn't
@@ -339,21 +339,21 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 			if (buffers & (PIPE_CLEAR_COLOR0 << i))
 				resource_written(batch, pfb->cbufs[i]->texture);
 
 	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
 		resource_written(batch, pfb->zsbuf->texture);
 		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
 	}
 
 	resource_written(batch, batch->query_buf);
 
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 
 	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
 		pfb->width, pfb->height, depth, stencil,
 		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
 		util_format_short_name(pipe_surface_format(pfb->zsbuf)));
 
 	/* if per-gen backend doesn't implement ctx->clear() generic
 	 * blitter clear:
 	 */
 	if (!ctx->clear) {
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index 275de97..a24f3f3 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -205,21 +205,21 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
 	 */
 	debug_assert(shadow->batch_mask == 0);
 	struct fd_batch *batch;
 	foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
 		struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
 		_mesa_set_remove(batch->resources, entry);
 		_mesa_set_add(batch->resources, shadow);
 	}
 	swap(rsc->batch_mask, shadow->batch_mask);
 
-	pipe_mutex_unlock(ctx->screen->lock);
+	mtx_unlock(&ctx->screen->lock);
 
 	struct pipe_blit_info blit = {0};
 	blit.dst.resource = prsc;
 	blit.dst.format   = prsc->format;
 	blit.src.resource = pshadow;
 	blit.src.format   = pshadow->format;
 	blit.mask = util_format_get_mask(prsc->format);
 	blit.filter = PIPE_TEX_FILTER_NEAREST;
 
 #define set_box(field, val) do {     \
diff --git a/src/gallium/drivers/llvmpipe/lp_fence.c b/src/gallium/drivers/llvmpipe/lp_fence.c
index e7c4ab6..20cd91c 100644
--- a/src/gallium/drivers/llvmpipe/lp_fence.c
+++ b/src/gallium/drivers/llvmpipe/lp_fence.c
@@ -94,34 +94,34 @@ lp_fence_signal(struct lp_fence *fence)
    assert(fence->count <= fence->rank);
 
    if (LP_DEBUG & DEBUG_FENCE)
       debug_printf("%s count=%u rank=%u\n", __FUNCTION__,
                    fence->count, fence->rank);
 
    /* Wakeup all threads waiting on the mutex:
     */
    cnd_broadcast(&fence->signalled);
 
-   pipe_mutex_unlock(fence->mutex);
+   mtx_unlock(&fence->mutex);
 }
 
 boolean
 lp_fence_signalled(struct lp_fence *f)
 {
    return f->count == f->rank;
 }
 
 void
 lp_fence_wait(struct lp_fence *f)
 {
    if (LP_DEBUG & DEBUG_FENCE)
       debug_printf("%s %d\n", __FUNCTION__, f->id);
 
    mtx_lock(&f->mutex);
    assert(f->issued);
    while (f->count < f->rank) {
       cnd_wait(&f->signalled, &f->mutex);
    }
-   pipe_mutex_unlock(f->mutex);
+   mtx_unlock(&f->mutex);
 }
 
 
diff --git a/src/gallium/drivers/llvmpipe/lp_scene.c b/src/gallium/drivers/llvmpipe/lp_scene.c
index d651192..dfad9fa 100644
--- a/src/gallium/drivers/llvmpipe/lp_scene.c
+++ b/src/gallium/drivers/llvmpipe/lp_scene.c
@@ -495,21 +495,21 @@ lp_scene_bin_iter_next( struct lp_scene *scene , int *x, int *y)
       /* no more bins left */
       goto end;
    }
 
    bin = lp_scene_get_bin(scene, scene->curr_x, scene->curr_y);
    *x = scene->curr_x;
    *y = scene->curr_y;
 
 end:
    /*printf("return bin %p at %d, %d\n", (void *) bin, *bin_x, *bin_y);*/
-   pipe_mutex_unlock(scene->mutex);
+   mtx_unlock(&scene->mutex);
    return bin;
 }
 
 
 void lp_scene_begin_binning( struct lp_scene *scene,
                              struct pipe_framebuffer_state *fb, boolean discard )
 {
    int i;
    unsigned max_layer = ~0;
 
diff --git a/src/gallium/drivers/llvmpipe/lp_setup.c b/src/gallium/drivers/llvmpipe/lp_setup.c
index f701e90..38d9138 100644
--- a/src/gallium/drivers/llvmpipe/lp_setup.c
+++ b/src/gallium/drivers/llvmpipe/lp_setup.c
@@ -172,21 +172,21 @@ lp_setup_rasterize_scene( struct lp_setup_context *setup )
     * rasterization (not in the same context at least) which is what the
     * multiple scenes per setup is about - when we get a new empty scene
     * any old one is already empty again because we waited here for
     * raster tasks to be finished. Ideally, we shouldn't need to wait here
     * and rely on fences elsewhere when waiting is necessary.
     * Certainly, lp_scene_end_rasterization() would need to be deferred too
     * and there's probably other bits why this doesn't actually work.
     */
    lp_rast_queue_scene(screen->rast, scene);
    lp_rast_finish(screen->rast);
-   pipe_mutex_unlock(screen->rast_mutex);
+   mtx_unlock(&screen->rast_mutex);
 
    lp_scene_end_rasterization(setup->scene);
    lp_setup_reset( setup );
 
    LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
 }
 
 
 
 static boolean
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_surface.c b/src/gallium/drivers/nouveau/nv50/nv50_surface.c
index 1e77184..908c534 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_surface.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_surface.c
@@ -1075,21 +1075,21 @@ nv50_blit_select_fp(struct nv50_blitctx *ctx, const struct pipe_blit_info *info)
       nv50_blit_reinterpret_pipe_texture_target(info->src.resource->target);
 
    const unsigned targ = nv50_blit_texture_type(ptarg);
    const unsigned mode = ctx->mode;
 
    if (!blitter->fp[targ][mode]) {
       mtx_lock(&blitter->mutex);
       if (!blitter->fp[targ][mode])
          blitter->fp[targ][mode] =
             nv50_blitter_make_fp(&ctx->nv50->base.pipe, mode, ptarg);
-      pipe_mutex_unlock(blitter->mutex);
+      mtx_unlock(&blitter->mutex);
    }
    ctx->fp = blitter->fp[targ][mode];
 }
 
 static void
 nv50_blit_set_dst(struct nv50_blitctx *ctx,
                   struct pipe_resource *res, unsigned level, unsigned layer,
                   enum pipe_format format)
 {
    struct nv50_context *nv50 = ctx->nv50;
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c b/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
index 5c4fda9..9445c05 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
@@ -915,21 +915,21 @@ nvc0_blit_select_fp(struct nvc0_blitctx *ctx, const struct pipe_blit_info *info)
       nv50_blit_reinterpret_pipe_texture_target(info->src.resource->target);
 
    const unsigned targ = nv50_blit_texture_type(ptarg);
    const unsigned mode = ctx->mode;
 
    if (!blitter->fp[targ][mode]) {
       mtx_lock(&blitter->mutex);
       if (!blitter->fp[targ][mode])
          blitter->fp[targ][mode] =
             nv50_blitter_make_fp(&ctx->nvc0->base.pipe, mode, ptarg);
-      pipe_mutex_unlock(blitter->mutex);
+      mtx_unlock(&blitter->mutex);
    }
    ctx->fp = blitter->fp[targ][mode];
 }
 
 static void
 nvc0_blit_set_dst(struct nvc0_blitctx *ctx,
                   struct pipe_resource *res, unsigned level, unsigned layer,
                   enum pipe_format format)
 {
    struct nvc0_context *nvc0 = ctx->nvc0;
diff --git a/src/gallium/drivers/r300/r300_blit.c b/src/gallium/drivers/r300/r300_blit.c
index 7d58d4c..434cf38 100644
--- a/src/gallium/drivers/r300/r300_blit.c
+++ b/src/gallium/drivers/r300/r300_blit.c
@@ -329,21 +329,21 @@ static void r300_clear(struct pipe_context* pipe,
              * accessing it. */
             if (!r300->screen->cmask_resource) {
                 mtx_lock(&r300->screen->cmask_mutex);
                 /* Double checking (first unlocked, then locked). */
                 if (!r300->screen->cmask_resource) {
                     /* Don't reference this, so that the texture can be
                      * destroyed while set in cmask_resource.
                      * Then in texture_destroy, we set cmask_resource to NULL. */
                     r300->screen->cmask_resource = fb->cbufs[0]->texture;
                 }
-                pipe_mutex_unlock(r300->screen->cmask_mutex);
+                mtx_unlock(&r300->screen->cmask_mutex);
             }
 
             if (r300->screen->cmask_resource == fb->cbufs[0]->texture) {
                 r300_set_clear_color(r300, color);
                 r300_mark_atom_dirty(r300, &r300->cmask_clear);
                 r300_mark_atom_dirty(r300, &r300->gpu_flush);
                 buffers &= ~PIPE_CLEAR_COLOR;
             }
         }
     }
diff --git a/src/gallium/drivers/r300/r300_texture.c b/src/gallium/drivers/r300/r300_texture.c
index b451b9f..32cbdcd 100644
--- a/src/gallium/drivers/r300/r300_texture.c
+++ b/src/gallium/drivers/r300/r300_texture.c
@@ -1027,21 +1027,21 @@ static void r300_texture_destroy(struct pipe_screen *screen,
                                  struct pipe_resource* texture)
 {
     struct r300_screen *rscreen = r300_screen(screen);
     struct r300_resource* tex = (struct r300_resource*)texture;
 
     if (tex->tex.cmask_dwords) {
         mtx_lock(&rscreen->cmask_mutex);
         if (texture == rscreen->cmask_resource) {
             rscreen->cmask_resource = NULL;
         }
-        pipe_mutex_unlock(rscreen->cmask_mutex);
+        mtx_unlock(&rscreen->cmask_mutex);
     }
     pb_reference(&tex->buf, NULL);
     FREE(tex);
 }
 
 boolean r300_resource_get_handle(struct pipe_screen* screen,
                                  struct pipe_context *ctx,
                                  struct pipe_resource *texture,
                                  struct winsys_handle *whandle,
                                  unsigned usage)
diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c
index 04d0617..e4411ad 100644
--- a/src/gallium/drivers/radeon/r600_gpu_load.c
+++ b/src/gallium/drivers/radeon/r600_gpu_load.c
@@ -176,21 +176,21 @@ void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
 static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
 				       unsigned busy_index)
 {
 	/* Start the thread if needed. */
 	if (!rscreen->gpu_load_thread) {
 		mtx_lock(&rscreen->gpu_load_mutex);
 		/* Check again inside the mutex. */
 		if (!rscreen->gpu_load_thread)
 			rscreen->gpu_load_thread =
 				pipe_thread_create(r600_gpu_load_thread, rscreen);
-		pipe_mutex_unlock(rscreen->gpu_load_mutex);
+		mtx_unlock(&rscreen->gpu_load_mutex);
 	}
 
 	unsigned busy = p_atomic_read(&rscreen->mmio_counters.array[busy_index]);
 	unsigned idle = p_atomic_read(&rscreen->mmio_counters.array[busy_index + 1]);
 
 	return busy | ((uint64_t)idle << 32);
 }
 
 static unsigned r600_end_mmio_counter(struct r600_common_screen *rscreen,
 				      uint64_t begin, unsigned busy_index)
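
r600_read_mmio_counter() above starts the load-sampling thread lazily with a double-checked test: once without the lock, then again under gpu_load_mutex. A standalone sketch of that idiom with C11 threads (illustrative names, not the radeon code; as in the driver, the unlocked read is a pragmatic shortcut, and a strictly conforming version would use an atomic flag):

#include <stdbool.h>
#include <threads.h>

struct sampler {
   mtx_t  mutex;
   thrd_t thread;
   bool   thread_started;
};

static int sampler_main(void *arg) { (void)arg; return 0; }

static void sampler_ensure_thread(struct sampler *s)
{
   if (!s->thread_started) {               /* cheap unlocked check */
      mtx_lock(&s->mutex);
      if (!s->thread_started) {            /* check again inside the mutex */
         if (thrd_create(&s->thread, sampler_main, s) == thrd_success)
            s->thread_started = true;
      }
      mtx_unlock(&s->mutex);
   }
}
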
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index 9ff838a..6b34454 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -1399,12 +1399,12 @@ bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned proce
 }
 
 void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
 			      uint64_t offset, uint64_t size, unsigned value)
 {
 	struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;
 
 	mtx_lock(&rscreen->aux_context_lock);
 	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
 	rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
-	pipe_mutex_unlock(rscreen->aux_context_lock);
+	mtx_unlock(&rscreen->aux_context_lock);
 }
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index 79c436d..7ca112c 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -304,21 +304,21 @@ static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
 	struct r600_common_screen *rscreen = rctx->screen;
 	struct pipe_context *ctx = &rctx->b;
 
 	if (ctx == rscreen->aux_context)
 		mtx_lock(&rscreen->aux_context_lock);
 
 	ctx->flush_resource(ctx, &rtex->resource.b.b);
 	ctx->flush(ctx, NULL, 0);
 
 	if (ctx == rscreen->aux_context)
-		pipe_mutex_unlock(rscreen->aux_context_lock);
+		mtx_unlock(&rscreen->aux_context_lock);
 }
 
 static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
 				       struct r600_texture *rtex)
 {
 	if (!rtex->cmask.size)
 		return;
 
 	assert(rtex->resource.b.b.nr_samples <= 1);
 
@@ -394,21 +394,21 @@ bool r600_texture_disable_dcc(struct r600_common_context *rctx,
 		return false;
 
 	if (&rctx->b == rscreen->aux_context)
 		mtx_lock(&rscreen->aux_context_lock);
 
 	/* Decompress DCC. */
 	rctx->decompress_dcc(&rctx->b, rtex);
 	rctx->b.flush(&rctx->b, NULL, 0);
 
 	if (&rctx->b == rscreen->aux_context)
-		pipe_mutex_unlock(rscreen->aux_context_lock);
+		mtx_unlock(&rscreen->aux_context_lock);
 
 	return r600_texture_discard_dcc(rscreen, rtex);
 }
 
 static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
 					     struct r600_texture *rtex,
 					     bool invalidate_storage)
 {
 	struct pipe_screen *screen = rctx->b.screen;
 	struct r600_texture *new_tex;
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index e61a5e2..6332562 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -7483,21 +7483,21 @@ si_get_shader_part(struct si_screen *sscreen,
 				 union si_shader_part_key *),
 		   const char *name)
 {
 	struct si_shader_part *result;
 
 	mtx_lock(&sscreen->shader_parts_mutex);
 
 	/* Find existing. */
 	for (result = *list; result; result = result->next) {
 		if (memcmp(&result->key, key, sizeof(*key)) == 0) {
-			pipe_mutex_unlock(sscreen->shader_parts_mutex);
+			mtx_unlock(&sscreen->shader_parts_mutex);
 			return result;
 		}
 	}
 
 	/* Compile a new one. */
 	result = CALLOC_STRUCT(si_shader_part);
 	result->key = *key;
 
 	struct si_shader shader = {};
 	struct si_shader_context ctx;
@@ -7537,21 +7537,21 @@ si_get_shader_part(struct si_screen *sscreen,
 		FREE(result);
 		result = NULL;
 		goto out;
 	}
 
 	result->next = *list;
 	*list = result;
 
 out:
 	si_llvm_dispose(&ctx);
-	pipe_mutex_unlock(sscreen->shader_parts_mutex);
+	mtx_unlock(&sscreen->shader_parts_mutex);
 	return result;
 }
 
 /**
  * Build the vertex shader prolog function.
  *
  * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
  * All inputs are returned unmodified. The vertex load indices are
  * stored after them, which will be used by the API VS for fetching inputs.
  *
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index c7a8d1f..9cde0aa 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -1263,71 +1263,71 @@ again:
 		/* Don't check the "current" shader. We checked it above. */
 		if (current != iter &&
 		    memcmp(&iter->key, key, sizeof(*key)) == 0) {
 			/* If it's an optimized shader and its compilation has
 			 * been started but isn't done, use the unoptimized
 			 * shader so as not to cause a stall due to compilation.
 			 */
 			if (iter->is_optimized &&
 			    !util_queue_fence_is_signalled(&iter->optimized_ready)) {
 				memset(&key->opt, 0, sizeof(key->opt));
-				pipe_mutex_unlock(sel->mutex);
+				mtx_unlock(&sel->mutex);
 				goto again;
 			}
 
 			if (iter->compilation_failed) {
-				pipe_mutex_unlock(sel->mutex);
+				mtx_unlock(&sel->mutex);
 				return -1; /* skip the draw call */
 			}
 
 			state->current = iter;
-			pipe_mutex_unlock(sel->mutex);
+			mtx_unlock(&sel->mutex);
 			return 0;
 		}
 	}
 
 	/* Build a new shader. */
 	shader = CALLOC_STRUCT(si_shader);
 	if (!shader) {
-		pipe_mutex_unlock(sel->mutex);
+		mtx_unlock(&sel->mutex);
 		return -ENOMEM;
 	}
 	shader->selector = sel;
 	shader->key = *key;
 	shader->compiler_ctx_state = *compiler_state;
 
 	/* Compile the main shader part if it doesn't exist. This can happen
 	 * if the initial guess was wrong. */
 	struct si_shader **mainp = si_get_main_shader_part(sel, key);
 	bool is_pure_monolithic =
 		sscreen->use_monolithic_shaders ||
 		memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
 
 	if (!*mainp && !is_pure_monolithic) {
 		struct si_shader *main_part = CALLOC_STRUCT(si_shader);
 
 		if (!main_part) {
 			FREE(shader);
-			pipe_mutex_unlock(sel->mutex);
+			mtx_unlock(&sel->mutex);
 			return -ENOMEM; /* skip the draw call */
 		}
 
 		main_part->selector = sel;
 		main_part->key.as_es = key->as_es;
 		main_part->key.as_ls = key->as_ls;
 
 		if (si_compile_tgsi_shader(sscreen, compiler_state->tm,
 					   main_part, false,
 					   &compiler_state->debug) != 0) {
 			FREE(main_part);
 			FREE(shader);
-			pipe_mutex_unlock(sel->mutex);
+			mtx_unlock(&sel->mutex);
 			return -ENOMEM; /* skip the draw call */
 		}
 		*mainp = main_part;
 	}
 
 	/* Monolithic-only shaders don't make a distinction between optimized
 	 * and unoptimized. */
 	shader->is_monolithic =
 		is_pure_monolithic ||
 		memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
@@ -1350,31 +1350,31 @@ again:
 	if (shader->is_optimized &&
 	    !is_pure_monolithic &&
 	    thread_index < 0) {
 		/* Compile it asynchronously. */
 		util_queue_add_job(&sscreen->shader_compiler_queue,
 				   shader, &shader->optimized_ready,
 				   si_build_shader_variant, NULL);
 
 		/* Use the default (unoptimized) shader for now. */
 		memset(&key->opt, 0, sizeof(key->opt));
-		pipe_mutex_unlock(sel->mutex);
+		mtx_unlock(&sel->mutex);
 		goto again;
 	}
 
 	assert(!shader->is_optimized);
 	si_build_shader_variant(shader, thread_index);
 
 	if (!shader->compilation_failed)
 		state->current = shader;
 
-	pipe_mutex_unlock(sel->mutex);
+	mtx_unlock(&sel->mutex);
 	return shader->compilation_failed ? -1 : 0;
 }
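
si_shader_select_with_key() above takes sel->mutex once and then has to release it on every path out: the cache hit, each error return, and the retry that jumps back to "again". A reduced sketch of that unlock-on-every-exit discipline (hypothetical helpers, not the radeonsi API; compilation is done synchronously here to keep it short):

#include <errno.h>
#include <stddef.h>
#include <threads.h>

struct selector {
   mtx_t mutex;
   void *variant;   /* cached compiled variant, if any */
};

static int select_variant(struct selector *sel, void *(*compile)(void))
{
   mtx_lock(&sel->mutex);

   if (sel->variant) {              /* cache hit */
      mtx_unlock(&sel->mutex);
      return 0;
   }

   void *v = compile();
   if (!v) {
      mtx_unlock(&sel->mutex);      /* error path releases the lock too */
      return -ENOMEM;
   }

   sel->variant = v;
   mtx_unlock(&sel->mutex);         /* normal path */
   return 0;
}
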
 
 static int si_shader_select(struct pipe_context *ctx,
 			    struct si_shader_ctx_state *state,
 			    struct si_compiler_ctx_state *compiler_state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
 	struct si_shader_key key;
 
@@ -1454,38 +1454,38 @@ void si_init_shader_selector_async(void *job, int thread_index)
 		shader->selector = sel;
 		si_parse_next_shader_property(&sel->info, &shader->key);
 
 		tgsi_binary = si_get_tgsi_binary(sel);
 
 		/* Try to load the shader from the shader cache. */
 		mtx_lock(&sscreen->shader_cache_mutex);
 
 		if (tgsi_binary &&
 		    si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
-			pipe_mutex_unlock(sscreen->shader_cache_mutex);
+			mtx_unlock(&sscreen->shader_cache_mutex);
 		} else {
-			pipe_mutex_unlock(sscreen->shader_cache_mutex);
+			mtx_unlock(&sscreen->shader_cache_mutex);
 
 			/* Compile the shader if it hasn't been loaded from the cache. */
 			if (si_compile_tgsi_shader(sscreen, tm, shader, false,
 						   debug) != 0) {
 				FREE(shader);
 				FREE(tgsi_binary);
 				fprintf(stderr, "radeonsi: can't compile a main shader part\n");
 				return;
 			}
 
 			if (tgsi_binary) {
 				mtx_lock(&sscreen->shader_cache_mutex);
 				if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
 					FREE(tgsi_binary);
-				pipe_mutex_unlock(sscreen->shader_cache_mutex);
+				mtx_unlock(&sscreen->shader_cache_mutex);
 			}
 		}
 
 		*si_get_main_shader_part(sel, &shader->key) = shader;
 
 		/* Unset "outputs_written" flags for outputs converted to
 		 * DEFAULT_VAL, so that later inter-shader optimizations don't
 		 * try to eliminate outputs that don't exist in the final
 		 * shader.
 		 *
diff --git a/src/gallium/drivers/rbug/rbug_context.c b/src/gallium/drivers/rbug/rbug_context.c
index 8d16ec2..268cf00 100644
--- a/src/gallium/drivers/rbug/rbug_context.c
+++ b/src/gallium/drivers/rbug/rbug_context.c
@@ -42,21 +42,21 @@ rbug_destroy(struct pipe_context *_pipe)
 {
    struct rbug_screen *rb_screen = rbug_screen(_pipe->screen);
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    rbug_screen_remove_from_list(rb_screen, contexts, rb_pipe);
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->destroy(pipe);
    rb_pipe->pipe = NULL;
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    FREE(rb_pipe);
 }
 
 static void
 rbug_draw_block_locked(struct rbug_context *rb_pipe, int flag)
 {
 
    if (rb_pipe->draw_blocker & flag) {
       rb_pipe->draw_blocked |= flag;
@@ -121,297 +121,297 @@ rbug_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
 
    mtx_lock(&rb_pipe->draw_mutex);
    rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_BEFORE);
 
    mtx_lock(&rb_pipe->call_mutex);
    /* XXX loop over PIPE_SHADER_x here */
    if (!(rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] && rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT]->disabled) &&
        !(rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] && rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY]->disabled) &&
        !(rb_pipe->curr.shader[PIPE_SHADER_VERTEX] && rb_pipe->curr.shader[PIPE_SHADER_VERTEX]->disabled))
       pipe->draw_vbo(pipe, info);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_AFTER);
-   pipe_mutex_unlock(rb_pipe->draw_mutex);
+   mtx_unlock(&rb_pipe->draw_mutex);
 }
 
 static struct pipe_query *
 rbug_create_query(struct pipe_context *_pipe,
                   unsigned query_type,
                   unsigned index)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_query *query;
 
    mtx_lock(&rb_pipe->call_mutex);
    query = pipe->create_query(pipe,
                               query_type,
                               index);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
    return query;
 }
 
 static void
 rbug_destroy_query(struct pipe_context *_pipe,
                    struct pipe_query *query)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->destroy_query(pipe,
                        query);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static boolean
 rbug_begin_query(struct pipe_context *_pipe,
                  struct pipe_query *query)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    boolean ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->begin_query(pipe, query);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
    return ret;
 }
 
 static bool
 rbug_end_query(struct pipe_context *_pipe,
                struct pipe_query *query)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    bool ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->end_query(pipe,
                          query);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static boolean
 rbug_get_query_result(struct pipe_context *_pipe,
                       struct pipe_query *query,
                       boolean wait,
                       union pipe_query_result *result)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    boolean ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->get_query_result(pipe,
                                 query,
                                 wait,
                                 result);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static void
 rbug_set_active_query_state(struct pipe_context *_pipe, boolean enable)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_active_query_state(pipe, enable);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_blend_state(struct pipe_context *_pipe,
                         const struct pipe_blend_state *blend)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->create_blend_state(pipe,
                                   blend);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static void
 rbug_bind_blend_state(struct pipe_context *_pipe,
                       void *blend)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->bind_blend_state(pipe,
                           blend);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_blend_state(struct pipe_context *_pipe,
                         void *blend)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->delete_blend_state(pipe,
                             blend);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_sampler_state(struct pipe_context *_pipe,
                           const struct pipe_sampler_state *sampler)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->create_sampler_state(pipe,
                                     sampler);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static void
 rbug_bind_sampler_states(struct pipe_context *_pipe,
                          enum pipe_shader_type shader,
                          unsigned start, unsigned count,
                          void **samplers)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->bind_sampler_states(pipe, shader, start, count, samplers);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_sampler_state(struct pipe_context *_pipe,
                           void *sampler)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->delete_sampler_state(pipe,
                               sampler);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_rasterizer_state(struct pipe_context *_pipe,
                              const struct pipe_rasterizer_state *rasterizer)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->create_rasterizer_state(pipe,
                                        rasterizer);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static void
 rbug_bind_rasterizer_state(struct pipe_context *_pipe,
                            void *rasterizer)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->bind_rasterizer_state(pipe,
                                rasterizer);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_rasterizer_state(struct pipe_context *_pipe,
                              void *rasterizer)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->delete_rasterizer_state(pipe,
                                  rasterizer);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_depth_stencil_alpha_state(struct pipe_context *_pipe,
                                       const struct pipe_depth_stencil_alpha_state *depth_stencil_alpha)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->create_depth_stencil_alpha_state(pipe,
                                                 depth_stencil_alpha);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static void
 rbug_bind_depth_stencil_alpha_state(struct pipe_context *_pipe,
                                     void *depth_stencil_alpha)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->bind_depth_stencil_alpha_state(pipe,
                                         depth_stencil_alpha);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_depth_stencil_alpha_state(struct pipe_context *_pipe,
                                       void *depth_stencil_alpha)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->delete_depth_stencil_alpha_state(pipe,
                                           depth_stencil_alpha);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_fs_state(struct pipe_context *_pipe,
                      const struct pipe_shader_state *state)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *result;
 
    mtx_lock(&rb_pipe->call_mutex);
    result = pipe->create_fs_state(pipe, state);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    if (!result)
       return NULL;
 
    return rbug_shader_create(rb_pipe, state, result, RBUG_SHADER_FRAGMENT);
 }
 
 static void
 rbug_bind_fs_state(struct pipe_context *_pipe,
                    void *_fs)
@@ -420,46 +420,46 @@ rbug_bind_fs_state(struct pipe_context *_pipe,
    struct pipe_context *pipe = rb_pipe->pipe;
    void *fs;
 
    mtx_lock(&rb_pipe->call_mutex);
 
    fs = rbug_shader_unwrap(_fs);
    rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] = rbug_shader(_fs);
    pipe->bind_fs_state(pipe,
                        fs);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_fs_state(struct pipe_context *_pipe,
                      void *_fs)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_shader *rb_shader = rbug_shader(_fs);
 
    mtx_lock(&rb_pipe->call_mutex);
    rbug_shader_destroy(rb_pipe, rb_shader);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_vs_state(struct pipe_context *_pipe,
                      const struct pipe_shader_state *state)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *result;
 
    mtx_lock(&rb_pipe->call_mutex);
    result = pipe->create_vs_state(pipe, state);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    if (!result)
       return NULL;
 
    return rbug_shader_create(rb_pipe, state, result, RBUG_SHADER_VERTEX);
 }
 
 static void
 rbug_bind_vs_state(struct pipe_context *_pipe,
                    void *_vs)
@@ -468,46 +468,46 @@ rbug_bind_vs_state(struct pipe_context *_pipe,
    struct pipe_context *pipe = rb_pipe->pipe;
    void *vs;
 
    mtx_lock(&rb_pipe->call_mutex);
 
    vs = rbug_shader_unwrap(_vs);
    rb_pipe->curr.shader[PIPE_SHADER_VERTEX] = rbug_shader(_vs);
    pipe->bind_vs_state(pipe,
                        vs);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_vs_state(struct pipe_context *_pipe,
                      void *_vs)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_shader *rb_shader = rbug_shader(_vs);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_lock(&rb_pipe->call_mutex); /* the removed line was an unpaired unlock */
    rbug_shader_destroy(rb_pipe, rb_shader);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_gs_state(struct pipe_context *_pipe,
                      const struct pipe_shader_state *state)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *result;
 
    mtx_lock(&rb_pipe->call_mutex);
    result = pipe->create_gs_state(pipe, state);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    if (!result)
       return NULL;
 
    return rbug_shader_create(rb_pipe, state, result, RBUG_SHADER_GEOM);
 }
 
 static void
 rbug_bind_gs_state(struct pipe_context *_pipe,
                    void *_gs)
@@ -516,116 +516,116 @@ rbug_bind_gs_state(struct pipe_context *_pipe,
    struct pipe_context *pipe = rb_pipe->pipe;
    void *gs;
 
    mtx_lock(&rb_pipe->call_mutex);
 
    gs = rbug_shader_unwrap(_gs);
    rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] = rbug_shader(_gs);
    pipe->bind_gs_state(pipe,
                        gs);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_gs_state(struct pipe_context *_pipe,
                      void *_gs)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_shader *rb_shader = rbug_shader(_gs);
 
    mtx_lock(&rb_pipe->call_mutex);
    rbug_shader_destroy(rb_pipe, rb_shader);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void *
 rbug_create_vertex_elements_state(struct pipe_context *_pipe,
                                   unsigned num_elements,
                                   const struct pipe_vertex_element *vertex_elements)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    void *ret;
 
    mtx_lock(&rb_pipe->call_mutex);
    ret = pipe->create_vertex_elements_state(pipe,
                                              num_elements,
                                              vertex_elements);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    return ret;
 }
 
 static void
 rbug_bind_vertex_elements_state(struct pipe_context *_pipe,
                                 void *velems)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->bind_vertex_elements_state(pipe,
                                     velems);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_delete_vertex_elements_state(struct pipe_context *_pipe,
                                   void *velems)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->delete_vertex_elements_state(pipe,
                                       velems);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_blend_color(struct pipe_context *_pipe,
                      const struct pipe_blend_color *blend_color)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_blend_color(pipe,
                          blend_color);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_stencil_ref(struct pipe_context *_pipe,
                      const struct pipe_stencil_ref *stencil_ref)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_stencil_ref(pipe,
                          stencil_ref);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_clip_state(struct pipe_context *_pipe,
                     const struct pipe_clip_state *clip)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_clip_state(pipe,
                         clip);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_constant_buffer(struct pipe_context *_pipe,
                          uint shader,
                          uint index,
                          const struct pipe_constant_buffer *_cb)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
@@ -635,21 +635,21 @@ rbug_set_constant_buffer(struct pipe_context *_pipe,
    if (_cb) {
       cb = *_cb;
       cb.buffer = rbug_resource_unwrap(_cb->buffer);
    }
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_constant_buffer(pipe,
                              shader,
                              index,
                              _cb ? &cb : NULL);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_framebuffer_state(struct pipe_context *_pipe,
                            const struct pipe_framebuffer_state *_state)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_framebuffer_state unwrapped_state;
    struct pipe_framebuffer_state *state = NULL;
@@ -674,62 +674,62 @@ rbug_set_framebuffer_state(struct pipe_context *_pipe,
       }
       unwrapped_state.zsbuf = rbug_surface_unwrap(_state->zsbuf);
       if (_state->zsbuf)
          rb_pipe->curr.zsbuf = rbug_resource(_state->zsbuf->texture);
       state = &unwrapped_state;
    }
 
    pipe->set_framebuffer_state(pipe,
                                state);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_polygon_stipple(struct pipe_context *_pipe,
                          const struct pipe_poly_stipple *poly_stipple)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_polygon_stipple(pipe,
                              poly_stipple);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_scissor_states(struct pipe_context *_pipe,
                         unsigned start_slot,
                         unsigned num_scissors,
                         const struct pipe_scissor_state *scissor)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_scissor_states(pipe, start_slot, num_scissors, scissor);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_viewport_states(struct pipe_context *_pipe,
                          unsigned start_slot,
                          unsigned num_viewports,
                          const struct pipe_viewport_state *viewport)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_viewport_states(pipe, start_slot, num_viewports, viewport);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_sampler_views(struct pipe_context *_pipe,
                        enum pipe_shader_type shader,
                        unsigned start,
                        unsigned num,
                        struct pipe_sampler_view **_views)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
@@ -753,21 +753,21 @@ rbug_set_sampler_views(struct pipe_context *_pipe,
       for (i = 0; i < num; i++) {
          rb_pipe->curr.views[shader][i] = rbug_sampler_view(_views[i]);
          rb_pipe->curr.texs[shader][i] = rbug_resource(_views[i] ? _views[i]->texture : NULL);
          unwrapped_views[i] = rbug_sampler_view_unwrap(_views[i]);
       }
       views = unwrapped_views;
    }
 
    pipe->set_sampler_views(pipe, shader, start, num, views);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_vertex_buffers(struct pipe_context *_pipe,
                         unsigned start_slot, unsigned num_buffers,
                         const struct pipe_vertex_buffer *_buffers)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_vertex_buffer unwrapped_buffers[PIPE_MAX_SHADER_INPUTS];
@@ -780,95 +780,95 @@ rbug_set_vertex_buffers(struct pipe_context *_pipe,
       memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
       for (i = 0; i < num_buffers; i++)
          unwrapped_buffers[i].buffer = rbug_resource_unwrap(_buffers[i].buffer);
       buffers = unwrapped_buffers;
    }
 
    pipe->set_vertex_buffers(pipe, start_slot,
                             num_buffers,
                             buffers);
 
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_index_buffer(struct pipe_context *_pipe,
                       const struct pipe_index_buffer *_ib)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_index_buffer unwrapped_ib, *ib = NULL;
 
    if (_ib) {
       unwrapped_ib = *_ib;
       unwrapped_ib.buffer = rbug_resource_unwrap(_ib->buffer);
       ib = &unwrapped_ib;
    }
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_index_buffer(pipe, ib);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_sample_mask(struct pipe_context *_pipe,
                      unsigned sample_mask)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_sample_mask(pipe, sample_mask);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static struct pipe_stream_output_target *
 rbug_create_stream_output_target(struct pipe_context *_pipe,
                                  struct pipe_resource *_res,
                                  unsigned buffer_offset, unsigned buffer_size)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_resource *res = rbug_resource_unwrap(_res);
    struct pipe_stream_output_target *target;
 
    mtx_lock(&rb_pipe->call_mutex);
    target = pipe->create_stream_output_target(pipe, res, buffer_offset,
                                               buffer_size);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
    return target;
 }
 
 static void
 rbug_stream_output_target_destroy(struct pipe_context *_pipe,
                                   struct pipe_stream_output_target *target)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->stream_output_target_destroy(pipe, target);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_set_stream_output_targets(struct pipe_context *_pipe,
                                unsigned num_targets,
                                struct pipe_stream_output_target **targets,
                                const unsigned *offsets)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->set_stream_output_targets(pipe, num_targets, targets, offsets);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_resource_copy_region(struct pipe_context *_pipe,
                           struct pipe_resource *_dst,
                           unsigned dst_level,
                           unsigned dstx,
                           unsigned dsty,
                           unsigned dstz,
                           struct pipe_resource *_src,
@@ -885,74 +885,74 @@ rbug_resource_copy_region(struct pipe_context *_pipe,
    mtx_lock(&rb_pipe->call_mutex);
    pipe->resource_copy_region(pipe,
                               dst,
                               dst_level,
                               dstx,
                               dsty,
                               dstz,
                               src,
                               src_level,
                               src_box);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_blit(struct pipe_context *_pipe, const struct pipe_blit_info *_blit_info)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_resource *rb_resource_dst = rbug_resource(_blit_info->dst.resource);
    struct rbug_resource *rb_resource_src = rbug_resource(_blit_info->src.resource);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_resource *dst = rb_resource_dst->resource;
    struct pipe_resource *src = rb_resource_src->resource;
    struct pipe_blit_info blit_info;
 
    blit_info = *_blit_info;
    blit_info.dst.resource = dst;
    blit_info.src.resource = src;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->blit(pipe, &blit_info);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_flush_resource(struct pipe_context *_pipe,
                     struct pipe_resource *_res)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_resource *rb_resource_res = rbug_resource(_res);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_resource *res = rb_resource_res->resource;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->flush_resource(pipe, res);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_clear(struct pipe_context *_pipe,
            unsigned buffers,
            const union pipe_color_union *color,
            double depth,
            unsigned stencil)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->clear(pipe,
                buffers,
                color,
                depth,
                stencil);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_clear_render_target(struct pipe_context *_pipe,
                          struct pipe_surface *_dst,
                          const union pipe_color_union *color,
                          unsigned dstx, unsigned dsty,
                          unsigned width, unsigned height,
                          bool render_condition_enabled)
 {
@@ -963,21 +963,21 @@ rbug_clear_render_target(struct pipe_context *_pipe,
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->clear_render_target(pipe,
                              dst,
                              color,
                              dstx,
                              dsty,
                              width,
                              height,
                              render_condition_enabled);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_clear_depth_stencil(struct pipe_context *_pipe,
                          struct pipe_surface *_dst,
                          unsigned clear_flags,
                          double depth,
                          unsigned stencil,
                          unsigned dstx, unsigned dsty,
                          unsigned width, unsigned height,
@@ -992,52 +992,52 @@ rbug_clear_depth_stencil(struct pipe_context *_pipe,
    pipe->clear_depth_stencil(pipe,
                              dst,
                              clear_flags,
                              depth,
                              stencil,
                              dstx,
                              dsty,
                              width,
                              height,
                              render_condition_enabled);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static void
 rbug_flush(struct pipe_context *_pipe,
            struct pipe_fence_handle **fence,
            unsigned flags)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
 
    mtx_lock(&rb_pipe->call_mutex);
    pipe->flush(pipe, fence, flags);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 static struct pipe_sampler_view *
 rbug_context_create_sampler_view(struct pipe_context *_pipe,
                                  struct pipe_resource *_resource,
                                  const struct pipe_sampler_view *templ)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_resource *rb_resource = rbug_resource(_resource);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_resource *resource = rb_resource->resource;
    struct pipe_sampler_view *result;
 
    mtx_lock(&rb_pipe->call_mutex);
    result = pipe->create_sampler_view(pipe,
                                       resource,
                                       templ);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    if (result)
       return rbug_sampler_view_create(rb_pipe, rb_resource, result);
    return NULL;
 }
 
 static void
 rbug_context_sampler_view_destroy(struct pipe_context *_pipe,
                                   struct pipe_sampler_view *_view)
 {
@@ -1053,38 +1053,38 @@ rbug_context_create_surface(struct pipe_context *_pipe,
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_resource *rb_resource = rbug_resource(_resource);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_resource *resource = rb_resource->resource;
    struct pipe_surface *result;
 
    mtx_lock(&rb_pipe->call_mutex);
    result = pipe->create_surface(pipe,
                                  resource,
                                  surf_tmpl);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    if (result)
       return rbug_surface_create(rb_pipe, rb_resource, result);
    return NULL;
 }
 
 static void
 rbug_context_surface_destroy(struct pipe_context *_pipe,
                              struct pipe_surface *_surface)
 {
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct rbug_surface *rb_surface = rbug_surface(_surface);
 
    mtx_lock(&rb_pipe->call_mutex);
    rbug_surface_destroy(rb_pipe,
                         rb_surface);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 
 
 static void *
 rbug_context_transfer_map(struct pipe_context *_context,
                           struct pipe_resource *_resource,
                           unsigned level,
                           unsigned usage,
                           const struct pipe_box *box,
@@ -1096,76 +1096,76 @@ rbug_context_transfer_map(struct pipe_context *_context,
    struct pipe_resource *resource = rb_resource->resource;
    struct pipe_transfer *result;
    void *map;
 
    mtx_lock(&rb_pipe->call_mutex);
    map = context->transfer_map(context,
                                resource,
                                level,
                                usage,
                                box, &result);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 
    *transfer = rbug_transfer_create(rb_pipe, rb_resource, result);
    return *transfer ? map : NULL;
 }
 
 static void
 rbug_context_transfer_flush_region(struct pipe_context *_context,
                                    struct pipe_transfer *_transfer,
                                    const struct pipe_box *box)
 {
    struct rbug_context *rb_pipe = rbug_context(_context);
    struct rbug_transfer *rb_transfer = rbug_transfer(_transfer);
    struct pipe_context *context = rb_pipe->pipe;
    struct pipe_transfer *transfer = rb_transfer->transfer;
 
    mtx_lock(&rb_pipe->call_mutex);
    context->transfer_flush_region(context,
                                   transfer,
                                   box);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 
 static void
 rbug_context_transfer_unmap(struct pipe_context *_context,
                             struct pipe_transfer *_transfer)
 {
    struct rbug_context *rb_pipe = rbug_context(_context);
    struct rbug_transfer *rb_transfer = rbug_transfer(_transfer);
    struct pipe_context *context = rb_pipe->pipe;
    struct pipe_transfer *transfer = rb_transfer->transfer;
 
    mtx_lock(&rb_pipe->call_mutex);
    context->transfer_unmap(context,
                            transfer);
    rbug_transfer_destroy(rb_pipe,
                          rb_transfer);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 
 static void
 rbug_context_buffer_subdata(struct pipe_context *_context,
                             struct pipe_resource *_resource,
                             unsigned usage, unsigned offset,
                             unsigned size, const void *data)
 {
    struct rbug_context *rb_pipe = rbug_context(_context);
    struct rbug_resource *rb_resource = rbug_resource(_resource);
    struct pipe_context *context = rb_pipe->pipe;
    struct pipe_resource *resource = rb_resource->resource;
 
    mtx_lock(&rb_pipe->call_mutex);
    context->buffer_subdata(context, resource, usage, offset, size, data);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 
 static void
 rbug_context_texture_subdata(struct pipe_context *_context,
                              struct pipe_resource *_resource,
                              unsigned level,
                              unsigned usage,
                              const struct pipe_box *box,
                              const void *data,
@@ -1179,21 +1179,21 @@ rbug_context_texture_subdata(struct pipe_context *_context,
 
    mtx_lock(&rb_pipe->call_mutex);
    context->texture_subdata(context,
                             resource,
                             level,
                             usage,
                             box,
                             data,
                             stride,
                             layer_stride);
-   pipe_mutex_unlock(rb_pipe->call_mutex);
+   mtx_unlock(&rb_pipe->call_mutex);
 }
 
 
 struct pipe_context *
 rbug_context_create(struct pipe_screen *_screen, struct pipe_context *pipe)
 {
    struct rbug_context *rb_pipe;
    struct rbug_screen *rb_screen = rbug_screen(_screen);
 
    if (!rb_screen)
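
For readers skimming the hunks above: the only functional change is the calling convention. The C11-style mutex API takes the mutex by address for both lock and unlock, which is where the added '&' comes from. A minimal standalone sketch of the pairing, using the standard <threads.h> names (Mesa's c11/threads.h wrapper is assumed to expose the same signatures):

#include <threads.h>

static mtx_t example_mutex;
static int shared_counter;

static void
example_init(void)
{
   /* plain, non-recursive mutex; error checking omitted for brevity */
   mtx_init(&example_mutex, mtx_plain);
}

static void
example_bump(void)
{
   mtx_lock(&example_mutex);     /* lock and unlock both take &mutex ... */
   shared_counter++;
   mtx_unlock(&example_mutex);   /* ... hence the '&' added in every hunk */
}
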
diff --git a/src/gallium/drivers/rbug/rbug_core.c b/src/gallium/drivers/rbug/rbug_core.c
index 323fafe..b3082da 100644
--- a/src/gallium/drivers/rbug/rbug_core.c
+++ b/src/gallium/drivers/rbug/rbug_core.c
@@ -181,21 +181,21 @@ rbug_texture_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
    struct rbug_list *ptr;
    rbug_texture_t *texs;
    int i = 0;
 
    mtx_lock(&rb_screen->list_mutex);
    texs = MALLOC(rb_screen->num_resources * sizeof(rbug_texture_t));
    foreach(ptr, &rb_screen->resources) {
       tr_tex = container_of(ptr, struct rbug_resource, list);
       texs[i++] = VOID2U64(tr_tex);
    }
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    rbug_send_texture_list_reply(tr_rbug->con, serial, texs, i, NULL);
    FREE(texs);
 
    return 0;
 }
 
 static int
 rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
@@ -208,41 +208,41 @@ rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
 
    mtx_lock(&rb_screen->list_mutex);
    foreach(ptr, &rb_screen->resources) {
       tr_tex = container_of(ptr, struct rbug_resource, list);
       if (gpti->texture == VOID2U64(tr_tex))
          break;
       tr_tex = NULL;
    }
 
    if (!tr_tex) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    t = tr_tex->resource;
    num_layers = util_max_layer(t, 0) + 1;
 
    rbug_send_texture_info_reply(tr_rbug->con, serial,
                                t->target, t->format,
                                &t->width0, 1,
                                &t->height0, 1,
                                &num_layers, 1,
                                util_format_get_blockwidth(t->format),
                                util_format_get_blockheight(t->format),
                                util_format_get_blocksize(t->format),
                                t->last_level,
                                t->nr_samples,
                                t->bind,
                                NULL);
 
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_texture_read *gptr = (struct rbug_proto_texture_read *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
@@ -257,21 +257,21 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
 
    mtx_lock(&rb_screen->list_mutex);
    foreach(ptr, &rb_screen->resources) {
       tr_tex = container_of(ptr, struct rbug_resource, list);
       if (gptr->texture == VOID2U64(tr_tex))
          break;
       tr_tex = NULL;
    }
 
    if (!tr_tex) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    tex = tr_tex->resource;
    map = pipe_transfer_map(context, tex,
                            gptr->level, gptr->face + gptr->zslice,
                            PIPE_TRANSFER_READ,
                            gptr->x, gptr->y, gptr->w, gptr->h, &t);
 
    rbug_send_texture_read_reply(tr_rbug->con, serial,
@@ -280,41 +280,41 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
                                 util_format_get_blockheight(t->resource->format),
                                 util_format_get_blocksize(t->resource->format),
                                 (uint8_t*)map,
                                 t->stride * util_format_get_nblocksy(t->resource->format,
                                                                      t->box.height),
                                 t->stride,
                                 NULL);
 
    context->transfer_unmap(context, t);
 
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_context_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_list *ptr;
    struct rbug_context *rb_context = NULL;
    rbug_context_t *ctxs;
    int i = 0;
 
    mtx_lock(&rb_screen->list_mutex);
    ctxs = MALLOC(rb_screen->num_contexts * sizeof(rbug_context_t));
    foreach(ptr, &rb_screen->contexts) {
       rb_context = container_of(ptr, struct rbug_context, list);
       ctxs[i++] = VOID2U64(rb_context);
    }
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    rbug_send_context_list_reply(tr_rbug->con, serial, ctxs, i, NULL);
    FREE(ctxs);
 
    return 0;
 }
 
 static int
 rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
@@ -323,21 +323,21 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
    rbug_texture_t cbufs[PIPE_MAX_COLOR_BUFS];
    rbug_texture_t texs[PIPE_MAX_SHADER_SAMPLER_VIEWS];
    unsigned i;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, info->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    /* protect the pipe context */
    mtx_lock(&rb_context->draw_mutex);
    mtx_lock(&rb_context->call_mutex);
 
    for (i = 0; i < rb_context->curr.nr_cbufs; i++)
       cbufs[i] = VOID2U64(rb_context->curr.cbufs[i]);
 
@@ -345,205 +345,205 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
    for (i = 0; i < rb_context->curr.num_views[PIPE_SHADER_FRAGMENT]; i++)
       texs[i] = VOID2U64(rb_context->curr.texs[PIPE_SHADER_FRAGMENT][i]);
 
    rbug_send_context_info_reply(tr_rbug->con, serial,
                                 VOID2U64(rb_context->curr.shader[PIPE_SHADER_VERTEX]), VOID2U64(rb_context->curr.shader[PIPE_SHADER_FRAGMENT]),
                                 texs, rb_context->curr.num_views[PIPE_SHADER_FRAGMENT],
                                 cbufs, rb_context->curr.nr_cbufs,
                                 VOID2U64(rb_context->curr.zsbuf),
                                 rb_context->draw_blocker, rb_context->draw_blocked, NULL);
 
-   pipe_mutex_unlock(rb_context->call_mutex);
-   pipe_mutex_unlock(rb_context->draw_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->call_mutex);
+   mtx_unlock(&rb_context->draw_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_context_draw_block(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_context_draw_block *block = (struct rbug_proto_context_draw_block *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, block->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->draw_mutex);
    rb_context->draw_blocker |= block->block;
-   pipe_mutex_unlock(rb_context->draw_mutex);
+   mtx_unlock(&rb_context->draw_mutex);
 
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_context_draw_step *step = (struct rbug_proto_context_draw_step *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, step->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->draw_mutex);
    if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
       if (step->step & RBUG_BLOCK_RULE)
          rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
    } else {
       rb_context->draw_blocked &= ~step->step;
    }
-   pipe_mutex_unlock(rb_context->draw_mutex);
+   mtx_unlock(&rb_context->draw_mutex);
 
    cnd_broadcast(&rb_context->draw_cond);
 
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_context_draw_unblock *unblock = (struct rbug_proto_context_draw_unblock *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, unblock->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->draw_mutex);
    if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
       if (unblock->unblock & RBUG_BLOCK_RULE)
          rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
    } else {
       rb_context->draw_blocked &= ~unblock->unblock;
    }
    rb_context->draw_blocker &= ~unblock->unblock;
-   pipe_mutex_unlock(rb_context->draw_mutex);
+   mtx_unlock(&rb_context->draw_mutex);
 
    cnd_broadcast(&rb_context->draw_cond);
 
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_context_draw_rule *rule = (struct rbug_proto_context_draw_rule *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, rule->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->draw_mutex);
    rb_context->draw_rule.shader[PIPE_SHADER_VERTEX] = U642VOID(rule->vertex);
    rb_context->draw_rule.shader[PIPE_SHADER_FRAGMENT] = U642VOID(rule->fragment);
    rb_context->draw_rule.texture = U642VOID(rule->texture);
    rb_context->draw_rule.surf = U642VOID(rule->surface);
    rb_context->draw_rule.blocker = rule->block;
    rb_context->draw_blocker |= RBUG_BLOCK_RULE;
-   pipe_mutex_unlock(rb_context->draw_mutex);
+   mtx_unlock(&rb_context->draw_mutex);
 
    cnd_broadcast(&rb_context->draw_cond);
 
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_context_flush *flush = (struct rbug_proto_context_flush *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, flush->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    /* protect the pipe context */
    mtx_lock(&rb_context->call_mutex);
 
    rb_context->pipe->flush(rb_context->pipe, NULL, 0);
 
-   pipe_mutex_unlock(rb_context->call_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->call_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    struct rbug_proto_shader_list *list = (struct rbug_proto_shader_list *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
    struct rbug_shader *tr_shdr = NULL;
    struct rbug_list *ptr;
    rbug_shader_t *shdrs;
    int i = 0;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, list->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->list_mutex);
    shdrs = MALLOC(rb_context->num_shaders * sizeof(rbug_shader_t));
    foreach(ptr, &rb_context->shaders) {
       tr_shdr = container_of(ptr, struct rbug_shader, list);
       shdrs[i++] = VOID2U64(tr_shdr);
    }
 
-   pipe_mutex_unlock(rb_context->list_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    rbug_send_shader_list_reply(tr_rbug->con, serial, shdrs, i, NULL);
    FREE(shdrs);
 
    return 0;
 }
 
 static int
 rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
@@ -552,116 +552,116 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
    struct rbug_shader *tr_shdr = NULL;
    unsigned original_len;
    unsigned replaced_len;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, info->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->list_mutex);
 
    tr_shdr = rbug_get_shader_locked(rb_context, info->shader);
 
    if (!tr_shdr) {
-      pipe_mutex_unlock(rb_context->list_mutex);
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_context->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    /* just in case */
    assert(sizeof(struct tgsi_token) == 4);
 
    original_len = tgsi_num_tokens(tr_shdr->tokens);
    if (tr_shdr->replaced_tokens)
       replaced_len = tgsi_num_tokens(tr_shdr->replaced_tokens);
    else
       replaced_len = 0;
 
    rbug_send_shader_info_reply(tr_rbug->con, serial,
                                (uint32_t*)tr_shdr->tokens, original_len,
                                (uint32_t*)tr_shdr->replaced_tokens, replaced_len,
                                tr_shdr->disabled,
                                NULL);
 
-   pipe_mutex_unlock(rb_context->list_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
 {
    struct rbug_proto_shader_disable *dis = (struct rbug_proto_shader_disable *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
    struct rbug_shader *tr_shdr = NULL;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, dis->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->list_mutex);
 
    tr_shdr = rbug_get_shader_locked(rb_context, dis->shader);
 
    if (!tr_shdr) {
-      pipe_mutex_unlock(rb_context->list_mutex);
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_context->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    tr_shdr->disabled = dis->disable;
 
-   pipe_mutex_unlock(rb_context->list_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 }
 
 static int
 rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
 {
    struct rbug_proto_shader_replace *rep = (struct rbug_proto_shader_replace *)header;
 
    struct rbug_screen *rb_screen = tr_rbug->rb_screen;
    struct rbug_context *rb_context = NULL;
    struct rbug_shader *tr_shdr = NULL;
    struct pipe_context *pipe = NULL;
    void *state;
 
    mtx_lock(&rb_screen->list_mutex);
    rb_context = rbug_get_context_locked(rb_screen, rep->context);
 
    if (!rb_context) {
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    mtx_lock(&rb_context->list_mutex);
 
    tr_shdr = rbug_get_shader_locked(rb_context, rep->shader);
 
    if (!tr_shdr) {
-      pipe_mutex_unlock(rb_context->list_mutex);
-      pipe_mutex_unlock(rb_screen->list_mutex);
+      mtx_unlock(&rb_context->list_mutex);
+      mtx_unlock(&rb_screen->list_mutex);
       return -ESRCH;
    }
 
    /* protect the pipe context */
    mtx_lock(&rb_context->call_mutex);
 
    pipe = rb_context->pipe;
 
    /* remove old replaced shader */
    if (tr_shdr->replaced_shader) {
@@ -688,34 +688,34 @@ rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
       goto err;
 
    /* bind new shader if the shader is currently a bound */
    if (rb_context->curr.shader[PIPE_SHADER_FRAGMENT] == tr_shdr || rb_context->curr.shader[PIPE_SHADER_VERTEX] == tr_shdr)
       rbug_shader_bind_locked(pipe, tr_shdr, state);
 
    /* save state */
    tr_shdr->replaced_shader = state;
 
 out:
-   pipe_mutex_unlock(rb_context->call_mutex);
-   pipe_mutex_unlock(rb_context->list_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->call_mutex);
+   mtx_unlock(&rb_context->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
 
    return 0;
 
 err:
    FREE(tr_shdr->replaced_tokens);
    tr_shdr->replaced_shader = NULL;
    tr_shdr->replaced_tokens = NULL;
 
-   pipe_mutex_unlock(rb_context->call_mutex);
-   pipe_mutex_unlock(rb_context->list_mutex);
-   pipe_mutex_unlock(rb_screen->list_mutex);
+   mtx_unlock(&rb_context->call_mutex);
+   mtx_unlock(&rb_context->list_mutex);
+   mtx_unlock(&rb_screen->list_mutex);
    return -EINVAL;
 }
 
 static boolean
 rbug_header(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
 {
    int ret = 0;
 
    switch(header->opcode) {
       case RBUG_OP_PING:
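
Every handler in rbug_core.c follows the same list-locking discipline: take list_mutex, look the object up, and release the lock on every exit path, including the early -ESRCH returns. A condensed sketch of that shape (the list type and lookup helper are hypothetical stand-ins for the rbug structures):

#include <errno.h>
#include <stddef.h>
#include <threads.h>

struct obj_list {
   mtx_t list_mutex;
   /* ... list head, counters ... */
};

/* hypothetical lookup helper; the real handlers walk a foreach() list */
static void *
find_locked(struct obj_list *l, unsigned id)
{
   (void)l; (void)id;
   return NULL;   /* stub: pretend the object is never found */
}

static int
query_object(struct obj_list *l, unsigned id)
{
   void *obj;

   mtx_lock(&l->list_mutex);
   obj = find_locked(l, id);

   if (!obj) {
      /* early returns drop the lock first, as in the -ESRCH paths above */
      mtx_unlock(&l->list_mutex);
      return -ESRCH;
   }

   /* ... inspect obj while list_mutex is still held ... */

   mtx_unlock(&l->list_mutex);
   return 0;
}
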
diff --git a/src/gallium/drivers/rbug/rbug_screen.h b/src/gallium/drivers/rbug/rbug_screen.h
index 9e2d8ae..1972005 100644
--- a/src/gallium/drivers/rbug/rbug_screen.h
+++ b/src/gallium/drivers/rbug/rbug_screen.h
@@ -64,29 +64,29 @@ static inline struct rbug_screen *
 rbug_screen(struct pipe_screen *screen)
 {
    return (struct rbug_screen *)screen;
 }
 
 #define rbug_screen_add_to_list(scr, name, obj) \
    do {                                          \
       mtx_lock(&scr->list_mutex);          \
       insert_at_head(&scr->name, &obj->list);    \
       scr->num_##name++;                         \
-      pipe_mutex_unlock(scr->list_mutex);        \
+      mtx_unlock(&scr->list_mutex);        \
    } while (0)
 
 #define rbug_screen_remove_from_list(scr, name, obj) \
    do {                                               \
       mtx_lock(&scr->list_mutex);               \
       remove_from_list(&obj->list);                   \
       scr->num_##name--;                              \
-      pipe_mutex_unlock(scr->list_mutex);             \
+      mtx_unlock(&scr->list_mutex);             \
    } while (0)
 
 
 
 /**********************************************************
  * rbug_core.c
  */
 
 struct rbug_rbug;
 
diff --git a/src/gallium/drivers/svga/svga_resource_buffer.c b/src/gallium/drivers/svga/svga_resource_buffer.c
index 05e91cb..7808903 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer.c
@@ -291,21 +291,21 @@ svga_buffer_transfer_flush_region( struct pipe_context *pipe,
    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
 
    unsigned offset = transfer->box.x + box->x;
    unsigned length = box->width;
 
    assert(transfer->usage & PIPE_TRANSFER_WRITE);
    assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
 
    mtx_lock(&ss->swc_mutex);
    svga_buffer_add_range(sbuf, offset, offset + length);
-   pipe_mutex_unlock(ss->swc_mutex);
+   mtx_unlock(&ss->swc_mutex);
 }
 
 
 static void
 svga_buffer_transfer_unmap( struct pipe_context *pipe,
                             struct pipe_transfer *transfer )
 {
    struct svga_screen *ss = svga_screen(pipe->screen);
    struct svga_context *svga = svga_context(pipe);
    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
@@ -332,21 +332,21 @@ svga_buffer_transfer_unmap( struct pipe_context *pipe,
           */
 
          SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
 
          sbuf->dma.flags.discard = TRUE;
 
          svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
       }
    }
 
-   pipe_mutex_unlock(ss->swc_mutex);
+   mtx_unlock(&ss->swc_mutex);
    FREE(transfer);
    SVGA_STATS_TIME_POP(svga_sws(svga));
 }
 
 
 static void
 svga_buffer_destroy( struct pipe_screen *screen,
 		     struct pipe_resource *buf )
 {
    struct svga_screen *ss = svga_screen(screen); 
diff --git a/src/gallium/drivers/svga/svga_resource_buffer_upload.c b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
index e41f475..9d93b48 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer_upload.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
@@ -639,21 +639,21 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
 
       ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf);
       if (ret != PIPE_OK)
          return ret;
 
       mtx_lock(&ss->swc_mutex);
       map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
       assert(map);
       assert(!retry);
       if (!map) {
-	 pipe_mutex_unlock(ss->swc_mutex);
+	 mtx_unlock(&ss->swc_mutex);
          svga_buffer_destroy_hw_storage(ss, sbuf);
          return PIPE_ERROR;
       }
 
       /* Copy data from malloc'd swbuf to the new hardware buffer */
       for (i = 0; i < sbuf->map.num_ranges; i++) {
          unsigned start = sbuf->map.ranges[i].start;
          unsigned len = sbuf->map.ranges[i].end - start;
          memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
       }
@@ -663,21 +663,21 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
       /* This user/malloc buffer is now indistinguishable from a gpu buffer */
       assert(sbuf->map.count == 0);
       if (sbuf->map.count == 0) {
          if (sbuf->user)
             sbuf->user = FALSE;
          else
             align_free(sbuf->swbuf);
          sbuf->swbuf = NULL;
       }
 
-      pipe_mutex_unlock(ss->swc_mutex);
+      mtx_unlock(&ss->swc_mutex);
    }
 
    return PIPE_OK;
 }
 
 
 /**
  * Upload the buffer to the host in a piecewise fashion.
  *
  * Used when the buffer is too big to fit in the GMR aperture.
diff --git a/src/gallium/drivers/svga/svga_sampler_view.c b/src/gallium/drivers/svga/svga_sampler_view.c
index 053cfc5..ee4ef3c 100644
--- a/src/gallium/drivers/svga/svga_sampler_view.c
+++ b/src/gallium/drivers/svga/svga_sampler_view.c
@@ -89,27 +89,27 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
          view = TRUE;
    }
 
    /* First try the cache */
    if (view) {
       mtx_lock(&ss->tex_mutex);
       if (tex->cached_view &&
           tex->cached_view->min_lod == min_lod &&
           tex->cached_view->max_lod == max_lod) {
          svga_sampler_view_reference(&sv, tex->cached_view);
-         pipe_mutex_unlock(ss->tex_mutex);
+         mtx_unlock(&ss->tex_mutex);
          SVGA_DBG(DEBUG_VIEWS, "svga: Sampler view: reuse %p, %u %u, last %u\n",
                               pt, min_lod, max_lod, pt->last_level);
          svga_validate_sampler_view(svga_context(pipe), sv);
          return sv;
       }
-      pipe_mutex_unlock(ss->tex_mutex);
+      mtx_unlock(&ss->tex_mutex);
    }
 
    sv = CALLOC_STRUCT(svga_sampler_view);
    if (!sv)
       return NULL;
 
    pipe_reference_init(&sv->reference, 1);
 
    /* Note: we're not refcounting the texture resource here to avoid
     * a circular dependency.
@@ -158,21 +158,21 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
       sv->key.cachable = 0;
       sv->handle = tex->handle;
       debug_reference(&sv->reference,
                       (debug_reference_descriptor)
                       svga_debug_describe_sampler_view, 0);
       return sv;
    }
 
    mtx_lock(&ss->tex_mutex);
    svga_sampler_view_reference(&tex->cached_view, sv);
-   pipe_mutex_unlock(ss->tex_mutex);
+   mtx_unlock(&ss->tex_mutex);
 
    debug_reference(&sv->reference,
                    (debug_reference_descriptor)
                    svga_debug_describe_sampler_view, 0);
 
    return sv;
 }
 
 
 void
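
The svga_sampler_view.c hunks above are the cache-lookup variant of the pattern: check a cached object under tex_mutex, unlock and return on a hit, otherwise drop the lock, build the new view outside it, and re-lock only to publish the result. A compressed sketch of that check/build/publish split (the cache type and builder below are hypothetical):

#include <stddef.h>
#include <threads.h>

struct view_cache {
   mtx_t mutex;
   void *cached;   /* last published view, or NULL */
};

/* hypothetical constructor standing in for the CALLOC_STRUCT() path */
static void *
build_view(void)
{
   static int dummy;
   return &dummy;
}

static void *
get_view(struct view_cache *c)
{
   void *v;

   mtx_lock(&c->mutex);
   if (c->cached) {
      v = c->cached;
      mtx_unlock(&c->mutex);   /* hit: release before returning */
      return v;
   }
   mtx_unlock(&c->mutex);      /* miss: build outside the lock */

   v = build_view();
   if (!v)
      return NULL;

   mtx_lock(&c->mutex);        /* re-lock only to publish the new view */
   c->cached = v;
   mtx_unlock(&c->mutex);
   return v;
}
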
diff --git a/src/gallium/drivers/svga/svga_screen_cache.c b/src/gallium/drivers/svga/svga_screen_cache.c
index 55f9426..d26e79a 100644
--- a/src/gallium/drivers/svga/svga_screen_cache.c
+++ b/src/gallium/drivers/svga/svga_screen_cache.c
@@ -147,21 +147,21 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
          else
             cache->total_size -= surf_size;
 
          break;
       }
 
       curr = next;
       next = curr->next;
    }
 
-   pipe_mutex_unlock(cache->mutex);
+   mtx_unlock(&cache->mutex);
 
    if (SVGA_DEBUG & DEBUG_DMA)
       debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__,
                    handle ? "hit" : "miss", tries, bucket);
 
    return handle;
 }
 
 
 /**
@@ -224,39 +224,39 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
       return;
 
    surf_size = surface_size(key);
 
    *p_handle = NULL;
    mtx_lock(&cache->mutex);
 
    if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
       /* this surface is too large to cache, just free it */
       sws->surface_reference(sws, &handle, NULL);
-      pipe_mutex_unlock(cache->mutex);
+      mtx_unlock(&cache->mutex);
       return;
    }
 
    if (cache->total_size + surf_size > SVGA_HOST_SURFACE_CACHE_BYTES) {
       /* Adding this surface would exceed the cache size.
        * Try to discard least recently used entries until we hit the
        * new target cache size.
        */
       unsigned target_size = SVGA_HOST_SURFACE_CACHE_BYTES - surf_size;
 
       svga_screen_cache_shrink(svgascreen, target_size);
 
       if (cache->total_size > target_size) {
          /* we weren't able to shrink the cache as much as we wanted so
           * just discard this surface.
           */
          sws->surface_reference(sws, &handle, NULL);
-         pipe_mutex_unlock(cache->mutex);
+         mtx_unlock(&cache->mutex);
          return;
       }
    }
 
    if (!LIST_IS_EMPTY(&cache->empty)) {
       /* An empty entry has no surface associated with it.
        * Use the first empty entry.
        */
       entry = LIST_ENTRY(struct svga_host_surface_cache_entry,
                          cache->empty.next, head);
@@ -293,21 +293,21 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
 
       cache->total_size += surf_size;
    }
    else {
       /* Couldn't cache the buffer -- this really shouldn't happen */
       SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
                "unref sid %p (couldn't find space)\n", handle);
       sws->surface_reference(sws, &handle, NULL);
    }
 
-   pipe_mutex_unlock(cache->mutex);
+   mtx_unlock(&cache->mutex);
 }
 
 
 /**
  * Called during the screen flush to move all buffers not in a validate list
  * into the unused list.
  */
 void
 svga_screen_cache_flush(struct svga_screen *svgascreen,
                         struct pipe_fence_handle *fence)
@@ -361,21 +361,21 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
          sws->surface_invalidate(sws, entry->handle);
 
          /* add the entry to the invalidated list */
          LIST_ADD(&entry->head, &cache->invalidated);
       }
 
       curr = next;
       next = curr->next;
    }
 
-   pipe_mutex_unlock(cache->mutex);
+   mtx_unlock(&cache->mutex);
 }
 
 
 /**
  * Free all the surfaces in the cache.
  * Called when destroying the svga screen object.
  */
 void
 svga_screen_cache_cleanup(struct svga_screen *svgascreen)
 {
diff --git a/src/gallium/drivers/trace/tr_dump.c b/src/gallium/drivers/trace/tr_dump.c
index 2df4f83..9d03b16 100644
--- a/src/gallium/drivers/trace/tr_dump.c
+++ b/src/gallium/drivers/trace/tr_dump.c
@@ -300,21 +300,21 @@ boolean trace_dump_trace_enabled(void)
  * Call lock
  */
 
 void trace_dump_call_lock(void)
 {
    mtx_lock(&call_mutex);
 }
 
 void trace_dump_call_unlock(void)
 {
-   pipe_mutex_unlock(call_mutex);
+   mtx_unlock(&call_mutex);
 }
 
 /*
  * Dumping control
  */
 
 void trace_dumping_start_locked(void)
 {
    dumping = TRUE;
 }
@@ -326,36 +326,36 @@ void trace_dumping_stop_locked(void)
 
 boolean trace_dumping_enabled_locked(void)
 {
    return dumping;
 }
 
 void trace_dumping_start(void)
 {
    mtx_lock(&call_mutex);
    trace_dumping_start_locked();
-   pipe_mutex_unlock(call_mutex);
+   mtx_unlock(&call_mutex);
 }
 
 void trace_dumping_stop(void)
 {
    mtx_lock(&call_mutex);
    trace_dumping_stop_locked();
-   pipe_mutex_unlock(call_mutex);
+   mtx_unlock(&call_mutex);
 }
 
 boolean trace_dumping_enabled(void)
 {
    boolean ret;
    mtx_lock(&call_mutex);
    ret = trace_dumping_enabled_locked();
-   pipe_mutex_unlock(call_mutex);
+   mtx_unlock(&call_mutex);
    return ret;
 }
 
 /*
  * Dump functions
  */
 
 static int64_t call_start_time = 0;
 
 void trace_dump_call_begin_locked(const char *klass, const char *method)
@@ -395,21 +395,21 @@ void trace_dump_call_end_locked(void)
 
 void trace_dump_call_begin(const char *klass, const char *method)
 {
    mtx_lock(&call_mutex);
    trace_dump_call_begin_locked(klass, method);
 }
 
 void trace_dump_call_end(void)
 {
    trace_dump_call_end_locked();
-   pipe_mutex_unlock(call_mutex);
+   mtx_unlock(&call_mutex);
 }
 
 void trace_dump_arg_begin(const char *name)
 {
    if (!dumping)
       return;
 
    trace_dump_indent(2);
    trace_dump_tag_begin1("arg", "name", name);
 }
diff --git a/src/gallium/drivers/vc4/vc4_bufmgr.c b/src/gallium/drivers/vc4/vc4_bufmgr.c
index c46e564..12af7f8 100644
--- a/src/gallium/drivers/vc4/vc4_bufmgr.c
+++ b/src/gallium/drivers/vc4/vc4_bufmgr.c
@@ -100,30 +100,30 @@ vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
         mtx_lock(&cache->lock);
         if (!list_empty(&cache->size_list[page_index])) {
                 bo = LIST_ENTRY(struct vc4_bo, cache->size_list[page_index].next,
                                 size_list);
 
                 /* Check that the BO has gone idle.  If not, then we want to
                  * allocate something new instead, since we assume that the
                  * user will proceed to CPU map it and fill it with stuff.
                  */
                 if (!vc4_bo_wait(bo, 0, NULL)) {
-                        pipe_mutex_unlock(cache->lock);
+                        mtx_unlock(&cache->lock);
                         return NULL;
                 }
 
                 pipe_reference_init(&bo->reference, 1);
                 vc4_bo_remove_from_cache(cache, bo);
 
                 bo->name = name;
         }
-        pipe_mutex_unlock(cache->lock);
+        mtx_unlock(&cache->lock);
         return bo;
 }
 
 struct vc4_bo *
 vc4_bo_alloc(struct vc4_screen *screen, uint32_t size, const char *name)
 {
         struct vc4_bo *bo;
         int ret;
 
         size = align(size, 4096);
@@ -183,21 +183,21 @@ vc4_bo_alloc(struct vc4_screen *screen, uint32_t size, const char *name)
 
 void
 vc4_bo_last_unreference(struct vc4_bo *bo)
 {
         struct vc4_screen *screen = bo->screen;
 
         struct timespec time;
         clock_gettime(CLOCK_MONOTONIC, &time);
         mtx_lock(&screen->bo_cache.lock);
         vc4_bo_last_unreference_locked_timed(bo, time.tv_sec);
-        pipe_mutex_unlock(screen->bo_cache.lock);
+        mtx_unlock(&screen->bo_cache.lock);
 }
 
 static void
 vc4_bo_free(struct vc4_bo *bo)
 {
         struct vc4_screen *screen = bo->screen;
 
         if (bo->map) {
                 if (using_vc4_simulator && bo->name &&
                     strcmp(bo->name, "winsys") == 0) {
@@ -260,21 +260,21 @@ free_stale_bos(struct vc4_screen *screen, time_t time)
 
 static void
 vc4_bo_cache_free_all(struct vc4_bo_cache *cache)
 {
         mtx_lock(&cache->lock);
         list_for_each_entry_safe(struct vc4_bo, bo, &cache->time_list,
                                  time_list) {
                 vc4_bo_remove_from_cache(cache, bo);
                 vc4_bo_free(bo);
         }
-        pipe_mutex_unlock(cache->lock);
+        mtx_unlock(&cache->lock);
 }
 
 void
 vc4_bo_last_unreference_locked_timed(struct vc4_bo *bo, time_t time)
 {
         struct vc4_screen *screen = bo->screen;
         struct vc4_bo_cache *cache = &screen->bo_cache;
         uint32_t page_index = bo->size / 4096 - 1;
 
         if (!bo->private) {
@@ -340,21 +340,21 @@ vc4_bo_open_handle(struct vc4_screen *screen,
 
 #ifdef USE_VC4_SIMULATOR
         vc4_simulator_open_from_handle(screen->fd, winsys_stride,
                                        bo->handle, bo->size);
         bo->map = malloc(bo->size);
 #endif
 
         util_hash_table_set(screen->bo_handles, (void *)(uintptr_t)handle, bo);
 
 done:
-        pipe_mutex_unlock(screen->bo_handles_mutex);
+        mtx_unlock(&screen->bo_handles_mutex);
         return bo;
 }
 
 struct vc4_bo *
 vc4_bo_open_name(struct vc4_screen *screen, uint32_t name,
                  uint32_t winsys_stride)
 {
         struct drm_gem_open o = {
                 .name = name
         };
@@ -397,21 +397,21 @@ vc4_bo_get_dmabuf(struct vc4_bo *bo)
                                      O_CLOEXEC, &fd);
         if (ret != 0) {
                 fprintf(stderr, "Failed to export gem bo %d to dmabuf\n",
                         bo->handle);
                 return -1;
         }
 
         mtx_lock(&bo->screen->bo_handles_mutex);
         bo->private = false;
         util_hash_table_set(bo->screen->bo_handles, (void *)(uintptr_t)bo->handle, bo);
-        pipe_mutex_unlock(bo->screen->bo_handles_mutex);
+        mtx_unlock(&bo->screen->bo_handles_mutex);
 
         return fd;
 }
 
 struct vc4_bo *
 vc4_bo_alloc_shader(struct vc4_screen *screen, const void *data, uint32_t size)
 {
         struct vc4_bo *bo;
         int ret;
 
diff --git a/src/gallium/drivers/vc4/vc4_bufmgr.h b/src/gallium/drivers/vc4/vc4_bufmgr.h
index e996d0c..838314f 100644
--- a/src/gallium/drivers/vc4/vc4_bufmgr.h
+++ b/src/gallium/drivers/vc4/vc4_bufmgr.h
@@ -94,21 +94,21 @@ vc4_bo_unreference(struct vc4_bo **bo)
         } else {
                 screen = (*bo)->screen;
                 mtx_lock(&screen->bo_handles_mutex);
 
                 if (pipe_reference(&(*bo)->reference, NULL)) {
                         util_hash_table_remove(screen->bo_handles,
                                                (void *)(uintptr_t)(*bo)->handle);
                         vc4_bo_last_unreference(*bo);
                 }
 
-                pipe_mutex_unlock(screen->bo_handles_mutex);
+                mtx_unlock(&screen->bo_handles_mutex);
         }
 
         *bo = NULL;
 }
 
 static inline void
 vc4_bo_unreference_locked_timed(struct vc4_bo **bo, time_t time)
 {
         if (!*bo)
                 return;
diff --git a/src/gallium/state_trackers/dri/dri2.c b/src/gallium/state_trackers/dri/dri2.c
index da663b2..b50e096 100644
--- a/src/gallium/state_trackers/dri/dri2.c
+++ b/src/gallium/state_trackers/dri/dri2.c
@@ -1427,35 +1427,35 @@ dri2_is_opencl_interop_loaded_locked(struct dri_screen *screen)
 
 static bool
 dri2_load_opencl_interop(struct dri_screen *screen)
 {
 #if defined(RTLD_DEFAULT)
    bool success;
 
    mtx_lock(&screen->opencl_func_mutex);
 
    if (dri2_is_opencl_interop_loaded_locked(screen)) {
-      pipe_mutex_unlock(screen->opencl_func_mutex);
+      mtx_unlock(&screen->opencl_func_mutex);
       return true;
    }
 
    screen->opencl_dri_event_add_ref =
       dlsym(RTLD_DEFAULT, "opencl_dri_event_add_ref");
    screen->opencl_dri_event_release =
       dlsym(RTLD_DEFAULT, "opencl_dri_event_release");
    screen->opencl_dri_event_wait =
       dlsym(RTLD_DEFAULT, "opencl_dri_event_wait");
    screen->opencl_dri_event_get_fence =
       dlsym(RTLD_DEFAULT, "opencl_dri_event_get_fence");
 
    success = dri2_is_opencl_interop_loaded_locked(screen);
-   pipe_mutex_unlock(screen->opencl_func_mutex);
+   mtx_unlock(&screen->opencl_func_mutex);
    return success;
 #else
    return false;
 #endif
 }
 
 struct dri2_fence {
    struct dri_screen *driscreen;
    struct pipe_fence_handle *pipe_fence;
    void *cl_event;
diff --git a/src/gallium/state_trackers/glx/xlib/xm_api.c b/src/gallium/state_trackers/glx/xlib/xm_api.c
index 86bb1c4..398152e 100644
--- a/src/gallium/state_trackers/glx/xlib/xm_api.c
+++ b/src/gallium/state_trackers/glx/xlib/xm_api.c
@@ -197,33 +197,33 @@ xmesa_init_display( Display *display )
       return NULL;
    }
 
    mtx_lock(&init_mutex);
 
    /* Look for XMesaDisplay which corresponds to this display */
    info = MesaExtInfo.head;
    while(info) {
       if (info->display == display) {
          /* Found it */
-         pipe_mutex_unlock(init_mutex);
+         mtx_unlock(&init_mutex);
          return  &info->mesaDisplay;
       }
       info = info->next;
    }
 
    /* Not found.  Create new XMesaDisplay */
    /* first allocate X-related resources and hook destroy callback */
 
    /* allocate mesa display info */
    info = (XMesaExtDisplayInfo *) Xmalloc(sizeof(XMesaExtDisplayInfo));
    if (info == NULL) {
-      pipe_mutex_unlock(init_mutex);
+      mtx_unlock(&init_mutex);
       return NULL;
    }
    info->display = display;
    xmdpy = &info->mesaDisplay; /* to be filled out below */
 
    /* chain to the list of displays */
    _XLockMutex(_Xglobal_lock);
    info->next = MesaExtInfo.head;
    MesaExtInfo.head = info;
    MesaExtInfo.ndisplays++;
@@ -248,21 +248,21 @@ xmesa_init_display( Display *display )
       if (xmdpy->screen) {
          xmdpy->screen->destroy(xmdpy->screen);
          xmdpy->screen = NULL;
       }
       free(xmdpy->smapi);
       xmdpy->smapi = NULL;
 
       xmdpy->display = NULL;
    }
 
-   pipe_mutex_unlock(init_mutex);
+   mtx_unlock(&init_mutex);
 
    return xmdpy;
 }
 
 
 /**********************************************************************/
 /*****                     X Utility Functions                    *****/
 /**********************************************************************/
 
 
@@ -367,21 +367,21 @@ get_drawable_size( Display *dpy, Drawable d, uint *width, uint *height )
  */
 void
 xmesa_get_window_size(Display *dpy, XMesaBuffer b,
                       GLuint *width, GLuint *height)
 {
    XMesaDisplay xmdpy = xmesa_init_display(dpy);
    Status stat;
 
    mtx_lock(&xmdpy->mutex);
    stat = get_drawable_size(dpy, b->ws.drawable, width, height);
-   pipe_mutex_unlock(xmdpy->mutex);
+   mtx_unlock(&xmdpy->mutex);
 
    if (!stat) {
       /* probably querying a window that's recently been destroyed */
       _mesa_warning(NULL, "XGetGeometry failed!\n");
       *width = *height = 1;
    }
 }
 
 #define GET_REDMASK(__v)        __v->mesa_visual.redMask
 #define GET_GREENMASK(__v)      __v->mesa_visual.greenMask
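
The nine_lock.c hunks that follow are a long run of identical thin wrappers: take the global D3D lock, forward to the real Nine*() entry point, unlock, return the result. Reduced to one hypothetical wrapper around a hypothetical inner call, the shape is:

#include <threads.h>

/* illustrative global lock (initialization omitted here); nine uses its own
 * statically initialized d3dlock_global, as the hunks below show */
static mtx_t example_global_lock;

/* hypothetical unlocked implementation standing in for a Nine*() call */
static long
inner_get_priority(void *self)
{
    (void)self;
    return 0;
}

static long
locked_get_priority(void *self)
{
    long r;
    mtx_lock(&example_global_lock);
    r = inner_get_priority(self);
    mtx_unlock(&example_global_lock);
    return r;
}
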
diff --git a/src/gallium/state_trackers/nine/nine_lock.c b/src/gallium/state_trackers/nine/nine_lock.c
index 0ac0cd7..ca0f04a 100644
--- a/src/gallium/state_trackers/nine/nine_lock.c
+++ b/src/gallium/state_trackers/nine/nine_lock.c
@@ -52,82 +52,82 @@ static mtx_t d3dlock_global = _MTX_INITIALIZER_NP;
 
 void
 NineLockGlobalMutex()
 {
     mtx_lock(&d3dlock_global);
 }
 
 void
 NineUnlockGlobalMutex()
 {
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 
 static HRESULT NINE_WINAPI
 LockAuthenticatedChannel9_GetCertificateSize( struct NineAuthenticatedChannel9 *This,
                                               UINT *pCertificateSize )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineAuthenticatedChannel9_GetCertificateSize(This, pCertificateSize);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockAuthenticatedChannel9_GetCertificate( struct NineAuthenticatedChannel9 *This,
                                           UINT CertifacteSize,
                                           BYTE *ppCertificate )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineAuthenticatedChannel9_GetCertificate(This, CertifacteSize, ppCertificate);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockAuthenticatedChannel9_NegotiateKeyExchange( struct NineAuthenticatedChannel9 *This,
                                                 UINT DataSize,
                                                 void *pData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineAuthenticatedChannel9_NegotiateKeyExchange(This, DataSize, pData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockAuthenticatedChannel9_Query( struct NineAuthenticatedChannel9 *This,
                                  UINT InputSize,
                                  const void *pInput,
                                  UINT OutputSize,
                                  void *pOutput )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineAuthenticatedChannel9_Query(This, InputSize, pInput, OutputSize, pOutput);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockAuthenticatedChannel9_Configure( struct NineAuthenticatedChannel9 *This,
                                      UINT InputSize,
                                      const void *pInput,
                                      D3DAUTHENTICATEDCHANNEL_CONFIGURE_OUTPUT *pOutput )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineAuthenticatedChannel9_Configure(This, InputSize, pInput, pOutput);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DAuthenticatedChannel9Vtbl LockAuthenticatedChannel9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockAuthenticatedChannel9_GetCertificateSize,
     (void *)LockAuthenticatedChannel9_GetCertificate,
     (void *)LockAuthenticatedChannel9_NegotiateKeyExchange,
@@ -138,280 +138,280 @@ IDirect3DAuthenticatedChannel9Vtbl LockAuthenticatedChannel9_vtable = {
 static HRESULT NINE_WINAPI
 LockUnknown_SetPrivateData( struct NineUnknown *This,
                             REFGUID refguid,
                             const void *pData,
                             DWORD SizeOfData,
                             DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_SetPrivateData(This, refguid, pData, SizeOfData, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockUnknown_GetPrivateData( struct NineUnknown *This,
                             REFGUID refguid,
                             void *pData,
                             DWORD *pSizeOfData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetPrivateData(This, refguid, pData, pSizeOfData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockUnknown_FreePrivateData( struct NineUnknown *This,
                              REFGUID refguid )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_FreePrivateData(This, refguid);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockResource9_GetDevice( struct NineResource9 *This,
                          IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static DWORD NINE_WINAPI
 LockResource9_SetPriority( struct NineResource9 *This,
                            DWORD PriorityNew )
 {
     DWORD r;
     mtx_lock(&d3dlock_global);
     r = NineResource9_SetPriority(This, PriorityNew);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static DWORD NINE_WINAPI
 LockResource9_GetPriority( struct NineResource9 *This )
 {
     DWORD r;
     mtx_lock(&d3dlock_global);
     r = NineResource9_GetPriority(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static void NINE_WINAPI
 LockResource9_PreLoad( struct NineResource9 *This )
 {
     mtx_lock(&d3dlock_global);
     NineResource9_PreLoad(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 #endif
 
 #if 0
 static D3DRESOURCETYPE NINE_WINAPI
 LockResource9_GetType( struct NineResource9 *This )
 {
     D3DRESOURCETYPE r;
     mtx_lock(&d3dlock_global);
     r = NineResource9_GetType(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static DWORD NINE_WINAPI
 LockBaseTexture9_SetLOD( struct NineBaseTexture9 *This,
                          DWORD LODNew )
 {
     DWORD r;
     mtx_lock(&d3dlock_global);
     r = NineBaseTexture9_SetLOD(This, LODNew);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static DWORD NINE_WINAPI
 LockBaseTexture9_GetLOD( struct NineBaseTexture9 *This )
 {
     DWORD r;
     mtx_lock(&d3dlock_global);
     r = NineBaseTexture9_GetLOD(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static DWORD NINE_WINAPI
 LockBaseTexture9_GetLevelCount( struct NineBaseTexture9 *This )
 {
     DWORD r;
     mtx_lock(&d3dlock_global);
     r = NineBaseTexture9_GetLevelCount(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockBaseTexture9_SetAutoGenFilterType( struct NineBaseTexture9 *This,
                                        D3DTEXTUREFILTERTYPE FilterType )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineBaseTexture9_SetAutoGenFilterType(This, FilterType);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static D3DTEXTUREFILTERTYPE NINE_WINAPI
 LockBaseTexture9_GetAutoGenFilterType( struct NineBaseTexture9 *This )
 {
     D3DTEXTUREFILTERTYPE r;
     mtx_lock(&d3dlock_global);
     r = NineBaseTexture9_GetAutoGenFilterType(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static void NINE_WINAPI
 LockBaseTexture9_PreLoad( struct NineBaseTexture9 *This )
 {
     mtx_lock(&d3dlock_global);
     NineBaseTexture9_PreLoad(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 
 static void NINE_WINAPI
 LockBaseTexture9_GenerateMipSubLevels( struct NineBaseTexture9 *This )
 {
     mtx_lock(&d3dlock_global);
     NineBaseTexture9_GenerateMipSubLevels(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_GetCertificateSize( struct NineCryptoSession9 *This,
                                        UINT *pCertificateSize )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_GetCertificateSize(This, pCertificateSize);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_GetCertificate( struct NineCryptoSession9 *This,
                                    UINT CertifacteSize,
                                    BYTE *ppCertificate )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_GetCertificate(This, CertifacteSize, ppCertificate);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_NegotiateKeyExchange( struct NineCryptoSession9 *This,
                                          UINT DataSize,
                                          void *pData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_NegotiateKeyExchange(This, DataSize, pData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_EncryptionBlt( struct NineCryptoSession9 *This,
                                   IDirect3DSurface9 *pSrcSurface,
                                   IDirect3DSurface9 *pDstSurface,
                                   UINT DstSurfaceSize,
                                   void *pIV )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_EncryptionBlt(This, pSrcSurface, pDstSurface, DstSurfaceSize, pIV);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_DecryptionBlt( struct NineCryptoSession9 *This,
                                   IDirect3DSurface9 *pSrcSurface,
                                   IDirect3DSurface9 *pDstSurface,
                                   UINT SrcSurfaceSize,
                                   D3DENCRYPTED_BLOCK_INFO *pEncryptedBlockInfo,
                                   void *pContentKey,
                                   void *pIV )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_DecryptionBlt(This, pSrcSurface, pDstSurface, SrcSurfaceSize, pEncryptedBlockInfo, pContentKey, pIV);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_GetSurfacePitch( struct NineCryptoSession9 *This,
                                     IDirect3DSurface9 *pSrcSurface,
                                     UINT *pSurfacePitch )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_GetSurfacePitch(This, pSrcSurface, pSurfacePitch);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_StartSessionKeyRefresh( struct NineCryptoSession9 *This,
                                            void *pRandomNumber,
                                            UINT RandomNumberSize )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_StartSessionKeyRefresh(This, pRandomNumber, RandomNumberSize);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_FinishSessionKeyRefresh( struct NineCryptoSession9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_FinishSessionKeyRefresh(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCryptoSession9_GetEncryptionBltKey( struct NineCryptoSession9 *This,
                                         void *pReadbackKey,
                                         UINT KeySize )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCryptoSession9_GetEncryptionBltKey(This, pReadbackKey, KeySize);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DCryptoSession9Vtbl LockCryptoSession9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockCryptoSession9_GetCertificateSize,
     (void *)LockCryptoSession9_GetCertificate,
     (void *)LockCryptoSession9_NegotiateKeyExchange,
@@ -425,76 +425,76 @@ IDirect3DCryptoSession9Vtbl LockCryptoSession9_vtable = {
 
 #if 0
 static HRESULT NINE_WINAPI
 LockCubeTexture9_GetLevelDesc( struct NineCubeTexture9 *This,
                                UINT Level,
                                D3DSURFACE_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCubeTexture9_GetLevelDesc(This, Level, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 #if 0
 static HRESULT NINE_WINAPI
 LockCubeTexture9_GetCubeMapSurface( struct NineCubeTexture9 *This,
                                     D3DCUBEMAP_FACES FaceType,
                                     UINT Level,
                                     IDirect3DSurface9 **ppCubeMapSurface )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCubeTexture9_GetCubeMapSurface(This, FaceType, Level, ppCubeMapSurface);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockCubeTexture9_LockRect( struct NineCubeTexture9 *This,
                            D3DCUBEMAP_FACES FaceType,
                            UINT Level,
                            D3DLOCKED_RECT *pLockedRect,
                            const RECT *pRect,
                            DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCubeTexture9_LockRect(This, FaceType, Level, pLockedRect, pRect, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCubeTexture9_UnlockRect( struct NineCubeTexture9 *This,
                              D3DCUBEMAP_FACES FaceType,
                              UINT Level )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCubeTexture9_UnlockRect(This, FaceType, Level);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockCubeTexture9_AddDirtyRect( struct NineCubeTexture9 *This,
                                D3DCUBEMAP_FACES FaceType,
                                const RECT *pDirtyRect )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineCubeTexture9_AddDirtyRect(This, FaceType, pDirtyRect);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DCubeTexture9Vtbl LockCubeTexture9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
     (void *)LockUnknown_SetPrivateData,
     (void *)LockUnknown_GetPrivateData,
@@ -515,1448 +515,1448 @@ IDirect3DCubeTexture9Vtbl LockCubeTexture9_vtable = {
     (void *)LockCubeTexture9_UnlockRect,
     (void *)LockCubeTexture9_AddDirtyRect
 };
 
 static HRESULT NINE_WINAPI
 LockDevice9_TestCooperativeLevel( struct NineDevice9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_TestCooperativeLevel(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static UINT NINE_WINAPI
 LockDevice9_GetAvailableTextureMem( struct NineDevice9 *This )
 {
     UINT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetAvailableTextureMem(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_EvictManagedResources( struct NineDevice9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_EvictManagedResources(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetDirect3D( struct NineDevice9 *This,
                          IDirect3D9 **ppD3D9 )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetDirect3D(This, ppD3D9);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockDevice9_GetDeviceCaps( struct NineDevice9 *This,
                            D3DCAPS9 *pCaps )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetDeviceCaps(This, pCaps);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetDisplayMode( struct NineDevice9 *This,
                             UINT iSwapChain,
                             D3DDISPLAYMODE *pMode )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetDisplayMode(This, iSwapChain, pMode);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockDevice9_GetCreationParameters( struct NineDevice9 *This,
                                    D3DDEVICE_CREATION_PARAMETERS *pParameters )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetCreationParameters(This, pParameters);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetCursorProperties( struct NineDevice9 *This,
                                  UINT XHotSpot,
                                  UINT YHotSpot,
                                  IDirect3DSurface9 *pCursorBitmap )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetCursorProperties(This, XHotSpot, YHotSpot, pCursorBitmap);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static void NINE_WINAPI
 LockDevice9_SetCursorPosition( struct NineDevice9 *This,
                                int X,
                                int Y,
                                DWORD Flags )
 {
     mtx_lock(&d3dlock_global);
     NineDevice9_SetCursorPosition(This, X, Y, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 
 static BOOL NINE_WINAPI
 LockDevice9_ShowCursor( struct NineDevice9 *This,
                         BOOL bShow )
 {
     BOOL r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_ShowCursor(This, bShow);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateAdditionalSwapChain( struct NineDevice9 *This,
                                        D3DPRESENT_PARAMETERS *pPresentationParameters,
                                        IDirect3DSwapChain9 **pSwapChain )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateAdditionalSwapChain(This, pPresentationParameters, pSwapChain);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetSwapChain( struct NineDevice9 *This,
                           UINT iSwapChain,
                           IDirect3DSwapChain9 **pSwapChain )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetSwapChain(This, iSwapChain, pSwapChain);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static UINT NINE_WINAPI
 LockDevice9_GetNumberOfSwapChains( struct NineDevice9 *This )
 {
     UINT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetNumberOfSwapChains(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_Reset( struct NineDevice9 *This,
                    D3DPRESENT_PARAMETERS *pPresentationParameters )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_Reset(This, pPresentationParameters);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_Present( struct NineDevice9 *This,
                      const RECT *pSourceRect,
                      const RECT *pDestRect,
                      HWND hDestWindowOverride,
                      const RGNDATA *pDirtyRegion )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_Present(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetBackBuffer( struct NineDevice9 *This,
                            UINT iSwapChain,
                            UINT iBackBuffer,
                            D3DBACKBUFFER_TYPE Type,
                            IDirect3DSurface9 **ppBackBuffer )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetBackBuffer(This, iSwapChain, iBackBuffer, Type, ppBackBuffer);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetRasterStatus( struct NineDevice9 *This,
                              UINT iSwapChain,
                              D3DRASTER_STATUS *pRasterStatus )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetRasterStatus(This, iSwapChain, pRasterStatus);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetDialogBoxMode( struct NineDevice9 *This,
                               BOOL bEnableDialogs )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetDialogBoxMode(This, bEnableDialogs);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static void NINE_WINAPI
 LockDevice9_SetGammaRamp( struct NineDevice9 *This,
                           UINT iSwapChain,
                           DWORD Flags,
                           const D3DGAMMARAMP *pRamp )
 {
     mtx_lock(&d3dlock_global);
     NineDevice9_SetGammaRamp(This, iSwapChain, Flags, pRamp);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 
 static void NINE_WINAPI
 LockDevice9_GetGammaRamp( struct NineDevice9 *This,
                           UINT iSwapChain,
                           D3DGAMMARAMP *pRamp )
 {
     mtx_lock(&d3dlock_global);
     NineDevice9_GetGammaRamp(This, iSwapChain, pRamp);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateTexture( struct NineDevice9 *This,
                            UINT Width,
                            UINT Height,
                            UINT Levels,
                            DWORD Usage,
                            D3DFORMAT Format,
                            D3DPOOL Pool,
                            IDirect3DTexture9 **ppTexture,
                            HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateTexture(This, Width, Height, Levels, Usage, Format, Pool, ppTexture, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateVolumeTexture( struct NineDevice9 *This,
                                  UINT Width,
                                  UINT Height,
                                  UINT Depth,
                                  UINT Levels,
                                  DWORD Usage,
                                  D3DFORMAT Format,
                                  D3DPOOL Pool,
                                  IDirect3DVolumeTexture9 **ppVolumeTexture,
                                  HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateVolumeTexture(This, Width, Height, Depth, Levels, Usage, Format, Pool, ppVolumeTexture, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateCubeTexture( struct NineDevice9 *This,
                                UINT EdgeLength,
                                UINT Levels,
                                DWORD Usage,
                                D3DFORMAT Format,
                                D3DPOOL Pool,
                                IDirect3DCubeTexture9 **ppCubeTexture,
                                HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateCubeTexture(This, EdgeLength, Levels, Usage, Format, Pool, ppCubeTexture, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateVertexBuffer( struct NineDevice9 *This,
                                 UINT Length,
                                 DWORD Usage,
                                 DWORD FVF,
                                 D3DPOOL Pool,
                                 IDirect3DVertexBuffer9 **ppVertexBuffer,
                                 HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateVertexBuffer(This, Length, Usage, FVF, Pool, ppVertexBuffer, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateIndexBuffer( struct NineDevice9 *This,
                                UINT Length,
                                DWORD Usage,
                                D3DFORMAT Format,
                                D3DPOOL Pool,
                                IDirect3DIndexBuffer9 **ppIndexBuffer,
                                HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateIndexBuffer(This, Length, Usage, Format, Pool, ppIndexBuffer, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateRenderTarget( struct NineDevice9 *This,
                                 UINT Width,
                                 UINT Height,
                                 D3DFORMAT Format,
                                 D3DMULTISAMPLE_TYPE MultiSample,
                                 DWORD MultisampleQuality,
                                 BOOL Lockable,
                                 IDirect3DSurface9 **ppSurface,
                                 HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateRenderTarget(This, Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateDepthStencilSurface( struct NineDevice9 *This,
                                        UINT Width,
                                        UINT Height,
                                        D3DFORMAT Format,
                                        D3DMULTISAMPLE_TYPE MultiSample,
                                        DWORD MultisampleQuality,
                                        BOOL Discard,
                                        IDirect3DSurface9 **ppSurface,
                                        HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateDepthStencilSurface(This, Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_UpdateSurface( struct NineDevice9 *This,
                            IDirect3DSurface9 *pSourceSurface,
                            const RECT *pSourceRect,
                            IDirect3DSurface9 *pDestinationSurface,
                            const POINT *pDestPoint )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_UpdateSurface(This, pSourceSurface, pSourceRect, pDestinationSurface, pDestPoint);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_UpdateTexture( struct NineDevice9 *This,
                            IDirect3DBaseTexture9 *pSourceTexture,
                            IDirect3DBaseTexture9 *pDestinationTexture )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_UpdateTexture(This, pSourceTexture, pDestinationTexture);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetRenderTargetData( struct NineDevice9 *This,
                                  IDirect3DSurface9 *pRenderTarget,
                                  IDirect3DSurface9 *pDestSurface )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetRenderTargetData(This, pRenderTarget, pDestSurface);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetFrontBufferData( struct NineDevice9 *This,
                                 UINT iSwapChain,
                                 IDirect3DSurface9 *pDestSurface )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetFrontBufferData(This, iSwapChain, pDestSurface);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_StretchRect( struct NineDevice9 *This,
                          IDirect3DSurface9 *pSourceSurface,
                          const RECT *pSourceRect,
                          IDirect3DSurface9 *pDestSurface,
                          const RECT *pDestRect,
                          D3DTEXTUREFILTERTYPE Filter )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_StretchRect(This, pSourceSurface, pSourceRect, pDestSurface, pDestRect, Filter);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_ColorFill( struct NineDevice9 *This,
                        IDirect3DSurface9 *pSurface,
                        const RECT *pRect,
                        D3DCOLOR color )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_ColorFill(This, pSurface, pRect, color);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateOffscreenPlainSurface( struct NineDevice9 *This,
                                          UINT Width,
                                          UINT Height,
                                          D3DFORMAT Format,
                                          D3DPOOL Pool,
                                          IDirect3DSurface9 **ppSurface,
                                          HANDLE *pSharedHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateOffscreenPlainSurface(This, Width, Height, Format, Pool, ppSurface, pSharedHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetRenderTarget( struct NineDevice9 *This,
                              DWORD RenderTargetIndex,
                              IDirect3DSurface9 *pRenderTarget )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetRenderTarget(This, RenderTargetIndex, pRenderTarget);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetRenderTarget( struct NineDevice9 *This,
                              DWORD RenderTargetIndex,
                              IDirect3DSurface9 **ppRenderTarget )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetRenderTarget(This, RenderTargetIndex, ppRenderTarget);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetDepthStencilSurface( struct NineDevice9 *This,
                                     IDirect3DSurface9 *pNewZStencil )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetDepthStencilSurface(This, pNewZStencil);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetDepthStencilSurface( struct NineDevice9 *This,
                                     IDirect3DSurface9 **ppZStencilSurface )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetDepthStencilSurface(This, ppZStencilSurface);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_BeginScene( struct NineDevice9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_BeginScene(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_EndScene( struct NineDevice9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_EndScene(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_Clear( struct NineDevice9 *This,
                    DWORD Count,
                    const D3DRECT *pRects,
                    DWORD Flags,
                    D3DCOLOR Color,
                    float Z,
                    DWORD Stencil )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_Clear(This, Count, pRects, Flags, Color, Z, Stencil);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetTransform( struct NineDevice9 *This,
                           D3DTRANSFORMSTATETYPE State,
                           const D3DMATRIX *pMatrix )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetTransform(This, State, pMatrix);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetTransform( struct NineDevice9 *This,
                           D3DTRANSFORMSTATETYPE State,
                           D3DMATRIX *pMatrix )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetTransform(This, State, pMatrix);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_MultiplyTransform( struct NineDevice9 *This,
                                D3DTRANSFORMSTATETYPE State,
                                const D3DMATRIX *pMatrix )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_MultiplyTransform(This, State, pMatrix);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetViewport( struct NineDevice9 *This,
                          const D3DVIEWPORT9 *pViewport )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetViewport(This, pViewport);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetViewport( struct NineDevice9 *This,
                          D3DVIEWPORT9 *pViewport )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetViewport(This, pViewport);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetMaterial( struct NineDevice9 *This,
                          const D3DMATERIAL9 *pMaterial )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetMaterial(This, pMaterial);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetMaterial( struct NineDevice9 *This,
                          D3DMATERIAL9 *pMaterial )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetMaterial(This, pMaterial);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetLight( struct NineDevice9 *This,
                       DWORD Index,
                       const D3DLIGHT9 *pLight )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetLight(This, Index, pLight);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetLight( struct NineDevice9 *This,
                       DWORD Index,
                       D3DLIGHT9 *pLight )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetLight(This, Index, pLight);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_LightEnable( struct NineDevice9 *This,
                          DWORD Index,
                          BOOL Enable )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_LightEnable(This, Index, Enable);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetLightEnable( struct NineDevice9 *This,
                             DWORD Index,
                             BOOL *pEnable )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetLightEnable(This, Index, pEnable);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetClipPlane( struct NineDevice9 *This,
                           DWORD Index,
                           const float *pPlane )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetClipPlane(This, Index, pPlane);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetClipPlane( struct NineDevice9 *This,
                           DWORD Index,
                           float *pPlane )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetClipPlane(This, Index, pPlane);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetRenderState( struct NineDevice9 *This,
                             D3DRENDERSTATETYPE State,
                             DWORD Value )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetRenderState(This, State, Value);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetRenderState( struct NineDevice9 *This,
                             D3DRENDERSTATETYPE State,
                             DWORD *pValue )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetRenderState(This, State, pValue);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateStateBlock( struct NineDevice9 *This,
                               D3DSTATEBLOCKTYPE Type,
                               IDirect3DStateBlock9 **ppSB )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateStateBlock(This, Type, ppSB);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_BeginStateBlock( struct NineDevice9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_BeginStateBlock(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_EndStateBlock( struct NineDevice9 *This,
                            IDirect3DStateBlock9 **ppSB )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_EndStateBlock(This, ppSB);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetClipStatus( struct NineDevice9 *This,
                            const D3DCLIPSTATUS9 *pClipStatus )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetClipStatus(This, pClipStatus);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetClipStatus( struct NineDevice9 *This,
                            D3DCLIPSTATUS9 *pClipStatus )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetClipStatus(This, pClipStatus);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetTexture( struct NineDevice9 *This,
                         DWORD Stage,
                         IDirect3DBaseTexture9 **ppTexture )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetTexture(This, Stage, ppTexture);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetTexture( struct NineDevice9 *This,
                         DWORD Stage,
                         IDirect3DBaseTexture9 *pTexture )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetTexture(This, Stage, pTexture);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetTextureStageState( struct NineDevice9 *This,
                                   DWORD Stage,
                                   D3DTEXTURESTAGESTATETYPE Type,
                                   DWORD *pValue )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetTextureStageState(This, Stage, Type, pValue);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetTextureStageState( struct NineDevice9 *This,
                                   DWORD Stage,
                                   D3DTEXTURESTAGESTATETYPE Type,
                                   DWORD Value )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetTextureStageState(This, Stage, Type, Value);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetSamplerState( struct NineDevice9 *This,
                              DWORD Sampler,
                              D3DSAMPLERSTATETYPE Type,
                              DWORD *pValue )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetSamplerState(This, Sampler, Type, pValue);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetSamplerState( struct NineDevice9 *This,
                              DWORD Sampler,
                              D3DSAMPLERSTATETYPE Type,
                              DWORD Value )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetSamplerState(This, Sampler, Type, Value);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_ValidateDevice( struct NineDevice9 *This,
                             DWORD *pNumPasses )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_ValidateDevice(This, pNumPasses);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetPaletteEntries( struct NineDevice9 *This,
                                UINT PaletteNumber,
                                const PALETTEENTRY *pEntries )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetPaletteEntries(This, PaletteNumber, pEntries);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetPaletteEntries( struct NineDevice9 *This,
                                UINT PaletteNumber,
                                PALETTEENTRY *pEntries )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetPaletteEntries(This, PaletteNumber, pEntries);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetCurrentTexturePalette( struct NineDevice9 *This,
                                       UINT PaletteNumber )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetCurrentTexturePalette(This, PaletteNumber);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetCurrentTexturePalette( struct NineDevice9 *This,
                                       UINT *PaletteNumber )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetCurrentTexturePalette(This, PaletteNumber);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetScissorRect( struct NineDevice9 *This,
                             const RECT *pRect )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetScissorRect(This, pRect);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetScissorRect( struct NineDevice9 *This,
                             RECT *pRect )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetScissorRect(This, pRect);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetSoftwareVertexProcessing( struct NineDevice9 *This,
                                          BOOL bSoftware )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetSoftwareVertexProcessing(This, bSoftware);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static BOOL NINE_WINAPI
 LockDevice9_GetSoftwareVertexProcessing( struct NineDevice9 *This )
 {
     BOOL r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetSoftwareVertexProcessing(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetNPatchMode( struct NineDevice9 *This,
                            float nSegments )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetNPatchMode(This, nSegments);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static float NINE_WINAPI
 LockDevice9_GetNPatchMode( struct NineDevice9 *This )
 {
     float r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetNPatchMode(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DrawPrimitive( struct NineDevice9 *This,
                            D3DPRIMITIVETYPE PrimitiveType,
                            UINT StartVertex,
                            UINT PrimitiveCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DrawPrimitive(This, PrimitiveType, StartVertex, PrimitiveCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DrawIndexedPrimitive( struct NineDevice9 *This,
                                   D3DPRIMITIVETYPE PrimitiveType,
                                   INT BaseVertexIndex,
                                   UINT MinVertexIndex,
                                   UINT NumVertices,
                                   UINT startIndex,
                                   UINT primCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DrawIndexedPrimitive(This, PrimitiveType, BaseVertexIndex, MinVertexIndex, NumVertices, startIndex, primCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DrawPrimitiveUP( struct NineDevice9 *This,
                              D3DPRIMITIVETYPE PrimitiveType,
                              UINT PrimitiveCount,
                              const void *pVertexStreamZeroData,
                              UINT VertexStreamZeroStride )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DrawPrimitiveUP(This, PrimitiveType, PrimitiveCount, pVertexStreamZeroData, VertexStreamZeroStride);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DrawIndexedPrimitiveUP( struct NineDevice9 *This,
                                     D3DPRIMITIVETYPE PrimitiveType,
                                     UINT MinVertexIndex,
                                     UINT NumVertices,
                                     UINT PrimitiveCount,
                                     const void *pIndexData,
                                     D3DFORMAT IndexDataFormat,
                                     const void *pVertexStreamZeroData,
                                     UINT VertexStreamZeroStride )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DrawIndexedPrimitiveUP(This, PrimitiveType, MinVertexIndex, NumVertices, PrimitiveCount, pIndexData, IndexDataFormat, pVertexStreamZeroData, VertexStreamZeroStride);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_ProcessVertices( struct NineDevice9 *This,
                              UINT SrcStartIndex,
                              UINT DestIndex,
                              UINT VertexCount,
                              IDirect3DVertexBuffer9 *pDestBuffer,
                              IDirect3DVertexDeclaration9 *pVertexDecl,
                              DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_ProcessVertices(This, SrcStartIndex, DestIndex, VertexCount, pDestBuffer, pVertexDecl, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateVertexDeclaration( struct NineDevice9 *This,
                                      const D3DVERTEXELEMENT9 *pVertexElements,
                                      IDirect3DVertexDeclaration9 **ppDecl )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateVertexDeclaration(This, pVertexElements, ppDecl);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetVertexDeclaration( struct NineDevice9 *This,
                                   IDirect3DVertexDeclaration9 *pDecl )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetVertexDeclaration(This, pDecl);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetVertexDeclaration( struct NineDevice9 *This,
                                   IDirect3DVertexDeclaration9 **ppDecl )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetVertexDeclaration(This, ppDecl);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetFVF( struct NineDevice9 *This,
                     DWORD FVF )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetFVF(This, FVF);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetFVF( struct NineDevice9 *This,
                     DWORD *pFVF )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetFVF(This, pFVF);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateVertexShader( struct NineDevice9 *This,
                                 const DWORD *pFunction,
                                 IDirect3DVertexShader9 **ppShader )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateVertexShader(This, pFunction, ppShader);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetVertexShader( struct NineDevice9 *This,
                              IDirect3DVertexShader9 *pShader )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetVertexShader(This, pShader);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetVertexShader( struct NineDevice9 *This,
                              IDirect3DVertexShader9 **ppShader )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetVertexShader(This, ppShader);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetVertexShaderConstantF( struct NineDevice9 *This,
                                       UINT StartRegister,
                                       const float *pConstantData,
                                       UINT Vector4fCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetVertexShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetVertexShaderConstantF( struct NineDevice9 *This,
                                       UINT StartRegister,
                                       float *pConstantData,
                                       UINT Vector4fCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetVertexShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetVertexShaderConstantI( struct NineDevice9 *This,
                                       UINT StartRegister,
                                       const int *pConstantData,
                                       UINT Vector4iCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetVertexShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetVertexShaderConstantI( struct NineDevice9 *This,
                                       UINT StartRegister,
                                       int *pConstantData,
                                       UINT Vector4iCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetVertexShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetVertexShaderConstantB( struct NineDevice9 *This,
                                       UINT StartRegister,
                                       const BOOL *pConstantData,
                                       UINT BoolCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetVertexShaderConstantB(This, StartRegister, pConstantData, BoolCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetVertexShaderConstantB( struct NineDevice9 *This,
                                       UINT StartRegister,
                                       BOOL *pConstantData,
                                       UINT BoolCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetVertexShaderConstantB(This, StartRegister, pConstantData, BoolCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetStreamSource( struct NineDevice9 *This,
                              UINT StreamNumber,
                              IDirect3DVertexBuffer9 *pStreamData,
                              UINT OffsetInBytes,
                              UINT Stride )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetStreamSource(This, StreamNumber, pStreamData, OffsetInBytes, Stride);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetStreamSource( struct NineDevice9 *This,
                              UINT StreamNumber,
                              IDirect3DVertexBuffer9 **ppStreamData,
                              UINT *pOffsetInBytes,
                              UINT *pStride )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetStreamSource(This, StreamNumber, ppStreamData, pOffsetInBytes, pStride);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetStreamSourceFreq( struct NineDevice9 *This,
                                  UINT StreamNumber,
                                  UINT Setting )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetStreamSourceFreq(This, StreamNumber, Setting);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetStreamSourceFreq( struct NineDevice9 *This,
                                  UINT StreamNumber,
                                  UINT *pSetting )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetStreamSourceFreq(This, StreamNumber, pSetting);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetIndices( struct NineDevice9 *This,
                         IDirect3DIndexBuffer9 *pIndexData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetIndices(This, pIndexData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetIndices( struct NineDevice9 *This,
                         IDirect3DIndexBuffer9 **ppIndexData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetIndices(This, ppIndexData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreatePixelShader( struct NineDevice9 *This,
                                const DWORD *pFunction,
                                IDirect3DPixelShader9 **ppShader )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreatePixelShader(This, pFunction, ppShader);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetPixelShader( struct NineDevice9 *This,
                             IDirect3DPixelShader9 *pShader )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetPixelShader(This, pShader);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetPixelShader( struct NineDevice9 *This,
                             IDirect3DPixelShader9 **ppShader )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetPixelShader(This, ppShader);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetPixelShaderConstantF( struct NineDevice9 *This,
                                      UINT StartRegister,
                                      const float *pConstantData,
                                      UINT Vector4fCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetPixelShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetPixelShaderConstantF( struct NineDevice9 *This,
                                      UINT StartRegister,
                                      float *pConstantData,
                                      UINT Vector4fCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetPixelShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetPixelShaderConstantI( struct NineDevice9 *This,
                                      UINT StartRegister,
                                      const int *pConstantData,
                                      UINT Vector4iCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetPixelShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetPixelShaderConstantI( struct NineDevice9 *This,
                                      UINT StartRegister,
                                      int *pConstantData,
                                      UINT Vector4iCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetPixelShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_SetPixelShaderConstantB( struct NineDevice9 *This,
                                      UINT StartRegister,
                                      const BOOL *pConstantData,
                                      UINT BoolCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_SetPixelShaderConstantB(This, StartRegister, pConstantData, BoolCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_GetPixelShaderConstantB( struct NineDevice9 *This,
                                      UINT StartRegister,
                                      BOOL *pConstantData,
                                      UINT BoolCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_GetPixelShaderConstantB(This, StartRegister, pConstantData, BoolCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DrawRectPatch( struct NineDevice9 *This,
                            UINT Handle,
                            const float *pNumSegs,
                            const D3DRECTPATCH_INFO *pRectPatchInfo )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DrawRectPatch(This, Handle, pNumSegs, pRectPatchInfo);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DrawTriPatch( struct NineDevice9 *This,
                           UINT Handle,
                           const float *pNumSegs,
                           const D3DTRIPATCH_INFO *pTriPatchInfo )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DrawTriPatch(This, Handle, pNumSegs, pTriPatchInfo);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_DeletePatch( struct NineDevice9 *This,
                          UINT Handle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_DeletePatch(This, Handle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9_CreateQuery( struct NineDevice9 *This,
                          D3DQUERYTYPE Type,
                          IDirect3DQuery9 **ppQuery )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9_CreateQuery(This, Type, ppQuery);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DDevice9Vtbl LockDevice9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockDevice9_TestCooperativeLevel,
     (void *)LockDevice9_GetAvailableTextureMem,
     (void *)LockDevice9_EvictManagedResources,
@@ -2078,212 +2078,212 @@ IDirect3DDevice9Vtbl LockDevice9_vtable = {
 static HRESULT NINE_WINAPI
 LockDevice9Ex_SetConvolutionMonoKernel( struct NineDevice9Ex *This,
                                         UINT width,
                                         UINT height,
                                         float *rows,
                                         float *columns )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_SetConvolutionMonoKernel(This, width, height, rows, columns);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_ComposeRects( struct NineDevice9Ex *This,
                             IDirect3DSurface9 *pSrc,
                             IDirect3DSurface9 *pDst,
                             IDirect3DVertexBuffer9 *pSrcRectDescs,
                             UINT NumRects,
                             IDirect3DVertexBuffer9 *pDstRectDescs,
                             D3DCOMPOSERECTSOP Operation,
                             int Xoffset,
                             int Yoffset )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_ComposeRects(This, pSrc, pDst, pSrcRectDescs, NumRects, pDstRectDescs, Operation, Xoffset, Yoffset);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_PresentEx( struct NineDevice9Ex *This,
                          const RECT *pSourceRect,
                          const RECT *pDestRect,
                          HWND hDestWindowOverride,
                          const RGNDATA *pDirtyRegion,
                          DWORD dwFlags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_PresentEx(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_GetGPUThreadPriority( struct NineDevice9Ex *This,
                                     INT *pPriority )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_GetGPUThreadPriority(This, pPriority);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_SetGPUThreadPriority( struct NineDevice9Ex *This,
                                     INT Priority )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_SetGPUThreadPriority(This, Priority);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_WaitForVBlank( struct NineDevice9Ex *This,
                              UINT iSwapChain )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_WaitForVBlank(This, iSwapChain);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_CheckResourceResidency( struct NineDevice9Ex *This,
                                       IDirect3DResource9 **pResourceArray,
                                       UINT32 NumResources )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_CheckResourceResidency(This, pResourceArray, NumResources);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_SetMaximumFrameLatency( struct NineDevice9Ex *This,
                                       UINT MaxLatency )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_SetMaximumFrameLatency(This, MaxLatency);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_GetMaximumFrameLatency( struct NineDevice9Ex *This,
                                       UINT *pMaxLatency )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_GetMaximumFrameLatency(This, pMaxLatency);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_CheckDeviceState( struct NineDevice9Ex *This,
                                 HWND hDestinationWindow )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_CheckDeviceState(This, hDestinationWindow);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_CreateRenderTargetEx( struct NineDevice9Ex *This,
                                     UINT Width,
                                     UINT Height,
                                     D3DFORMAT Format,
                                     D3DMULTISAMPLE_TYPE MultiSample,
                                     DWORD MultisampleQuality,
                                     BOOL Lockable,
                                     IDirect3DSurface9 **ppSurface,
                                     HANDLE *pSharedHandle,
                                     DWORD Usage )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_CreateRenderTargetEx(This, Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle, Usage);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_CreateOffscreenPlainSurfaceEx( struct NineDevice9Ex *This,
                                              UINT Width,
                                              UINT Height,
                                              D3DFORMAT Format,
                                              D3DPOOL Pool,
                                              IDirect3DSurface9 **ppSurface,
                                              HANDLE *pSharedHandle,
                                              DWORD Usage )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_CreateOffscreenPlainSurfaceEx(This, Width, Height, Format, Pool, ppSurface, pSharedHandle, Usage);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_CreateDepthStencilSurfaceEx( struct NineDevice9Ex *This,
                                            UINT Width,
                                            UINT Height,
                                            D3DFORMAT Format,
                                            D3DMULTISAMPLE_TYPE MultiSample,
                                            DWORD MultisampleQuality,
                                            BOOL Discard,
                                            IDirect3DSurface9 **ppSurface,
                                            HANDLE *pSharedHandle,
                                            DWORD Usage )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_CreateDepthStencilSurfaceEx(This, Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle, Usage);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_ResetEx( struct NineDevice9Ex *This,
                        D3DPRESENT_PARAMETERS *pPresentationParameters,
                        D3DDISPLAYMODEEX *pFullscreenDisplayMode )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_ResetEx(This, pPresentationParameters, pFullscreenDisplayMode);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Ex_GetDisplayModeEx( struct NineDevice9Ex *This,
                                 UINT iSwapChain,
                                 D3DDISPLAYMODEEX *pMode,
                                 D3DDISPLAYROTATION *pRotation )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Ex_GetDisplayModeEx(This, iSwapChain, pMode, pRotation);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DDevice9ExVtbl LockDevice9Ex_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockDevice9_TestCooperativeLevel,
     (void *)LockDevice9_GetAvailableTextureMem,
     (void *)LockDevice9_EvictManagedResources,
@@ -2419,48 +2419,48 @@ IDirect3DDevice9ExVtbl LockDevice9Ex_vtable = {
 
 static HRESULT NINE_WINAPI
 LockDevice9Video_GetContentProtectionCaps( struct NineDevice9Video *This,
                                            const GUID *pCryptoType,
                                            const GUID *pDecodeProfile,
                                            D3DCONTENTPROTECTIONCAPS *pCaps )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Video_GetContentProtectionCaps(This, pCryptoType, pDecodeProfile, pCaps);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Video_CreateAuthenticatedChannel( struct NineDevice9Video *This,
                                              D3DAUTHENTICATEDCHANNELTYPE ChannelType,
                                              IDirect3DAuthenticatedChannel9 **ppAuthenticatedChannel,
                                              HANDLE *pChannelHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Video_CreateAuthenticatedChannel(This, ChannelType, ppAuthenticatedChannel, pChannelHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockDevice9Video_CreateCryptoSession( struct NineDevice9Video *This,
                                       const GUID *pCryptoType,
                                       const GUID *pDecodeProfile,
                                       IDirect3DCryptoSession9 **ppCryptoSession,
                                       HANDLE *pCryptoHandle )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineDevice9Video_CreateCryptoSession(This, pCryptoType, pDecodeProfile, ppCryptoSession, pCryptoHandle);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DDevice9VideoVtbl LockDevice9Video_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockDevice9Video_GetContentProtectionCaps,
     (void *)LockDevice9Video_CreateAuthenticatedChannel,
     (void *)LockDevice9Video_CreateCryptoSession
@@ -2469,43 +2469,43 @@ IDirect3DDevice9VideoVtbl LockDevice9Video_vtable = {
 static HRESULT NINE_WINAPI
 LockIndexBuffer9_Lock( struct NineIndexBuffer9 *This,
                        UINT OffsetToLock,
                        UINT SizeToLock,
                        void **ppbData,
                        DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineIndexBuffer9_Lock(This, OffsetToLock, SizeToLock, ppbData, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockIndexBuffer9_Unlock( struct NineIndexBuffer9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineIndexBuffer9_Unlock(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockIndexBuffer9_GetDesc( struct NineIndexBuffer9 *This,
                           D3DINDEXBUFFER_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineIndexBuffer9_GetDesc(This, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 IDirect3DIndexBuffer9Vtbl LockIndexBuffer9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
     (void *)LockUnknown_SetPrivateData,
@@ -2521,103 +2521,103 @@ IDirect3DIndexBuffer9Vtbl LockIndexBuffer9_vtable = {
 };
 
 #if 0
 static HRESULT NINE_WINAPI
 LockPixelShader9_GetDevice( struct NinePixelShader9 *This,
                             IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockPixelShader9_GetFunction( struct NinePixelShader9 *This,
                               void *pData,
                               UINT *pSizeOfData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NinePixelShader9_GetFunction(This, pData, pSizeOfData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DPixelShader9Vtbl LockPixelShader9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice,
     (void *)LockPixelShader9_GetFunction
 };
 
 #if 0
 static HRESULT NINE_WINAPI
 LockQuery9_GetDevice( struct NineQuery9 *This,
                       IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 #if 0
 static D3DQUERYTYPE NINE_WINAPI
 LockQuery9_GetType( struct NineQuery9 *This )
 {
     D3DQUERYTYPE r;
     mtx_lock(&d3dlock_global);
     r = NineQuery9_GetType(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 #if 0
 static DWORD NINE_WINAPI
 LockQuery9_GetDataSize( struct NineQuery9 *This )
 {
     DWORD r;
     mtx_lock(&d3dlock_global);
     r = NineQuery9_GetDataSize(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockQuery9_Issue( struct NineQuery9 *This,
                   DWORD dwIssueFlags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineQuery9_Issue(This, dwIssueFlags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockQuery9_GetData( struct NineQuery9 *This,
                     void *pData,
                     DWORD dwSize,
                     DWORD dwGetDataFlags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineQuery9_GetData(This, pData, dwSize, dwGetDataFlags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DQuery9Vtbl LockQuery9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Query9 iface */
     (void *)NineQuery9_GetType, /* immutable */
     (void *)NineQuery9_GetDataSize, /* immutable */
@@ -2626,121 +2626,121 @@ IDirect3DQuery9Vtbl LockQuery9_vtable = {
 };
 
 #if 0
 static HRESULT NINE_WINAPI
 LockStateBlock9_GetDevice( struct NineStateBlock9 *This,
                            IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockStateBlock9_Capture( struct NineStateBlock9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineStateBlock9_Capture(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockStateBlock9_Apply( struct NineStateBlock9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineStateBlock9_Apply(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DStateBlock9Vtbl LockStateBlock9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of StateBlock9 iface */
     (void *)LockStateBlock9_Capture,
     (void *)LockStateBlock9_Apply
 };
 
 static HRESULT NINE_WINAPI
 LockSurface9_GetContainer( struct NineSurface9 *This,
                            REFIID riid,
                            void **ppContainer )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSurface9_GetContainer(This, riid, ppContainer);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockSurface9_GetDesc( struct NineSurface9 *This,
                       D3DSURFACE_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSurface9_GetDesc(This, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockSurface9_LockRect( struct NineSurface9 *This,
                        D3DLOCKED_RECT *pLockedRect,
                        const RECT *pRect,
                        DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSurface9_LockRect(This, pLockedRect, pRect, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSurface9_UnlockRect( struct NineSurface9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSurface9_UnlockRect(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSurface9_GetDC( struct NineSurface9 *This,
                     HDC *phdc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSurface9_GetDC(This, phdc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSurface9_ReleaseDC( struct NineSurface9 *This,
                         HDC hdc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSurface9_ReleaseDC(This, hdc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DSurface9Vtbl LockSurface9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
     (void *)LockUnknown_SetPrivateData,
     (void *)LockUnknown_GetPrivateData,
@@ -2761,91 +2761,91 @@ static HRESULT NINE_WINAPI
 LockSwapChain9_Present( struct NineSwapChain9 *This,
                         const RECT *pSourceRect,
                         const RECT *pDestRect,
                         HWND hDestWindowOverride,
                         const RGNDATA *pDirtyRegion,
                         DWORD dwFlags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9_Present(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSwapChain9_GetFrontBufferData( struct NineSwapChain9 *This,
                                    IDirect3DSurface9 *pDestSurface )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9_GetFrontBufferData(This, pDestSurface);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSwapChain9_GetBackBuffer( struct NineSwapChain9 *This,
                               UINT iBackBuffer,
                               D3DBACKBUFFER_TYPE Type,
                               IDirect3DSurface9 **ppBackBuffer )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9_GetBackBuffer(This, iBackBuffer, Type, ppBackBuffer);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSwapChain9_GetRasterStatus( struct NineSwapChain9 *This,
                                 D3DRASTER_STATUS *pRasterStatus )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9_GetRasterStatus(This, pRasterStatus);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSwapChain9_GetDisplayMode( struct NineSwapChain9 *This,
                                D3DDISPLAYMODE *pMode )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9_GetDisplayMode(This, pMode);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockSwapChain9_GetDevice( struct NineSwapChain9 *This,
                           IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockSwapChain9_GetPresentParameters( struct NineSwapChain9 *This,
                                      D3DPRESENT_PARAMETERS *pPresentationParameters )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9_GetPresentParameters(This, pPresentationParameters);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DSwapChain9Vtbl LockSwapChain9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockSwapChain9_Present,
     (void *)LockSwapChain9_GetFrontBufferData,
     (void *)LockSwapChain9_GetBackBuffer,
@@ -2855,44 +2855,44 @@ IDirect3DSwapChain9Vtbl LockSwapChain9_vtable = {
     (void *)LockSwapChain9_GetPresentParameters
 };
 
 static HRESULT NINE_WINAPI
 LockSwapChain9Ex_GetLastPresentCount( struct NineSwapChain9Ex *This,
                                       UINT *pLastPresentCount )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9Ex_GetLastPresentCount(This, pLastPresentCount);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSwapChain9Ex_GetPresentStats( struct NineSwapChain9Ex *This,
                                   D3DPRESENTSTATS *pPresentationStatistics )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9Ex_GetPresentStats(This, pPresentationStatistics);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockSwapChain9Ex_GetDisplayModeEx( struct NineSwapChain9Ex *This,
                                    D3DDISPLAYMODEEX *pMode,
                                    D3DDISPLAYROTATION *pRotation )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineSwapChain9Ex_GetDisplayModeEx(This, pMode, pRotation);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DSwapChain9ExVtbl LockSwapChain9Ex_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)LockSwapChain9_Present,
     (void *)LockSwapChain9_GetFrontBufferData,
     (void *)LockSwapChain9_GetBackBuffer,
@@ -2907,72 +2907,72 @@ IDirect3DSwapChain9ExVtbl LockSwapChain9Ex_vtable = {
 
 #if 0
 static HRESULT NINE_WINAPI
 LockTexture9_GetLevelDesc( struct NineTexture9 *This,
                            UINT Level,
                            D3DSURFACE_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineTexture9_GetLevelDesc(This, Level, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 #if 0
 static HRESULT NINE_WINAPI
 LockTexture9_GetSurfaceLevel( struct NineTexture9 *This,
                               UINT Level,
                               IDirect3DSurface9 **ppSurfaceLevel )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineTexture9_GetSurfaceLevel(This, Level, ppSurfaceLevel);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockTexture9_LockRect( struct NineTexture9 *This,
                        UINT Level,
                        D3DLOCKED_RECT *pLockedRect,
                        const RECT *pRect,
                        DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineTexture9_LockRect(This, Level, pLockedRect, pRect, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockTexture9_UnlockRect( struct NineTexture9 *This,
                          UINT Level )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineTexture9_UnlockRect(This, Level);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockTexture9_AddDirtyRect( struct NineTexture9 *This,
                            const RECT *pDirtyRect )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineTexture9_AddDirtyRect(This, pDirtyRect);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DTexture9Vtbl LockTexture9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
     (void *)LockUnknown_SetPrivateData,
     (void *)LockUnknown_GetPrivateData,
@@ -2997,43 +2997,43 @@ IDirect3DTexture9Vtbl LockTexture9_vtable = {
 static HRESULT NINE_WINAPI
 LockVertexBuffer9_Lock( struct NineVertexBuffer9 *This,
                         UINT OffsetToLock,
                         UINT SizeToLock,
                         void **ppbData,
                         DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVertexBuffer9_Lock(This, OffsetToLock, SizeToLock, ppbData, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockVertexBuffer9_Unlock( struct NineVertexBuffer9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVertexBuffer9_Unlock(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVertexBuffer9_GetDesc( struct NineVertexBuffer9 *This,
                            D3DVERTEXBUFFER_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVertexBuffer9_GetDesc(This, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 IDirect3DVertexBuffer9Vtbl LockVertexBuffer9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
     (void *)LockUnknown_SetPrivateData,
@@ -3049,136 +3049,136 @@ IDirect3DVertexBuffer9Vtbl LockVertexBuffer9_vtable = {
 };
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVertexDeclaration9_GetDevice( struct NineVertexDeclaration9 *This,
                                   IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockVertexDeclaration9_GetDeclaration( struct NineVertexDeclaration9 *This,
                                        D3DVERTEXELEMENT9 *pElement,
                                        UINT *pNumElements )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVertexDeclaration9_GetDeclaration(This, pElement, pNumElements);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DVertexDeclaration9Vtbl LockVertexDeclaration9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of VertexDecl9 iface */
     (void *)LockVertexDeclaration9_GetDeclaration
 };
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVertexShader9_GetDevice( struct NineVertexShader9 *This,
                              IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockVertexShader9_GetFunction( struct NineVertexShader9 *This,
                                void *pData,
                                UINT *pSizeOfData )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVertexShader9_GetFunction(This, pData, pSizeOfData);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DVertexShader9Vtbl LockVertexShader9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice,
     (void *)LockVertexShader9_GetFunction
 };
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVolume9_GetDevice( struct NineVolume9 *This,
                        IDirect3DDevice9 **ppDevice )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockVolume9_GetContainer( struct NineVolume9 *This,
                           REFIID riid,
                           void **ppContainer )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolume9_GetContainer(This, riid, ppContainer);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVolume9_GetDesc( struct NineVolume9 *This,
                      D3DVOLUME_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolume9_GetDesc(This, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockVolume9_LockBox( struct NineVolume9 *This,
                      D3DLOCKED_BOX *pLockedVolume,
                      const D3DBOX *pBox,
                      DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolume9_LockBox(This, pLockedVolume, pBox, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockVolume9_UnlockBox( struct NineVolume9 *This )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolume9_UnlockBox(This);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DVolume9Vtbl LockVolume9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Volume9 iface */
     (void *)NineUnknown_SetPrivateData,
     (void *)NineUnknown_GetPrivateData,
@@ -3191,72 +3191,72 @@ IDirect3DVolume9Vtbl LockVolume9_vtable = {
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVolumeTexture9_GetLevelDesc( struct NineVolumeTexture9 *This,
                                  UINT Level,
                                  D3DVOLUME_DESC *pDesc )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolumeTexture9_GetLevelDesc(This, Level, pDesc);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 #if 0
 static HRESULT NINE_WINAPI
 LockVolumeTexture9_GetVolumeLevel( struct NineVolumeTexture9 *This,
                                    UINT Level,
                                    IDirect3DVolume9 **ppVolumeLevel )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolumeTexture9_GetVolumeLevel(This, Level, ppVolumeLevel);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 #endif
 
 static HRESULT NINE_WINAPI
 LockVolumeTexture9_LockBox( struct NineVolumeTexture9 *This,
                             UINT Level,
                             D3DLOCKED_BOX *pLockedVolume,
                             const D3DBOX *pBox,
                             DWORD Flags )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolumeTexture9_LockBox(This, Level, pLockedVolume, pBox, Flags);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockVolumeTexture9_UnlockBox( struct NineVolumeTexture9 *This,
                               UINT Level )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolumeTexture9_UnlockBox(This, Level);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 static HRESULT NINE_WINAPI
 LockVolumeTexture9_AddDirtyBox( struct NineVolumeTexture9 *This,
                                 const D3DBOX *pDirtyBox )
 {
     HRESULT r;
     mtx_lock(&d3dlock_global);
     r = NineVolumeTexture9_AddDirtyBox(This, pDirtyBox);
-    pipe_mutex_unlock(d3dlock_global);
+    mtx_unlock(&d3dlock_global);
     return r;
 }
 
 IDirect3DVolumeTexture9Vtbl LockVolumeTexture9_vtable = {
     (void *)NineUnknown_QueryInterface,
     (void *)NineUnknown_AddRef,
     (void *)NineUnknown_ReleaseWithDtorLock,
     (void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
     (void *)LockUnknown_SetPrivateData,
     (void *)LockUnknown_GetPrivateData,
diff --git a/src/gallium/state_trackers/nine/nine_queue.c b/src/gallium/state_trackers/nine/nine_queue.c
index 39de0ca..2a65a1e 100644
--- a/src/gallium/state_trackers/nine/nine_queue.c
+++ b/src/gallium/state_trackers/nine/nine_queue.c
@@ -85,21 +85,21 @@ nine_queue_wait_flush(struct nine_queue_pool* ctx)
     struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
 
     /* wait for cmdbuf full */
     mtx_lock(&ctx->mutex_push);
     while (!cmdbuf->full)
     {
         DBG("waiting for full cmdbuf\n");
         cnd_wait(&ctx->event_push, &ctx->mutex_push);
     }
     DBG("got cmdbuf=%p\n", cmdbuf);
-    pipe_mutex_unlock(ctx->mutex_push);
+    mtx_unlock(&ctx->mutex_push);
 
     cmdbuf->offset = 0;
     ctx->cur_instr = 0;
 }
 
 /* Gets a pointer to the next memory slice.
  * Does not block.
  * Returns NULL on empty cmdbuf. */
 void *
 nine_queue_get(struct nine_queue_pool* ctx)
@@ -108,21 +108,21 @@ nine_queue_get(struct nine_queue_pool* ctx)
     unsigned offset;
 
     /* At this point there's always a cmdbuf. */
 
     if (ctx->cur_instr == cmdbuf->num_instr) {
         /* signal waiting producer */
         mtx_lock(&ctx->mutex_pop);
         DBG("freeing cmdbuf=%p\n", cmdbuf);
         cmdbuf->full = 0;
         cnd_signal(&ctx->event_pop);
-        pipe_mutex_unlock(ctx->mutex_pop);
+        mtx_unlock(&ctx->mutex_pop);
 
         ctx->tail = (ctx->tail + 1) & NINE_CMD_BUFS_MASK;
 
         return NULL;
     }
 
     /* At this point there's always a cmdbuf with an instruction to process. */
     offset = cmdbuf->offset;
     cmdbuf->offset += cmdbuf->instr_size[ctx->cur_instr];
     ctx->cur_instr ++;
@@ -144,35 +144,35 @@ nine_queue_flush(struct nine_queue_pool* ctx)
            cmdbuf, cmdbuf->num_instr, cmdbuf->offset);
 
     /* Nothing to flush */
     if (!cmdbuf->num_instr)
         return;
 
     /* signal waiting worker */
     mtx_lock(&ctx->mutex_push);
     cmdbuf->full = 1;
     cnd_signal(&ctx->event_push);
-    pipe_mutex_unlock(ctx->mutex_push);
+    mtx_unlock(&ctx->mutex_push);
 
     ctx->head = (ctx->head + 1) & NINE_CMD_BUFS_MASK;
 
     cmdbuf = &ctx->pool[ctx->head];
 
     /* wait for queue empty */
     mtx_lock(&ctx->mutex_pop);
     while (cmdbuf->full)
     {
         DBG("waiting for empty cmdbuf\n");
         cnd_wait(&ctx->event_pop, &ctx->mutex_pop);
     }
     DBG("got empty cmdbuf=%p\n", cmdbuf);
-    pipe_mutex_unlock(ctx->mutex_pop);
+    mtx_unlock(&ctx->mutex_pop);
     cmdbuf->offset = 0;
     cmdbuf->num_instr = 0;
 }
 
 /* Gets a pointer to a slice of memory with size @space.
  * Does block if the queue is full.
  * Returns NULL on @space > NINE_QUEUE_SIZE. */
 void *
 nine_queue_alloc(struct nine_queue_pool* ctx, unsigned space)
 {
diff --git a/src/gallium/state_trackers/nine/nine_state.c b/src/gallium/state_trackers/nine/nine_state.c
index 2f65414..c3483e4 100644
--- a/src/gallium/state_trackers/nine/nine_state.c
+++ b/src/gallium/state_trackers/nine/nine_state.c
@@ -76,21 +76,21 @@ struct csmt_context {
 /* Wait for the instruction to be processed.
  * Caller has to ensure that only one thread waits at a time.
  */
 static void
 nine_csmt_wait_processed(struct csmt_context *ctx)
 {
     mtx_lock(&ctx->mutex_processed);
     while (!p_atomic_read(&ctx->processed)) {
         cnd_wait(&ctx->event_processed, &ctx->mutex_processed);
     }
-    pipe_mutex_unlock(ctx->mutex_processed);
+    mtx_unlock(&ctx->mutex_processed);
 }
 
 /* CSMT worker thread */
 static
 PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
 {
     struct csmt_context *ctx = arg;
     struct csmt_instruction *instr;
     DBG("CSMT worker spawned\n");
 
@@ -102,37 +102,37 @@ PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
 
         /* Get instruction. NULL on empty cmdbuf. */
         while (!p_atomic_read(&ctx->terminate) &&
                (instr = (struct csmt_instruction *)nine_queue_get(ctx->pool))) {
 
             /* decode */
             if (instr->func(ctx->device, instr)) {
                 mtx_lock(&ctx->mutex_processed);
                 p_atomic_set(&ctx->processed, TRUE);
                 cnd_signal(&ctx->event_processed);
-                pipe_mutex_unlock(ctx->mutex_processed);
+                mtx_unlock(&ctx->mutex_processed);
             }
             if (p_atomic_read(&ctx->toPause)) {
-                pipe_mutex_unlock(ctx->thread_running);
+                mtx_unlock(&ctx->thread_running);
                 /* will wait here until the thread can be resumed */
                 mtx_lock(&ctx->thread_resume);
                 mtx_lock(&ctx->thread_running);
-                pipe_mutex_unlock(ctx->thread_resume);
+                mtx_unlock(&ctx->thread_resume);
             }
         }
 
-        pipe_mutex_unlock(ctx->thread_running);
+        mtx_unlock(&ctx->thread_running);
         if (p_atomic_read(&ctx->terminate)) {
             mtx_lock(&ctx->mutex_processed);
             p_atomic_set(&ctx->processed, TRUE);
             cnd_signal(&ctx->event_processed);
-            pipe_mutex_unlock(ctx->mutex_processed);
+            mtx_unlock(&ctx->mutex_processed);
             break;
         }
     }
 
     DBG("CSMT worker destroyed\n");
     return 0;
 }
 
 /* Create a CSMT context.
  * Spawns a worker thread.
@@ -266,22 +266,22 @@ nine_csmt_resume( struct NineDevice9 *device )
 {
     struct csmt_context *ctx = device->csmt_ctx;
 
     if (!device->csmt_active)
         return;
 
     if (!ctx->hasPaused)
         return;
 
     ctx->hasPaused = FALSE;
-    pipe_mutex_unlock(ctx->thread_running);
-    pipe_mutex_unlock(ctx->thread_resume);
+    mtx_unlock(&ctx->thread_running);
+    mtx_unlock(&ctx->thread_resume);
 }
 
 struct pipe_context *
 nine_context_get_pipe( struct NineDevice9 *device )
 {
     nine_csmt_process(device);
     return device->context.pipe;
 }
 
 struct pipe_context *
diff --git a/src/gallium/state_trackers/omx/entrypoint.c b/src/gallium/state_trackers/omx/entrypoint.c
index 0274caa..5afb58b 100644
--- a/src/gallium/state_trackers/omx/entrypoint.c
+++ b/src/gallium/state_trackers/omx/entrypoint.c
@@ -100,41 +100,41 @@ struct vl_screen *omx_get_screen(void)
          omx_screen = vl_dri2_screen_create(omx_display, 0);
          if (!omx_screen) {
             XCloseDisplay(omx_display);
             goto error;
          }
       }
    }
 
    ++omx_usecount;
 
-   pipe_mutex_unlock(omx_lock);
+   mtx_unlock(&omx_lock);
    return omx_screen;
 
 error:
-   pipe_mutex_unlock(omx_lock);
+   mtx_unlock(&omx_lock);
    return NULL;
 }
 
 void omx_put_screen(void)
 {
    mtx_lock(&omx_lock);
    if ((--omx_usecount) == 0) {
       omx_screen->destroy(omx_screen);
       omx_screen = NULL;
 
       if (omx_render_node)
          close(drm_fd);
       else
          XCloseDisplay(omx_display);
    }
-   pipe_mutex_unlock(omx_lock);
+   mtx_unlock(&omx_lock);
 }
 
 OMX_ERRORTYPE omx_workaround_Destructor(OMX_COMPONENTTYPE *comp)
 {
    omx_base_component_PrivateType* priv = (omx_base_component_PrivateType*)comp->pComponentPrivate;
 
    priv->state = OMX_StateInvalid;
    tsem_up(priv->messageSem);
 
    /* wait for thread to exit */
diff --git a/src/gallium/state_trackers/va/buffer.c b/src/gallium/state_trackers/va/buffer.c
index b9bf6f0..fb5b20e 100644
--- a/src/gallium/state_trackers/va/buffer.c
+++ b/src/gallium/state_trackers/va/buffer.c
@@ -59,39 +59,39 @@ vlVaCreateBuffer(VADriverContextP ctx, VAContextID context, VABufferType type,
       FREE(buf);
       return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }
 
    if (data)
       memcpy(buf->data, data, size * num_elements);
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    *buf_id = handle_table_add(drv->htab, buf);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaBufferSetNumElements(VADriverContextP ctx, VABufferID buf_id,
                          unsigned int num_elements)
 {
    vlVaDriver *drv;
    vlVaBuffer *buf;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    buf = handle_table_get(drv->htab, buf_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    if (!buf)
       return VA_STATUS_ERROR_INVALID_BUFFER;
 
    if (buf->derived_surface.resource)
       return VA_STATUS_ERROR_INVALID_BUFFER;
 
    buf->data = REALLOC(buf->data, buf->size * buf->num_elements,
                        buf->size * num_elements);
    buf->num_elements = num_elements;
 
@@ -113,41 +113,41 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
    drv = VL_VA_DRIVER(ctx);
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    if (!pbuff)
       return VA_STATUS_ERROR_INVALID_PARAMETER;
 
    mtx_lock(&drv->mutex);
    buf = handle_table_get(drv->htab, buf_id);
    if (!buf || buf->export_refcount > 0) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_BUFFER;
    }
 
    if (buf->derived_surface.resource) {
       *pbuff = pipe_buffer_map(drv->pipe, buf->derived_surface.resource,
                                PIPE_TRANSFER_WRITE,
                                &buf->derived_surface.transfer);
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
 
       if (!buf->derived_surface.transfer || !*pbuff)
          return VA_STATUS_ERROR_INVALID_BUFFER;
 
       if (buf->type == VAEncCodedBufferType) {
          ((VACodedBufferSegment*)buf->data)->buf = *pbuff;
          ((VACodedBufferSegment*)buf->data)->size = buf->coded_size;
          ((VACodedBufferSegment*)buf->data)->next = NULL;
          *pbuff = buf->data;
       }
    } else {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       *pbuff = buf->data;
    }
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
 {
    vlVaDriver *drv;
@@ -156,80 +156,80 @@ vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    buf = handle_table_get(drv->htab, buf_id);
    if (!buf || buf->export_refcount > 0) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_BUFFER;
    }
 
    if (buf->derived_surface.resource) {
       if (!buf->derived_surface.transfer) {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_INVALID_BUFFER;
       }
 
       pipe_buffer_unmap(drv->pipe, buf->derived_surface.transfer);
       buf->derived_surface.transfer = NULL;
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaDestroyBuffer(VADriverContextP ctx, VABufferID buf_id)
 {
    vlVaDriver *drv;
    vlVaBuffer *buf;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    buf = handle_table_get(drv->htab, buf_id);
    if (!buf) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_BUFFER;
    }
 
    if (buf->derived_surface.resource)
       pipe_resource_reference(&buf->derived_surface.resource, NULL);
 
    FREE(buf->data);
    FREE(buf);
    handle_table_remove(VL_VA_DRIVER(ctx)->htab, buf_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaBufferInfo(VADriverContextP ctx, VABufferID buf_id, VABufferType *type,
                unsigned int *size, unsigned int *num_elements)
 {
    vlVaDriver *drv;
    vlVaBuffer *buf;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    buf = handle_table_get(drv->htab, buf_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    if (!buf)
       return VA_STATUS_ERROR_INVALID_BUFFER;
 
    *type = buf->type;
    *size = buf->size;
    *num_elements = buf->num_elements;
 
    return VA_STATUS_SUCCESS;
 }
 
@@ -249,21 +249,21 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
       0
    };
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    screen = VL_VA_PSCREEN(ctx);
    mtx_lock(&drv->mutex);
    buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, buf_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    if (!buf)
       return VA_STATUS_ERROR_INVALID_BUFFER;
 
    /* Only VA surface|image like buffers are supported for now. */
    if (buf->type != VAImageBufferType)
       return VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE;
 
    if (!out_buf_info)
       return VA_STATUS_ERROR_INVALID_PARAMETER;
@@ -297,25 +297,25 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
 
          mtx_lock(&drv->mutex);
          drv->pipe->flush(drv->pipe, NULL, 0);
 
          memset(&whandle, 0, sizeof(whandle));
          whandle.type = DRM_API_HANDLE_TYPE_FD;
 
          if (!screen->resource_get_handle(screen, drv->pipe,
                                           buf->derived_surface.resource,
                                           &whandle, PIPE_HANDLE_USAGE_READ_WRITE)) {
-            pipe_mutex_unlock(drv->mutex);
+            mtx_unlock(&drv->mutex);
             return VA_STATUS_ERROR_INVALID_BUFFER;
          }
 
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
 
          buf_info->handle = (intptr_t)whandle.handle;
          break;
       }
       default:
          return VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE;
       }
 
       buf_info->type = buf->type;
       buf_info->mem_type = mem_type;
@@ -334,21 +334,21 @@ vlVaReleaseBufferHandle(VADriverContextP ctx, VABufferID buf_id)
 {
    vlVaDriver *drv;
    vlVaBuffer *buf;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    buf = handle_table_get(drv->htab, buf_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    if (!buf)
       return VA_STATUS_ERROR_INVALID_BUFFER;
 
    if (buf->export_refcount == 0)
       return VA_STATUS_ERROR_INVALID_BUFFER;
 
    if (--buf->export_refcount == 0) {
       VABufferInfo * const buf_info = &buf->export_state;
 
diff --git a/src/gallium/state_trackers/va/config.c b/src/gallium/state_trackers/va/config.c
index 3d4e24b..15beb6c 100644
--- a/src/gallium/state_trackers/va/config.c
+++ b/src/gallium/state_trackers/va/config.c
@@ -195,21 +195,21 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
             }
          }
       }
 
       /* Default value if not specified in the input attributes. */
       if (!config->rt_format)
          config->rt_format = VA_RT_FORMAT_YUV420 | VA_RT_FORMAT_RGB32;
 
       mtx_lock(&drv->mutex);
       *config_id = handle_table_add(drv->htab, config);
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_SUCCESS;
    }
 
    p = ProfileToPipe(profile);
    if (p == PIPE_VIDEO_PROFILE_UNKNOWN) {
       FREE(config);
       return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
    }
 
    pscreen = VL_VA_PSCREEN(ctx);
@@ -260,21 +260,21 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
          }
       }
    }
 
    /* Default value if not specified in the input attributes. */
    if (!config->rt_format)
       config->rt_format = VA_RT_FORMAT_YUV420;
 
    mtx_lock(&drv->mutex);
    *config_id = handle_table_add(drv->htab, config);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaDestroyConfig(VADriverContextP ctx, VAConfigID config_id)
 {
    vlVaDriver *drv;
    vlVaConfig *config;
 
@@ -287,21 +287,21 @@ vlVaDestroyConfig(VADriverContextP ctx, VAConfigID config_id)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    config = handle_table_get(drv->htab, config_id);
 
    if (!config)
       return VA_STATUS_ERROR_INVALID_CONFIG;
 
    FREE(config);
    handle_table_remove(drv->htab, config_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile *profile,
                           VAEntrypoint *entrypoint, VAConfigAttrib *attrib_list, int *num_attribs)
 {
    vlVaDriver *drv;
    vlVaConfig *config;
@@ -309,21 +309,21 @@ vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
 
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    config = handle_table_get(drv->htab, config_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    if (!config)
       return VA_STATUS_ERROR_INVALID_CONFIG;
 
    *profile = PipeToProfile(config->profile);
 
    if (config->profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
       *entrypoint = VAEntrypointVideoProc;
       *num_attribs = 0;
       return VA_STATUS_SUCCESS;
diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
index a345247..4224ed7 100644
--- a/src/gallium/state_trackers/va/context.c
+++ b/src/gallium/state_trackers/va/context.c
@@ -209,21 +209,21 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
    vlVaContext *context;
    vlVaConfig *config;
    int is_vpp;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    config = handle_table_get(drv->htab, config_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    is_vpp = config->profile == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
             !picture_height && !flag && !render_targets && !num_render_targets;
 
    if (!(picture_width && picture_height) && !is_vpp)
       return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
 
    context = CALLOC(1, sizeof(vlVaContext));
    if (!context)
       return VA_STATUS_ERROR_ALLOCATION_FAILED;
@@ -282,39 +282,39 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
       }
    }
 
    context->desc.base.profile = config->profile;
    context->desc.base.entry_point = config->entrypoint;
    if (config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
       context->desc.h264enc.rate_ctrl.rate_ctrl_method = config->rc;
 
    mtx_lock(&drv->mutex);
    *context_id = handle_table_add(drv->htab, context);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
 {
    vlVaDriver *drv;
    vlVaContext *context;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    context = handle_table_get(drv->htab, context_id);
    if (!context) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_CONTEXT;
    }
 
    if (context->decoder) {
       if (context->desc.base.entry_point != PIPE_VIDEO_ENTRYPOINT_ENCODE) {
          if (u_reduce_video_profile(context->decoder->profile) ==
                PIPE_VIDEO_FORMAT_MPEG4_AVC) {
             FREE(context->desc.h264.pps->sps);
             FREE(context->desc.h264.pps);
          }
@@ -325,21 +325,21 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
          }
       }
       context->decoder->destroy(context->decoder);
    }
    if (context->deint) {
       vl_deint_filter_cleanup(context->deint);
       FREE(context->deint);
    }
    FREE(context);
    handle_table_remove(drv->htab, context_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaTerminate(VADriverContextP ctx)
 {
    vlVaDriver *drv;
 
    if (!ctx)
diff --git a/src/gallium/state_trackers/va/image.c b/src/gallium/state_trackers/va/image.c
index 2c7afe2..2ce22ce 100644
--- a/src/gallium/state_trackers/va/image.c
+++ b/src/gallium/state_trackers/va/image.c
@@ -109,21 +109,21 @@ vlVaCreateImage(VADriverContextP ctx, VAImageFormat *format, int width, int heig
    if (!(format && image && width && height))
       return VA_STATUS_ERROR_INVALID_PARAMETER;
 
    drv = VL_VA_DRIVER(ctx);
 
    img = CALLOC(1, sizeof(VAImage));
    if (!img)
       return VA_STATUS_ERROR_ALLOCATION_FAILED;
    mtx_lock(&drv->mutex);
    img->image_id = handle_table_add(drv->htab, img);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    img->format = *format;
    img->width = width;
    img->height = height;
    w = align(width, 2);
    h = align(height, 2);
 
    switch (format->fourcc) {
    case VA_FOURCC('N','V','1','2'):
       img->num_planes = 2;
@@ -261,47 +261,47 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
    mtx_lock(&drv->mutex);
    img->image_id = handle_table_add(drv->htab, img);
 
    img_buf->type = VAImageBufferType;
    img_buf->size = img->data_size;
    img_buf->num_elements = 1;
 
    pipe_resource_reference(&img_buf->derived_surface.resource, surfaces[0]->texture);
 
    img->buf = handle_table_add(VL_VA_DRIVER(ctx)->htab, img_buf);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    *image = *img;
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaDestroyImage(VADriverContextP ctx, VAImageID image)
 {
    vlVaDriver *drv;
    VAImage  *vaimage;
    VAStatus status;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    vaimage = handle_table_get(drv->htab, image);
    if (!vaimage) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_IMAGE;
    }
 
    handle_table_remove(VL_VA_DRIVER(ctx)->htab, image);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    status = vlVaDestroyBuffer(ctx, vaimage->buf);
    FREE(vaimage);
    return status;
 }
 
 VAStatus
 vlVaSetImagePalette(VADriverContextP ctx, VAImageID image, unsigned char *palette)
 {
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -324,58 +324,58 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
    unsigned pitches[3], i, j;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
 
    mtx_lock(&drv->mutex);
    surf = handle_table_get(drv->htab, surface);
    if (!surf || !surf->buffer) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SURFACE;
    }
 
    vaimage = handle_table_get(drv->htab, image);
    if (!vaimage) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_IMAGE;
    }
 
    img_buf = handle_table_get(drv->htab, vaimage->buf);
    if (!img_buf) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_BUFFER;
    }
 
    format = VaFourccToPipeFormat(vaimage->format.fourcc);
    if (format == PIPE_FORMAT_NONE) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_OPERATION_FAILED;
    }
 
    if (format != surf->buffer->buffer_format) {
       /* support NV12 to YV12 and IYUV conversion now only */
       if ((format == PIPE_FORMAT_YV12 &&
           surf->buffer->buffer_format == PIPE_FORMAT_NV12) ||
           (format == PIPE_FORMAT_IYUV &&
           surf->buffer->buffer_format == PIPE_FORMAT_NV12))
          convert = true;
       else {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_OPERATION_FAILED;
       }
    }
 
    views = surf->buffer->get_sampler_view_planes(surf->buffer);
    if (!views) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_OPERATION_FAILED;
    }
 
    for (i = 0; i < vaimage->num_planes; i++) {
       data[i] = img_buf->data + vaimage->offsets[i];
       pitches[i] = vaimage->pitches[i];
    }
    if (vaimage->format.fourcc == VA_FOURCC('I','4','2','0')) {
       void *tmp_d;
       unsigned tmp_p;
@@ -391,38 +391,38 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
       unsigned width, height;
       if (!views[i]) continue;
       vlVaVideoSurfaceSize(surf, i, &width, &height);
       for (j = 0; j < views[i]->texture->array_size; ++j) {
          struct pipe_box box = {0, 0, j, width, height, 1};
          struct pipe_transfer *transfer;
          uint8_t *map;
          map = drv->pipe->transfer_map(drv->pipe, views[i]->texture, 0,
                   PIPE_TRANSFER_READ, &box, &transfer);
          if (!map) {
-            pipe_mutex_unlock(drv->mutex);
+            mtx_unlock(&drv->mutex);
             return VA_STATUS_ERROR_OPERATION_FAILED;
          }
 
          if (i == 1 && convert) {
             u_copy_nv12_to_yv12(data, pitches, i, j,
                transfer->stride, views[i]->texture->array_size,
                map, box.width, box.height);
          } else {
             util_copy_rect(data[i] + pitches[i] * j,
                views[i]->texture->format,
                pitches[i] * views[i]->texture->array_size, 0, 0,
                box.width, box.height, map, transfer->stride, 0, 0);
          }
          pipe_transfer_unmap(drv->pipe, transfer);
       }
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
              int src_x, int src_y, unsigned int src_width, unsigned int src_height,
              int dest_x, int dest_y, unsigned int dest_width, unsigned int dest_height)
 {
    vlVaDriver *drv;
@@ -435,71 +435,71 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
    unsigned pitches[3], i, j;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
 
    surf = handle_table_get(drv->htab, surface);
    if (!surf || !surf->buffer) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SURFACE;
    }
 
    vaimage = handle_table_get(drv->htab, image);
    if (!vaimage) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_IMAGE;
    }
 
    img_buf = handle_table_get(drv->htab, vaimage->buf);
    if (!img_buf) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_BUFFER;
    }
 
    if (img_buf->derived_surface.resource) {
       /* Attempting to transfer derived image to surface */
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_UNIMPLEMENTED;
    }
 
    format = VaFourccToPipeFormat(vaimage->format.fourcc);
 
    if (format == PIPE_FORMAT_NONE) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_OPERATION_FAILED;
    }
 
    if ((format != surf->buffer->buffer_format) &&
          ((format != PIPE_FORMAT_YV12) || (surf->buffer->buffer_format != PIPE_FORMAT_NV12)) &&
          ((format != PIPE_FORMAT_IYUV) || (surf->buffer->buffer_format != PIPE_FORMAT_NV12))) {
       struct pipe_video_buffer *tmp_buf;
       struct pipe_video_buffer templat = surf->templat;
 
       templat.buffer_format = format;
       tmp_buf = drv->pipe->create_video_buffer(drv->pipe, &templat);
 
       if (!tmp_buf) {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_ALLOCATION_FAILED;
       }
 
       surf->buffer->destroy(surf->buffer);
       surf->buffer = tmp_buf;
       surf->templat.buffer_format = format;
    }
 
    views = surf->buffer->get_sampler_view_planes(surf->buffer);
    if (!views) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_OPERATION_FAILED;
    }
 
    for (i = 0; i < vaimage->num_planes; i++) {
       data[i] = img_buf->data + vaimage->offsets[i];
       pitches[i] = vaimage->pitches[i];
    }
    if (vaimage->format.fourcc == VA_FOURCC('I','4','2','0')) {
       void *tmp_d;
       unsigned tmp_p;
@@ -542,14 +542,14 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
                                   map, dst_box.width, dst_box.height);
             pipe_transfer_unmap(drv->pipe, transfer);
          } else {
             drv->pipe->texture_subdata(drv->pipe, tex, 0,
                                        PIPE_TRANSFER_WRITE, &dst_box,
                                        data[i] + pitches[i] * j,
                                        pitches[i] * views[i]->texture->array_size, 0);
          }
       }
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index 5ff178a..00017c8 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -46,26 +46,26 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    context = handle_table_get(drv->htab, context_id);
    if (!context) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_CONTEXT;
    }
 
    surf = handle_table_get(drv->htab, render_target);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    if (!surf || !surf->buffer)
       return VA_STATUS_ERROR_INVALID_SURFACE;
 
    context->target_id = render_target;
    surf->ctx = context_id;
    context->target = surf->buffer;
 
    if (!context->decoder) {
 
       /* VPP */
@@ -477,28 +477,28 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    context = handle_table_get(drv->htab, context_id);
    if (!context) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_CONTEXT;
    }
 
    for (i = 0; i < num_buffers; ++i) {
       vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
       if (!buf) {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_INVALID_BUFFER;
       }
 
       switch (buf->type) {
       case VAPictureParameterBufferType:
          vaStatus = handlePictureParameterBuffer(drv, context, buf);
          break;
 
       case VAIQMatrixBufferType:
          handleIQMatrixBuffer(context, buf);
@@ -528,21 +528,21 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
          break;
 
       case VAEncSliceParameterBufferType:
          vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
          break;
 
       default:
          break;
       }
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return vaStatus;
 }
 
 VAStatus
 vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
 {
    vlVaDriver *drv;
    vlVaContext *context;
    vlVaBuffer *coded_buf;
@@ -551,21 +551,21 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    context = handle_table_get(drv->htab, context_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    if (!context)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    if (!context->decoder) {
       if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
          return VA_STATUS_ERROR_INVALID_CONTEXT;
 
       /* VPP */
       return VA_STATUS_SUCCESS;
    }
@@ -599,13 +599,13 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
       if (p_remain_in_idr == 1) {
          if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
             context->decoder->flush(context->decoder);
             context->first_single_submitted = true;
          }
          else
             context->first_single_submitted = false;
          surf->force_flushed = true;
       }
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    return VA_STATUS_SUCCESS;
 }
diff --git a/src/gallium/state_trackers/va/subpicture.c b/src/gallium/state_trackers/va/subpicture.c
index 0d90758..15d52b9 100644
--- a/src/gallium/state_trackers/va/subpicture.c
+++ b/src/gallium/state_trackers/va/subpicture.c
@@ -69,83 +69,83 @@ vlVaCreateSubpicture(VADriverContextP ctx, VAImageID image,
    vlVaSubpicture *sub;
    VAImage *img;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    img = handle_table_get(drv->htab, image);
    if (!img) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_IMAGE;
    }
 
    sub = CALLOC(1, sizeof(*sub));
    if (!sub) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }
 
    sub->image = img;
    *subpicture = handle_table_add(VL_VA_DRIVER(ctx)->htab, sub);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaDestroySubpicture(VADriverContextP ctx, VASubpictureID subpicture)
 {
    vlVaDriver *drv;
    vlVaSubpicture *sub;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
 
    sub = handle_table_get(drv->htab, subpicture);
    if (!sub) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SUBPICTURE;
    }
 
    FREE(sub);
    handle_table_remove(drv->htab, subpicture);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaSubpictureImage(VADriverContextP ctx, VASubpictureID subpicture, VAImageID image)
 {
    vlVaDriver *drv;
    vlVaSubpicture *sub;
    VAImage *img;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
 
    img = handle_table_get(drv->htab, image);
    if (!img) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_IMAGE;
    }
 
    sub = handle_table_get(drv->htab, subpicture);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    if (!sub)
       return VA_STATUS_ERROR_INVALID_SUBPICTURE;
 
    sub->image = img;
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaSetSubpictureChromakey(VADriverContextP ctx, VASubpictureID subpicture,
@@ -183,28 +183,28 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
    struct u_rect src_rect = {src_x, src_x + src_width, src_y, src_y + src_height};
    struct u_rect dst_rect = {dest_x, dest_x + dest_width, dest_y, dest_y + dest_height};
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
 
    sub = handle_table_get(drv->htab, subpicture);
    if (!sub) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SUBPICTURE;
    }
 
    for (i = 0; i < num_surfaces; i++) {
       surf = handle_table_get(drv->htab, target_surfaces[i]);
       if (!surf) {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_INVALID_SURFACE;
       }
    }
 
    sub->src_rect = src_rect;
    sub->dst_rect = dst_rect;
 
    memset(&tex_temp, 0, sizeof(tex_temp));
    tex_temp.target = PIPE_TEXTURE_2D;
    tex_temp.format = PIPE_FORMAT_B8G8R8A8_UNORM;
@@ -212,40 +212,40 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
    tex_temp.width0 = src_width;
    tex_temp.height0 = src_height;
    tex_temp.depth0 = 1;
    tex_temp.array_size = 1;
    tex_temp.usage = PIPE_USAGE_DYNAMIC;
    tex_temp.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
    tex_temp.flags = 0;
    if (!drv->pipe->screen->is_format_supported(
           drv->pipe->screen, tex_temp.format, tex_temp.target,
           tex_temp.nr_samples, tex_temp.bind)) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }
 
    tex = drv->pipe->screen->resource_create(drv->pipe->screen, &tex_temp);
 
    memset(&sampler_templ, 0, sizeof(sampler_templ));
    u_sampler_view_default_template(&sampler_templ, tex, tex->format);
    sub->sampler = drv->pipe->create_sampler_view(drv->pipe, tex, &sampler_templ);
    pipe_resource_reference(&tex, NULL);
    if (!sub->sampler) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_ALLOCATION_FAILED;
    }
 
    for (i = 0; i < num_surfaces; i++) {
       surf = handle_table_get(drv->htab, target_surfaces[i]);
       util_dynarray_append(&surf->subpics, vlVaSubpicture *, sub);
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaDeassociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
                           VASurfaceID *target_surfaces, int num_surfaces)
 {
    int i;
    int j;
@@ -253,37 +253,37 @@ vlVaDeassociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
    vlVaSubpicture *sub, **array;
    vlVaDriver *drv;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
 
    sub = handle_table_get(drv->htab, subpicture);
    if (!sub) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SUBPICTURE;
    }
 
    for (i = 0; i < num_surfaces; i++) {
       surf = handle_table_get(drv->htab, target_surfaces[i]);
       if (!surf) {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_INVALID_SURFACE;
       }
 
       array = surf->subpics.data;
       if (!array)
          continue;
 
       for (j = 0; j < surf->subpics.size/sizeof(vlVaSubpicture *); j++) {
          if (array[j] == sub)
             array[j] = NULL;
       }
 
       while (surf->subpics.size && util_dynarray_top(&surf->subpics, vlVaSubpicture *) == NULL)
          (void)util_dynarray_pop(&surf->subpics, vlVaSubpicture *);
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
diff --git a/src/gallium/state_trackers/va/surface.c b/src/gallium/state_trackers/va/surface.c
index 6a1736b..65cd014 100644
--- a/src/gallium/state_trackers/va/surface.c
+++ b/src/gallium/state_trackers/va/surface.c
@@ -67,30 +67,30 @@ vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_sur
    int i;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    for (i = 0; i < num_surfaces; ++i) {
       vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
       if (!surf) {
-         pipe_mutex_unlock(drv->mutex);
+         mtx_unlock(&drv->mutex);
          return VA_STATUS_ERROR_INVALID_SURFACE;
       }
       if (surf->buffer)
          surf->buffer->destroy(surf->buffer);
       util_dynarray_fini(&surf->subpics);
       FREE(surf);
       handle_table_remove(drv->htab, surface_list[i]);
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
 {
    vlVaDriver *drv;
    vlVaContext *context;
    vlVaSurface *surf;
@@ -99,52 +99,52 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    surf = handle_table_get(drv->htab, render_target);
 
    if (!surf || !surf->buffer) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SURFACE;
    }
 
    if (!surf->feedback) {
       // No outstanding operation: nothing to do.
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_SUCCESS;
    }
 
    context = handle_table_get(drv->htab, surf->ctx);
    if (!context) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_CONTEXT;
    }
 
    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
       int frame_diff;
       if (context->desc.h264enc.frame_num_cnt >= surf->frame_num_cnt)
          frame_diff = context->desc.h264enc.frame_num_cnt - surf->frame_num_cnt;
       else
          frame_diff = 0xFFFFFFFF - surf->frame_num_cnt + 1 + context->desc.h264enc.frame_num_cnt;
       if ((frame_diff == 0) &&
           (surf->force_flushed == false) &&
           (context->desc.h264enc.frame_num_cnt % 2 != 0)) {
          context->decoder->flush(context->decoder);
          context->first_single_submitted = true;
       }
       context->decoder->get_feedback(context->decoder, surf->feedback, &(surf->coded_buf->coded_size));
       surf->feedback = NULL;
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaQuerySurfaceStatus(VADriverContextP ctx, VASurfaceID render_target, VASurfaceStatus *status)
 {
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    return VA_STATUS_SUCCESS;
@@ -284,72 +284,72 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
    struct u_rect dst_rect = {destx, destx + destw, desty, desty + desth};
    VAStatus status;
 
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
    mtx_lock(&drv->mutex);
    surf = handle_table_get(drv->htab, surface_id);
    if (!surf) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_SURFACE;
    }
 
    screen = drv->pipe->screen;
    vscreen = drv->vscreen;
 
    tex = vscreen->texture_from_drawable(vscreen, draw);
    if (!tex) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_DISPLAY;
    }
 
    dirty_area = vscreen->get_dirty_area(vscreen);
 
    memset(&surf_templ, 0, sizeof(surf_templ));
    surf_templ.format = tex->format;
    surf_draw = drv->pipe->create_surface(drv->pipe, tex, &surf_templ);
    if (!surf_draw) {
       pipe_resource_reference(&tex, NULL);
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return VA_STATUS_ERROR_INVALID_DISPLAY;
    }
 
    src_rect.x0 = srcx;
    src_rect.y0 = srcy;
    src_rect.x1 = srcw + srcx;
    src_rect.y1 = srch + srcy;
 
    vl_compositor_clear_layers(&drv->cstate);
    vl_compositor_set_buffer_layer(&drv->cstate, &drv->compositor, 0, surf->buffer, &src_rect, NULL, VL_COMPOSITOR_WEAVE);
    vl_compositor_set_layer_dst_area(&drv->cstate, 0, &dst_rect);
    vl_compositor_render(&drv->cstate, &drv->compositor, surf_draw, dirty_area, true);
 
    status = vlVaPutSubpictures(surf, drv, surf_draw, dirty_area, &src_rect, &dst_rect);
    if (status) {
-      pipe_mutex_unlock(drv->mutex);
+      mtx_unlock(&drv->mutex);
       return status;
    }
 
    /* flush before calling flush_frontbuffer so that rendering is flushed
     * to back buffer so the texture can be copied in flush_frontbuffer
     */
    drv->pipe->flush(drv->pipe, NULL, 0);
 
    screen->flush_frontbuffer(screen, tex, 0, 0,
                              vscreen->get_private(vscreen), NULL);
 
 
    pipe_resource_reference(&tex, NULL);
    pipe_surface_reference(&surf_draw, NULL);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 }
 
 VAStatus
 vlVaLockSurface(VADriverContextP ctx, VASurfaceID surface, unsigned int *fourcc,
                 unsigned int *luma_stride, unsigned int *chroma_u_stride, unsigned int *chroma_v_stride,
                 unsigned int *luma_offset, unsigned int *chroma_u_offset, unsigned int *chroma_v_offset,
                 unsigned int *buffer_name, void **buffer)
 {
@@ -394,21 +394,21 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config_id,
    if (!ctx)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    drv = VL_VA_DRIVER(ctx);
 
    if (!drv)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    mtx_lock(&drv->mutex);
    config = handle_table_get(drv->htab, config_id);
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    if (!config)
       return VA_STATUS_ERROR_INVALID_CONFIG;
 
    pscreen = VL_VA_PSCREEN(ctx);
 
    if (!pscreen)
       return VA_STATUS_ERROR_INVALID_CONTEXT;
 
    attribs = CALLOC(VL_VA_MAX_IMAGE_FORMATS + VASurfaceAttribCount,
@@ -716,26 +716,26 @@ vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format,
          vaStatus = suface_from_external_memory(ctx, surf, memory_attibute, i, surfaces, &templat);
          if (vaStatus != VA_STATUS_SUCCESS) {
             FREE(surf);
             goto no_res;
          }
          break;
       default:
          assert(0);
       }
    }
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
 
    return VA_STATUS_SUCCESS;
 
 no_res:
-   pipe_mutex_unlock(drv->mutex);
+   mtx_unlock(&drv->mutex);
    if (i)
       vlVaDestroySurfaces(ctx, surfaces, i);
 
    return VA_STATUS_ERROR_ALLOCATION_FAILED;
 }
 
 VAStatus
 vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context,
                           VAProcFilterType *filters, unsigned int *num_filters)
 {
diff --git a/src/gallium/state_trackers/vdpau/bitmap.c b/src/gallium/state_trackers/vdpau/bitmap.c
index 14f6c36..643be75 100644
--- a/src/gallium/state_trackers/vdpau/bitmap.c
+++ b/src/gallium/state_trackers/vdpau/bitmap.c
@@ -95,55 +95,55 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
    vlVdpDefaultSamplerViewTemplate(&sv_templ, res);
    vlsurface->sampler_view = pipe->create_sampler_view(pipe, res, &sv_templ);
 
    pipe_resource_reference(&res, NULL);
 
    if (!vlsurface->sampler_view) {
       ret = VDP_STATUS_RESOURCES;
       goto err_unlock;
    }
 
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    *surface = vlAddDataHTAB(vlsurface);
    if (*surface == 0) {
       mtx_lock(&dev->mutex);
       ret = VDP_STATUS_ERROR;
       goto err_sampler;
    }
 
    return VDP_STATUS_OK;
 
 err_sampler:
    pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
 err_unlock:
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
    DeviceReference(&vlsurface->device, NULL);
    FREE(vlsurface);
    return ret;
 }
 
 /**
  * Destroy a VdpBitmapSurface.
  */
 VdpStatus
 vlVdpBitmapSurfaceDestroy(VdpBitmapSurface surface)
 {
    vlVdpBitmapSurface *vlsurface;
 
    vlsurface = vlGetDataHTAB(surface);
    if (!vlsurface)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&vlsurface->device->mutex);
    pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    vlRemoveDataHTAB(surface);
    DeviceReference(&vlsurface->device, NULL);
    FREE(vlsurface);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Retrieve the parameters used to create a VdpBitmapSurface.
@@ -196,14 +196,14 @@ vlVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface,
 
    pipe = vlsurface->device->context;
 
    mtx_lock(&vlsurface->device->mutex);
 
    dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
    pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
                          PIPE_TRANSFER_WRITE, &dst_box, *source_data,
                          *source_pitches, 0);
 
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 }
diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c
index 0f8b8ff..66d5225 100644
--- a/src/gallium/state_trackers/vdpau/decode.c
+++ b/src/gallium/state_trackers/vdpau/decode.c
@@ -74,46 +74,46 @@ vlVdpDecoderCreate(VdpDevice device,
    mtx_lock(&dev->mutex);
 
    supported = screen->get_video_param
    (
       screen,
       templat.profile,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
       PIPE_VIDEO_CAP_SUPPORTED
    );
    if (!supported) {
-      pipe_mutex_unlock(dev->mutex);
+      mtx_unlock(&dev->mutex);
       return VDP_STATUS_INVALID_DECODER_PROFILE;
    }
 
    maxwidth = screen->get_video_param
    (
       screen,
       templat.profile,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
       PIPE_VIDEO_CAP_MAX_WIDTH
    );
    maxheight = screen->get_video_param
    (
       screen,
       templat.profile,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
       PIPE_VIDEO_CAP_MAX_HEIGHT
    );
    if (width > maxwidth || height > maxheight) {
-      pipe_mutex_unlock(dev->mutex);
+      mtx_unlock(&dev->mutex);
       return VDP_STATUS_INVALID_SIZE;
    }
 
    vldecoder = CALLOC(1,sizeof(vlVdpDecoder));
    if (!vldecoder) {
-      pipe_mutex_unlock(dev->mutex);
+      mtx_unlock(&dev->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    DeviceReference(&vldecoder->device, dev);
 
    templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
    templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
    templat.width = width;
    templat.height = height;
    templat.max_references = max_references;
@@ -130,49 +130,49 @@ vlVdpDecoderCreate(VdpDevice device,
       goto error_decoder;
    }
 
    *decoder = vlAddDataHTAB(vldecoder);
    if (*decoder == 0) {
       ret = VDP_STATUS_ERROR;
       goto error_handle;
    }
 
    (void) mtx_init(&vldecoder->mutex, mtx_plain);
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 
 error_handle:
    vldecoder->decoder->destroy(vldecoder->decoder);
 
 error_decoder:
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
    DeviceReference(&vldecoder->device, NULL);
    FREE(vldecoder);
    return ret;
 }
 
 /**
  * Destroy a VdpDecoder.
  */
 VdpStatus
 vlVdpDecoderDestroy(VdpDecoder decoder)
 {
    vlVdpDecoder *vldecoder;
 
    vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder);
    if (!vldecoder)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&vldecoder->mutex);
    vldecoder->decoder->destroy(vldecoder->decoder);
-   pipe_mutex_unlock(vldecoder->mutex);
+   mtx_unlock(&vldecoder->mutex);
    mtx_destroy(&vldecoder->mutex);
 
    vlRemoveDataHTAB(decoder);
    DeviceReference(&vldecoder->device, NULL);
    FREE(vldecoder);
 
    return VDP_STATUS_OK;
 }
 
 /**
@@ -626,25 +626,25 @@ vlVdpDecoderRender(VdpDecoder decoder,
 
       /* also set interlacing to decoders preferences */
       vlsurf->templat.interlaced = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                                            PIPE_VIDEO_CAP_PREFERS_INTERLACED);
 
       /* and recreate the video buffer */
       vlsurf->video_buffer = dec->context->create_video_buffer(dec->context, &vlsurf->templat);
 
       /* still no luck? get me out of here... */
       if (!vlsurf->video_buffer) {
-         pipe_mutex_unlock(vlsurf->device->mutex);
+         mtx_unlock(&vlsurf->device->mutex);
          return VDP_STATUS_NO_IMPLEMENTATION;
       }
       vlVdpVideoSurfaceClear(vlsurf);
-      pipe_mutex_unlock(vlsurf->device->mutex);
+      mtx_unlock(&vlsurf->device->mutex);
    }
 
    for (i = 0; i < bitstream_buffer_count; ++i) {
       buffers[i] = bitstream_buffers[i].bitstream;
       sizes[i] = bitstream_buffers[i].bitstream_bytes;
    }
 
    memset(&desc, 0, sizeof(desc));
    desc.base.profile = dec->profile;
    switch (u_reduce_video_profile(dec->profile)) {
@@ -671,13 +671,13 @@ vlVdpDecoderRender(VdpDecoder decoder,
       return VDP_STATUS_INVALID_DECODER_PROFILE;
    }
 
    if (ret != VDP_STATUS_OK)
       return ret;
 
    mtx_lock(&vldecoder->mutex);
    dec->begin_frame(dec, vlsurf->video_buffer, &desc.base);
    dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes);
    dec->end_frame(dec, vlsurf->video_buffer, &desc.base);
-   pipe_mutex_unlock(vldecoder->mutex);
+   mtx_unlock(&vldecoder->mutex);
    return ret;
 }
diff --git a/src/gallium/state_trackers/vdpau/htab.c b/src/gallium/state_trackers/vdpau/htab.c
index f938a19..f596b2d 100644
--- a/src/gallium/state_trackers/vdpau/htab.c
+++ b/src/gallium/state_trackers/vdpau/htab.c
@@ -35,55 +35,55 @@ static mtx_t htab_lock = _MTX_INITIALIZER_NP;
 boolean vlCreateHTAB(void)
 {
    boolean ret;
 
    /* Make sure handle table handles match VDPAU handles. */
    assert(sizeof(unsigned) <= sizeof(vlHandle));
    mtx_lock(&htab_lock);
    if (!htab)
       htab = handle_table_create();
    ret = htab != NULL;
-   pipe_mutex_unlock(htab_lock);
+   mtx_unlock(&htab_lock);
    return ret;
 }
 
 void vlDestroyHTAB(void)
 {
    mtx_lock(&htab_lock);
    if (htab && !handle_table_get_first_handle(htab)) {
       handle_table_destroy(htab);
       htab = NULL;
    }
-   pipe_mutex_unlock(htab_lock);
+   mtx_unlock(&htab_lock);
 }
 
 vlHandle vlAddDataHTAB(void *data)
 {
    vlHandle handle = 0;
 
    assert(data);
    mtx_lock(&htab_lock);
    if (htab)
       handle = handle_table_add(htab, data);
-   pipe_mutex_unlock(htab_lock);
+   mtx_unlock(&htab_lock);
    return handle;
 }
 
 void* vlGetDataHTAB(vlHandle handle)
 {
    void *data = NULL;
 
    assert(handle);
    mtx_lock(&htab_lock);
    if (htab)
       data = handle_table_get(htab, handle);
-   pipe_mutex_unlock(htab_lock);
+   mtx_unlock(&htab_lock);
    return data;
 }
 
 void vlRemoveDataHTAB(vlHandle handle)
 {
    mtx_lock(&htab_lock);
    if (htab)
       handle_table_remove(htab, handle);
-   pipe_mutex_unlock(htab_lock);
+   mtx_unlock(&htab_lock);
 }
diff --git a/src/gallium/state_trackers/vdpau/mixer.c b/src/gallium/state_trackers/vdpau/mixer.c
index a1c0377..76b5225 100644
--- a/src/gallium/state_trackers/vdpau/mixer.c
+++ b/src/gallium/state_trackers/vdpau/mixer.c
@@ -155,32 +155,32 @@ vlVdpVideoMixerCreate(VdpDevice device,
                 vmixer->video_width, max_size);
       goto no_params;
    }
    if (vmixer->video_height < 48 || vmixer->video_height > max_size) {
       VDPAU_MSG(VDPAU_WARN, "[VDPAU] 48 < %u < %u  not valid for height\n",
                 vmixer->video_height, max_size);
       goto no_params;
    }
    vmixer->luma_key.luma_min = 1.0f;
    vmixer->luma_key.luma_max = 0.0f;
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 
 no_params:
    vlRemoveDataHTAB(*mixer);
 
 no_handle:
 err_csc_matrix:
    vl_compositor_cleanup_state(&vmixer->cstate);
 no_compositor_state:
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
    DeviceReference(&vmixer->device, NULL);
    FREE(vmixer);
    return ret;
 }
 
 /**
  * Destroy a VdpVideoMixer.
  */
 VdpStatus
 vlVdpVideoMixerDestroy(VdpVideoMixer mixer)
@@ -209,21 +209,21 @@ vlVdpVideoMixerDestroy(VdpVideoMixer mixer)
 
    if (vmixer->sharpness.filter) {
       vl_matrix_filter_cleanup(vmixer->sharpness.filter);
       FREE(vmixer->sharpness.filter);
    }
 
    if (vmixer->bicubic.filter) {
       vl_bicubic_filter_cleanup(vmixer->bicubic.filter);
       FREE(vmixer->bicubic.filter);
    }
-   pipe_mutex_unlock(vmixer->device->mutex);
+   mtx_unlock(&vmixer->device->mutex);
    DeviceReference(&vmixer->device, NULL);
 
    FREE(vmixer);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Perform a video post-processing and compositing operation.
  */
@@ -305,21 +305,21 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
 
    case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD:
       deinterlace = VL_COMPOSITOR_BOB_BOTTOM;
       break;
 
    case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME:
       deinterlace = VL_COMPOSITOR_WEAVE;
       break;
 
    default:
-      pipe_mutex_unlock(vmixer->device->mutex);
+      mtx_unlock(&vmixer->device->mutex);
       return VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE;
    }
 
    if (deinterlace != VL_COMPOSITOR_WEAVE && vmixer->deint.enabled &&
        video_surface_past_count > 1 && video_surface_future_count > 0) {
       vlVdpSurface *prevprev = vlGetDataHTAB(video_surface_past[1]);
       vlVdpSurface *prev = vlGetDataHTAB(video_surface_past[0]);
       vlVdpSurface *next = vlGetDataHTAB(video_surface_future[0]);
       if (prevprev && prev && next &&
           vl_deint_filter_check_buffers(vmixer->deint.filter,
@@ -380,21 +380,21 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
    }
 
    if (!vmixer->bicubic.filter) {
       vl_compositor_set_layer_dst_area(&vmixer->cstate, layer++, RectToPipe(destination_video_rect, &rect));
       vl_compositor_set_dst_clip(&vmixer->cstate, RectToPipe(destination_rect, &clip));
    }
 
    for (i = 0; i < layer_count; ++i) {
       vlVdpOutputSurface *src = vlGetDataHTAB(layers->source_surface);
       if (!src) {
-         pipe_mutex_unlock(vmixer->device->mutex);
+         mtx_unlock(&vmixer->device->mutex);
          return VDP_STATUS_INVALID_HANDLE;
       }
 
       assert(layers->struct_version == VDP_LAYER_VERSION);
 
       vl_compositor_set_rgba_layer(&vmixer->cstate, compositor, layer, src->sampler_view,
                                    RectToPipe(layers->source_rect, &rect), NULL, NULL);
       vl_compositor_set_layer_dst_area(&vmixer->cstate, layer++, RectToPipe(layers->destination_rect, &rect));
 
       ++layers;
@@ -447,21 +447,21 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
    if (vmixer->bicubic.filter)
       vl_bicubic_filter_render(vmixer->bicubic.filter,
                                sampler_view, dst->surface,
                                RectToPipe(destination_video_rect, &rect),
                                RectToPipe(destination_rect, &clip));
 
    if(surface != dst->surface) {
       pipe_sampler_view_reference(&sampler_view, NULL);
       pipe_surface_reference(&surface, NULL);
    }
-   pipe_mutex_unlock(vmixer->device->mutex);
+   mtx_unlock(&vmixer->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 static void
 vlVdpVideoMixerUpdateDeinterlaceFilter(vlVdpVideoMixer *vmixer)
 {
    struct pipe_context *pipe = vmixer->device->context;
    assert(vmixer);
 
@@ -687,36 +687,36 @@ vlVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer,
       case VDP_VIDEO_MIXER_FEATURE_NOISE_REDUCTION:
          vmixer->noise_reduction.enabled = feature_enables[i];
          vlVdpVideoMixerUpdateNoiseReductionFilter(vmixer);
          break;
 
       case VDP_VIDEO_MIXER_FEATURE_LUMA_KEY:
          vmixer->luma_key.enabled = feature_enables[i];
          if (!debug_get_bool_option("G3DVL_NO_CSC", FALSE))
             if (!vl_compositor_set_csc_matrix(&vmixer->cstate, (const vl_csc_matrix *)&vmixer->csc,
                         vmixer->luma_key.luma_min, vmixer->luma_key.luma_max)) {
-               pipe_mutex_unlock(vmixer->device->mutex);
+               mtx_unlock(&vmixer->device->mutex);
                return VDP_STATUS_ERROR;
             }
          break;
 
       case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L1:
          vmixer->bicubic.enabled = feature_enables[i];
          vlVdpVideoMixerUpdateBicubicFilter(vmixer);
          break;
 
       default:
-         pipe_mutex_unlock(vmixer->device->mutex);
+         mtx_unlock(&vmixer->device->mutex);
          return VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE;
       }
    }
-   pipe_mutex_unlock(vmixer->device->mutex);
+   mtx_unlock(&vmixer->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Retrieve whether features are enabled.
  */
 VdpStatus
 vlVdpVideoMixerGetFeatureEnables(VdpVideoMixer mixer,
                                  uint32_t feature_count,
@@ -882,25 +882,25 @@ vlVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer,
             goto fail;
          }
          vmixer->skip_chroma_deint = *(uint8_t*)attribute_values[i];
          vlVdpVideoMixerUpdateDeinterlaceFilter(vmixer);
          break;
       default:
          ret = VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE;
          goto fail;
       }
    }
-   pipe_mutex_unlock(vmixer->device->mutex);
+   mtx_unlock(&vmixer->device->mutex);
 
    return VDP_STATUS_OK;
 fail:
-   pipe_mutex_unlock(vmixer->device->mutex);
+   mtx_unlock(&vmixer->device->mutex);
    return ret;
 }
 
 /**
  * Retrieve parameter values given at creation time.
  */
 VdpStatus
 vlVdpVideoMixerGetParameterValues(VdpVideoMixer mixer,
                                   uint32_t parameter_count,
                                   VdpVideoMixerParameter const *parameters,
@@ -980,25 +980,25 @@ vlVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer,
       case VDP_VIDEO_MIXER_ATTRIBUTE_LUMA_KEY_MAX_LUMA:
          *(float*)attribute_values[i] = vmixer->luma_key.luma_max;
          break;
       case VDP_VIDEO_MIXER_ATTRIBUTE_SHARPNESS_LEVEL:
          *(float*)attribute_values[i] = vmixer->sharpness.value;
          break;
       case VDP_VIDEO_MIXER_ATTRIBUTE_SKIP_CHROMA_DEINTERLACE:
          *(uint8_t*)attribute_values[i] = vmixer->skip_chroma_deint;
          break;
       default:
-         pipe_mutex_unlock(vmixer->device->mutex);
+         mtx_unlock(&vmixer->device->mutex);
          return VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE;
       }
    }
-   pipe_mutex_unlock(vmixer->device->mutex);
+   mtx_unlock(&vmixer->device->mutex);
    return VDP_STATUS_OK;
 }
 
 /**
  * Generate a color space conversion matrix.
  */
 VdpStatus
 vlVdpGenerateCSCMatrix(VdpProcamp *procamp,
                        VdpColorStandard standard,
                        VdpCSCMatrix *csc_matrix)
diff --git a/src/gallium/state_trackers/vdpau/output.c b/src/gallium/state_trackers/vdpau/output.c
index 5836395..8ef8268 100644
--- a/src/gallium/state_trackers/vdpau/output.c
+++ b/src/gallium/state_trackers/vdpau/output.c
@@ -115,30 +115,30 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
    *surface = vlAddDataHTAB(vlsurface);
    if (*surface == 0)
       goto err_resource;
 
    pipe_resource_reference(&res, NULL);
 
    if (!vl_compositor_init_state(&vlsurface->cstate, pipe))
       goto err_resource;
 
    vl_compositor_reset_dirty_area(&vlsurface->dirty_area);
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 
 err_resource:
    pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
    pipe_surface_reference(&vlsurface->surface, NULL);
    pipe_resource_reference(&res, NULL);
 err_unlock:
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
    DeviceReference(&vlsurface->device, NULL);
    FREE(vlsurface);
    return VDP_STATUS_ERROR;
 }
 
 /**
  * Destroy a VdpOutputSurface.
  */
 VdpStatus
 vlVdpOutputSurfaceDestroy(VdpOutputSurface surface)
@@ -151,21 +151,21 @@ vlVdpOutputSurfaceDestroy(VdpOutputSurface surface)
       return VDP_STATUS_INVALID_HANDLE;
 
    pipe = vlsurface->device->context;
 
    mtx_lock(&vlsurface->device->mutex);
 
    pipe_surface_reference(&vlsurface->surface, NULL);
    pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
    pipe->screen->fence_reference(pipe->screen, &vlsurface->fence, NULL);
    vl_compositor_cleanup_state(&vlsurface->cstate);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    vlRemoveDataHTAB(surface);
    DeviceReference(&vlsurface->device, NULL);
    FREE(vlsurface);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Retrieve the parameters used to create a VdpOutputSurface.
@@ -215,29 +215,29 @@ vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
 
    if (!destination_data || !destination_pitches)
        return VDP_STATUS_INVALID_POINTER;
 
    mtx_lock(&vlsurface->device->mutex);
 
    res = vlsurface->sampler_view->texture;
    box = RectToPipeBox(source_rect, res);
    map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box, &transfer);
    if (!map) {
-      pipe_mutex_unlock(vlsurface->device->mutex);
+      mtx_unlock(&vlsurface->device->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    util_copy_rect(*destination_data, res->format, *destination_pitches, 0, 0,
                   box.width, box.height, map, transfer->stride, 0, 0);
 
    pipe_transfer_unmap(pipe, transfer);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Copy image data from application memory in the surface's native format to
  * a VdpOutputSurface.
  */
 VdpStatus
 vlVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
@@ -259,28 +259,28 @@ vlVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
 
    if (!source_data || !source_pitches)
        return VDP_STATUS_INVALID_POINTER;
 
    mtx_lock(&vlsurface->device->mutex);
 
    dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
 
    /* Check for a no-op. (application bug?) */
    if (!dst_box.width || !dst_box.height) {
-      pipe_mutex_unlock(vlsurface->device->mutex);
+      mtx_unlock(&vlsurface->device->mutex);
       return VDP_STATUS_OK;
    }
 
    pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
                          PIPE_TRANSFER_WRITE, &dst_box, *source_data,
                          *source_pitches, 0);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Copy image data from application memory in a specific indexed format to
  * a VdpOutputSurface.
  */
 VdpStatus
 vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
@@ -403,28 +403,28 @@ vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
    if (!sv_tbl)
       goto error_resource;
 
    vl_compositor_clear_layers(cstate);
    vl_compositor_set_palette_layer(cstate, compositor, 0, sv_idx, sv_tbl, NULL, NULL, false);
    vl_compositor_set_layer_dst_area(cstate, 0, RectToPipe(destination_rect, &dst_rect));
    vl_compositor_render(cstate, compositor, vlsurface->surface, &vlsurface->dirty_area, false);
 
    pipe_sampler_view_reference(&sv_idx, NULL);
    pipe_sampler_view_reference(&sv_tbl, NULL);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 
 error_resource:
    pipe_sampler_view_reference(&sv_idx, NULL);
    pipe_sampler_view_reference(&sv_tbl, NULL);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
    return VDP_STATUS_RESOURCES;
 }
 
 /**
  * Copy image data from application memory in a specific YCbCr format to
  * a VdpOutputSurface.
  */
 VdpStatus
 vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
                                VdpYCbCrFormat source_ycbcr_format,
@@ -469,28 +469,28 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
    if (destination_rect) {
       vtmpl.width = abs(destination_rect->x0-destination_rect->x1);
       vtmpl.height = abs(destination_rect->y0-destination_rect->y1);
    } else {
       vtmpl.width = vlsurface->surface->texture->width0;
       vtmpl.height = vlsurface->surface->texture->height0;
    }
 
    vbuffer = pipe->create_video_buffer(pipe, &vtmpl);
    if (!vbuffer) {
-      pipe_mutex_unlock(vlsurface->device->mutex);
+      mtx_unlock(&vlsurface->device->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    sampler_views = vbuffer->get_sampler_view_planes(vbuffer);
    if (!sampler_views) {
       vbuffer->destroy(vbuffer);
-      pipe_mutex_unlock(vlsurface->device->mutex);
+      mtx_unlock(&vlsurface->device->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    for (i = 0; i < 3; ++i) {
       struct pipe_sampler_view *sv = sampler_views[i];
       if (!sv) continue;
 
       struct pipe_box dst_box = {
          0, 0, 0,
          sv->texture->width0, sv->texture->height0, 1
@@ -509,26 +509,26 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
       if (!vl_compositor_set_csc_matrix(cstate, csc_matrix, 1.0f, 0.0f))
          goto err_csc_matrix;
    }
 
    vl_compositor_clear_layers(cstate);
    vl_compositor_set_buffer_layer(cstate, compositor, 0, vbuffer, NULL, NULL, VL_COMPOSITOR_WEAVE);
    vl_compositor_set_layer_dst_area(cstate, 0, RectToPipe(destination_rect, &dst_rect));
    vl_compositor_render(cstate, compositor, vlsurface->surface, &vlsurface->dirty_area, false);
 
    vbuffer->destroy(vbuffer);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 err_csc_matrix:
    vbuffer->destroy(vbuffer);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
    return VDP_STATUS_ERROR;
 }
 
 static unsigned
 BlendFactorToPipe(VdpOutputSurfaceRenderBlendFactor factor)
 {
    switch (factor) {
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO:
       return PIPE_BLENDFACTOR_ZERO;
    case VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE:
@@ -694,21 +694,21 @@ vlVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
                                 ColorsToPipe(colors, flags, vlcolors));
    STATIC_ASSERT(VL_COMPOSITOR_ROTATE_0 == VDP_OUTPUT_SURFACE_RENDER_ROTATE_0);
    STATIC_ASSERT(VL_COMPOSITOR_ROTATE_90 == VDP_OUTPUT_SURFACE_RENDER_ROTATE_90);
    STATIC_ASSERT(VL_COMPOSITOR_ROTATE_180 == VDP_OUTPUT_SURFACE_RENDER_ROTATE_180);
    STATIC_ASSERT(VL_COMPOSITOR_ROTATE_270 == VDP_OUTPUT_SURFACE_RENDER_ROTATE_270);
    vl_compositor_set_layer_rotation(cstate, 0, flags & 3);
    vl_compositor_set_layer_dst_area(cstate, 0, RectToPipe(destination_rect, &dst_rect));
    vl_compositor_render(cstate, compositor, dst_vlsurface->surface, &dst_vlsurface->dirty_area, false);
 
    context->delete_blend_state(context, blend);
-   pipe_mutex_unlock(dst_vlsurface->device->mutex);
+   mtx_unlock(&dst_vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Composite a sub-rectangle of a VdpBitmapSurface into a sub-rectangle of
  * a VdpOutputSurface; Output Surface object VdpOutputSurface.
  */
 VdpStatus
 vlVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
@@ -760,36 +760,36 @@ vlVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
    vl_compositor_clear_layers(cstate);
    vl_compositor_set_layer_blend(cstate, 0, blend, false);
    vl_compositor_set_rgba_layer(cstate, compositor, 0, src_sv,
                                 RectToPipe(source_rect, &src_rect), NULL,
                                 ColorsToPipe(colors, flags, vlcolors));
    vl_compositor_set_layer_rotation(cstate, 0, flags & 3);
    vl_compositor_set_layer_dst_area(cstate, 0, RectToPipe(destination_rect, &dst_rect));
    vl_compositor_render(cstate, compositor, dst_vlsurface->surface, &dst_vlsurface->dirty_area, false);
 
    context->delete_blend_state(context, blend);
-   pipe_mutex_unlock(dst_vlsurface->device->mutex);
+   mtx_unlock(&dst_vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 struct pipe_resource *vlVdpOutputSurfaceGallium(VdpOutputSurface surface)
 {
    vlVdpOutputSurface *vlsurface;
 
    vlsurface = vlGetDataHTAB(surface);
    if (!vlsurface || !vlsurface->surface)
       return NULL;
 
    mtx_lock(&vlsurface->device->mutex);
    vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return vlsurface->surface->texture;
 }
 
 VdpStatus vlVdpOutputSurfaceDMABuf(VdpOutputSurface surface,
                                    struct VdpSurfaceDMABufDesc *result)
 {
    vlVdpOutputSurface *vlsurface;
    struct pipe_screen *pscreen;
    struct winsys_handle whandle;
@@ -804,25 +804,25 @@ VdpStatus vlVdpOutputSurfaceDMABuf(VdpOutputSurface surface,
    mtx_lock(&vlsurface->device->mutex);
    vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
 
    memset(&whandle, 0, sizeof(struct winsys_handle));
    whandle.type = DRM_API_HANDLE_TYPE_FD;
 
    pscreen = vlsurface->surface->texture->screen;
    if (!pscreen->resource_get_handle(pscreen, vlsurface->device->context,
                                      vlsurface->surface->texture, &whandle,
                                      PIPE_HANDLE_USAGE_READ_WRITE)) {
-      pipe_mutex_unlock(vlsurface->device->mutex);
+      mtx_unlock(&vlsurface->device->mutex);
       return VDP_STATUS_NO_IMPLEMENTATION;
    }
 
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    result->handle = whandle.handle;
    result->width = vlsurface->surface->width;
    result->height = vlsurface->surface->height;
    result->offset = whandle.offset;
    result->stride = whandle.stride;
    result->format = PipeToFormatRGBA(vlsurface->surface->format);
 
    return VDP_STATUS_OK;
 }
diff --git a/src/gallium/state_trackers/vdpau/presentation.c b/src/gallium/state_trackers/vdpau/presentation.c
index ee32bac..7869f4c 100644
--- a/src/gallium/state_trackers/vdpau/presentation.c
+++ b/src/gallium/state_trackers/vdpau/presentation.c
@@ -60,25 +60,25 @@ vlVdpPresentationQueueCreate(VdpDevice device,
 
    pq = CALLOC(1, sizeof(vlVdpPresentationQueue));
    if (!pq)
       return VDP_STATUS_RESOURCES;
 
    DeviceReference(&pq->device, dev);
    pq->drawable = pqt->drawable;
 
    mtx_lock(&dev->mutex);
    if (!vl_compositor_init_state(&pq->cstate, dev->context)) {
-      pipe_mutex_unlock(dev->mutex);
+      mtx_unlock(&dev->mutex);
       ret = VDP_STATUS_ERROR;
       goto no_compositor;
    }
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    *presentation_queue = vlAddDataHTAB(pq);
    if (*presentation_queue == 0) {
       ret = VDP_STATUS_ERROR;
       goto no_handle;
    }
 
    return VDP_STATUS_OK;
 
 no_handle:
@@ -95,21 +95,21 @@ VdpStatus
 vlVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
 {
    vlVdpPresentationQueue *pq;
 
    pq = vlGetDataHTAB(presentation_queue);
    if (!pq)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&pq->device->mutex);
    vl_compositor_cleanup_state(&pq->cstate);
-   pipe_mutex_unlock(pq->device->mutex);
+   mtx_unlock(&pq->device->mutex);
 
    vlRemoveDataHTAB(presentation_queue);
    DeviceReference(&pq->device, NULL);
    FREE(pq);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Configure the background color setting.
@@ -128,21 +128,21 @@ vlVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue
    if (!pq)
       return VDP_STATUS_INVALID_HANDLE;
 
    color.f[0] = background_color->red;
    color.f[1] = background_color->green;
    color.f[2] = background_color->blue;
    color.f[3] = background_color->alpha;
 
    mtx_lock(&pq->device->mutex);
    vl_compositor_set_clear_color(&pq->cstate, &color);
-   pipe_mutex_unlock(pq->device->mutex);
+   mtx_unlock(&pq->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Retrieve the current background color setting.
  */
 VdpStatus
 vlVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue,
                                          VdpColor *const background_color)
@@ -152,21 +152,21 @@ vlVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue
 
    if (!background_color)
       return VDP_STATUS_INVALID_POINTER;
 
    pq = vlGetDataHTAB(presentation_queue);
    if (!pq)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&pq->device->mutex);
    vl_compositor_get_clear_color(&pq->cstate, &color);
-   pipe_mutex_unlock(pq->device->mutex);
+   mtx_unlock(&pq->device->mutex);
 
    background_color->red = color.f[0];
    background_color->green = color.f[1];
    background_color->blue = color.f[2];
    background_color->alpha = color.f[3];
 
    return VDP_STATUS_OK;
 }
 
 /**
@@ -181,21 +181,21 @@ vlVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue,
    if (!current_time)
       return VDP_STATUS_INVALID_POINTER;
 
    pq = vlGetDataHTAB(presentation_queue);
    if (!pq)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&pq->device->mutex);
    *current_time = pq->device->vscreen->get_timestamp(pq->device->vscreen,
                                                       (void *)pq->drawable);
-   pipe_mutex_unlock(pq->device->mutex);
+   mtx_unlock(&pq->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Enter a surface into the presentation queue.
  */
 VdpStatus
 vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
                               VdpOutputSurface surface,
@@ -228,21 +228,21 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
    pipe = pq->device->context;
    compositor = &pq->device->compositor;
    cstate = &pq->cstate;
    vscreen = pq->device->vscreen;
 
    mtx_lock(&pq->device->mutex);
    if (vscreen->set_back_texture_from_output && surf->send_to_X)
       vscreen->set_back_texture_from_output(vscreen, surf->surface->texture, clip_width, clip_height);
    tex = vscreen->texture_from_drawable(vscreen, (void *)pq->drawable);
    if (!tex) {
-      pipe_mutex_unlock(pq->device->mutex);
+      mtx_unlock(&pq->device->mutex);
       return VDP_STATUS_INVALID_HANDLE;
    }
 
    if (!vscreen->set_back_texture_from_output || !surf->send_to_X) {
       dirty_area = vscreen->get_dirty_area(vscreen);
 
       memset(&surf_templ, 0, sizeof(surf_templ));
       surf_templ.format = tex->format;
       surf_draw = pipe->create_surface(pipe, tex, &surf_templ);
 
@@ -286,21 +286,21 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
          if (system(cmd) != 0)
             VDPAU_MSG(VDPAU_ERR, "[VDPAU] Dumping surface %d failed.\n", surface);
       }
       framenum++;
    }
 
    if (!vscreen->set_back_texture_from_output || !surf->send_to_X) {
       pipe_resource_reference(&tex, NULL);
       pipe_surface_reference(&surf_draw, NULL);
    }
-   pipe_mutex_unlock(pq->device->mutex);
+   mtx_unlock(&pq->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Wait for a surface to finish being displayed.
  */
 VdpStatus
 vlVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue,
                                             VdpOutputSurface surface,
@@ -320,21 +320,21 @@ vlVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_qu
    surf = vlGetDataHTAB(surface);
    if (!surf)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&pq->device->mutex);
    if (surf->fence) {
       screen = pq->device->vscreen->pscreen;
       screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
       screen->fence_reference(screen, &surf->fence, NULL);
    }
-   pipe_mutex_unlock(pq->device->mutex);
+   mtx_unlock(&pq->device->mutex);
 
    return vlVdpPresentationQueueGetTime(presentation_queue, first_presentation_time);
 }
 
 /**
  * Poll the current queue status of a surface.
  */
 VdpStatus
 vlVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue,
                                          VdpOutputSurface surface,
@@ -362,23 +362,23 @@ vlVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue
       if (pq->last_surf == surf)
          *status = VDP_PRESENTATION_QUEUE_STATUS_VISIBLE;
       else
          *status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;
    } else {
       mtx_lock(&pq->device->mutex);
       screen = pq->device->vscreen->pscreen;
       if (screen->fence_finish(screen, NULL, surf->fence, 0)) {
          screen->fence_reference(screen, &surf->fence, NULL);
          *status = VDP_PRESENTATION_QUEUE_STATUS_VISIBLE;
-         pipe_mutex_unlock(pq->device->mutex);
+         mtx_unlock(&pq->device->mutex);
 
          // We actually need to query the timestamp of the last VSYNC event from the hardware
          vlVdpPresentationQueueGetTime(presentation_queue, first_presentation_time);
          *first_presentation_time += 1;
       } else {
          *status = VDP_PRESENTATION_QUEUE_STATUS_QUEUED;
-         pipe_mutex_unlock(pq->device->mutex);
+         mtx_unlock(&pq->device->mutex);
       }
    }
 
    return VDP_STATUS_OK;
 }
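
All of the VDPAU entry points above share one shape: take the device mutex, do the work, and release it on every exit path, early error returns included — which is exactly where the converted mtx_unlock(&...) calls land. For reference, a minimal standalone sketch of that shape using the C11 threads API; the struct device, do_work() and the status values are made up for illustration, not taken from this patch:

#include <threads.h>
#include <stdbool.h>

struct device {
   mtx_t mutex;
   bool ready;
};

enum status { STATUS_OK, STATUS_ERROR };

static enum status do_work(struct device *dev)
{
   mtx_lock(&dev->mutex);

   if (!dev->ready) {
      mtx_unlock(&dev->mutex);   /* early error return still unlocks */
      return STATUS_ERROR;
   }

   /* ... touch state guarded by dev->mutex ... */

   mtx_unlock(&dev->mutex);      /* the success path unlocks too */
   return STATUS_OK;
}

int main(void)
{
   struct device dev = { .ready = true };

   if (mtx_init(&dev.mutex, mtx_plain) != thrd_success)
      return 1;

   enum status s = do_work(&dev);
   mtx_destroy(&dev.mutex);
   return s == STATUS_OK ? 0 : 1;
}
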
diff --git a/src/gallium/state_trackers/vdpau/query.c b/src/gallium/state_trackers/vdpau/query.c
index 87011cb..6b8b5a6 100644
--- a/src/gallium/state_trackers/vdpau/query.c
+++ b/src/gallium/state_trackers/vdpau/query.c
@@ -80,21 +80,21 @@ vlVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chrom
 
    pscreen = dev->vscreen->pscreen;
    if (!pscreen)
       return VDP_STATUS_RESOURCES;
 
    mtx_lock(&dev->mutex);
 
    /* XXX: Current limits */
    *is_supported = true;
    max_2d_texture_level = pscreen->get_param(pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
    if (!max_2d_texture_level)
       return VDP_STATUS_RESOURCES;
 
    /* I am not quite sure if it is max_2d_texture_level-1 or just max_2d_texture_level */
    *max_width = *max_height = pow(2,max_2d_texture_level-1);
 
    return VDP_STATUS_OK;
 }
 
 /**
@@ -128,21 +128,21 @@ vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaTyp
 
    case VDP_YCBCR_FORMAT_YV12:
       *is_supported = surface_chroma_type == VDP_CHROMA_TYPE_420;
 
       /* We can convert YV12 to NV12 on the fly! */
       if (*is_supported &&
           pscreen->is_video_format_supported(pscreen,
                                              PIPE_FORMAT_NV12,
                                              PIPE_VIDEO_PROFILE_UNKNOWN,
                                              PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
-         pipe_mutex_unlock(dev->mutex);
+         mtx_unlock(&dev->mutex);
          return VDP_STATUS_OK;
       }
       break;
 
    case VDP_YCBCR_FORMAT_UYVY:
    case VDP_YCBCR_FORMAT_YUYV:
       *is_supported = surface_chroma_type == VDP_CHROMA_TYPE_422;
       break;
 
    case VDP_YCBCR_FORMAT_Y8U8V8A8:
@@ -155,21 +155,21 @@ vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaTyp
       break;
    }
 
    *is_supported &= pscreen->is_video_format_supported
    (
       pscreen,
       FormatYCBCRToPipe(bits_ycbcr_format),
       PIPE_VIDEO_PROFILE_UNKNOWN,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM
    );
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's VdpDecoder capabilities.
  */
 VdpStatus
 vlVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile,
                               VdpBool *is_supported, uint32_t *max_level, uint32_t *max_macroblocks,
@@ -206,21 +206,21 @@ vlVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile,
                                              PIPE_VIDEO_CAP_MAX_HEIGHT);
       *max_level = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                             PIPE_VIDEO_CAP_MAX_LEVEL);
       *max_macroblocks = (*max_width/16)*(*max_height/16);
    } else {
       *max_width = 0;
       *max_height = 0;
       *max_level = 0;
       *max_macroblocks = 0;
    }
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's VdpOutputSurface capabilities.
  */
 VdpStatus
 vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                     VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
@@ -248,30 +248,30 @@ vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
    *is_supported = pscreen->is_format_supported
    (
       pscreen, format, PIPE_TEXTURE_3D, 1,
       PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
    );
    if (*is_supported) {
       uint32_t max_2d_texture_level = pscreen->get_param(
          pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
 
       if (!max_2d_texture_level) {
-         pipe_mutex_unlock(dev->mutex);
+         mtx_unlock(&dev->mutex);
          return VDP_STATUS_ERROR;
       }
 
       *max_width = *max_height = pow(2, max_2d_texture_level - 1);
    } else {
       *max_width = 0;
       *max_height = 0;
    }
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's capability to perform a PutBits operation using
  * application data matching the surface's format.
  */
 VdpStatus
 vlVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
@@ -295,21 +295,21 @@ vlVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFor
 
    if (!is_supported)
       return VDP_STATUS_INVALID_POINTER;
 
    mtx_lock(&dev->mutex);
    *is_supported = pscreen->is_format_supported
    (
       pscreen, format, PIPE_TEXTURE_2D, 1,
       PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
    );
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's capability to perform a PutBits operation using
  * application data in a specific indexed format.
  */
 VdpStatus
 vlVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
@@ -356,21 +356,21 @@ vlVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
    (
       pscreen, index_format, PIPE_TEXTURE_2D, 1,
       PIPE_BIND_SAMPLER_VIEW
    );
 
    *is_supported &= pscreen->is_format_supported
    (
       pscreen, colortbl_format, PIPE_TEXTURE_1D, 1,
       PIPE_BIND_SAMPLER_VIEW
    );
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's capability to perform a PutBits operation using
  * application data in a specific YCbCr/YUB format.
  */
 VdpStatus
 vlVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
@@ -406,21 +406,21 @@ vlVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat
       pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
       PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
    );
 
    *is_supported &= pscreen->is_video_format_supported
    (
       pscreen, ycbcr_format,
       PIPE_VIDEO_PROFILE_UNKNOWN,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM
    );
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's VdpBitmapSurface capabilities.
  */
 VdpStatus
 vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba_format,
                                     VdpBool *is_supported, uint32_t *max_width, uint32_t *max_height)
@@ -448,30 +448,30 @@ vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
    *is_supported = pscreen->is_format_supported
    (
       pscreen, format, PIPE_TEXTURE_3D, 1,
       PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
    );
    if (*is_supported) {
       uint32_t max_2d_texture_level = pscreen->get_param(
          pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
 
       if (!max_2d_texture_level) {
-         pipe_mutex_unlock(dev->mutex);
+         mtx_unlock(&dev->mutex);
          return VDP_STATUS_ERROR;
       }
 
       *max_width = *max_height = pow(2, max_2d_texture_level - 1);
    } else {
       *max_width = 0;
       *max_height = 0;
    }
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's support for a specific feature.
  */
 VdpStatus
 vlVdpVideoMixerQueryFeatureSupport(VdpDevice device, VdpVideoMixerFeature feature,
                                    VdpBool *is_supported)
@@ -549,24 +549,24 @@ vlVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter
                                                       PIPE_VIDEO_CAP_MAX_HEIGHT);
       break;
 
    case VDP_VIDEO_MIXER_PARAMETER_LAYERS:
       *(uint32_t*)min_value = 0;
       *(uint32_t*)max_value = 4;
       break;
 
    case VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE:
    default:
-      pipe_mutex_unlock(dev->mutex);
+      mtx_unlock(&dev->mutex);
       return VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER;
    }
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
    return VDP_STATUS_OK;
 }
 
 /**
  * Query the implementation's support for a specific attribute.
  */
 VdpStatus
 vlVdpVideoMixerQueryAttributeSupport(VdpDevice device, VdpVideoMixerAttribute attribute,
                                      VdpBool *is_supported)
 {
diff --git a/src/gallium/state_trackers/vdpau/surface.c b/src/gallium/state_trackers/vdpau/surface.c
index 39d5849..884ae30 100644
--- a/src/gallium/state_trackers/vdpau/surface.c
+++ b/src/gallium/state_trackers/vdpau/surface.c
@@ -97,21 +97,21 @@ vlVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type,
       pipe->screen,
       PIPE_VIDEO_PROFILE_UNKNOWN,
       PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
       PIPE_VIDEO_CAP_PREFERS_INTERLACED
    );
    if (p_surf->templat.buffer_format != PIPE_FORMAT_NONE)
       p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
 
    /* do not mandate early allocation of a video buffer */
    vlVdpVideoSurfaceClear(p_surf);
-   pipe_mutex_unlock(dev->mutex);
+   mtx_unlock(&dev->mutex);
 
    *surface = vlAddDataHTAB(p_surf);
    if (*surface == 0) {
       ret = VDP_STATUS_ERROR;
       goto no_handle;
    }
 
    return VDP_STATUS_OK;
 
 no_handle:
@@ -134,21 +134,21 @@ vlVdpVideoSurfaceDestroy(VdpVideoSurface surface)
 {
    vlVdpSurface *p_surf;
 
    p_surf = (vlVdpSurface *)vlGetDataHTAB((vlHandle)surface);
    if (!p_surf)
       return VDP_STATUS_INVALID_HANDLE;
 
    mtx_lock(&p_surf->device->mutex);
    if (p_surf->video_buffer)
       p_surf->video_buffer->destroy(p_surf->video_buffer);
-   pipe_mutex_unlock(p_surf->device->mutex);
+   mtx_unlock(&p_surf->device->mutex);
 
    vlRemoveDataHTAB(surface);
    DeviceReference(&p_surf->device, NULL);
    FREE(p_surf);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Retrieve the parameters used to create a VdpVideoSurface.
@@ -234,21 +234,21 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
       else if ((format == PIPE_FORMAT_YUYV && buffer_format == PIPE_FORMAT_UYVY) ||
                (format == PIPE_FORMAT_UYVY && buffer_format == PIPE_FORMAT_YUYV))
          conversion = CONVERSION_SWAP_YUYV_UYVY;
       else
          return VDP_STATUS_NO_IMPLEMENTATION;
    }
 
    mtx_lock(&vlsurface->device->mutex);
    sampler_views = vlsurface->video_buffer->get_sampler_view_planes(vlsurface->video_buffer);
    if (!sampler_views) {
-      pipe_mutex_unlock(vlsurface->device->mutex);
+      mtx_unlock(&vlsurface->device->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    for (i = 0; i < 3; ++i) {
       unsigned width, height;
       struct pipe_sampler_view *sv = sampler_views[i];
       if (!sv) continue;
 
       vlVdpVideoSurfaceSize(vlsurface, i, &width, &height);
 
@@ -256,21 +256,21 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
          struct pipe_box box = {
             0, 0, j,
             width, height, 1
          };
          struct pipe_transfer *transfer;
          uint8_t *map;
 
          map = pipe->transfer_map(pipe, sv->texture, 0,
                                        PIPE_TRANSFER_READ, &box, &transfer);
          if (!map) {
-            pipe_mutex_unlock(vlsurface->device->mutex);
+            mtx_unlock(&vlsurface->device->mutex);
             return VDP_STATUS_RESOURCES;
          }
 
          if (conversion == CONVERSION_NV12_TO_YV12 && i == 1) {
             u_copy_nv12_to_yv12(destination_data, destination_pitches,
                                 i, j, transfer->stride, sv->texture->array_size,
                                 map, box.width, box.height);
          } else if (conversion == CONVERSION_YV12_TO_NV12 && i > 0) {
             u_copy_yv12_to_nv12(destination_data, destination_pitches,
                                 i, j, transfer->stride, sv->texture->array_size,
@@ -281,21 +281,21 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
                                    map, box.width, box.height);
          } else {
             util_copy_rect(destination_data[i] + destination_pitches[i] * j, sv->texture->format,
                            destination_pitches[i] * sv->texture->array_size, 0, 0,
                            box.width, box.height, map, transfer->stride, 0, 0);
          }
 
          pipe_transfer_unmap(pipe, transfer);
       }
    }
-   pipe_mutex_unlock(vlsurface->device->mutex);
+   mtx_unlock(&vlsurface->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Copy image data from application memory in a specific YCbCr format to
  * a VdpVideoSurface.
  */
 VdpStatus
 vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
@@ -330,57 +330,57 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
 
       /* Determine the most suitable format for the new surface */
       if (!screen->is_video_format_supported(screen, nformat,
                                              PIPE_VIDEO_PROFILE_UNKNOWN,
                                              PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
          nformat = screen->get_video_param(screen,
                                            PIPE_VIDEO_PROFILE_UNKNOWN,
                                            PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                            PIPE_VIDEO_CAP_PREFERED_FORMAT);
          if (nformat == PIPE_FORMAT_NONE) {
-            pipe_mutex_unlock(p_surf->device->mutex);
+            mtx_unlock(&p_surf->device->mutex);
             return VDP_STATUS_NO_IMPLEMENTATION;
          }
       }
 
       if (p_surf->video_buffer == NULL  ||
           nformat != p_surf->video_buffer->buffer_format) {
          /* destroy the old one */
          if (p_surf->video_buffer)
             p_surf->video_buffer->destroy(p_surf->video_buffer);
 
          /* adjust the template parameters */
          p_surf->templat.buffer_format = nformat;
 
          /* and try to create the video buffer with the new format */
          p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
 
          /* stil no luck? ok forget it we don't support it */
          if (!p_surf->video_buffer) {
-            pipe_mutex_unlock(p_surf->device->mutex);
+            mtx_unlock(&p_surf->device->mutex);
             return VDP_STATUS_NO_IMPLEMENTATION;
          }
          vlVdpVideoSurfaceClear(p_surf);
       }
    }
 
    if (pformat != p_surf->video_buffer->buffer_format) {
       if (pformat == PIPE_FORMAT_YV12 &&
           p_surf->video_buffer->buffer_format == PIPE_FORMAT_NV12)
          conversion = CONVERSION_YV12_TO_NV12;
       else
          return VDP_STATUS_NO_IMPLEMENTATION;
    }
 
    sampler_views = p_surf->video_buffer->get_sampler_view_planes(p_surf->video_buffer);
    if (!sampler_views) {
-      pipe_mutex_unlock(p_surf->device->mutex);
+      mtx_unlock(&p_surf->device->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    for (i = 0; i < 3; ++i) {
       unsigned width, height;
       struct pipe_sampler_view *sv = sampler_views[i];
       struct pipe_resource *tex;
       if (!sv || !source_pitches[i]) continue;
 
       tex = sv->texture;
@@ -392,21 +392,21 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
             width, height, 1
          };
 
          if (conversion == CONVERSION_YV12_TO_NV12 && i == 1) {
             struct pipe_transfer *transfer;
             uint8_t *map;
 
             map = pipe->transfer_map(pipe, tex, 0, usage,
                                      &dst_box, &transfer);
             if (!map) {
-               pipe_mutex_unlock(p_surf->device->mutex);
+               mtx_unlock(&p_surf->device->mutex);
                return VDP_STATUS_RESOURCES;
             }
 
             u_copy_nv12_from_yv12(source_data, source_pitches,
                                   i, j, transfer->stride, tex->array_size,
                                   map, dst_box.width, dst_box.height);
 
             pipe_transfer_unmap(pipe, transfer);
          } else {
             pipe->texture_subdata(pipe, tex, 0,
@@ -415,21 +415,21 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
                                   source_pitches[i] * tex->array_size,
                                   0);
          }
          /*
           * This surface has already been synced
           * by the first map.
           */
          usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
       }
    }
-   pipe_mutex_unlock(p_surf->device->mutex);
+   mtx_unlock(&p_surf->device->mutex);
 
    return VDP_STATUS_OK;
 }
 
 /**
  * Helper function to initially clear the VideoSurface after (re-)creation
  */
 void
 vlVdpVideoSurfaceClear(vlVdpSurface *vlsurf)
 {
@@ -465,21 +465,21 @@ struct pipe_video_buffer *vlVdpVideoSurfaceGallium(VdpVideoSurface surface)
    if (!p_surf)
       return NULL;
 
    mtx_lock(&p_surf->device->mutex);
    if (p_surf->video_buffer == NULL) {
       struct pipe_context *pipe = p_surf->device->context;
 
       /* try to create a video buffer if we don't already have one */
       p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
    }
-   pipe_mutex_unlock(p_surf->device->mutex);
+   mtx_unlock(&p_surf->device->mutex);
 
    return p_surf->video_buffer;
 }
 
 VdpStatus vlVdpVideoSurfaceDMABuf(VdpVideoSurface surface,
                                   VdpVideoSurfacePlane plane,
                                   struct VdpSurfaceDMABufDesc *result)
 {
    vlVdpSurface *p_surf = vlGetDataHTAB(surface);
 
@@ -504,43 +504,43 @@ VdpStatus vlVdpVideoSurfaceDMABuf(VdpVideoSurface surface,
    if (p_surf->video_buffer == NULL) {
       struct pipe_context *pipe = p_surf->device->context;
 
       /* try to create a video buffer if we don't already have one */
       p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
    }
 
    /* Check if surface match interop requirements */
    if (p_surf->video_buffer == NULL || !p_surf->video_buffer->interlaced ||
        p_surf->video_buffer->buffer_format != PIPE_FORMAT_NV12) {
-      pipe_mutex_unlock(p_surf->device->mutex);
+      mtx_unlock(&p_surf->device->mutex);
       return VDP_STATUS_NO_IMPLEMENTATION;
    }
 
    surf = p_surf->video_buffer->get_surfaces(p_surf->video_buffer)[plane];
    if (!surf) {
-      pipe_mutex_unlock(p_surf->device->mutex);
+      mtx_unlock(&p_surf->device->mutex);
       return VDP_STATUS_RESOURCES;
    }
 
    memset(&whandle, 0, sizeof(struct winsys_handle));
    whandle.type = DRM_API_HANDLE_TYPE_FD;
    whandle.layer = surf->u.tex.first_layer;
 
    pscreen = surf->texture->screen;
    if (!pscreen->resource_get_handle(pscreen, p_surf->device->context,
                                      surf->texture, &whandle,
                                      PIPE_HANDLE_USAGE_READ_WRITE)) {
-      pipe_mutex_unlock(p_surf->device->mutex);
+      mtx_unlock(&p_surf->device->mutex);
       return VDP_STATUS_NO_IMPLEMENTATION;
    }
 
-   pipe_mutex_unlock(p_surf->device->mutex);
+   mtx_unlock(&p_surf->device->mutex);
 
    result->handle = whandle.handle;
    result->width = surf->width;
    result->height = surf->height;
    result->offset = whandle.offset;
    result->stride = whandle.stride;
 
    if (surf->format == PIPE_FORMAT_R8_UNORM)
       result->format = VDP_RGBA_FORMAT_R8;
    else
diff --git a/src/gallium/targets/haiku-softpipe/GalliumContext.cpp b/src/gallium/targets/haiku-softpipe/GalliumContext.cpp
index 02ffd01..0356f65 100644
--- a/src/gallium/targets/haiku-softpipe/GalliumContext.cpp
+++ b/src/gallium/targets/haiku-softpipe/GalliumContext.cpp
@@ -407,13 +407,13 @@ GalliumContext::Lock()
 {
 	CALLED();
 	mtx_lock(&fMutex);
 }
 
 
 void
 GalliumContext::Unlock()
 {
 	CALLED();
-	pipe_mutex_unlock(fMutex);
+	mtx_unlock(&fMutex);
 }
 /* vim: set tabstop=4: */
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 2f0dcb6..c7dd116 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -92,54 +92,54 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
 
       /* Release the idle fences to avoid checking them again later. */
       for (unsigned i = 0; i < idle_fences; ++i)
          amdgpu_fence_reference(&bo->fences[i], NULL);
 
       memmove(&bo->fences[0], &bo->fences[idle_fences],
               (bo->num_fences - idle_fences) * sizeof(*bo->fences));
       bo->num_fences -= idle_fences;
 
       buffer_idle = !bo->num_fences;
-      pipe_mutex_unlock(ws->bo_fence_lock);
+      mtx_unlock(&ws->bo_fence_lock);
 
       return buffer_idle;
    } else {
       bool buffer_idle = true;
 
       mtx_lock(&ws->bo_fence_lock);
       while (bo->num_fences && buffer_idle) {
          struct pipe_fence_handle *fence = NULL;
          bool fence_idle = false;
 
          amdgpu_fence_reference(&fence, bo->fences[0]);
 
          /* Wait for the fence. */
-         pipe_mutex_unlock(ws->bo_fence_lock);
+         mtx_unlock(&ws->bo_fence_lock);
          if (amdgpu_fence_wait(fence, abs_timeout, true))
             fence_idle = true;
          else
             buffer_idle = false;
          mtx_lock(&ws->bo_fence_lock);
 
          /* Release an idle fence to avoid checking it again later, keeping in
           * mind that the fence array may have been modified by other threads.
           */
          if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
             amdgpu_fence_reference(&bo->fences[0], NULL);
             memmove(&bo->fences[0], &bo->fences[1],
                     (bo->num_fences - 1) * sizeof(*bo->fences));
             bo->num_fences--;
          }
 
          amdgpu_fence_reference(&fence, NULL);
       }
-      pipe_mutex_unlock(ws->bo_fence_lock);
+      mtx_unlock(&ws->bo_fence_lock);
 
       return buffer_idle;
    }
 }
 
 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
       struct pb_buffer *buf)
 {
    return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
 }
@@ -156,21 +156,21 @@ static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
 
 void amdgpu_bo_destroy(struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
 
    assert(bo->bo && "must not be called for slab entries");
 
    mtx_lock(&bo->ws->global_bo_list_lock);
    LIST_DEL(&bo->u.real.global_list_item);
    bo->ws->num_buffers--;
-   pipe_mutex_unlock(bo->ws->global_bo_list_lock);
+   mtx_unlock(&bo->ws->global_bo_list_lock);
 
    amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
    amdgpu_va_range_free(bo->u.real.va_handle);
    amdgpu_bo_free(bo->bo);
 
    amdgpu_bo_remove_fences(bo);
 
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
       bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
@@ -345,21 +345,21 @@ static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
 
 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
 {
    struct amdgpu_winsys *ws = bo->ws;
 
    assert(bo->bo);
 
    mtx_lock(&ws->global_bo_list_lock);
    LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
    ws->num_buffers++;
-   pipe_mutex_unlock(ws->global_bo_list_lock);
+   mtx_unlock(&ws->global_bo_list_lock);
 }
 
 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                  uint64_t size,
                                                  unsigned alignment,
                                                  unsigned usage,
                                                  enum radeon_bo_domain initial_domain,
                                                  unsigned flags,
                                                  unsigned pb_cache_bucket)
 {
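
The amdgpu_bo_wait() hunk above also shows the second pattern touched by this conversion: the fence lock is dropped around the blocking wait and re-taken afterwards, and the fence list is re-checked because other threads may have modified it in the meantime. A simplified sketch of that pattern — struct buffer, struct fence and fence_wait() are hypothetical stand-ins, and unlike the real winsys this sketch does not reference-count the fence while the lock is dropped:

#include <threads.h>
#include <stdbool.h>

#define MAX_FENCES 8

struct fence { bool signalled; };

struct buffer {
   mtx_t fence_lock;
   struct fence *fences[MAX_FENCES];
   unsigned num_fences;
};

/* Stand-in for the blocking wait; in the winsys this is a kernel call. */
static bool fence_wait(struct fence *f)
{
   return f->signalled;
}

static bool buffer_wait_idle(struct buffer *bo)
{
   bool idle = true;

   mtx_lock(&bo->fence_lock);
   while (bo->num_fences && idle) {
      struct fence *fence = bo->fences[0];

      /* Drop the lock for the potentially long wait so other threads
       * can keep adding or retiring fences in the meantime. */
      mtx_unlock(&bo->fence_lock);
      bool fence_idle = fence_wait(fence);
      if (!fence_idle)
         idle = false;
      mtx_lock(&bo->fence_lock);

      /* Re-validate: the array may have changed while unlocked. */
      if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
         for (unsigned i = 1; i < bo->num_fences; i++)
            bo->fences[i - 1] = bo->fences[i];
         bo->num_fences--;
      }
   }
   mtx_unlock(&bo->fence_lock);

   return idle;
}

int main(void)
{
   struct fence f = { .signalled = true };
   struct buffer bo = { .fences = { &f }, .num_fences = 1 };

   if (mtx_init(&bo.fence_lock, mtx_plain) != thrd_success)
      return 1;

   bool idle = buffer_wait_idle(&bo);
   mtx_destroy(&bo.fence_lock);
   return idle ? 0 : 1;
}
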
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index bb255f2..cdd8e6c 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1034,36 +1034,36 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
     */
    if (debug_get_option_all_bos()) {
       struct amdgpu_winsys_bo *bo;
       amdgpu_bo_handle *handles;
       unsigned num = 0;
 
       mtx_lock(&ws->global_bo_list_lock);
 
       handles = malloc(sizeof(handles[0]) * ws->num_buffers);
       if (!handles) {
-         pipe_mutex_unlock(ws->global_bo_list_lock);
+         mtx_unlock(&ws->global_bo_list_lock);
          amdgpu_cs_context_cleanup(cs);
          cs->error_code = -ENOMEM;
          return;
       }
 
       LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
          assert(num < ws->num_buffers);
          handles[num++] = bo->bo;
       }
 
       r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                 handles, NULL,
                                 &cs->request.resources);
       free(handles);
-      pipe_mutex_unlock(ws->global_bo_list_lock);
+      mtx_unlock(&ws->global_bo_list_lock);
    } else {
       r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
                                 cs->handles, cs->flags,
                                 &cs->request.resources);
    }
 
    if (r) {
       fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
       cs->request.resources = NULL;
       amdgpu_fence_signalled(cs->fence);
@@ -1215,21 +1215,21 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
       amdgpu_add_fence_dependencies(cs);
 
       /* Swap command streams. "cst" is going to be submitted. */
       cs->csc = cs->cst;
       cs->cst = cur;
 
       /* Submit. */
       util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                          amdgpu_cs_submit_ib, NULL);
       /* The submission has been queued, unlock the fence now. */
-      pipe_mutex_unlock(ws->bo_fence_lock);
+      mtx_unlock(&ws->bo_fence_lock);
 
       if (!(flags & RADEON_FLUSH_ASYNC)) {
          amdgpu_cs_sync_flush(rcs);
          error_code = cur->error_code;
       }
    } else {
       amdgpu_cs_context_cleanup(cs->csc);
    }
 
    amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index bcb466f..2551b95 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -507,21 +507,21 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
     * from the table.
     * This must happen while the mutex is locked, so that
     * amdgpu_winsys_create in another thread doesn't get the winsys
     * from the table when the counter drops to 0. */
    mtx_lock(&dev_tab_mutex);
 
    destroy = pipe_reference(&ws->reference, NULL);
    if (destroy && dev_tab)
       util_hash_table_remove(dev_tab, ws->dev);
 
-   pipe_mutex_unlock(dev_tab_mutex);
+   mtx_unlock(&dev_tab_mutex);
    return destroy;
 }
 
 PUBLIC struct radeon_winsys *
 amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
 {
    struct amdgpu_winsys *ws;
    drmVersionPtr version = drmGetVersion(fd);
    amdgpu_device_handle dev;
    uint32_t drm_major, drm_minor, r;
@@ -535,30 +535,30 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
 
    /* Look up the winsys from the dev table. */
    mtx_lock(&dev_tab_mutex);
    if (!dev_tab)
       dev_tab = util_hash_table_create(hash_dev, compare_dev);
 
    /* Initialize the amdgpu device. This should always return the same pointer
     * for the same fd. */
    r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
    if (r) {
-      pipe_mutex_unlock(dev_tab_mutex);
+      mtx_unlock(&dev_tab_mutex);
       fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
       return NULL;
    }
 
    /* Lookup a winsys if we have already created one for this device. */
    ws = util_hash_table_get(dev_tab, dev);
    if (ws) {
       pipe_reference(NULL, &ws->reference);
-      pipe_mutex_unlock(dev_tab_mutex);
+      mtx_unlock(&dev_tab_mutex);
       return &ws->base;
    }
 
    /* Create a new winsys. */
    ws = CALLOC_STRUCT(amdgpu_winsys);
    if (!ws)
       goto fail;
 
    ws->dev = dev;
    ws->info.drm_major = drm_major;
@@ -597,44 +597,44 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
    amdgpu_bo_init_functions(ws);
    amdgpu_cs_init_functions(ws);
    amdgpu_surface_init_functions(ws);
 
    LIST_INITHEAD(&ws->global_bo_list);
    (void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
    (void) mtx_init(&ws->bo_fence_lock, mtx_plain);
 
    if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
       amdgpu_winsys_destroy(&ws->base);
-      pipe_mutex_unlock(dev_tab_mutex);
+      mtx_unlock(&dev_tab_mutex);
       return NULL;
    }
 
    /* Create the screen at the end. The winsys must be initialized
     * completely.
     *
     * Alternatively, we could create the screen based on "ws->gen"
     * and link all drivers into one binary blob. */
    ws->base.screen = screen_create(&ws->base);
    if (!ws->base.screen) {
       amdgpu_winsys_destroy(&ws->base);
-      pipe_mutex_unlock(dev_tab_mutex);
+      mtx_unlock(&dev_tab_mutex);
       return NULL;
    }
 
    util_hash_table_set(dev_tab, dev, ws);
 
    /* We must unlock the mutex once the winsys is fully initialized, so that
     * other threads attempting to create the winsys from the same fd will
     * get a fully initialized winsys and not just half-way initialized. */
-   pipe_mutex_unlock(dev_tab_mutex);
+   mtx_unlock(&dev_tab_mutex);
 
    return &ws->base;
 
 fail_cache:
    pb_cache_deinit(&ws->bo_cache);
    do_winsys_deinit(ws);
 fail_alloc:
    FREE(ws);
 fail:
-   pipe_mutex_unlock(dev_tab_mutex);
+   mtx_unlock(&dev_tab_mutex);
    return NULL;
 }
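
amdgpu_winsys_create() above is the lookup-or-create-singleton case: everything runs under a global table mutex, every failure path unlocks before returning, and the winsys is only published once it is fully initialized. A rough sketch of that flow with a hypothetical fixed-size table keyed by fd (the real code uses util_hash_table and pipe_reference instead):

#include <threads.h>
#include <stdlib.h>

#define MAX_ENTRIES 16

struct winsys {
   int fd;
   int refcount;
};

static mtx_t table_mutex;                 /* initialized in main() below */
static struct winsys *table[MAX_ENTRIES];

static struct winsys *winsys_lookup(int fd)
{
   for (int i = 0; i < MAX_ENTRIES; i++)
      if (table[i] && table[i]->fd == fd)
         return table[i];
   return NULL;
}

static struct winsys *winsys_create(int fd)
{
   mtx_lock(&table_mutex);

   struct winsys *ws = winsys_lookup(fd);
   if (ws) {
      ws->refcount++;                     /* reuse the existing winsys */
      mtx_unlock(&table_mutex);
      return ws;
   }

   ws = calloc(1, sizeof(*ws));
   if (!ws) {
      mtx_unlock(&table_mutex);           /* failure paths unlock too */
      return NULL;
   }
   ws->fd = fd;
   ws->refcount = 1;

   for (int i = 0; i < MAX_ENTRIES; i++) {
      if (!table[i]) {
         table[i] = ws;
         break;
      }
   }

   /* Unlock only once the winsys is fully set up, so another thread
    * creating from the same fd never sees a half-initialized one. */
   mtx_unlock(&table_mutex);
   return ws;
}

int main(void)
{
   if (mtx_init(&table_mutex, mtx_plain) != thrd_success)
      return 1;

   struct winsys *a = winsys_create(7);
   struct winsys *b = winsys_create(7);
   int ok = a && a == b && a->refcount == 2;

   mtx_destroy(&table_mutex);
   free(a);
   return ok ? 0 : 1;
}
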
diff --git a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
index dc48934..8e3f7a0 100644
--- a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
+++ b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
@@ -76,21 +76,21 @@ etna_drm_screen_destroy(struct pipe_screen *pscreen)
 {
    struct etna_screen *screen = etna_screen(pscreen);
    boolean destroy;
 
    mtx_lock(&etna_screen_mutex);
    destroy = --screen->refcnt == 0;
    if (destroy) {
       int fd = etna_device_fd(screen->dev);
       util_hash_table_remove(etna_tab, intptr_to_pointer(fd));
    }
-   pipe_mutex_unlock(etna_screen_mutex);
+   mtx_unlock(&etna_screen_mutex);
 
    if (destroy) {
       pscreen->destroy = screen->winsys_priv;
       pscreen->destroy(pscreen);
    }
 }
 
 static unsigned hash_fd(void *key)
 {
    int fd = pointer_to_intptr(key);
@@ -138,21 +138,21 @@ etna_drm_screen_create_renderonly(struct renderonly *ro)
 
          /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy() */
          etna_screen(pscreen)->winsys_priv = pscreen->destroy;
       pscreen->destroy = etna_drm_screen_destroy;
       }
    }
 
 unlock:
-   pipe_mutex_unlock(etna_screen_mutex);
+   mtx_unlock(&etna_screen_mutex);
    return pscreen;
 }
 
 struct pipe_screen *
 etna_drm_screen_create(int fd)
 {
    struct renderonly ro = {
       .create_for_resource = renderonly_create_gpu_import_for_resource,
       .kms_fd = -1,
       .gpu_fd = fd
diff --git a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
index 2de429e..c1ea22a 100644
--- a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
+++ b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
@@ -49,21 +49,21 @@ fd_drm_screen_destroy(struct pipe_screen *pscreen)
 {
 	struct fd_screen *screen = fd_screen(pscreen);
 	boolean destroy;
 
 	mtx_lock(&fd_screen_mutex);
 	destroy = --screen->refcnt == 0;
 	if (destroy) {
 		int fd = fd_device_fd(screen->dev);
 		util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
 	}
-	pipe_mutex_unlock(fd_screen_mutex);
+	mtx_unlock(&fd_screen_mutex);
 
 	if (destroy) {
 		pscreen->destroy = screen->winsys_priv;
 		pscreen->destroy(pscreen);
 	}
 }
 
 static unsigned hash_fd(void *key)
 {
 	int fd = pointer_to_intptr(key);
@@ -115,13 +115,13 @@ fd_drm_screen_create(int fd)
 			/* Bit of a hack, to avoid circular linkage dependency,
 			 * ie. pipe driver having to call in to winsys, we
 			 * override the pipe drivers screen->destroy():
 			 */
 			fd_screen(pscreen)->winsys_priv = pscreen->destroy;
 			pscreen->destroy = fd_drm_screen_destroy;
 		}
 	}
 
 unlock:
-	pipe_mutex_unlock(fd_screen_mutex);
+	mtx_unlock(&fd_screen_mutex);
 	return pscreen;
 }
diff --git a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
index a2a9fd6..4ca2d35 100644
--- a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
+++ b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
@@ -25,21 +25,21 @@ bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
 {
 	int ret;
 	if (screen->refcount == -1)
 		return true;
 
 	mtx_lock(&nouveau_screen_mutex);
 	ret = --screen->refcount;
 	assert(ret >= 0);
 	if (ret == 0)
 		util_hash_table_remove(fd_tab, intptr_to_pointer(screen->drm->fd));
-	pipe_mutex_unlock(nouveau_screen_mutex);
+	mtx_unlock(&nouveau_screen_mutex);
 	return ret == 0;
 }
 
 static unsigned hash_fd(void *key)
 {
     int fd = pointer_to_intptr(key);
     struct stat stat;
     fstat(fd, &stat);
 
     return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
@@ -64,29 +64,29 @@ nouveau_drm_screen_create(int fd)
 	struct nouveau_drm *drm = NULL;
 	struct nouveau_device *dev = NULL;
 	struct nouveau_screen *(*init)(struct nouveau_device *);
 	struct nouveau_screen *screen = NULL;
 	int ret, dupfd;
 
 	mtx_lock(&nouveau_screen_mutex);
 	if (!fd_tab) {
 		fd_tab = util_hash_table_create(hash_fd, compare_fd);
 		if (!fd_tab) {
-			pipe_mutex_unlock(nouveau_screen_mutex);
+			mtx_unlock(&nouveau_screen_mutex);
 			return NULL;
 		}
 	}
 
 	screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
 	if (screen) {
 		screen->refcount++;
-		pipe_mutex_unlock(nouveau_screen_mutex);
+		mtx_unlock(&nouveau_screen_mutex);
 		return &screen->base;
 	}
 
 	/* Since the screen re-use is based on the device node and not the fd,
 	 * create a copy of the fd to be owned by the device. Otherwise a
 	 * scenario could occur where two screens are created, and the first
 	 * one is shut down, along with the fd being closed. The second
 	 * (identical) screen would now have a reference to the closed fd. We
 	 * avoid this by duplicating the original fd. Note that
 	 * nouveau_device_wrap does not close the fd in case of a device
@@ -136,24 +136,24 @@ nouveau_drm_screen_create(int fd)
 	screen = init(dev);
 	if (!screen || !screen->base.context_create)
 		goto err;
 
 	/* Use dupfd in hash table, to avoid errors if the original fd gets
 	 * closed by its owner. The hash key needs to live at least as long as
 	 * the screen.
 	 */
 	util_hash_table_set(fd_tab, intptr_to_pointer(dupfd), screen);
 	screen->refcount = 1;
-	pipe_mutex_unlock(nouveau_screen_mutex);
+	mtx_unlock(&nouveau_screen_mutex);
 	return &screen->base;
 
 err:
 	if (screen) {
 		screen->base.destroy(&screen->base);
 	} else {
 		nouveau_device_del(&dev);
 		nouveau_drm_del(&drm);
 		close(dupfd);
 	}
-	pipe_mutex_unlock(nouveau_screen_mutex);
+	mtx_unlock(&nouveau_screen_mutex);
 	return NULL;
 }
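
The etnaviv, freedreno and nouveau screen-destroy paths above all follow the mirror-image pattern: drop the reference and remove the entry from the fd table while the global mutex is held, then run the actual teardown after unlocking. A compressed sketch, with a hypothetical struct screen standing in for the real screen types:

#include <threads.h>
#include <stdbool.h>
#include <stdlib.h>

struct screen {
   int refcount;
   void (*destroy)(struct screen *);
};

static mtx_t screen_mutex;                /* initialized in main() below */

static void screen_unref(struct screen *screen)
{
   bool destroy;

   mtx_lock(&screen_mutex);
   destroy = --screen->refcount == 0;
   /* On the last reference the real code also removes the screen from
    * the fd hash table here, while the lock is still held. */
   mtx_unlock(&screen_mutex);

   /* Run the heavyweight teardown outside the lock. */
   if (destroy)
      screen->destroy(screen);
}

static void screen_free(struct screen *screen)
{
   free(screen);
}

int main(void)
{
   if (mtx_init(&screen_mutex, mtx_plain) != thrd_success)
      return 1;

   struct screen *s = calloc(1, sizeof(*s));
   if (!s)
      return 1;
   s->refcount = 2;
   s->destroy = screen_free;

   screen_unref(s);   /* drops to 1, nothing destroyed */
   screen_unref(s);   /* drops to 0, destroyed outside the lock */

   mtx_destroy(&screen_mutex);
   return 0;
}
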
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index e302273..d4f4763 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -81,21 +81,21 @@ static bool radeon_bo_is_busy(struct radeon_bo *bo)
     for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
         if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
             busy = true;
             break;
         }
         radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);
     }
     memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
             (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
     bo->u.slab.num_fences -= num_idle;
-    pipe_mutex_unlock(bo->rws->bo_fence_lock);
+    mtx_unlock(&bo->rws->bo_fence_lock);
 
     return busy;
 }
 
 static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
 {
     struct drm_radeon_gem_wait_idle args = {0};
 
     args.handle = bo->handle;
     while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
@@ -104,35 +104,35 @@ static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
 
 static void radeon_bo_wait_idle(struct radeon_bo *bo)
 {
     if (bo->handle) {
         radeon_real_bo_wait_idle(bo);
     } else {
         mtx_lock(&bo->rws->bo_fence_lock);
         while (bo->u.slab.num_fences) {
             struct radeon_bo *fence = NULL;
             radeon_bo_reference(&fence, bo->u.slab.fences[0]);
-            pipe_mutex_unlock(bo->rws->bo_fence_lock);
+            mtx_unlock(&bo->rws->bo_fence_lock);
 
             /* Wait without holding the fence lock. */
             radeon_real_bo_wait_idle(fence);
 
             mtx_lock(&bo->rws->bo_fence_lock);
             if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
                 radeon_bo_reference(&bo->u.slab.fences[0], NULL);
                 memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
                         (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
                 bo->u.slab.num_fences--;
             }
             radeon_bo_reference(&fence, NULL);
         }
-        pipe_mutex_unlock(bo->rws->bo_fence_lock);
+        mtx_unlock(&bo->rws->bo_fence_lock);
     }
 }
 
 static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                            enum radeon_bo_usage usage)
 {
     struct radeon_bo *bo = radeon_bo(_buf);
     int64_t abs_timeout;
 
     /* No timeout. Just query. */
@@ -211,54 +211,54 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
         waste = offset % alignment;
         waste = waste ? alignment - waste : 0;
         offset += waste;
         if (offset >= (hole->offset + hole->size)) {
             continue;
         }
         if (!waste && hole->size == size) {
             offset = hole->offset;
             list_del(&hole->list);
             FREE(hole);
-            pipe_mutex_unlock(rws->bo_va_mutex);
+            mtx_unlock(&rws->bo_va_mutex);
             return offset;
         }
         if ((hole->size - waste) > size) {
             if (waste) {
                 n = CALLOC_STRUCT(radeon_bo_va_hole);
                 n->size = waste;
                 n->offset = hole->offset;
                 list_add(&n->list, &hole->list);
             }
             hole->size -= (size + waste);
             hole->offset += size + waste;
-            pipe_mutex_unlock(rws->bo_va_mutex);
+            mtx_unlock(&rws->bo_va_mutex);
             return offset;
         }
         if ((hole->size - waste) == size) {
             hole->size = waste;
-            pipe_mutex_unlock(rws->bo_va_mutex);
+            mtx_unlock(&rws->bo_va_mutex);
             return offset;
         }
     }
 
     offset = rws->va_offset;
     waste = offset % alignment;
     waste = waste ? alignment - waste : 0;
     if (waste) {
         n = CALLOC_STRUCT(radeon_bo_va_hole);
         n->size = waste;
         n->offset = offset;
         list_add(&n->list, &rws->va_holes);
     }
     offset += waste;
     rws->va_offset += size + waste;
-    pipe_mutex_unlock(rws->bo_va_mutex);
+    mtx_unlock(&rws->bo_va_mutex);
     return offset;
 }
 
 static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
                                  uint64_t va, uint64_t size)
 {
     struct radeon_bo_va_hole *hole;
 
     size = align(size, rws->info.gart_page_size);
 
@@ -311,40 +311,40 @@ static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
          * maybe print a warning
          */
         next = CALLOC_STRUCT(radeon_bo_va_hole);
         if (next) {
             next->size = size;
             next->offset = va;
             list_add(&next->list, &hole->list);
         }
     }
 out:
-    pipe_mutex_unlock(rws->bo_va_mutex);
+    mtx_unlock(&rws->bo_va_mutex);
 }
 
 void radeon_bo_destroy(struct pb_buffer *_buf)
 {
     struct radeon_bo *bo = radeon_bo(_buf);
     struct radeon_drm_winsys *rws = bo->rws;
     struct drm_gem_close args;
 
     assert(bo->handle && "must not be called for slab entries");
 
     memset(&args, 0, sizeof(args));
 
     mtx_lock(&rws->bo_handles_mutex);
     util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
     if (bo->flink_name) {
         util_hash_table_remove(rws->bo_names,
                                (void*)(uintptr_t)bo->flink_name);
     }
-    pipe_mutex_unlock(rws->bo_handles_mutex);
+    mtx_unlock(&rws->bo_handles_mutex);
 
     if (bo->u.real.ptr)
         os_munmap(bo->u.real.ptr, bo->base.size);
 
     if (rws->info.has_virtual_memory) {
         if (rws->va_unmap_working) {
             struct drm_radeon_gem_va va;
 
             va.handle = bo->handle;
             va.vm_id = 0;
@@ -415,60 +415,60 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
     } else {
         offset = bo->va - bo->u.slab.real->va;
         bo = bo->u.slab.real;
     }
 
     /* Map the buffer. */
     mtx_lock(&bo->u.real.map_mutex);
     /* Return the pointer if it's already mapped. */
     if (bo->u.real.ptr) {
         bo->u.real.map_count++;
-        pipe_mutex_unlock(bo->u.real.map_mutex);
+        mtx_unlock(&bo->u.real.map_mutex);
         return (uint8_t*)bo->u.real.ptr + offset;
     }
     args.handle = bo->handle;
     args.offset = 0;
     args.size = (uint64_t)bo->base.size;
     if (drmCommandWriteRead(bo->rws->fd,
                             DRM_RADEON_GEM_MMAP,
                             &args,
                             sizeof(args))) {
-        pipe_mutex_unlock(bo->u.real.map_mutex);
+        mtx_unlock(&bo->u.real.map_mutex);
         fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                 bo, bo->handle);
         return NULL;
     }
 
     ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                bo->rws->fd, args.addr_ptr);
     if (ptr == MAP_FAILED) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&bo->rws->bo_cache);
 
         ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       bo->rws->fd, args.addr_ptr);
         if (ptr == MAP_FAILED) {
-            pipe_mutex_unlock(bo->u.real.map_mutex);
+            mtx_unlock(&bo->u.real.map_mutex);
             fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
             return NULL;
         }
     }
     bo->u.real.ptr = ptr;
     bo->u.real.map_count = 1;
 
     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram += bo->base.size;
     else
        bo->rws->mapped_gtt += bo->base.size;
     bo->rws->num_mapped_buffers++;
 
-    pipe_mutex_unlock(bo->u.real.map_mutex);
+    mtx_unlock(&bo->u.real.map_mutex);
     return (uint8_t*)bo->u.real.ptr + offset;
 }
 
 static void *radeon_bo_map(struct pb_buffer *buf,
                            struct radeon_winsys_cs *rcs,
                            enum pipe_transfer_usage usage)
 {
     struct radeon_bo *bo = (struct radeon_bo*)buf;
     struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
 
@@ -548,40 +548,40 @@ static void radeon_bo_unmap(struct pb_buffer *_buf)
     struct radeon_bo *bo = (struct radeon_bo*)_buf;
 
     if (bo->user_ptr)
         return;
 
     if (!bo->handle)
         bo = bo->u.slab.real;
 
     mtx_lock(&bo->u.real.map_mutex);
     if (!bo->u.real.ptr) {
-        pipe_mutex_unlock(bo->u.real.map_mutex);
+        mtx_unlock(&bo->u.real.map_mutex);
         return; /* it's not been mapped */
     }
 
     assert(bo->u.real.map_count);
     if (--bo->u.real.map_count) {
-        pipe_mutex_unlock(bo->u.real.map_mutex);
+        mtx_unlock(&bo->u.real.map_mutex);
         return; /* it's been mapped multiple times */
     }
 
     os_munmap(bo->u.real.ptr, bo->base.size);
     bo->u.real.ptr = NULL;
 
     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram -= bo->base.size;
     else
        bo->rws->mapped_gtt -= bo->base.size;
     bo->rws->num_mapped_buffers--;
 
-    pipe_mutex_unlock(bo->u.real.map_mutex);
+    mtx_unlock(&bo->u.real.map_mutex);
 }
 
 static const struct pb_vtbl radeon_bo_vtbl = {
     radeon_bo_destroy_or_cache
     /* other functions are never called */
 };
 
 static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                           unsigned size, unsigned alignment,
                                           unsigned usage,
@@ -664,27 +664,27 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
             fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
             radeon_bo_destroy(&bo->base);
             return NULL;
         }
         mtx_lock(&rws->bo_handles_mutex);
         if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
             struct pb_buffer *b = &bo->base;
             struct radeon_bo *old_bo =
                 util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);
 
-            pipe_mutex_unlock(rws->bo_handles_mutex);
+            mtx_unlock(&rws->bo_handles_mutex);
             pb_reference(&b, &old_bo->base);
             return radeon_bo(b);
         }
 
         util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
-        pipe_mutex_unlock(rws->bo_handles_mutex);
+        mtx_unlock(&rws->bo_handles_mutex);
     }
 
     if (initial_domains & RADEON_DOMAIN_VRAM)
         rws->allocated_vram += align(size, rws->info.gart_page_size);
     else if (initial_domains & RADEON_DOMAIN_GTT)
         rws->allocated_gtt += align(size, rws->info.gart_page_size);
 
     return bo;
 }
 
@@ -1025,21 +1025,21 @@ no_slab:
         bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
                               pb_cache_bucket);
         if (!bo)
             return NULL;
     }
 
     bo->u.real.use_reusable_pool = true;
 
     mtx_lock(&ws->bo_handles_mutex);
     util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
-    pipe_mutex_unlock(ws->bo_handles_mutex);
+    mtx_unlock(&ws->bo_handles_mutex);
 
     return &bo->base;
 }
 
 static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                    void *pointer, uint64_t size)
 {
     struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
     struct drm_radeon_gem_userptr args;
     struct radeon_bo *bo;
@@ -1073,21 +1073,21 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
     bo->base.vtbl = &radeon_bo_vtbl;
     bo->rws = ws;
     bo->user_ptr = pointer;
     bo->va = 0;
     bo->initial_domain = RADEON_DOMAIN_GTT;
     bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
     (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
 
     util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
 
-    pipe_mutex_unlock(ws->bo_handles_mutex);
+    mtx_unlock(&ws->bo_handles_mutex);
 
     if (ws->info.has_virtual_memory) {
         struct drm_radeon_gem_va va;
 
         bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
 
         va.handle = bo->handle;
         va.operation = RADEON_VA_MAP;
         va.vm_id = 0;
         va.offset = bo->va;
@@ -1100,27 +1100,27 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
             fprintf(stderr, "radeon: Failed to assign virtual address space\n");
             radeon_bo_destroy(&bo->base);
             return NULL;
         }
         mtx_lock(&ws->bo_handles_mutex);
         if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
             struct pb_buffer *b = &bo->base;
             struct radeon_bo *old_bo =
                 util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
 
-            pipe_mutex_unlock(ws->bo_handles_mutex);
+            mtx_unlock(&ws->bo_handles_mutex);
             pb_reference(&b, &old_bo->base);
             return b;
         }
 
         util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
-        pipe_mutex_unlock(ws->bo_handles_mutex);
+        mtx_unlock(&ws->bo_handles_mutex);
     }
 
     ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
 
     return (struct pb_buffer*)bo;
 }
 
 static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                       struct winsys_handle *whandle,
                                                       unsigned *stride,
@@ -1211,21 +1211,21 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
     bo->va = 0;
     bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
     (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
 
     if (bo->flink_name)
         util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
 
     util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
 
 done:
-    pipe_mutex_unlock(ws->bo_handles_mutex);
+    mtx_unlock(&ws->bo_handles_mutex);
 
     if (stride)
         *stride = whandle->stride;
     if (offset)
         *offset = whandle->offset;
 
     if (ws->info.has_virtual_memory && !bo->va) {
         struct drm_radeon_gem_va va;
 
         bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
@@ -1243,40 +1243,40 @@ done:
             fprintf(stderr, "radeon: Failed to assign virtual address space\n");
             radeon_bo_destroy(&bo->base);
             return NULL;
         }
         mtx_lock(&ws->bo_handles_mutex);
         if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
             struct pb_buffer *b = &bo->base;
             struct radeon_bo *old_bo =
                 util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
 
-            pipe_mutex_unlock(ws->bo_handles_mutex);
+            mtx_unlock(&ws->bo_handles_mutex);
             pb_reference(&b, &old_bo->base);
             return b;
         }
 
         util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
-        pipe_mutex_unlock(ws->bo_handles_mutex);
+        mtx_unlock(&ws->bo_handles_mutex);
     }
 
     bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
 
     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
     else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
 
     return (struct pb_buffer*)bo;
 
 fail:
-    pipe_mutex_unlock(ws->bo_handles_mutex);
+    mtx_unlock(&ws->bo_handles_mutex);
     return NULL;
 }
 
 static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                         unsigned stride, unsigned offset,
                                         unsigned slice_size,
                                         struct winsys_handle *whandle)
 {
     struct drm_gem_flink flink;
     struct radeon_bo *bo = radeon_bo(buffer);
@@ -1296,21 +1296,21 @@ static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
             flink.handle = bo->handle;
 
             if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                 return false;
             }
 
             bo->flink_name = flink.name;
 
             mtx_lock(&ws->bo_handles_mutex);
             util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
-            pipe_mutex_unlock(ws->bo_handles_mutex);
+            mtx_unlock(&ws->bo_handles_mutex);
         }
         whandle->handle = bo->flink_name;
     } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
         whandle->handle = bo->handle;
     } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
         if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
             return false;
     }
 
     whandle->stride = stride;
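
radeon_bo_do_map()/radeon_bo_unmap() above guard a reference-counted CPU mapping with a per-buffer map_mutex, and again every return path — already mapped, mmap failure, success — carries its own unlock. A small sketch of that idea, using malloc/free as stand-ins for the real os_mmap/os_munmap and a hypothetical struct buffer:

#include <threads.h>
#include <stdlib.h>

struct buffer {
   mtx_t map_mutex;
   void *ptr;              /* CPU mapping, NULL when unmapped */
   unsigned map_count;
   size_t size;
};

/* Map once, hand the same pointer to every later caller. */
static void *buffer_map(struct buffer *bo)
{
   mtx_lock(&bo->map_mutex);

   if (bo->ptr) {
      bo->map_count++;
      mtx_unlock(&bo->map_mutex);
      return bo->ptr;
   }

   bo->ptr = malloc(bo->size);          /* stand-in for the real mmap */
   if (!bo->ptr) {
      mtx_unlock(&bo->map_mutex);       /* the error path unlocks as well */
      return NULL;
   }
   bo->map_count = 1;

   mtx_unlock(&bo->map_mutex);
   return bo->ptr;
}

static void buffer_unmap(struct buffer *bo)
{
   mtx_lock(&bo->map_mutex);

   if (!bo->ptr || --bo->map_count) {
      /* Not mapped at all, or still mapped by another user. */
      mtx_unlock(&bo->map_mutex);
      return;
   }

   free(bo->ptr);                       /* stand-in for the real munmap */
   bo->ptr = NULL;

   mtx_unlock(&bo->map_mutex);
}

int main(void)
{
   struct buffer bo = { .size = 64 };

   if (mtx_init(&bo.map_mutex, mtx_plain) != thrd_success)
      return 1;

   void *a = buffer_map(&bo);
   void *b = buffer_map(&bo);
   buffer_unmap(&bo);
   buffer_unmap(&bo);

   mtx_destroy(&bo.map_mutex);
   return (a && a == b && bo.ptr == NULL) ? 0 : 1;
}
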
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index 3f615f8..d431bfc 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -595,21 +595,21 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
 
         if (pfence)
             radeon_fence_reference(pfence, fence);
 
         mtx_lock(&cs->ws->bo_fence_lock);
         for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
             struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
             p_atomic_inc(&bo->num_active_ioctls);
             radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
         }
-        pipe_mutex_unlock(cs->ws->bo_fence_lock);
+        mtx_unlock(&cs->ws->bo_fence_lock);
 
         radeon_fence_reference(&fence, NULL);
     } else {
         radeon_fence_reference(&cs->next_fence, NULL);
     }
 
     radeon_drm_cs_sync_flush(rcs);
 
     /* Swap command streams. */
     tmp = cs->csc;
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 562d15e..2e7bfe9 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -64,51 +64,51 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
     struct drm_radeon_info info;
     unsigned value = enable ? 1 : 0;
 
     memset(&info, 0, sizeof(info));
 
     mtx_lock(&*mutex);
 
     /* Early exit if we are sure the request will fail. */
     if (enable) {
         if (*owner) {
-            pipe_mutex_unlock(*mutex);
+            mtx_unlock(&*mutex);
             return false;
         }
     } else {
         if (*owner != applier) {
-            pipe_mutex_unlock(*mutex);
+            mtx_unlock(&*mutex);
             return false;
         }
     }
 
     /* Pass through the request to the kernel. */
     info.value = (unsigned long)&value;
     info.request = request;
     if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
                             &info, sizeof(info)) != 0) {
-        pipe_mutex_unlock(*mutex);
+        mtx_unlock(&*mutex);
         return false;
     }
 
     /* Update the rights in the winsys. */
     if (enable) {
         if (value) {
             *owner = applier;
-            pipe_mutex_unlock(*mutex);
+            mtx_unlock(&*mutex);
             return true;
         }
     } else {
         *owner = NULL;
     }
 
-    pipe_mutex_unlock(*mutex);
+    mtx_unlock(&*mutex);
     return false;
 }
 
 static bool radeon_get_drm_value(int fd, unsigned request,
                                  const char *errname, uint32_t *out)
 {
     struct drm_radeon_info info;
     int retval;
 
     memset(&info, 0, sizeof(info));
@@ -708,21 +708,21 @@ static bool radeon_winsys_unref(struct radeon_winsys *ws)
     /* When the reference counter drops to zero, remove the fd from the table.
      * This must happen while the mutex is locked, so that
      * radeon_drm_winsys_create in another thread doesn't get the winsys
      * from the table when the counter drops to 0. */
     mtx_lock(&fd_tab_mutex);
 
     destroy = pipe_reference(&rws->reference, NULL);
     if (destroy && fd_tab)
         util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
 
-    pipe_mutex_unlock(fd_tab_mutex);
+    mtx_unlock(&fd_tab_mutex);
     return destroy;
 }
 
 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
 
 static unsigned handle_hash(void *key)
 {
     return PTR_TO_UINT(key);
 }
 
@@ -737,27 +737,27 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
     struct radeon_drm_winsys *ws;
 
     mtx_lock(&fd_tab_mutex);
     if (!fd_tab) {
         fd_tab = util_hash_table_create(hash_fd, compare_fd);
     }
 
     ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
     if (ws) {
         pipe_reference(NULL, &ws->reference);
-        pipe_mutex_unlock(fd_tab_mutex);
+        mtx_unlock(&fd_tab_mutex);
         return &ws->base;
     }
 
     ws = CALLOC_STRUCT(radeon_drm_winsys);
     if (!ws) {
-        pipe_mutex_unlock(fd_tab_mutex);
+        mtx_unlock(&fd_tab_mutex);
         return NULL;
     }
 
     ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
 
     if (!do_winsys_init(ws))
         goto fail1;
 
     pb_cache_init(&ws->bo_cache, 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                   MIN2(ws->info.vram_size, ws->info.gart_size),
@@ -823,38 +823,38 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
         util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1);
 
     /* Create the screen at the end. The winsys must be initialized
      * completely.
      *
      * Alternatively, we could create the screen based on "ws->gen"
      * and link all drivers into one binary blob. */
     ws->base.screen = screen_create(&ws->base);
     if (!ws->base.screen) {
         radeon_winsys_destroy(&ws->base);
-        pipe_mutex_unlock(fd_tab_mutex);
+        mtx_unlock(&fd_tab_mutex);
         return NULL;
     }
 
     util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);
 
     /* We must unlock the mutex once the winsys is fully initialized, so that
      * other threads attempting to create the winsys from the same fd will
      * get a fully initialized winsys and not just half-way initialized. */
-    pipe_mutex_unlock(fd_tab_mutex);
+    mtx_unlock(&fd_tab_mutex);
 
     return &ws->base;
 
 fail_slab:
     if (ws->info.has_virtual_memory)
         pb_slabs_deinit(&ws->bo_slabs);
 fail_cache:
     pb_cache_deinit(&ws->bo_cache);
 fail1:
-    pipe_mutex_unlock(fd_tab_mutex);
+    mtx_unlock(&fd_tab_mutex);
     if (ws->surf_man)
         radeon_surface_manager_free(ws->surf_man);
     if (ws->fd >= 0)
         close(ws->fd);
 
     FREE(ws);
     return NULL;
 }
diff --git a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
index 85d2afc..f7211c2 100644
--- a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
+++ b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
@@ -304,21 +304,21 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
    assert(pipe_is_referenced(&fenced_buf->base.reference));
    assert(fenced_buf->fence);
 
    if(fenced_buf->fence) {
       struct pipe_fence_handle *fence = NULL;
       int finished;
       boolean proceed;
 
       ops->fence_reference(ops, &fence, fenced_buf->fence);
 
-      pipe_mutex_unlock(fenced_mgr->mutex);
+      mtx_unlock(&fenced_mgr->mutex);
 
       finished = ops->fence_finish(ops, fenced_buf->fence, 0);
 
       mtx_lock(&fenced_mgr->mutex);
 
       assert(pipe_is_referenced(&fenced_buf->base.reference));
 
       /*
        * Only proceed if the fence object didn't change in the meanwhile.
        * Otherwise assume the work has been already carried out by another
@@ -505,21 +505,21 @@ fenced_buffer_destroy(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
 
    assert(!pipe_is_referenced(&fenced_buf->base.reference));
 
    mtx_lock(&fenced_mgr->mutex);
 
    fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static void *
 fenced_buffer_map(struct pb_buffer *buf,
                   unsigned flags, void *flush_ctx)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
    struct pb_fence_ops *ops = fenced_mgr->ops;
@@ -557,21 +557,21 @@ fenced_buffer_map(struct pb_buffer *buf,
    }
 
    map = pb_map(fenced_buf->buffer, flags, flush_ctx);
 
    if(map) {
       ++fenced_buf->mapcount;
       fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
    }
 
 done:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    return map;
 }
 
 
 static void
 fenced_buffer_unmap(struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -580,21 +580,21 @@ fenced_buffer_unmap(struct pb_buffer *buf)
 
    assert(fenced_buf->mapcount);
    if(fenced_buf->mapcount) {
       if (fenced_buf->buffer)
          pb_unmap(fenced_buf->buffer);
       --fenced_buf->mapcount;
       if(!fenced_buf->mapcount)
          fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static enum pipe_error
 fenced_buffer_validate(struct pb_buffer *buf,
                        struct pb_validate *vl,
                        unsigned flags)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -628,21 +628,21 @@ fenced_buffer_validate(struct pb_buffer *buf,
    }
 
    ret = pb_validate(fenced_buf->buffer, vl, flags);
    if (ret != PIPE_OK)
       goto done;
 
    fenced_buf->vl = vl;
    fenced_buf->validation_flags |= flags;
 
 done:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    return ret;
 }
 
 
 static void
 fenced_buffer_fence(struct pb_buffer *buf,
                     struct pipe_fence_handle *fence)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
@@ -669,21 +669,21 @@ fenced_buffer_fence(struct pb_buffer *buf,
          fenced_buf->flags |= fenced_buf->validation_flags;
          fenced_buffer_add_locked(fenced_mgr, fenced_buf);
       }
 
       pb_fence(fenced_buf->buffer, fence);
 
       fenced_buf->vl = NULL;
       fenced_buf->validation_flags = 0;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static void
 fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -692,21 +692,21 @@ fenced_buffer_get_base_buffer(struct pb_buffer *buf,
 
    assert(fenced_buf->buffer);
 
    if(fenced_buf->buffer)
       pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
    else {
       *base_buf = buf;
       *offset = 0;
    }
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 }
 
 
 static const struct pb_vtbl
 fenced_buffer_vtbl = {
       fenced_buffer_destroy,
       fenced_buffer_map,
       fenced_buffer_unmap,
       fenced_buffer_validate,
       fenced_buffer_fence,
@@ -751,71 +751,71 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
     * Give up.
     */
    if(ret != PIPE_OK) {
       goto no_storage;
    }
 
    assert(fenced_buf->buffer);
 
    LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
    ++fenced_mgr->num_unfenced;
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    return &fenced_buf->base;
 
 no_storage:
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
    FREE(fenced_buf);
 no_buffer:
    return NULL;
 }
 
 
 static void
 fenced_bufmgr_flush(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
 
    mtx_lock(&fenced_mgr->mutex);
    while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
       ;
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
 
    assert(fenced_mgr->provider->flush);
    if(fenced_mgr->provider->flush)
       fenced_mgr->provider->flush(fenced_mgr->provider);
 }
 
 
 static void
 fenced_bufmgr_destroy(struct pb_manager *mgr)
 {
    struct fenced_manager *fenced_mgr = fenced_manager(mgr);
 
    mtx_lock(&fenced_mgr->mutex);
 
    /* Wait on outstanding fences */
    while (fenced_mgr->num_fenced) {
-      pipe_mutex_unlock(fenced_mgr->mutex);
+      mtx_unlock(&fenced_mgr->mutex);
 #if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
       sched_yield();
 #endif
       mtx_lock(&fenced_mgr->mutex);
       while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
          ;
    }
 
 #ifdef DEBUG
    /*assert(!fenced_mgr->num_unfenced);*/
 #endif
 
-   pipe_mutex_unlock(fenced_mgr->mutex);
+   mtx_unlock(&fenced_mgr->mutex);
    mtx_destroy(&fenced_mgr->mutex);
 
    FREE(fenced_mgr);
 }
 
 
 struct pb_manager *
 simple_fenced_bufmgr_create(struct pb_manager *provider,
                             struct pb_fence_ops *ops)
 {
diff --git a/src/gallium/winsys/svga/drm/vmw_context.c b/src/gallium/winsys/svga/drm/vmw_context.c
index a5dd66f..002994e 100644
--- a/src/gallium/winsys/svga/drm/vmw_context.c
+++ b/src/gallium/winsys/svga/drm/vmw_context.c
@@ -526,21 +526,21 @@ vmw_swc_surface_relocation(struct svga_winsys_context *swc,
 
       /*
        * Make sure backup buffer ends up fenced.
        */
 
       mtx_lock(&vsurf->mutex);
       assert(vsurf->buf != NULL);
       
       vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                              vsurf->buf, 0, flags);
-      pipe_mutex_unlock(vsurf->mutex);
+      mtx_unlock(&vsurf->mutex);
    }
 }
 
 static void
 vmw_swc_shader_relocation(struct svga_winsys_context *swc,
 			  uint32 *shid,
 			  uint32 *mobid,
 			  uint32 *offset,
 			  struct svga_winsys_gb_shader *shader,
                           unsigned flags)
diff --git a/src/gallium/winsys/svga/drm/vmw_fence.c b/src/gallium/winsys/svga/drm/vmw_fence.c
index 23713fc..edf205e 100644
--- a/src/gallium/winsys/svga/drm/vmw_fence.c
+++ b/src/gallium/winsys/svga/drm/vmw_fence.c
@@ -97,21 +97,21 @@ vmw_fence_ops(struct pb_fence_ops *ops)
  *
  */
 static void
 vmw_fences_release(struct vmw_fence_ops *ops)
 {
    struct vmw_fence *fence, *n;
 
    mtx_lock(&ops->mutex);
    LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
       LIST_DELINIT(&fence->ops_list);
-   pipe_mutex_unlock(ops->mutex);
+   mtx_unlock(&ops->mutex);
 }
 
 /**
  * vmw_fences_signal - Traverse the not_signaled list and try to
  * signal unsignaled fences.
  *
  * @ops: Pointer to a struct pb_fence_ops.
  * @signaled: Seqno that has signaled.
  * @emitted: Last seqno emitted by the kernel.
  * @has_emitted: Whether we provide the emitted value.
@@ -145,21 +145,21 @@ vmw_fences_signal(struct pb_fence_ops *fence_ops,
       if (!vmw_fence_seq_is_signaled(fence->seqno, signaled, emitted))
          break;
 
       p_atomic_set(&fence->signalled, 1);
       LIST_DELINIT(&fence->ops_list);
    }
    ops->last_signaled = signaled;
    ops->last_emitted = emitted;
 
 out_unlock:
-   pipe_mutex_unlock(ops->mutex);
+   mtx_unlock(&ops->mutex);
 }
 
 
 /**
  * vmw_fence - return the vmw_fence object identified by a
  * struct pipe_fence_handle *
  *
  * @fence: The opaque pipe fence handle.
  */
 static inline struct vmw_fence *
@@ -196,21 +196,21 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
    mtx_lock(&ops->mutex);
 
    if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
       p_atomic_set(&fence->signalled, 1);
       LIST_INITHEAD(&fence->ops_list);
    } else {
       p_atomic_set(&fence->signalled, 0);
       LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
    }
 
-   pipe_mutex_unlock(ops->mutex);
+   mtx_unlock(&ops->mutex);
 
    return (struct pipe_fence_handle *) fence;
 }
 
 
 /**
  * vmw_fence_reference - Reference / unreference a vmw fence object.
  *
  * @vws: Pointer to the winsys screen.
  * @ptr: Pointer to reference transfer destination.
@@ -224,21 +224,21 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
    if (*ptr) {
       struct vmw_fence *vfence = vmw_fence(*ptr);
 
       if (p_atomic_dec_zero(&vfence->refcount)) {
          struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);
 
 	 vmw_ioctl_fence_unref(vws, vfence->handle);
 
          mtx_lock(&ops->mutex);
          LIST_DELINIT(&vfence->ops_list);
-         pipe_mutex_unlock(ops->mutex);
+         mtx_unlock(&ops->mutex);
 
 	 FREE(vfence);
       }
    }
 
    if (fence) {
       struct vmw_fence *vfence = vmw_fence(fence);
 
       p_atomic_inc(&vfence->refcount);
    }
diff --git a/src/gallium/winsys/svga/drm/vmw_surface.c b/src/gallium/winsys/svga/drm/vmw_surface.c
index 460949d..69408ff 100644
--- a/src/gallium/winsys/svga/drm/vmw_surface.c
+++ b/src/gallium/winsys/svga/drm/vmw_surface.c
@@ -147,40 +147,40 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
    pb_flags |= (flags & PIPE_TRANSFER_DONTBLOCK);
    data = vmw_svga_winsys_buffer_map(&vws->base, vsrf->buf, pb_flags);
    if (data == NULL)
       goto out_unlock;
 
 out_mapped:
    ++vsrf->mapcount;
    vsrf->data = data;
    vsrf->map_mode = flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE);
 out_unlock:
-   pipe_mutex_unlock(vsrf->mutex);
+   mtx_unlock(&vsrf->mutex);
    return data;
 }
 
 
 void
 vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
                               struct svga_winsys_surface *srf,
                               boolean *rebind)
 {
    struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
    mtx_lock(&vsrf->mutex);
    if (--vsrf->mapcount == 0) {
       *rebind = vsrf->rebind;
       vsrf->rebind = FALSE;
       vmw_svga_winsys_buffer_unmap(&vsrf->screen->base, vsrf->buf);
    } else {
       *rebind = FALSE;
    }
-   pipe_mutex_unlock(vsrf->mutex);
+   mtx_unlock(&vsrf->mutex);
 }
 
 void
 vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
                                   struct vmw_svga_winsys_surface *src)
 {
    struct pipe_reference *src_ref;
    struct pipe_reference *dst_ref;
    struct vmw_svga_winsys_surface *dst;
 
diff --git a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
index 3986305..36c7512 100644
--- a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
+++ b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
@@ -50,28 +50,28 @@ static inline boolean can_cache_resource(struct virgl_hw_res *res)
 
 static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                  struct virgl_hw_res *res)
 {
       struct drm_gem_close args;
 
       if (res->flinked) {
          mtx_lock(&qdws->bo_handles_mutex);
          util_hash_table_remove(qdws->bo_names,
                                 (void *)(uintptr_t)res->flink);
-         pipe_mutex_unlock(qdws->bo_handles_mutex);
+         mtx_unlock(&qdws->bo_handles_mutex);
       }
 
       if (res->bo_handle) {
          mtx_lock(&qdws->bo_handles_mutex);
          util_hash_table_remove(qdws->bo_handles,
                                 (void *)(uintptr_t)res->bo_handle);
-         pipe_mutex_unlock(qdws->bo_handles_mutex);
+         mtx_unlock(&qdws->bo_handles_mutex);
       }
 
       if (res->ptr)
          os_munmap(res->ptr, res->size);
 
       memset(&args, 0, sizeof(args));
       args.handle = res->bo_handle;
       drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
       FREE(res);
 }
@@ -102,21 +102,21 @@ virgl_cache_flush(struct virgl_drm_winsys *qdws)
    curr = qdws->delayed.next;
    next = curr->next;
 
    while (curr != &qdws->delayed) {
       res = LIST_ENTRY(struct virgl_hw_res, curr, head);
       LIST_DEL(&res->head);
       virgl_hw_res_destroy(qdws, res);
       curr = next;
       next = curr->next;
    }
-   pipe_mutex_unlock(qdws->mutex);
+   mtx_unlock(&qdws->mutex);
 }
 static void
 virgl_drm_winsys_destroy(struct virgl_winsys *qws)
 {
    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
 
    virgl_cache_flush(qdws);
 
    util_hash_table_destroy(qdws->bo_handles);
    util_hash_table_destroy(qdws->bo_names);
@@ -158,21 +158,21 @@ static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
       if (!can_cache_resource(old)) {
          virgl_hw_res_destroy(qdws, old);
       } else {
          mtx_lock(&qdws->mutex);
          virgl_cache_list_check_free(qdws);
 
          old->start = os_time_get();
          old->end = old->start + qdws->usecs;
          LIST_ADDTAIL(&old->head, &qdws->delayed);
          qdws->num_delayed++;
-         pipe_mutex_unlock(qdws->mutex);
+         mtx_unlock(&qdws->mutex);
       }
    }
    *dres = sres;
 }
 
 static struct virgl_hw_res *
 virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                  enum pipe_texture_target target,
                                  uint32_t format,
                                  uint32_t bind,
@@ -346,26 +346,26 @@ virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
          if (ret == -1)
             break;
          curr = next;
          next = curr->next;
       }
    }
 
    if (res) {
       LIST_DEL(&res->head);
       --qdws->num_delayed;
-      pipe_mutex_unlock(qdws->mutex);
+      mtx_unlock(&qdws->mutex);
       pipe_reference_init(&res->reference, 1);
       return res;
    }
 
-   pipe_mutex_unlock(qdws->mutex);
+   mtx_unlock(&qdws->mutex);
 
 alloc:
    res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                            width, height, depth, array_size,
                                            last_level, nr_samples, size);
    if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
        bind == VIRGL_BIND_VERTEX_BUFFER)
       res->cacheable = TRUE;
    return res;
 }
@@ -446,21 +446,21 @@ virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
    res->res_handle = info_arg.res_handle;
 
    res->size = info_arg.size;
    res->stride = info_arg.stride;
    pipe_reference_init(&res->reference, 1);
    res->num_cs_references = 0;
 
    util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
 
 done:
-   pipe_mutex_unlock(qdws->bo_handles_mutex);
+   mtx_unlock(&qdws->bo_handles_mutex);
    return res;
 }
 
 static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                     struct virgl_hw_res *res,
                                                     uint32_t stride,
                                                     struct winsys_handle *whandle)
  {
    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    struct drm_gem_flink flink;
@@ -474,31 +474,31 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
          flink.handle = res->bo_handle;
 
          if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
             return FALSE;
          }
          res->flinked = TRUE;
          res->flink = flink.name;
 
          mtx_lock(&qdws->bo_handles_mutex);
          util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
-         pipe_mutex_unlock(qdws->bo_handles_mutex);
+         mtx_unlock(&qdws->bo_handles_mutex);
       }
       whandle->handle = res->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
       whandle->handle = res->bo_handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
       if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
             return FALSE;
       mtx_lock(&qdws->bo_handles_mutex);
       util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
-      pipe_mutex_unlock(qdws->bo_handles_mutex);
+      mtx_unlock(&qdws->bo_handles_mutex);
    }
    whandle->stride = stride;
    return TRUE;
 }
 
 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
                                             struct virgl_hw_res *hres)
 {
    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
 
@@ -813,21 +813,21 @@ virgl_drm_screen_destroy(struct pipe_screen *pscreen)
 {
    struct virgl_screen *screen = virgl_screen(pscreen);
    boolean destroy;
 
    mtx_lock(&virgl_screen_mutex);
    destroy = --screen->refcnt == 0;
    if (destroy) {
       int fd = virgl_drm_winsys(screen->vws)->fd;
       util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
    }
-   pipe_mutex_unlock(virgl_screen_mutex);
+   mtx_unlock(&virgl_screen_mutex);
 
    if (destroy) {
       pscreen->destroy = screen->winsys_priv;
       pscreen->destroy(pscreen);
    }
 }
 
 static unsigned hash_fd(void *key)
 {
    int fd = pointer_to_intptr(key);
@@ -878,13 +878,13 @@ virgl_drm_screen_create(int fd)
          /* Bit of a hack, to avoid circular linkage dependency,
           * ie. pipe driver having to call in to winsys, we
           * override the pipe drivers screen->destroy():
           */
          virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
          pscreen->destroy = virgl_drm_screen_destroy;
       }
    }
 
 unlock:
-   pipe_mutex_unlock(virgl_screen_mutex);
+   mtx_unlock(&virgl_screen_mutex);
    return pscreen;
 }
diff --git a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
index 70bd6af..404ba58 100644
--- a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
+++ b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
@@ -148,21 +148,21 @@ virgl_cache_flush(struct virgl_vtest_winsys *vtws)
    curr = vtws->delayed.next;
    next = curr->next;
 
    while (curr != &vtws->delayed) {
       res = LIST_ENTRY(struct virgl_hw_res, curr, head);
       LIST_DEL(&res->head);
       virgl_hw_res_destroy(vtws, res);
       curr = next;
       next = curr->next;
    }
-   pipe_mutex_unlock(vtws->mutex);
+   mtx_unlock(&vtws->mutex);
 }
 
 static void
 virgl_cache_list_check_free(struct virgl_vtest_winsys *vtws)
 {
    struct list_head *curr, *next;
    struct virgl_hw_res *res;
    int64_t now;
 
    now = os_time_get();
@@ -189,21 +189,21 @@ static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
       if (!can_cache_resource(old)) {
          virgl_hw_res_destroy(vtws, old);
       } else {
          mtx_lock(&vtws->mutex);
          virgl_cache_list_check_free(vtws);
 
          old->start = os_time_get();
          old->end = old->start + vtws->usecs;
          LIST_ADDTAIL(&old->head, &vtws->delayed);
          vtws->num_delayed++;
-         pipe_mutex_unlock(vtws->mutex);
+         mtx_unlock(&vtws->mutex);
       }
    }
    *dres = sres;
 }
 
 static struct virgl_hw_res *
 virgl_vtest_winsys_resource_create(struct virgl_winsys *vws,
                                    enum pipe_texture_target target,
                                    uint32_t format,
                                    uint32_t bind,
@@ -369,26 +369,26 @@ virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
          if (ret == -1)
             break;
          curr = next;
          next = curr->next;
       }
    }
 
    if (res) {
       LIST_DEL(&res->head);
       --vtws->num_delayed;
-      pipe_mutex_unlock(vtws->mutex);
+      mtx_unlock(&vtws->mutex);
       pipe_reference_init(&res->reference, 1);
       return res;
    }
 
-   pipe_mutex_unlock(vtws->mutex);
+   mtx_unlock(&vtws->mutex);
 
 alloc:
    res = virgl_vtest_winsys_resource_create(vws, target, format, bind,
                                             width, height, depth, array_size,
                                             last_level, nr_samples, size);
    if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
        bind == VIRGL_BIND_VERTEX_BUFFER)
       res->cacheable = TRUE;
    return res;
 }
-- 
2.9.3


