[Mesa-dev] [PATCH 5/7] gallium/util: replace pipe_mutex_lock() with mtx_lock()
Timothy Arceri
tarceri at itsqueeze.com
Sun Mar 5 01:32:05 UTC 2017
pipe_mutex_lock() was made unnecessary with fd33a6bcd7f12.
Replaced using:
find ./src -type f -exec sed -i -- \
's:pipe_mutex_lock(\([^)]*\)):mtx_lock(\&\1):g' {} \;
---
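For reviewers, a hypothetical call site (not from the tree) before and after
the substitution. pipe_mutex_lock() took the mutex by value and expanded to
(void) mtx_lock(&(mutex)), so the pattern has to introduce the explicit
address-of at each call site:

   /* before: the wrapper macro hid the address-of operator */
   pipe_mutex_lock(mgr->mutex);

   /* after: a direct call into the C11 mtx_t API; the '&' is now explicit */
   mtx_lock(&mgr->mutex);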
src/gallium/auxiliary/hud/hud_cpufreq.c | 2 +-
src/gallium/auxiliary/hud/hud_diskstat.c | 2 +-
src/gallium/auxiliary/hud/hud_nic.c | 2 +-
src/gallium/auxiliary/hud/hud_sensors_temp.c | 2 +-
src/gallium/auxiliary/os/os_thread.h | 9 +-
.../auxiliary/pipebuffer/pb_buffer_fenced.c | 22 +-
src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c | 14 +-
src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c | 6 +-
src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c | 8 +-
src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c | 4 +-
src/gallium/auxiliary/pipebuffer/pb_cache.c | 6 +-
src/gallium/auxiliary/pipebuffer/pb_slab.c | 8 +-
src/gallium/auxiliary/rtasm/rtasm_execmem.c | 4 +-
src/gallium/auxiliary/util/u_debug_flush.c | 12 +-
src/gallium/auxiliary/util/u_debug_memory.c | 6 +-
src/gallium/auxiliary/util/u_debug_refcnt.c | 4 +-
src/gallium/auxiliary/util/u_debug_symbol.c | 2 +-
src/gallium/auxiliary/util/u_queue.c | 18 +-
src/gallium/auxiliary/util/u_range.h | 2 +-
src/gallium/auxiliary/util/u_ringbuffer.c | 4 +-
src/gallium/drivers/ddebug/dd_context.c | 2 +-
src/gallium/drivers/ddebug/dd_draw.c | 6 +-
src/gallium/drivers/freedreno/freedreno_batch.c | 8 +-
.../drivers/freedreno/freedreno_batch_cache.c | 14 +-
src/gallium/drivers/freedreno/freedreno_context.h | 2 +-
src/gallium/drivers/freedreno/freedreno_draw.c | 4 +-
src/gallium/drivers/freedreno/freedreno_resource.c | 2 +-
src/gallium/drivers/llvmpipe/lp_fence.c | 4 +-
src/gallium/drivers/llvmpipe/lp_scene.c | 2 +-
src/gallium/drivers/llvmpipe/lp_setup.c | 2 +-
src/gallium/drivers/nouveau/nv50/nv50_surface.c | 2 +-
src/gallium/drivers/nouveau/nvc0/nvc0_surface.c | 2 +-
src/gallium/drivers/r300/r300_blit.c | 2 +-
src/gallium/drivers/r300/r300_texture.c | 2 +-
src/gallium/drivers/radeon/r600_gpu_load.c | 2 +-
src/gallium/drivers/radeon/r600_pipe_common.c | 2 +-
src/gallium/drivers/radeon/r600_texture.c | 4 +-
src/gallium/drivers/radeonsi/si_shader.c | 2 +-
src/gallium/drivers/radeonsi/si_state_shaders.c | 6 +-
src/gallium/drivers/rbug/rbug_context.c | 124 +++---
src/gallium/drivers/rbug/rbug_core.c | 52 +--
src/gallium/drivers/rbug/rbug_screen.h | 4 +-
src/gallium/drivers/svga/svga_resource_buffer.c | 4 +-
.../drivers/svga/svga_resource_buffer_upload.c | 2 +-
src/gallium/drivers/svga/svga_sampler_view.c | 4 +-
src/gallium/drivers/svga/svga_screen_cache.c | 6 +-
src/gallium/drivers/trace/tr_dump.c | 10 +-
src/gallium/drivers/vc4/vc4_bufmgr.c | 10 +-
src/gallium/drivers/vc4/vc4_bufmgr.h | 2 +-
src/gallium/state_trackers/dri/dri2.c | 2 +-
src/gallium/state_trackers/glx/xlib/xm_api.c | 4 +-
src/gallium/state_trackers/hgl/hgl.c | 2 +-
src/gallium/state_trackers/nine/nine_lock.c | 440 ++++++++++-----------
src/gallium/state_trackers/nine/nine_queue.c | 8 +-
src/gallium/state_trackers/nine/nine_state.c | 16 +-
src/gallium/state_trackers/omx/entrypoint.c | 4 +-
src/gallium/state_trackers/va/buffer.c | 18 +-
src/gallium/state_trackers/va/config.c | 8 +-
src/gallium/state_trackers/va/context.c | 6 +-
src/gallium/state_trackers/va/image.c | 10 +-
src/gallium/state_trackers/va/picture.c | 8 +-
src/gallium/state_trackers/va/subpicture.c | 10 +-
src/gallium/state_trackers/va/surface.c | 10 +-
src/gallium/state_trackers/vdpau/bitmap.c | 8 +-
src/gallium/state_trackers/vdpau/decode.c | 8 +-
src/gallium/state_trackers/vdpau/htab.c | 10 +-
src/gallium/state_trackers/vdpau/mixer.c | 12 +-
src/gallium/state_trackers/vdpau/output.c | 20 +-
src/gallium/state_trackers/vdpau/presentation.c | 16 +-
src/gallium/state_trackers/vdpau/query.c | 18 +-
src/gallium/state_trackers/vdpau/surface.c | 12 +-
.../targets/haiku-softpipe/GalliumContext.cpp | 2 +-
src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 10 +-
src/gallium/winsys/amdgpu/drm/amdgpu_cs.c | 4 +-
src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c | 4 +-
.../winsys/etnaviv/drm/etnaviv_drm_winsys.c | 4 +-
.../winsys/freedreno/drm/freedreno_drm_winsys.c | 4 +-
.../winsys/nouveau/drm/nouveau_drm_winsys.c | 4 +-
src/gallium/winsys/radeon/drm/radeon_drm_bo.c | 30 +-
src/gallium/winsys/radeon/drm/radeon_drm_cs.c | 2 +-
src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 6 +-
.../winsys/svga/drm/pb_buffer_simple_fenced.c | 22 +-
src/gallium/winsys/svga/drm/vmw_context.c | 2 +-
src/gallium/winsys/svga/drm/vmw_fence.c | 8 +-
src/gallium/winsys/svga/drm/vmw_surface.c | 4 +-
src/gallium/winsys/virgl/drm/virgl_drm_winsys.c | 20 +-
.../winsys/virgl/vtest/virgl_vtest_winsys.c | 6 +-
87 files changed, 602 insertions(+), 605 deletions(-)
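The calls being switched to are the C11 mutex entry points that os_thread.h
already builds on (Mesa carries a compatible c11/threads.h implementation).
A minimal, self-contained sketch of that API, using hypothetical names that
merely stand in for fields like fenced_mgr->mutex:

   #include <assert.h>
   #include <threads.h>

   static mtx_t list_mutex;   /* hypothetical example state */
   static int list_length;

   static void
   list_init(void)
   {
      /* mtx_init returns thrd_success on success */
      int ret = mtx_init(&list_mutex, mtx_plain);
      assert(ret == thrd_success);
   }

   static void
   list_grow(void)
   {
      mtx_lock(&list_mutex);   /* takes mtx_t *, hence the '&' added by the sed */
      list_length++;
      mtx_unlock(&list_mutex);
   }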
diff --git a/src/gallium/auxiliary/hud/hud_cpufreq.c b/src/gallium/auxiliary/hud/hud_cpufreq.c
index 41e5827..bc77e5a 100644
--- a/src/gallium/auxiliary/hud/hud_cpufreq.c
+++ b/src/gallium/auxiliary/hud/hud_cpufreq.c
@@ -182,21 +182,21 @@ add_object(const char *name, const char *fn, int objmode, int cpu_index)
*/
int
hud_get_num_cpufreq(bool displayhelp)
{
struct dirent *dp;
struct stat stat_buf;
char fn[128];
int cpu_index;
/* Return the number of CPU metrics we support. */
- pipe_mutex_lock(gcpufreq_mutex);
+ mtx_lock(&gcpufreq_mutex);
if (gcpufreq_count) {
pipe_mutex_unlock(gcpufreq_mutex);
return gcpufreq_count;
}
/* Scan /sys/devices.../cpu, for every object type we support, create
* and persist an object to represent its different metrics.
*/
list_inithead(&gcpufreq_list);
DIR *dir = opendir("/sys/devices/system/cpu");
diff --git a/src/gallium/auxiliary/hud/hud_diskstat.c b/src/gallium/auxiliary/hud/hud_diskstat.c
index fb64e3d..940758a 100644
--- a/src/gallium/auxiliary/hud/hud_diskstat.c
+++ b/src/gallium/auxiliary/hud/hud_diskstat.c
@@ -239,21 +239,21 @@ add_object(const char *basename, const char *name, int objmode)
* \return number of detected block I/O devices.
*/
int
hud_get_num_disks(bool displayhelp)
{
struct dirent *dp;
struct stat stat_buf;
char name[64];
/* Return the number of block devices and partitions. */
- pipe_mutex_lock(gdiskstat_mutex);
+ mtx_lock(&gdiskstat_mutex);
if (gdiskstat_count) {
pipe_mutex_unlock(gdiskstat_mutex);
return gdiskstat_count;
}
/* Scan /sys/block, for every object type we support, create and
* persist an object to represent its different statistics.
*/
list_inithead(&gdiskstat_list);
DIR *dir = opendir("/sys/block/");
diff --git a/src/gallium/auxiliary/hud/hud_nic.c b/src/gallium/auxiliary/hud/hud_nic.c
index 2fbeaa5..ab74436 100644
--- a/src/gallium/auxiliary/hud/hud_nic.c
+++ b/src/gallium/auxiliary/hud/hud_nic.c
@@ -324,21 +324,21 @@ query_nic_bitrate(struct nic_info *nic, const char *dirbase)
*/
int
hud_get_num_nics(bool displayhelp)
{
struct dirent *dp;
struct stat stat_buf;
struct nic_info *nic;
char name[64];
/* Return the number if network interfaces. */
- pipe_mutex_lock(gnic_mutex);
+ mtx_lock(&gnic_mutex);
if (gnic_count) {
pipe_mutex_unlock(gnic_mutex);
return gnic_count;
}
/* Scan /sys/block, for every object type we support, create and
* persist an object to represent its different statistics.
*/
list_inithead(&gnic_list);
DIR *dir = opendir("/sys/class/net/");
diff --git a/src/gallium/auxiliary/hud/hud_sensors_temp.c b/src/gallium/auxiliary/hud/hud_sensors_temp.c
index 4d723cc..06d2590 100644
--- a/src/gallium/auxiliary/hud/hud_sensors_temp.c
+++ b/src/gallium/auxiliary/hud/hud_sensors_temp.c
@@ -317,21 +317,21 @@ build_sensor_list(void)
/**
* Initialize internal object arrays and display lmsensors HUD help.
* \param displayhelp true if the list of detected devices should be
displayed on the console.
* \return number of detected lmsensor devices.
*/
int
hud_get_num_sensors(bool displayhelp)
{
/* Return the number of sensors detected. */
- pipe_mutex_lock(gsensor_temp_mutex);
+ mtx_lock(&gsensor_temp_mutex);
if (gsensors_temp_count) {
pipe_mutex_unlock(gsensor_temp_mutex);
return gsensors_temp_count;
}
int ret = sensors_init(NULL);
if (ret) {
pipe_mutex_unlock(gsensor_temp_mutex);
return 0;
}
diff --git a/src/gallium/auxiliary/os/os_thread.h b/src/gallium/auxiliary/os/os_thread.h
index 571e3c6..5b75965 100644
--- a/src/gallium/auxiliary/os/os_thread.h
+++ b/src/gallium/auxiliary/os/os_thread.h
@@ -101,23 +101,20 @@ static inline int pipe_thread_is_self( pipe_thread thread )
{
#if defined(HAVE_PTHREAD)
# if defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) && \
(__GLIBC__ >= 3 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
return pthread_equal(pthread_self(), thread);
# endif
#endif
return 0;
}
-#define pipe_mutex_lock(mutex) \
- (void) mtx_lock(&(mutex))
-
#define pipe_mutex_unlock(mutex) \
(void) mtx_unlock(&(mutex))
#define pipe_mutex_assert_locked(mutex) \
__pipe_mutex_assert_locked(&(mutex))
static inline void
__pipe_mutex_assert_locked(mtx_t *mutex)
{
#ifdef DEBUG
@@ -181,21 +178,21 @@ static inline void pipe_barrier_init(pipe_barrier *barrier, unsigned count)
static inline void pipe_barrier_destroy(pipe_barrier *barrier)
{
assert(barrier->waiters == 0);
mtx_destroy(&barrier->mutex);
cnd_destroy(&barrier->condvar);
}
static inline void pipe_barrier_wait(pipe_barrier *barrier)
{
- pipe_mutex_lock(barrier->mutex);
+ mtx_lock(&barrier->mutex);
assert(barrier->waiters < barrier->count);
barrier->waiters++;
if (barrier->waiters < barrier->count) {
uint64_t sequence = barrier->sequence;
do {
cnd_wait(&barrier->condvar, &barrier->mutex);
} while (sequence == barrier->sequence);
@@ -236,31 +233,31 @@ static inline void
pipe_semaphore_destroy(pipe_semaphore *sema)
{
mtx_destroy(&sema->mutex);
cnd_destroy(&sema->cond);
}
/** Signal/increment semaphore counter */
static inline void
pipe_semaphore_signal(pipe_semaphore *sema)
{
- pipe_mutex_lock(sema->mutex);
+ mtx_lock(&sema->mutex);
sema->counter++;
cnd_signal(&sema->cond);
pipe_mutex_unlock(sema->mutex);
}
/** Wait for semaphore counter to be greater than zero */
static inline void
pipe_semaphore_wait(pipe_semaphore *sema)
{
- pipe_mutex_lock(sema->mutex);
+ mtx_lock(&sema->mutex);
while (sema->counter <= 0) {
cnd_wait(&sema->cond, &sema->mutex);
}
sema->counter--;
pipe_mutex_unlock(sema->mutex);
}
/*
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
index b3b7828..b8b4483 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -345,21 +345,21 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
struct pipe_fence_handle *fence = NULL;
int finished;
boolean proceed;
ops->fence_reference(ops, &fence, fenced_buf->fence);
pipe_mutex_unlock(fenced_mgr->mutex);
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
/* Only proceed if the fence object didn't change in the meanwhile.
* Otherwise assume the work has been already carried out by another
* thread that re-aquired the lock before us.
*/
proceed = fence == fenced_buf->fence ? TRUE : FALSE;
ops->fence_reference(ops, &fence, NULL);
@@ -645,38 +645,38 @@ fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
assert(!pipe_is_referenced(&fenced_buf->base.reference));
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
pipe_mutex_unlock(fenced_mgr->mutex);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
unsigned flags, void *flush_ctx)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
/* Serialize writes. */
while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
((fenced_buf->flags & PB_USAGE_GPU_READ) &&
(flags & PB_USAGE_CPU_WRITE))) {
/* Don't wait for the GPU to finish accessing it,
* if blocking is forbidden.
@@ -714,21 +714,21 @@ fenced_buffer_map(struct pb_buffer *buf,
return map;
}
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if (fenced_buf->mapcount) {
if (fenced_buf->buffer)
pb_unmap(fenced_buf->buffer);
--fenced_buf->mapcount;
if (!fenced_buf->mapcount)
fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
}
@@ -738,21 +738,21 @@ fenced_buffer_unmap(struct pb_buffer *buf)
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
unsigned flags)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
if (!vl) {
/* Invalidate. */
fenced_buf->vl = NULL;
fenced_buf->validation_flags = 0;
ret = PIPE_OK;
goto done;
}
assert(flags & PB_USAGE_GPU_READ_WRITE);
@@ -809,21 +809,21 @@ fenced_buffer_validate(struct pb_buffer *buf,
static void
fenced_buffer_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
if (fence != fenced_buf->fence) {
assert(fenced_buf->vl);
assert(fenced_buf->validation_flags);
if (fenced_buf->fence) {
MAYBE_UNUSED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
@@ -846,21 +846,21 @@ fenced_buffer_fence(struct pb_buffer *buf,
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* This should only be called when the buffer is validated. Typically
* when processing relocations.
*/
assert(fenced_buf->vl);
assert(fenced_buf->buffer);
if (fenced_buf->buffer) {
pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
} else {
@@ -910,21 +910,21 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
pipe_reference_init(&fenced_buf->base.reference, 1);
fenced_buf->base.alignment = desc->alignment;
fenced_buf->base.usage = desc->usage;
fenced_buf->base.size = size;
fenced_buf->size = size;
fenced_buf->desc = *desc;
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Try to create GPU storage without stalling. */
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);
/* Attempt to use CPU memory to avoid stalling the GPU. */
if (ret != PIPE_OK) {
ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
}
/* Create GPU storage, waiting for some to be available. */
@@ -951,45 +951,45 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
no_buffer:
return NULL;
}
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
assert(fenced_mgr->provider->flush);
if (fenced_mgr->provider->flush)
fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences. */
while (fenced_mgr->num_fenced) {
pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
#ifdef DEBUG
/* assert(!fenced_mgr->num_unfenced); */
#endif
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_destroy(&fenced_mgr->mutex);
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
index 33f068e..717ab9e 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -229,21 +229,21 @@ pb_debug_buffer_check(struct pb_debug_buffer *buf)
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
struct pb_debug_manager *mgr = buf->mgr;
assert(!pipe_is_referenced(&buf->base.reference));
pb_debug_buffer_check(buf);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
LIST_DEL(&buf->head);
pipe_mutex_unlock(mgr->mutex);
mtx_destroy(&buf->mutex);
pb_reference(&buf->buffer, NULL);
FREE(buf);
}
@@ -253,35 +253,35 @@ pb_debug_buffer_map(struct pb_buffer *_buf,
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
void *map;
pb_debug_buffer_check(buf);
map = pb_map(buf->buffer, flags, flush_ctx);
if (!map)
return NULL;
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
++buf->map_count;
debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
pipe_mutex_unlock(buf->mutex);
return (uint8_t *)map + buf->underflow_size;
}
static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
assert(buf->map_count);
if(buf->map_count)
--buf->map_count;
pipe_mutex_unlock(buf->mutex);
pb_unmap(buf->buffer);
pb_debug_buffer_check(buf);
}
@@ -297,21 +297,21 @@ pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
}
static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
- pipe_mutex_lock(buf->mutex);
+ mtx_lock(&buf->mutex);
if(buf->map_count) {
debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
debug_printf("last map backtrace is\n");
debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
}
pipe_mutex_unlock(buf->mutex);
pb_debug_buffer_check(buf);
return pb_validate(buf->buffer, vl, flags);
@@ -381,21 +381,21 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
real_desc = *desc;
real_desc.usage |= PB_USAGE_CPU_WRITE;
real_desc.usage |= PB_USAGE_CPU_READ;
buf->buffer = mgr->provider->create_buffer(mgr->provider,
real_size,
&real_desc);
if(!buf->buffer) {
FREE(buf);
#if 0
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
if(!LIST_IS_EMPTY(&mgr->list))
pb_debug_manager_dump_locked(mgr);
pipe_mutex_unlock(mgr->mutex);
#endif
return NULL;
}
assert(pipe_is_referenced(&buf->buffer->reference));
assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
@@ -412,21 +412,21 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
buf->underflow_size = mgr->underflow_size;
buf->overflow_size = buf->buffer->size - buf->underflow_size - size;
debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);
pb_debug_buffer_fill(buf);
(void) mtx_init(&buf->mutex, mtx_plain);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
LIST_ADDTAIL(&buf->head, &mgr->list);
pipe_mutex_unlock(mgr->mutex);
return &buf->base;
}
static void
pb_debug_manager_flush(struct pb_manager *_mgr)
{
@@ -435,21 +435,21 @@ pb_debug_manager_flush(struct pb_manager *_mgr)
if(mgr->provider->flush)
mgr->provider->flush(mgr->provider);
}
static void
pb_debug_manager_destroy(struct pb_manager *_mgr)
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
if(!LIST_IS_EMPTY(&mgr->list)) {
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);
}
pipe_mutex_unlock(mgr->mutex);
mtx_destroy(&mgr->mutex);
mgr->provider->destroy(mgr->provider);
FREE(mgr);
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
index 52cd115..657b5f3 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -92,21 +92,21 @@ mm_buffer(struct pb_buffer *buf)
static void
mm_buffer_destroy(struct pb_buffer *buf)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
assert(!pipe_is_referenced(&mm_buf->base.reference));
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
u_mmFreeMem(mm_buf->block);
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
}
static void *
mm_buffer_map(struct pb_buffer *buf,
unsigned flags,
void *flush_ctx)
@@ -177,21 +177,21 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
const struct pb_desc *desc)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
struct mm_buffer *mm_buf;
/* We don't handle alignments larger then the one initially setup */
assert(pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2));
if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
return NULL;
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
pipe_mutex_unlock(mm->mutex);
return NULL;
}
pipe_reference_init(&mm_buf->base.reference, 1);
mm_buf->base.alignment = desc->alignment;
mm_buf->base.usage = desc->usage;
@@ -226,21 +226,21 @@ mm_bufmgr_flush(struct pb_manager *mgr)
{
/* No-op */
}
static void
mm_bufmgr_destroy(struct pb_manager *mgr)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
- pipe_mutex_lock(mm->mutex);
+ mtx_lock(&mm->mutex);
u_mmDestroy(mm->heap);
pb_unmap(mm->buffer);
pb_reference(&mm->buffer, NULL);
pipe_mutex_unlock(mm->mutex);
FREE(mgr);
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
index fe221fc..83a5568 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -103,37 +103,37 @@ pool_buffer(struct pb_buffer *buf)
static void
pool_buffer_destroy(struct pb_buffer *buf)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
assert(!pipe_is_referenced(&pool_buf->base.reference));
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
pipe_mutex_unlock(pool->mutex);
}
static void *
pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
void *map;
/* XXX: it will be necessary to remap here to propagate flush_ctx */
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
pipe_mutex_unlock(pool->mutex);
return map;
}
static void
pool_buffer_unmap(struct pb_buffer *buf)
{
/* No-op */
@@ -189,21 +189,21 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
struct pool_buffer *pool_buf;
struct list_head *item;
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
if (pool->numFree == 0) {
pipe_mutex_unlock(pool->mutex);
debug_printf("warning: out of fixed size buffer objects\n");
return NULL;
}
item = pool->free.next;
if (item == &pool->free) {
@@ -231,21 +231,21 @@ static void
pool_bufmgr_flush(struct pb_manager *mgr)
{
/* No-op */
}
static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
- pipe_mutex_lock(pool->mutex);
+ mtx_lock(&pool->mutex);
FREE(pool->bufs);
pb_unmap(pool->buffer);
pb_reference(&pool->buffer, NULL);
pipe_mutex_unlock(pool->mutex);
FREE(mgr);
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
index 43313d8..32e6646 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -192,21 +192,21 @@ pb_slab_range_manager(struct pb_manager *mgr)
* it on the slab FREE list.
*/
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
struct pb_slab *slab = buf->slab;
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
buf->mapCount = 0;
LIST_DEL(list);
LIST_ADDTAIL(list, &slab->freeBuffers);
slab->numFree++;
if (slab->head.next == &slab->head)
@@ -389,21 +389,21 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
return NULL;
assert(pb_check_alignment(desc->alignment, mgr->bufSize));
if(!pb_check_alignment(desc->alignment, mgr->bufSize))
return NULL;
assert(pb_check_usage(desc->usage, mgr->desc.usage));
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
/* Create a new slab, if we run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
if (mgr->slabs.next == &mgr->slabs) {
pipe_mutex_unlock(mgr->mutex);
return NULL;
}
}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_cache.c b/src/gallium/auxiliary/pipebuffer/pb_cache.c
index adae222..4a72cb5 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_cache.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_cache.c
@@ -82,21 +82,21 @@ release_expired_buffers_locked(struct list_head *cache)
* being released.
*/
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
struct pb_cache *mgr = entry->mgr;
struct list_head *cache = &mgr->buckets[entry->bucket_index];
struct pb_buffer *buf = entry->buffer;
unsigned i;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
assert(!pipe_is_referenced(&buf->reference));
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
release_expired_buffers_locked(&mgr->buckets[i]);
/* Directly release any buffer that exceeds the limit. */
if (mgr->cache_size + buf->size > mgr->max_cache_size) {
mgr->destroy_buffer(buf);
pipe_mutex_unlock(mgr->mutex);
return;
@@ -148,21 +148,21 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
unsigned alignment, unsigned usage,
unsigned bucket_index)
{
struct pb_cache_entry *entry;
struct pb_cache_entry *cur_entry;
struct list_head *cur, *next;
int64_t now;
int ret = 0;
struct list_head *cache = &mgr->buckets[bucket_index];
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
entry = NULL;
cur = cache->next;
next = cur->next;
/* search in the expired buffers, freeing them in the process */
now = os_time_get();
while (cur != cache) {
cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
@@ -221,21 +221,21 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
/**
* Empty the cache. Useful when there is not enough memory.
*/
void
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
struct list_head *curr, *next;
struct pb_cache_entry *buf;
unsigned i;
- pipe_mutex_lock(mgr->mutex);
+ mtx_lock(&mgr->mutex);
for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
struct list_head *cache = &mgr->buckets[i];
curr = cache->next;
next = curr->next;
while (curr != cache) {
buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
destroy_buffer_locked(buf);
curr = next;
next = curr->next;
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
index 9ad88db..4a1b269 100644
--- a/src/gallium/auxiliary/pipebuffer/pb_slab.c
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -102,21 +102,21 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
struct pb_slab_group *group;
struct pb_slab *slab;
struct pb_slab_entry *entry;
assert(order < slabs->min_order + slabs->num_orders);
assert(heap < slabs->num_heaps);
group_index = heap * slabs->num_orders + (order - slabs->min_order);
group = &slabs->groups[group_index];
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
/* If there is no candidate slab at all, or the first slab has no free
* entries, try reclaiming entries.
*/
if (LIST_IS_EMPTY(&group->slabs) ||
LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
pb_slabs_reclaim_locked(slabs);
/* Remove slabs without free entries. */
while (!LIST_IS_EMPTY(&group->slabs)) {
@@ -132,21 +132,21 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
* calls back into slab functions (most likely to happen for
* pb_slab_reclaim if memory is low).
*
* There's a chance that racing threads will end up allocating multiple
* slabs for the same group, but that doesn't hurt correctness.
*/
pipe_mutex_unlock(slabs->mutex);
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
LIST_ADD(&slab->head, &group->slabs);
}
entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
LIST_DEL(&entry->head);
slab->num_free--;
pipe_mutex_unlock(slabs->mutex);
@@ -155,35 +155,35 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
/* Free the given slab entry.
*
* The entry may still be in use e.g. by in-flight command submissions. The
* can_reclaim callback function will be called to determine whether the entry
* can be handed out again by pb_slab_alloc.
*/
void
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
LIST_ADDTAIL(&entry->head, &slabs->reclaim);
pipe_mutex_unlock(slabs->mutex);
}
/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
*
* This may end up freeing some slabs and is therefore useful to try to reclaim
* some no longer used memory. However, calling this function is not strictly
* required since pb_slab_alloc will eventually do the same thing.
*/
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
pipe_mutex_unlock(slabs->mutex);
}
/* Initialize the slabs manager.
*
* The minimum and maximum size of slab entries are 2^min_order and
* 2^max_order, respectively.
*
* priv will be passed to the given callback functions.
diff --git a/src/gallium/auxiliary/rtasm/rtasm_execmem.c b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
index a60d521..a1c3de9 100644
--- a/src/gallium/auxiliary/rtasm/rtasm_execmem.c
+++ b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
@@ -83,21 +83,21 @@ init_heap(void)
return (exec_mem != MAP_FAILED);
}
void *
rtasm_exec_malloc(size_t size)
{
struct mem_block *block = NULL;
void *addr = NULL;
- pipe_mutex_lock(exec_mutex);
+ mtx_lock(&exec_mutex);
if (!init_heap())
goto bail;
if (exec_heap) {
size = (size + 31) & ~31; /* next multiple of 32 bytes */
block = u_mmAllocMem( exec_heap, size, 5, 0 ); /* 5 -> 32-byte alignment */
}
if (block)
@@ -108,21 +108,21 @@ rtasm_exec_malloc(size_t size)
bail:
pipe_mutex_unlock(exec_mutex);
return addr;
}
void
rtasm_exec_free(void *addr)
{
- pipe_mutex_lock(exec_mutex);
+ mtx_lock(&exec_mutex);
if (exec_heap) {
struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
if (block)
u_mmFreeMem(block);
}
pipe_mutex_unlock(exec_mutex);
}
diff --git a/src/gallium/auxiliary/util/u_debug_flush.c b/src/gallium/auxiliary/util/u_debug_flush.c
index bcce4f4..dde21f9 100644
--- a/src/gallium/auxiliary/util/u_debug_flush.c
+++ b/src/gallium/auxiliary/util/u_debug_flush.c
@@ -158,21 +158,21 @@ debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
if (!fctx)
goto out_no_ctx;
fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
debug_flush_pointer_compare);
if (!fctx->ref_hash)
goto out_no_ref_hash;
fctx->bt_depth = bt_depth;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
list_addtail(&fctx->head, &ctx_list);
pipe_mutex_unlock(list_mutex);
return fctx;
out_no_ref_hash:
FREE(fctx);
out_no_ctx:
debug_printf("Debug flush context creation failed.\n");
debug_printf("Debug flush checking for this context will be incomplete.\n");
@@ -208,38 +208,38 @@ debug_flush_alert(const char *s, const char *op,
void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
boolean mapped_sync = FALSE;
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped) {
debug_flush_alert("Recursive map detected.", "Map",
2, fbuf->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
FALSE, fbuf->map_frame);
} else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
!fbuf->supports_unsync) {
fbuf->mapped_sync = mapped_sync = TRUE;
}
fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
fbuf->mapped = TRUE;
pipe_mutex_unlock(fbuf->mutex);
if (mapped_sync) {
struct debug_flush_ctx *fctx;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
struct debug_flush_item *item =
util_hash_table_get(fctx->ref_hash, fbuf);
if (item && fctx->catch_map_of_referenced) {
debug_flush_alert("Already referenced map detected.",
"Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
FALSE, FALSE, item->ref_frame);
}
@@ -247,21 +247,21 @@ debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
pipe_mutex_unlock(list_mutex);
}
}
void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
if (!fbuf)
return;
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (!fbuf->mapped)
debug_flush_alert("Unmap not previously mapped detected.", "Map",
2, fbuf->bt_depth, FALSE, TRUE, NULL);
fbuf->mapped_sync = FALSE;
fbuf->mapped = FALSE;
FREE(fbuf->map_frame);
fbuf->map_frame = NULL;
pipe_mutex_unlock(fbuf->mutex);
}
@@ -270,21 +270,21 @@ void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
struct debug_flush_buf *fbuf)
{
struct debug_flush_item *item;
if (!fctx || !fbuf)
return;
item = util_hash_table_get(fctx->ref_hash, fbuf);
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert("Reference of mapped buffer detected.", "Reference",
2, fctx->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
FALSE, fbuf->map_frame);
}
pipe_mutex_unlock(fbuf->mutex);
if (!item) {
item = CALLOC_STRUCT(debug_flush_item);
@@ -313,21 +313,21 @@ debug_flush_might_flush_cb(void *key, void *value, void *data)
{
struct debug_flush_item *item =
(struct debug_flush_item *) value;
struct debug_flush_buf *fbuf = item->fbuf;
const char *reason = (const char *) data;
char message[80];
util_snprintf(message, sizeof(message),
"%s referenced mapped buffer detected.", reason);
- pipe_mutex_lock(fbuf->mutex);
+ mtx_lock(&fbuf->mutex);
if (fbuf->mapped_sync) {
debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
fbuf->map_frame);
debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
FALSE, item->ref_frame);
}
pipe_mutex_unlock(fbuf->mutex);
return PIPE_OK;
diff --git a/src/gallium/auxiliary/util/u_debug_memory.c b/src/gallium/auxiliary/util/u_debug_memory.c
index 2f7031d..d5b0d91 100644
--- a/src/gallium/auxiliary/util/u_debug_memory.c
+++ b/src/gallium/auxiliary/util/u_debug_memory.c
@@ -146,21 +146,21 @@ debug_malloc(const char *file, unsigned line, const char *function,
hdr->freed = FALSE;
#endif
#if DEBUG_MEMORY_STACK
debug_backtrace_capture(hdr->backtrace, 0, DEBUG_MEMORY_STACK);
#endif
ftr = footer_from_header(hdr);
ftr->magic = DEBUG_MEMORY_MAGIC;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_ADDTAIL(&hdr->head, &list);
pipe_mutex_unlock(list_mutex);
return data_from_header(hdr);
}
void
debug_free(const char *file, unsigned line, const char *function,
void *ptr)
{
@@ -191,21 +191,21 @@ debug_free(const char *file, unsigned line, const char *function,
/* Check for double-free */
assert(!hdr->freed);
/* Mark the block as freed but don't really free it */
hdr->freed = TRUE;
/* Save file/line where freed */
hdr->file = file;
hdr->line = line;
/* set freed memory to special value */
memset(ptr, DEBUG_FREED_BYTE, hdr->size);
#else
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_DEL(&hdr->head);
pipe_mutex_unlock(list_mutex);
hdr->magic = 0;
ftr->magic = 0;
os_free(hdr);
#endif
}
void *
@@ -266,21 +266,21 @@ debug_realloc(const char *file, unsigned line, const char *function,
new_hdr->size = new_size;
new_hdr->magic = DEBUG_MEMORY_MAGIC;
new_hdr->tag = 0;
#if DEBUG_FREED_MEMORY
new_hdr->freed = FALSE;
#endif
new_ftr = footer_from_header(new_hdr);
new_ftr->magic = DEBUG_MEMORY_MAGIC;
- pipe_mutex_lock(list_mutex);
+ mtx_lock(&list_mutex);
LIST_REPLACE(&old_hdr->head, &new_hdr->head);
pipe_mutex_unlock(list_mutex);
/* copy data */
new_ptr = data_from_header(new_hdr);
memcpy( new_ptr, old_ptr, old_size < new_size ? old_size : new_size );
/* free old */
old_hdr->magic = 0;
old_ftr->magic = 0;
diff --git a/src/gallium/auxiliary/util/u_debug_refcnt.c b/src/gallium/auxiliary/util/u_debug_refcnt.c
index 754ee8b..1db1787 100644
--- a/src/gallium/auxiliary/util/u_debug_refcnt.c
+++ b/src/gallium/auxiliary/util/u_debug_refcnt.c
@@ -87,21 +87,21 @@ debug_serial(void *p, unsigned *pserial)
boolean found = TRUE;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
static boolean first = TRUE;
if (first) {
(void) mtx_init(&serials_mutex, mtx_plain);
first = FALSE;
}
#endif
- pipe_mutex_lock(serials_mutex);
+ mtx_lock(&serials_mutex);
if (!serials_hash)
serials_hash = util_hash_table_create(hash_ptr, compare_ptr);
serial = (unsigned) (uintptr_t) util_hash_table_get(serials_hash, p);
if (!serial) {
/* time to stop logging... (you'll have a 100 GB logfile at least at
* this point) TODO: avoid this
*/
serial = ++serials_last;
if (!serial) {
@@ -119,21 +119,21 @@ debug_serial(void *p, unsigned *pserial)
return found;
}
/**
* Free the serial number for the given pointer.
*/
static void
debug_serial_delete(void *p)
{
- pipe_mutex_lock(serials_mutex);
+ mtx_lock(&serials_mutex);
util_hash_table_remove(serials_hash, p);
pipe_mutex_unlock(serials_mutex);
}
#define STACK_LEN 64
static void
dump_stack(const char *symbols[STACK_LEN])
{
diff --git a/src/gallium/auxiliary/util/u_debug_symbol.c b/src/gallium/auxiliary/util/u_debug_symbol.c
index 9a4eafa..de320b3 100644
--- a/src/gallium/auxiliary/util/u_debug_symbol.c
+++ b/src/gallium/auxiliary/util/u_debug_symbol.c
@@ -294,21 +294,21 @@ debug_symbol_name_cached(const void *addr)
const char* name;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
static boolean first = TRUE;
if (first) {
(void) mtx_init(&symbols_mutex, mtx_plain);
first = FALSE;
}
#endif
- pipe_mutex_lock(symbols_mutex);
+ mtx_lock(&symbols_mutex);
if(!symbols_hash)
symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
name = util_hash_table_get(symbols_hash, (void*)addr);
if(!name)
{
char buf[1024];
debug_symbol_name(addr, buf, sizeof(buf));
name = strdup(buf);
util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index 092f91a..2926d8c 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -40,77 +40,77 @@ static void util_queue_killall_and_wait(struct util_queue *queue);
static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;
static void
atexit_handler(void)
{
struct util_queue *iter;
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
/* Wait for all queues to assert idle. */
LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
util_queue_killall_and_wait(iter);
}
pipe_mutex_unlock(exit_mutex);
}
static void
global_init(void)
{
LIST_INITHEAD(&queue_list);
atexit(atexit_handler);
}
static void
add_to_atexit_list(struct util_queue *queue)
{
call_once(&atexit_once_flag, global_init);
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
LIST_ADD(&queue->head, &queue_list);
pipe_mutex_unlock(exit_mutex);
}
static void
remove_from_atexit_list(struct util_queue *queue)
{
struct util_queue *iter, *tmp;
- pipe_mutex_lock(exit_mutex);
+ mtx_lock(&exit_mutex);
LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
if (iter == queue) {
LIST_DEL(&iter->head);
break;
}
}
pipe_mutex_unlock(exit_mutex);
}
/****************************************************************************
* util_queue_fence
*/
static void
util_queue_fence_signal(struct util_queue_fence *fence)
{
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
fence->signalled = true;
cnd_broadcast(&fence->cond);
pipe_mutex_unlock(fence->mutex);
}
void
util_queue_fence_wait(struct util_queue_fence *fence)
{
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
while (!fence->signalled)
cnd_wait(&fence->cond, &fence->mutex);
pipe_mutex_unlock(fence->mutex);
}
void
util_queue_fence_init(struct util_queue_fence *fence)
{
memset(fence, 0, sizeof(*fence));
(void) mtx_init(&fence->mutex, mtx_plain);
@@ -144,21 +144,21 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
if (queue->name) {
char name[16];
util_snprintf(name, sizeof(name), "%s:%i", queue->name, thread_index);
pipe_thread_setname(name);
}
while (1) {
struct util_queue_job job;
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* wait if the queue is empty */
while (!queue->kill_threads && queue->num_queued == 0)
cnd_wait(&queue->has_queued_cond, &queue->lock);
if (queue->kill_threads) {
pipe_mutex_unlock(queue->lock);
break;
}
@@ -173,21 +173,21 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
if (job.job) {
job.execute(job.job, thread_index);
util_queue_fence_signal(job.fence);
if (job.cleanup)
job.cleanup(job.job, thread_index);
}
}
/* signal remaining jobs before terminating */
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
while (queue->jobs[queue->read_idx].job) {
util_queue_fence_signal(queue->jobs[queue->read_idx].fence);
queue->jobs[queue->read_idx].job = NULL;
queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
}
queue->num_queued = 0; /* reset this when exiting the thread */
pipe_mutex_unlock(queue->lock);
return 0;
}
@@ -258,21 +258,21 @@ fail:
memset(queue, 0, sizeof(*queue));
return false;
}
static void
util_queue_killall_and_wait(struct util_queue *queue)
{
unsigned i;
/* Signal all threads to terminate. */
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
queue->kill_threads = 1;
cnd_broadcast(&queue->has_queued_cond);
pipe_mutex_unlock(queue->lock);
for (i = 0; i < queue->num_threads; i++)
pipe_thread_wait(queue->threads[i]);
queue->num_threads = 0;
}
void
@@ -293,21 +293,21 @@ util_queue_add_job(struct util_queue *queue,
void *job,
struct util_queue_fence *fence,
util_queue_execute_func execute,
util_queue_execute_func cleanup)
{
struct util_queue_job *ptr;
assert(fence->signalled);
fence->signalled = false;
- pipe_mutex_lock(queue->lock);
+ mtx_lock(&queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* if the queue is full, wait until there is space */
while (queue->num_queued == queue->max_jobs)
cnd_wait(&queue->has_space_cond, &queue->lock);
ptr = &queue->jobs[queue->write_idx];
assert(ptr->job == NULL);
ptr->job = job;
ptr->fence = fence;
diff --git a/src/gallium/auxiliary/util/u_range.h b/src/gallium/auxiliary/util/u_range.h
index d4a4ae1..a09dc9a 100644
--- a/src/gallium/auxiliary/util/u_range.h
+++ b/src/gallium/auxiliary/util/u_range.h
@@ -52,21 +52,21 @@ util_range_set_empty(struct util_range *range)
{
range->start = ~0;
range->end = 0;
}
/* This is like a union of two sets. */
static inline void
util_range_add(struct util_range *range, unsigned start, unsigned end)
{
if (start < range->start || end > range->end) {
- pipe_mutex_lock(range->write_mutex);
+ mtx_lock(&range->write_mutex);
range->start = MIN2(start, range->start);
range->end = MAX2(end, range->end);
pipe_mutex_unlock(range->write_mutex);
}
}
static inline boolean
util_ranges_intersect(struct util_range *range, unsigned start, unsigned end)
{
return MAX2(start, range->start) < MIN2(end, range->end);
diff --git a/src/gallium/auxiliary/util/u_ringbuffer.c b/src/gallium/auxiliary/util/u_ringbuffer.c
index c13517a..6a83d30 100644
--- a/src/gallium/auxiliary/util/u_ringbuffer.c
+++ b/src/gallium/auxiliary/util/u_ringbuffer.c
@@ -69,21 +69,21 @@ static inline boolean util_ringbuffer_empty( const struct util_ringbuffer *ring
return util_ringbuffer_space(ring) == ring->mask;
}
void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
const struct util_packet *packet )
{
unsigned i;
/* XXX: over-reliance on mutexes, etc:
*/
- pipe_mutex_lock(ring->mutex);
+ mtx_lock(&ring->mutex);
/* make sure we don't request an impossible amount of space
*/
assert(packet->dwords <= ring->mask);
/* Wait for free space:
*/
while (util_ringbuffer_space(ring) < packet->dwords)
cnd_wait(&ring->change, &ring->mutex);
@@ -110,21 +110,21 @@ enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
struct util_packet *packet,
unsigned max_dwords,
boolean wait )
{
const struct util_packet *ring_packet;
unsigned i;
int ret = PIPE_OK;
/* XXX: over-reliance on mutexes, etc:
*/
- pipe_mutex_lock(ring->mutex);
+ mtx_lock(&ring->mutex);
/* Get next ring entry:
*/
if (wait) {
while (util_ringbuffer_empty(ring))
cnd_wait(&ring->change, &ring->mutex);
}
else {
if (util_ringbuffer_empty(ring)) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
diff --git a/src/gallium/drivers/ddebug/dd_context.c b/src/gallium/drivers/ddebug/dd_context.c
index 550f764..109d642 100644
--- a/src/gallium/drivers/ddebug/dd_context.c
+++ b/src/gallium/drivers/ddebug/dd_context.c
@@ -587,21 +587,21 @@ dd_context_set_stream_output_targets(struct pipe_context *_pipe,
pipe->set_stream_output_targets(pipe, num_targets, tgs, offsets);
}
static void
dd_context_destroy(struct pipe_context *_pipe)
{
struct dd_context *dctx = dd_context(_pipe);
struct pipe_context *pipe = dctx->pipe;
if (dctx->thread) {
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
dctx->kill_thread = 1;
pipe_mutex_unlock(dctx->mutex);
pipe_thread_wait(dctx->thread);
mtx_destroy(&dctx->mutex);
assert(!dctx->records);
}
if (dctx->fence) {
pipe->transfer_unmap(pipe, dctx->fence_transfer);
pipe_resource_reference(&dctx->fence, NULL);
diff --git a/src/gallium/drivers/ddebug/dd_draw.c b/src/gallium/drivers/ddebug/dd_draw.c
index 7bc7844..17b404a 100644
--- a/src/gallium/drivers/ddebug/dd_draw.c
+++ b/src/gallium/drivers/ddebug/dd_draw.c
@@ -897,21 +897,21 @@ dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
PIPE_DUMP_DEVICE_STATUS_REGISTERS);
dd_dump_dmesg(f);
fclose(f);
}
PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
{
struct dd_context *dctx = (struct dd_context *)input;
struct dd_screen *dscreen = dd_screen(dctx->base.screen);
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
while (!dctx->kill_thread) {
struct dd_draw_record **record = &dctx->records;
/* Loop over all records. */
while (*record) {
int64_t now;
/* If the fence has been signalled, release the record and all older
* records.
@@ -937,21 +937,21 @@ PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
dd_kill_process();
}
record = &(*record)->next;
}
/* Unlock and sleep before starting all over again. */
pipe_mutex_unlock(dctx->mutex);
os_time_sleep(10000); /* 10 ms */
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
}
/* Thread termination. */
while (dctx->records)
dd_free_record(&dctx->records);
pipe_mutex_unlock(dctx->mutex);
return 0;
}
@@ -1034,21 +1034,21 @@ dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
record->sequence_no = dctx->sequence_no;
record->driver_state_log = log;
memset(&record->call, 0, sizeof(record->call));
dd_copy_call(&record->call, call);
dd_init_copy_of_draw_state(&record->draw_state);
dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
/* Add the record to the list. */
- pipe_mutex_lock(dctx->mutex);
+ mtx_lock(&dctx->mutex);
record->next = dctx->records;
dctx->records = record;
pipe_mutex_unlock(dctx->mutex);
}
static void
dd_context_flush(struct pipe_context *_pipe,
struct pipe_fence_handle **fence, unsigned flags)
{
struct dd_context *dctx = dd_context(_pipe);
diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index c6dcf11..f08b7b3 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -163,21 +163,21 @@ batch_reset_resources_locked(struct fd_batch *batch)
debug_assert(rsc->batch_mask & (1 << batch->idx));
rsc->batch_mask &= ~(1 << batch->idx);
if (rsc->write_batch == batch)
fd_batch_reference_locked(&rsc->write_batch, NULL);
}
}
static void
batch_reset_resources(struct fd_batch *batch)
{
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
batch_reset_resources_locked(batch);
pipe_mutex_unlock(batch->ctx->screen->lock);
}
static void
batch_reset(struct fd_batch *batch)
{
DBG("%p", batch);
fd_batch_sync(batch);
@@ -196,21 +196,21 @@ fd_batch_reset(struct fd_batch *batch)
batch_reset(batch);
}
void
__fd_batch_destroy(struct fd_batch *batch)
{
DBG("%p", batch);
util_copy_framebuffer_state(&batch->framebuffer, NULL);
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, true);
pipe_mutex_unlock(batch->ctx->screen->lock);
batch_fini(batch);
batch_reset_resources(batch);
debug_assert(batch->resources->entries == 0);
_mesa_set_destroy(batch->resources, NULL);
batch_flush_reset_dependencies(batch, false);
@@ -280,21 +280,21 @@ batch_flush(struct fd_batch *batch)
} else {
fd_gmem_render_tiles(batch);
batch_reset_resources(batch);
}
debug_assert(batch->reference.count > 0);
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, false);
pipe_mutex_unlock(batch->ctx->screen->lock);
}
}
/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch, bool sync)
{
/* NOTE: we need to hold an extra ref across the body of flush,
@@ -332,21 +332,21 @@ batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
if (batch->dependents_mask & (1 << dep->idx))
return;
/* if the new depedency already depends on us, we need to flush
* to avoid a loop in the dependency graph.
*/
if (batch_depends_on(dep, batch)) {
DBG("%p: flush forced on %p!", batch, dep);
pipe_mutex_unlock(batch->ctx->screen->lock);
fd_batch_flush(dep, false);
- pipe_mutex_lock(batch->ctx->screen->lock);
+ mtx_lock(&batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
fd_batch_reference_locked(&other, dep);
batch->dependents_mask |= (1 << dep->idx);
DBG("%p: added dependency on %p", batch, dep);
}
}
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
diff --git a/src/gallium/drivers/freedreno/freedreno_batch_cache.c b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
index f3d5078..5a881bf 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch_cache.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
@@ -123,49 +123,49 @@ fd_bc_fini(struct fd_batch_cache *cache)
{
_mesa_hash_table_destroy(cache->ht, NULL);
}
void
fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
{
struct hash_entry *entry;
struct fd_batch *last_batch = NULL;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
pipe_mutex_unlock(ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false);
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
}
fd_batch_reference_locked(&batch, NULL);
}
pipe_mutex_unlock(ctx->screen->lock);
if (last_batch) {
fd_batch_sync(last_batch);
fd_batch_reference(&last_batch, NULL);
}
}
void
fd_bc_invalidate_context(struct fd_context *ctx)
{
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx)
fd_batch_reference_locked(&batch, NULL);
}
pipe_mutex_unlock(ctx->screen->lock);
}
void
@@ -200,21 +200,21 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
batch->key = NULL;
free(key);
}
void
fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
{
struct fd_screen *screen = fd_screen(rsc->base.b.screen);
struct fd_batch *batch;
- pipe_mutex_lock(screen->lock);
+ mtx_lock(&screen->lock);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
_mesa_set_remove(batch->resources, entry);
}
rsc->batch_mask = 0;
fd_batch_reference_locked(&rsc->write_batch, NULL);
}
@@ -226,21 +226,21 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
pipe_mutex_unlock(screen->lock);
}
struct fd_batch *
fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
{
struct fd_batch *batch;
uint32_t idx;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
batch = cache->batches[i];
debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
struct set_entry *entry;
set_foreach(batch->dependencies, entry) {
struct fd_batch *dep = (struct fd_batch *)entry->key;
debug_printf(" %d", dep->idx);
@@ -259,21 +259,21 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
fd_batch_reference_locked(&flush_batch, cache->batches[i]);
}
/* we can drop lock temporarily here, since we hold a ref,
* flush_batch won't disappear under us.
*/
pipe_mutex_unlock(ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch, true);
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
* it won't be unref'd and will remain in the table.
*
* TODO maybe keep a bitmask of batches that depend on me, to make
* this easier:
*/
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
struct fd_batch *other = cache->batches[i];
@@ -331,21 +331,21 @@ batch_from_key(struct fd_batch_cache *cache, struct key *key,
DBG("%p: surf[%u]: %p (%s) (%u,%u / %u,%u,%u)", batch, key->surf[idx].pos,
key->surf[idx].texture, util_format_name(key->surf[idx].format),
key->surf[idx].u.buf.first_element, key->surf[idx].u.buf.last_element,
key->surf[idx].u.tex.first_layer, key->surf[idx].u.tex.last_layer,
key->surf[idx].u.tex.level);
}
#endif
if (!batch)
return NULL;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
batch->hash = hash;
for (unsigned idx = 0; idx < key->num_surfs; idx++) {
struct fd_resource *rsc = fd_resource(key->surf[idx].texture);
rsc->bc_batch_mask = (1 << batch->idx);
}
diff --git a/src/gallium/drivers/freedreno/freedreno_context.h b/src/gallium/drivers/freedreno/freedreno_context.h
index 995e7d4..d65f19a 100644
--- a/src/gallium/drivers/freedreno/freedreno_context.h
+++ b/src/gallium/drivers/freedreno/freedreno_context.h
@@ -309,21 +309,21 @@ fd_context(struct pipe_context *pctx)
static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
pipe_mutex_assert_locked(ctx->screen->lock);
}
static inline void
fd_context_lock(struct fd_context *ctx)
{
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
}
static inline void
fd_context_unlock(struct fd_context *ctx)
{
pipe_mutex_unlock(ctx->screen->lock);
}
static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
diff --git a/src/gallium/drivers/freedreno/freedreno_draw.c b/src/gallium/drivers/freedreno/freedreno_draw.c
index 5d5b7c1..b98faca 100644
--- a/src/gallium/drivers/freedreno/freedreno_draw.c
+++ b/src/gallium/drivers/freedreno/freedreno_draw.c
@@ -103,21 +103,21 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
* query_buf may not be created yet.
*/
fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_DRAW);
/*
* Figure out the buffers/features we need:
*/
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
if (fd_depth_enabled(ctx)) {
buffers |= FD_BUFFER_DEPTH;
resource_written(batch, pfb->zsbuf->texture);
batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
}
if (fd_stencil_enabled(ctx)) {
buffers |= FD_BUFFER_STENCIL;
resource_written(batch, pfb->zsbuf->texture);
@@ -325,21 +325,21 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
if (cleared_buffers & PIPE_CLEAR_COLOR)
batch->cleared_scissor.color = *scissor;
if (cleared_buffers & PIPE_CLEAR_DEPTH)
batch->cleared_scissor.depth = *scissor;
if (cleared_buffers & PIPE_CLEAR_STENCIL)
batch->cleared_scissor.stencil = *scissor;
}
batch->resolve |= buffers;
batch->needs_flush = true;
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
if (buffers & PIPE_CLEAR_COLOR)
for (i = 0; i < pfb->nr_cbufs; i++)
if (buffers & (PIPE_CLEAR_COLOR0 << i))
resource_written(batch, pfb->cbufs[i]->texture);
if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
resource_written(batch, pfb->zsbuf->texture);
batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
}
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index 5a2bdfc..275de97 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -172,21 +172,21 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
return false;
assert(!ctx->in_shadow);
ctx->in_shadow = true;
/* get rid of any references that batch-cache might have to us (which
* should empty/destroy rsc->batches hashset)
*/
fd_bc_invalidate_resource(rsc, false);
- pipe_mutex_lock(ctx->screen->lock);
+ mtx_lock(&ctx->screen->lock);
/* Swap the backing bo's, so shadow becomes the old buffer,
* blit from shadow to new buffer. From here on out, we
* cannot fail.
*
* Note that we need to do it in this order, otherwise if
* we go down the CPU blit path, the recursive transfer_map()
* sees the wrong status.
*/
struct fd_resource *shadow = fd_resource(pshadow);
diff --git a/src/gallium/drivers/llvmpipe/lp_fence.c b/src/gallium/drivers/llvmpipe/lp_fence.c
index ec26bfe..e7c4ab6 100644
--- a/src/gallium/drivers/llvmpipe/lp_fence.c
+++ b/src/gallium/drivers/llvmpipe/lp_fence.c
@@ -81,21 +81,21 @@ lp_fence_destroy(struct lp_fence *fence)
/**
* Called by the rendering threads to increment the fence counter.
* When the counter == the rank, the fence is finished.
*/
void
lp_fence_signal(struct lp_fence *fence)
{
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, fence->id);
- pipe_mutex_lock(fence->mutex);
+ mtx_lock(&fence->mutex);
fence->count++;
assert(fence->count <= fence->rank);
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s count=%u rank=%u\n", __FUNCTION__,
fence->count, fence->rank);
/* Wake up all threads waiting on the condition variable:
*/
@@ -109,19 +109,19 @@ lp_fence_signalled(struct lp_fence *f)
{
return f->count == f->rank;
}
void
lp_fence_wait(struct lp_fence *f)
{
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, f->id);
- pipe_mutex_lock(f->mutex);
+ mtx_lock(&f->mutex);
assert(f->issued);
while (f->count < f->rank) {
cnd_wait(&f->signalled, &f->mutex);
}
pipe_mutex_unlock(f->mutex);
}
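
The two lp_fence hunks above are the two halves of a counting fence: each rasterizer thread increments count under the mutex, and waiters sleep on the condition variable until count reaches rank. Stripped of the llvmpipe plumbing, this is the classic condition-variable wait loop (a sketch, assuming the same count/rank fields):

   #include <assert.h>
   #include <threads.h>

   struct fence {
      mtx_t mutex;        /* assumed initialized with mtx_init()/cnd_init() */
      cnd_t signalled;
      unsigned count;     /* incremented once per signalling thread */
      unsigned rank;      /* number of signals needed to finish */
   };

   static void fence_signal(struct fence *f)
   {
      mtx_lock(&f->mutex);
      f->count++;
      assert(f->count <= f->rank);
      cnd_broadcast(&f->signalled);   /* wake every waiter */
      mtx_unlock(&f->mutex);
   }

   static void fence_wait(struct fence *f)
   {
      mtx_lock(&f->mutex);
      while (f->count < f->rank)      /* loop: cnd_wait may wake spuriously */
         cnd_wait(&f->signalled, &f->mutex);
      mtx_unlock(&f->mutex);
   }
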
diff --git a/src/gallium/drivers/llvmpipe/lp_scene.c b/src/gallium/drivers/llvmpipe/lp_scene.c
index 66dd1d2..d651192 100644
--- a/src/gallium/drivers/llvmpipe/lp_scene.c
+++ b/src/gallium/drivers/llvmpipe/lp_scene.c
@@ -477,21 +477,21 @@ lp_scene_bin_iter_begin( struct lp_scene *scene )
* Return pointer to next bin to be rendered.
* The lp_scene::curr_x and ::curr_y fields will be advanced.
* Multiple rendering threads will call this function to get a chunk
* of work (a bin) to work on.
*/
struct cmd_bin *
lp_scene_bin_iter_next( struct lp_scene *scene , int *x, int *y)
{
struct cmd_bin *bin = NULL;
- pipe_mutex_lock(scene->mutex);
+ mtx_lock(&scene->mutex);
if (scene->curr_x < 0) {
/* first bin */
scene->curr_x = 0;
scene->curr_y = 0;
}
else if (!next_bin(scene)) {
/* no more bins left */
goto end;
}
diff --git a/src/gallium/drivers/llvmpipe/lp_setup.c b/src/gallium/drivers/llvmpipe/lp_setup.c
index 768775b..f701e90 100644
--- a/src/gallium/drivers/llvmpipe/lp_setup.c
+++ b/src/gallium/drivers/llvmpipe/lp_setup.c
@@ -158,21 +158,21 @@ lp_setup_rasterize_scene( struct lp_setup_context *setup )
memcpy(scene->active_queries, setup->active_queries,
scene->num_active_queries * sizeof(scene->active_queries[0]));
lp_scene_end_binning(scene);
lp_fence_reference(&setup->last_fence, scene->fence);
if (setup->last_fence)
setup->last_fence->issued = TRUE;
- pipe_mutex_lock(screen->rast_mutex);
+ mtx_lock(&screen->rast_mutex);
/* FIXME: We enqueue the scene then wait on the rasterizer to finish.
* This means we never actually run any vertex stuff in parallel to
* rasterization (not in the same context at least) which is what the
* multiple scenes per setup is about - when we get a new empty scene
* any old one is already empty again because we waited here for
* raster tasks to be finished. Ideally, we shouldn't need to wait here
* and rely on fences elsewhere when waiting is necessary.
* Certainly, lp_scene_end_rasterization() would need to be deferred too
* and there are probably other reasons why this doesn't actually work.
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_surface.c b/src/gallium/drivers/nouveau/nv50/nv50_surface.c
index 46cd2e6..1e77184 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_surface.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_surface.c
@@ -1071,21 +1071,21 @@ nv50_blit_select_fp(struct nv50_blitctx *ctx, const struct pipe_blit_info *info)
{
struct nv50_blitter *blitter = ctx->nv50->screen->blitter;
const enum pipe_texture_target ptarg =
nv50_blit_reinterpret_pipe_texture_target(info->src.resource->target);
const unsigned targ = nv50_blit_texture_type(ptarg);
const unsigned mode = ctx->mode;
if (!blitter->fp[targ][mode]) {
- pipe_mutex_lock(blitter->mutex);
+ mtx_lock(&blitter->mutex);
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nv50->base.pipe, mode, ptarg);
pipe_mutex_unlock(blitter->mutex);
}
ctx->fp = blitter->fp[targ][mode];
}
static void
nv50_blit_set_dst(struct nv50_blitctx *ctx,
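
The blitter hunk above (and the identical nvc0 one below) is double-checked locking: an unlocked fast-path test, then a re-test under the mutex before creating the shader. A sketch of the idiom — note the unlocked read is a data race by the letter of C11, which the drivers tolerate because the slot is written once and never freed while the cache lives; a strictly conforming version would use an atomic load:

   #include <stdlib.h>
   #include <threads.h>

   static mtx_t cache_mutex;      /* assumed initialized elsewhere */
   static void *cache[16];

   /* Hypothetical stand-in for nv50_blitter_make_fp(). */
   static void *make_object(int slot)
   {
      (void)slot;
      return malloc(64);
   }

   static void *get_or_create(int slot)
   {
      if (!cache[slot]) {              /* unlocked fast path */
         mtx_lock(&cache_mutex);
         if (!cache[slot])             /* re-check under the lock */
            cache[slot] = make_object(slot);
         mtx_unlock(&cache_mutex);
      }
      return cache[slot];
   }
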
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c b/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
index 1e91fa8..5c4fda9 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
@@ -911,21 +911,21 @@ nvc0_blit_select_fp(struct nvc0_blitctx *ctx, const struct pipe_blit_info *info)
{
struct nvc0_blitter *blitter = ctx->nvc0->screen->blitter;
const enum pipe_texture_target ptarg =
nv50_blit_reinterpret_pipe_texture_target(info->src.resource->target);
const unsigned targ = nv50_blit_texture_type(ptarg);
const unsigned mode = ctx->mode;
if (!blitter->fp[targ][mode]) {
- pipe_mutex_lock(blitter->mutex);
+ mtx_lock(&blitter->mutex);
if (!blitter->fp[targ][mode])
blitter->fp[targ][mode] =
nv50_blitter_make_fp(&ctx->nvc0->base.pipe, mode, ptarg);
pipe_mutex_unlock(blitter->mutex);
}
ctx->fp = blitter->fp[targ][mode];
}
static void
nvc0_blit_set_dst(struct nvc0_blitctx *ctx,
diff --git a/src/gallium/drivers/r300/r300_blit.c b/src/gallium/drivers/r300/r300_blit.c
index b5c3ae5..7d58d4c 100644
--- a/src/gallium/drivers/r300/r300_blit.c
+++ b/src/gallium/drivers/r300/r300_blit.c
@@ -321,21 +321,21 @@ static void r300_clear(struct pipe_context* pipe,
r300->rws->cs_request_feature(r300->cs,
RADEON_FID_R300_CMASK_ACCESS,
TRUE);
}
/* Setup the clear. */
if (r300->cmask_access) {
/* Pair the resource with the CMASK to avoid other resources
* accessing it. */
if (!r300->screen->cmask_resource) {
- pipe_mutex_lock(r300->screen->cmask_mutex);
+ mtx_lock(&r300->screen->cmask_mutex);
/* Double checking (first unlocked, then locked). */
if (!r300->screen->cmask_resource) {
/* Don't reference this, so that the texture can be
* destroyed while set in cmask_resource.
* Then in texture_destroy, we set cmask_resource to NULL. */
r300->screen->cmask_resource = fb->cbufs[0]->texture;
}
pipe_mutex_unlock(r300->screen->cmask_mutex);
}
diff --git a/src/gallium/drivers/r300/r300_texture.c b/src/gallium/drivers/r300/r300_texture.c
index 929c3fe..b451b9f 100644
--- a/src/gallium/drivers/r300/r300_texture.c
+++ b/src/gallium/drivers/r300/r300_texture.c
@@ -1023,21 +1023,21 @@ static void r300_texture_setup_fb_state(struct r300_surface *surf)
}
}
static void r300_texture_destroy(struct pipe_screen *screen,
struct pipe_resource* texture)
{
struct r300_screen *rscreen = r300_screen(screen);
struct r300_resource* tex = (struct r300_resource*)texture;
if (tex->tex.cmask_dwords) {
- pipe_mutex_lock(rscreen->cmask_mutex);
+ mtx_lock(&rscreen->cmask_mutex);
if (texture == rscreen->cmask_resource) {
rscreen->cmask_resource = NULL;
}
pipe_mutex_unlock(rscreen->cmask_mutex);
}
pb_reference(&tex->buf, NULL);
FREE(tex);
}
boolean r300_resource_get_handle(struct pipe_screen* screen,
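
The comment in r300_clear and the destructor above are the two ends of one protocol: cmask_resource is a deliberately non-owning pointer, and texture_destroy clears it under the same mutex before the texture goes away. A sketch of that weak-pointer handshake (names illustrative):

   #include <stdlib.h>
   #include <threads.h>

   struct screen {
      mtx_t cmask_mutex;   /* assumed initialized elsewhere */
      void *cmask_owner;   /* weak pointer: no reference held */
   };

   static void claim_cmask(struct screen *s, void *tex)
   {
      mtx_lock(&s->cmask_mutex);
      if (!s->cmask_owner)
         s->cmask_owner = tex;      /* publish without taking a reference */
      mtx_unlock(&s->cmask_mutex);
   }

   static void texture_destroy(struct screen *s, void *tex)
   {
      mtx_lock(&s->cmask_mutex);
      if (s->cmask_owner == tex)
         s->cmask_owner = NULL;     /* drop the weak pointer before freeing */
      mtx_unlock(&s->cmask_mutex);
      free(tex);
   }
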
diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c
index 24f54c0..04d0617 100644
--- a/src/gallium/drivers/radeon/r600_gpu_load.c
+++ b/src/gallium/drivers/radeon/r600_gpu_load.c
@@ -171,21 +171,21 @@ void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
p_atomic_inc(&rscreen->gpu_load_stop_thread);
pipe_thread_wait(rscreen->gpu_load_thread);
rscreen->gpu_load_thread = 0;
}
static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
unsigned busy_index)
{
/* Start the thread if needed. */
if (!rscreen->gpu_load_thread) {
- pipe_mutex_lock(rscreen->gpu_load_mutex);
+ mtx_lock(&rscreen->gpu_load_mutex);
/* Check again inside the mutex. */
if (!rscreen->gpu_load_thread)
rscreen->gpu_load_thread =
pipe_thread_create(r600_gpu_load_thread, rscreen);
pipe_mutex_unlock(rscreen->gpu_load_mutex);
}
unsigned busy = p_atomic_read(&rscreen->mmio_counters.array[busy_index]);
unsigned idle = p_atomic_read(&rscreen->mmio_counters.array[busy_index + 1]);
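
r600_read_mmio_counter starts the load-sampling thread lazily with the same check/lock/re-check shape as the blitter caches above. In plain C11 the skeleton looks like this (a sketch; worker_main is a placeholder, and the unlocked read carries the same formal-data-race caveat as before):

   #include <threads.h>

   static mtx_t  thread_mutex;      /* assumed initialized elsewhere */
   static thrd_t worker;
   static int    worker_started;

   static int worker_main(void *arg)
   {
      (void)arg;                    /* would sample counters in a loop */
      return 0;
   }

   static void ensure_worker(void)
   {
      if (!worker_started) {        /* unlocked fast path */
         mtx_lock(&thread_mutex);
         if (!worker_started &&     /* re-check inside the mutex */
             thrd_create(&worker, worker_main, NULL) == thrd_success)
            worker_started = 1;
         mtx_unlock(&thread_mutex);
      }
   }
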
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index a257d81..9ff838a 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -1396,15 +1396,15 @@ bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned proce
{
return (rscreen->debug_flags & DBG_CHECK_IR) ||
r600_can_dump_shader(rscreen, processor);
}
void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value)
{
struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;
- pipe_mutex_lock(rscreen->aux_context_lock);
+ mtx_lock(&rscreen->aux_context_lock);
rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
pipe_mutex_unlock(rscreen->aux_context_lock);
}
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index 0865d35..79c436d 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -298,21 +298,21 @@ static void r600_texture_init_metadata(struct r600_texture *rtex,
metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}
static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
struct r600_texture *rtex)
{
struct r600_common_screen *rscreen = rctx->screen;
struct pipe_context *ctx = &rctx->b;
if (ctx == rscreen->aux_context)
- pipe_mutex_lock(rscreen->aux_context_lock);
+ mtx_lock(&rscreen->aux_context_lock);
ctx->flush_resource(ctx, &rtex->resource.b.b);
ctx->flush(ctx, NULL, 0);
if (ctx == rscreen->aux_context)
pipe_mutex_unlock(rscreen->aux_context_lock);
}
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
struct r600_texture *rtex)
@@ -387,21 +387,21 @@ static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
*/
bool r600_texture_disable_dcc(struct r600_common_context *rctx,
struct r600_texture *rtex)
{
struct r600_common_screen *rscreen = rctx->screen;
if (!r600_can_disable_dcc(rtex))
return false;
if (&rctx->b == rscreen->aux_context)
- pipe_mutex_lock(rscreen->aux_context_lock);
+ mtx_lock(&rscreen->aux_context_lock);
/* Decompress DCC. */
rctx->decompress_dcc(&rctx->b, rtex);
rctx->b.flush(&rctx->b, NULL, 0);
if (&rctx->b == rscreen->aux_context)
pipe_mutex_unlock(rscreen->aux_context_lock);
return r600_texture_discard_dcc(rscreen, rtex);
}
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index 212a9be..e61a5e2 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -7478,21 +7478,21 @@ si_get_shader_part(struct si_screen *sscreen,
bool prolog,
union si_shader_part_key *key,
LLVMTargetMachineRef tm,
struct pipe_debug_callback *debug,
void (*build)(struct si_shader_context *,
union si_shader_part_key *),
const char *name)
{
struct si_shader_part *result;
- pipe_mutex_lock(sscreen->shader_parts_mutex);
+ mtx_lock(&sscreen->shader_parts_mutex);
/* Find existing. */
for (result = *list; result; result = result->next) {
if (memcmp(&result->key, key, sizeof(*key)) == 0) {
pipe_mutex_unlock(sscreen->shader_parts_mutex);
return result;
}
}
/* Compile a new one. */
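
si_get_shader_part is a find-or-create cache over a linked list, with the mutex held from lookup through compilation so two threads never build the same part twice; note the early return above must unlock before leaving. A reduced sketch of that control flow (build() is a placeholder for the compile step):

   #include <stdlib.h>
   #include <threads.h>

   struct part {
      struct part *next;
      int key;
   };

   static mtx_t        parts_mutex;   /* assumed initialized elsewhere */
   static struct part *parts;

   static struct part *build(int key)    /* placeholder for compilation */
   {
      struct part *p = calloc(1, sizeof(*p));
      if (p)
         p->key = key;
      return p;
   }

   static struct part *get_part(int key)
   {
      struct part *p;
      mtx_lock(&parts_mutex);
      for (p = parts; p; p = p->next) {
         if (p->key == key) {
            mtx_unlock(&parts_mutex);   /* unlock on every exit path */
            return p;
         }
      }
      p = build(key);                   /* built with the lock held */
      if (p) {
         p->next = parts;               /* prepend for later lookups */
         parts = p;
      }
      mtx_unlock(&parts_mutex);
      return p;
   }
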
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index c370f15..c7a8d1f 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -1249,21 +1249,21 @@ again:
/* This must be done before the mutex is locked, because async GS
* compilation calls this function too, and therefore must enter
* the mutex first.
*
* Only wait if we are in a draw call. Don't wait if we are
* in a compiler thread.
*/
if (thread_index < 0)
util_queue_fence_wait(&sel->ready);
- pipe_mutex_lock(sel->mutex);
+ mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
/* If it's an optimized shader and its compilation has
* been started but isn't done, use the unoptimized
* shader so as not to cause a stall due to compilation.
*/
@@ -1450,39 +1450,39 @@ void si_init_shader_selector_async(void *job, int thread_index)
fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
return;
}
shader->selector = sel;
si_parse_next_shader_property(&sel->info, &shader->key);
tgsi_binary = si_get_tgsi_binary(sel);
/* Try to load the shader from the shader cache. */
- pipe_mutex_lock(sscreen->shader_cache_mutex);
+ mtx_lock(&sscreen->shader_cache_mutex);
if (tgsi_binary &&
si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
pipe_mutex_unlock(sscreen->shader_cache_mutex);
} else {
pipe_mutex_unlock(sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
if (si_compile_tgsi_shader(sscreen, tm, shader, false,
debug) != 0) {
FREE(shader);
FREE(tgsi_binary);
fprintf(stderr, "radeonsi: can't compile a main shader part\n");
return;
}
if (tgsi_binary) {
- pipe_mutex_lock(sscreen->shader_cache_mutex);
+ mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
FREE(tgsi_binary);
pipe_mutex_unlock(sscreen->shader_cache_mutex);
}
}
*si_get_main_shader_part(sel, &shader->key) = shader;
/* Unset "outputs_written" flags for outputs converted to
* DEFAULT_VAL, so that later inter-shader optimizations don't
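
The shader-cache hunks above follow a different discipline from si_get_shader_part: the mutex is held only for the lookup and the insert, and the expensive compile happens with the lock dropped, accepting that two threads may occasionally compile the same shader and letting the insert step resolve the duplicate. A sketch of that flow (cache_load/cache_insert/compile are hypothetical stand-ins, stubbed out here):

   #include <threads.h>

   static mtx_t cache_mutex;    /* assumed initialized elsewhere */

   /* Hypothetical stand-ins for the real cache and compiler. */
   static int cache_load(const void *key, void *shader)
   { (void)key; (void)shader; return 0; }
   static int cache_insert(const void *key, void *shader)
   { (void)key; (void)shader; return 1; }
   static int compile(void *shader)
   { (void)shader; return 0; }

   static int get_shader(const void *key, void *shader)
   {
      mtx_lock(&cache_mutex);
      if (cache_load(key, shader)) {   /* hit: nothing left to do */
         mtx_unlock(&cache_mutex);
         return 0;
      }
      mtx_unlock(&cache_mutex);        /* compile without the lock held */
      if (compile(shader) != 0)
         return -1;
      mtx_lock(&cache_mutex);
      cache_insert(key, shader);       /* a racing insert is resolved here */
      mtx_unlock(&cache_mutex);
      return 0;
   }
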
diff --git a/src/gallium/drivers/rbug/rbug_context.c b/src/gallium/drivers/rbug/rbug_context.c
index 4723c49..8d16ec2 100644
--- a/src/gallium/drivers/rbug/rbug_context.c
+++ b/src/gallium/drivers/rbug/rbug_context.c
@@ -39,21 +39,21 @@
static void
rbug_destroy(struct pipe_context *_pipe)
{
struct rbug_screen *rb_screen = rbug_screen(_pipe->screen);
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
rbug_screen_remove_from_list(rb_screen, contexts, rb_pipe);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->destroy(pipe);
rb_pipe->pipe = NULL;
pipe_mutex_unlock(rb_pipe->call_mutex);
FREE(rb_pipe);
}
static void
rbug_draw_block_locked(struct rbug_context *rb_pipe, int flag)
{
@@ -112,370 +112,370 @@ rbug_draw_block_locked(struct rbug_context *rb_pipe, int flag)
}
}
static void
rbug_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->draw_mutex);
+ mtx_lock(&rb_pipe->draw_mutex);
rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_BEFORE);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
/* XXX loop over PIPE_SHADER_x here */
if (!(rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] && rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT]->disabled) &&
!(rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] && rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY]->disabled) &&
!(rb_pipe->curr.shader[PIPE_SHADER_VERTEX] && rb_pipe->curr.shader[PIPE_SHADER_VERTEX]->disabled))
pipe->draw_vbo(pipe, info);
pipe_mutex_unlock(rb_pipe->call_mutex);
rbug_draw_block_locked(rb_pipe, RBUG_BLOCK_AFTER);
pipe_mutex_unlock(rb_pipe->draw_mutex);
}
static struct pipe_query *
rbug_create_query(struct pipe_context *_pipe,
unsigned query_type,
unsigned index)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_query *query;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
query = pipe->create_query(pipe,
query_type,
index);
pipe_mutex_unlock(rb_pipe->call_mutex);
return query;
}
static void
rbug_destroy_query(struct pipe_context *_pipe,
struct pipe_query *query)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->destroy_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static boolean
rbug_begin_query(struct pipe_context *_pipe,
struct pipe_query *query)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
boolean ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->begin_query(pipe, query);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static bool
rbug_end_query(struct pipe_context *_pipe,
struct pipe_query *query)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
bool ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->end_query(pipe,
query);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static boolean
rbug_get_query_result(struct pipe_context *_pipe,
struct pipe_query *query,
boolean wait,
union pipe_query_result *result)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
boolean ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->get_query_result(pipe,
query,
wait,
result);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static void
rbug_set_active_query_state(struct pipe_context *_pipe, boolean enable)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_active_query_state(pipe, enable);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_blend_state(struct pipe_context *_pipe,
const struct pipe_blend_state *blend)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static void
rbug_bind_blend_state(struct pipe_context *_pipe,
void *blend)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_blend_state(struct pipe_context *_pipe,
void *blend)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_blend_state(pipe,
blend);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_sampler_state(struct pipe_context *_pipe,
const struct pipe_sampler_state *sampler)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static void
rbug_bind_sampler_states(struct pipe_context *_pipe,
enum pipe_shader_type shader,
unsigned start, unsigned count,
void **samplers)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_sampler_states(pipe, shader, start, count, samplers);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_sampler_state(struct pipe_context *_pipe,
void *sampler)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_sampler_state(pipe,
sampler);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_rasterizer_state(struct pipe_context *_pipe,
const struct pipe_rasterizer_state *rasterizer)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static void
rbug_bind_rasterizer_state(struct pipe_context *_pipe,
void *rasterizer)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_rasterizer_state(struct pipe_context *_pipe,
void *rasterizer)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_rasterizer_state(pipe,
rasterizer);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_depth_stencil_alpha_state(struct pipe_context *_pipe,
const struct pipe_depth_stencil_alpha_state *depth_stencil_alpha)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static void
rbug_bind_depth_stencil_alpha_state(struct pipe_context *_pipe,
void *depth_stencil_alpha)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_depth_stencil_alpha_state(struct pipe_context *_pipe,
void *depth_stencil_alpha)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_depth_stencil_alpha_state(pipe,
depth_stencil_alpha);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_fs_state(struct pipe_context *_pipe,
const struct pipe_shader_state *state)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_fs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
if (!result)
return NULL;
return rbug_shader_create(rb_pipe, state, result, RBUG_SHADER_FRAGMENT);
}
static void
rbug_bind_fs_state(struct pipe_context *_pipe,
void *_fs)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *fs;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
fs = rbug_shader_unwrap(_fs);
rb_pipe->curr.shader[PIPE_SHADER_FRAGMENT] = rbug_shader(_fs);
pipe->bind_fs_state(pipe,
fs);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_fs_state(struct pipe_context *_pipe,
void *_fs)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_fs);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_vs_state(struct pipe_context *_pipe,
const struct pipe_shader_state *state)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_vs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
if (!result)
return NULL;
return rbug_shader_create(rb_pipe, state, result, RBUG_SHADER_VERTEX);
}
static void
rbug_bind_vs_state(struct pipe_context *_pipe,
void *_vs)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *vs;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
vs = rbug_shader_unwrap(_vs);
rb_pipe->curr.shader[PIPE_SHADER_VERTEX] = rbug_shader(_vs);
pipe->bind_vs_state(pipe,
vs);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
@@ -491,138 +491,138 @@ rbug_delete_vs_state(struct pipe_context *_pipe,
}
static void *
rbug_create_gs_state(struct pipe_context *_pipe,
const struct pipe_shader_state *state)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_gs_state(pipe, state);
pipe_mutex_unlock(rb_pipe->call_mutex);
if (!result)
return NULL;
return rbug_shader_create(rb_pipe, state, result, RBUG_SHADER_GEOM);
}
static void
rbug_bind_gs_state(struct pipe_context *_pipe,
void *_gs)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *gs;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
gs = rbug_shader_unwrap(_gs);
rb_pipe->curr.shader[PIPE_SHADER_GEOMETRY] = rbug_shader(_gs);
pipe->bind_gs_state(pipe,
gs);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_gs_state(struct pipe_context *_pipe,
void *_gs)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_shader *rb_shader = rbug_shader(_gs);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rbug_shader_destroy(rb_pipe, rb_shader);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_create_vertex_elements_state(struct pipe_context *_pipe,
unsigned num_elements,
const struct pipe_vertex_element *vertex_elements)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
void *ret;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
ret = pipe->create_vertex_elements_state(pipe,
num_elements,
vertex_elements);
pipe_mutex_unlock(rb_pipe->call_mutex);
return ret;
}
static void
rbug_bind_vertex_elements_state(struct pipe_context *_pipe,
void *velems)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->bind_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_delete_vertex_elements_state(struct pipe_context *_pipe,
void *velems)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->delete_vertex_elements_state(pipe,
velems);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_blend_color(struct pipe_context *_pipe,
const struct pipe_blend_color *blend_color)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_blend_color(pipe,
blend_color);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_stencil_ref(struct pipe_context *_pipe,
const struct pipe_stencil_ref *stencil_ref)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_stencil_ref(pipe,
stencil_ref);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_clip_state(struct pipe_context *_pipe,
const struct pipe_clip_state *clip)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_clip_state(pipe,
clip);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_constant_buffer(struct pipe_context *_pipe,
uint shader,
uint index,
const struct pipe_constant_buffer *_cb)
@@ -630,40 +630,40 @@ rbug_set_constant_buffer(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_constant_buffer cb;
/* XXX hmm? unwrap the input state */
if (_cb) {
cb = *_cb;
cb.buffer = rbug_resource_unwrap(_cb->buffer);
}
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_constant_buffer(pipe,
shader,
index,
_cb ? &cb : NULL);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_framebuffer_state(struct pipe_context *_pipe,
const struct pipe_framebuffer_state *_state)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_framebuffer_state unwrapped_state;
struct pipe_framebuffer_state *state = NULL;
unsigned i;
/* must protect curr status */
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rb_pipe->curr.nr_cbufs = 0;
memset(rb_pipe->curr.cbufs, 0, sizeof(rb_pipe->curr.cbufs));
rb_pipe->curr.zsbuf = NULL;
/* unwrap the input state */
if (_state) {
memcpy(&unwrapped_state, _state, sizeof(unwrapped_state));
rb_pipe->curr.nr_cbufs = _state->nr_cbufs;
@@ -684,71 +684,71 @@ rbug_set_framebuffer_state(struct pipe_context *_pipe,
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_polygon_stipple(struct pipe_context *_pipe,
const struct pipe_poly_stipple *poly_stipple)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_polygon_stipple(pipe,
poly_stipple);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_scissor_states(struct pipe_context *_pipe,
unsigned start_slot,
unsigned num_scissors,
const struct pipe_scissor_state *scissor)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_scissor_states(pipe, start_slot, num_scissors, scissor);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_viewport_states(struct pipe_context *_pipe,
unsigned start_slot,
unsigned num_viewports,
const struct pipe_viewport_state *viewport)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_viewport_states(pipe, start_slot, num_viewports, viewport);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_sampler_views(struct pipe_context *_pipe,
enum pipe_shader_type shader,
unsigned start,
unsigned num,
struct pipe_sampler_view **_views)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_sampler_view *unwrapped_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_sampler_view **views = NULL;
unsigned i;
assert(start == 0); /* XXX fix */
/* must protect curr status */
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rb_pipe->curr.num_views[shader] = 0;
memset(rb_pipe->curr.views[shader], 0, sizeof(rb_pipe->curr.views[shader]));
memset(rb_pipe->curr.texs[shader], 0, sizeof(rb_pipe->curr.texs[shader]));
memset(unwrapped_views, 0, sizeof(unwrapped_views));
if (_views) {
rb_pipe->curr.num_views[shader] = num;
for (i = 0; i < num; i++) {
rb_pipe->curr.views[shader][i] = rbug_sampler_view(_views[i]);
@@ -767,21 +767,21 @@ static void
rbug_set_vertex_buffers(struct pipe_context *_pipe,
unsigned start_slot, unsigned num_buffers,
const struct pipe_vertex_buffer *_buffers)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_vertex_buffer unwrapped_buffers[PIPE_MAX_SHADER_INPUTS];
struct pipe_vertex_buffer *buffers = NULL;
unsigned i;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
if (num_buffers && _buffers) {
memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
for (i = 0; i < num_buffers; i++)
unwrapped_buffers[i].buffer = rbug_resource_unwrap(_buffers[i].buffer);
buffers = unwrapped_buffers;
}
pipe->set_vertex_buffers(pipe, start_slot,
num_buffers,
@@ -797,76 +797,76 @@ rbug_set_index_buffer(struct pipe_context *_pipe,
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_index_buffer unwrapped_ib, *ib = NULL;
if (_ib) {
unwrapped_ib = *_ib;
unwrapped_ib.buffer = rbug_resource_unwrap(_ib->buffer);
ib = &unwrapped_ib;
}
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_index_buffer(pipe, ib);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_sample_mask(struct pipe_context *_pipe,
unsigned sample_mask)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_sample_mask(pipe, sample_mask);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static struct pipe_stream_output_target *
rbug_create_stream_output_target(struct pipe_context *_pipe,
struct pipe_resource *_res,
unsigned buffer_offset, unsigned buffer_size)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *res = rbug_resource_unwrap(_res);
struct pipe_stream_output_target *target;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
target = pipe->create_stream_output_target(pipe, res, buffer_offset,
buffer_size);
pipe_mutex_unlock(rb_pipe->call_mutex);
return target;
}
static void
rbug_stream_output_target_destroy(struct pipe_context *_pipe,
struct pipe_stream_output_target *target)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->stream_output_target_destroy(pipe, target);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_set_stream_output_targets(struct pipe_context *_pipe,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offsets)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->set_stream_output_targets(pipe, num_targets, targets, offsets);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_resource_copy_region(struct pipe_context *_pipe,
struct pipe_resource *_dst,
unsigned dst_level,
unsigned dstx,
unsigned dsty,
@@ -875,21 +875,21 @@ rbug_resource_copy_region(struct pipe_context *_pipe,
unsigned src_level,
const struct pipe_box *src_box)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_resource *rb_resource_dst = rbug_resource(_dst);
struct rbug_resource *rb_resource_src = rbug_resource(_src);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *dst = rb_resource_dst->resource;
struct pipe_resource *src = rb_resource_src->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->resource_copy_region(pipe,
dst,
dst_level,
dstx,
dsty,
dstz,
src,
src_level,
src_box);
pipe_mutex_unlock(rb_pipe->call_mutex);
@@ -903,50 +903,50 @@ rbug_blit(struct pipe_context *_pipe, const struct pipe_blit_info *_blit_info)
struct rbug_resource *rb_resource_src = rbug_resource(_blit_info->src.resource);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *dst = rb_resource_dst->resource;
struct pipe_resource *src = rb_resource_src->resource;
struct pipe_blit_info blit_info;
blit_info = *_blit_info;
blit_info.dst.resource = dst;
blit_info.src.resource = src;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->blit(pipe, &blit_info);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_flush_resource(struct pipe_context *_pipe,
struct pipe_resource *_res)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_resource *rb_resource_res = rbug_resource(_res);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *res = rb_resource_res->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->flush_resource(pipe, res);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_clear(struct pipe_context *_pipe,
unsigned buffers,
const union pipe_color_union *color,
double depth,
unsigned stencil)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->clear(pipe,
buffers,
color,
depth,
stencil);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_clear_render_target(struct pipe_context *_pipe,
@@ -954,21 +954,21 @@ rbug_clear_render_target(struct pipe_context *_pipe,
const union pipe_color_union *color,
unsigned dstx, unsigned dsty,
unsigned width, unsigned height,
bool render_condition_enabled)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_surface *rb_surface_dst = rbug_surface(_dst);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_surface *dst = rb_surface_dst->surface;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->clear_render_target(pipe,
dst,
color,
dstx,
dsty,
width,
height,
render_condition_enabled);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
@@ -981,21 +981,21 @@ rbug_clear_depth_stencil(struct pipe_context *_pipe,
unsigned stencil,
unsigned dstx, unsigned dsty,
unsigned width, unsigned height,
bool render_condition_enabled)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_surface *rb_surface_dst = rbug_surface(_dst);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_surface *dst = rb_surface_dst->surface;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->clear_depth_stencil(pipe,
dst,
clear_flags,
depth,
stencil,
dstx,
dsty,
width,
height,
render_condition_enabled);
@@ -1003,37 +1003,37 @@ rbug_clear_depth_stencil(struct pipe_context *_pipe,
}
static void
rbug_flush(struct pipe_context *_pipe,
struct pipe_fence_handle **fence,
unsigned flags)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
pipe->flush(pipe, fence, flags);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static struct pipe_sampler_view *
rbug_context_create_sampler_view(struct pipe_context *_pipe,
struct pipe_resource *_resource,
const struct pipe_sampler_view *templ)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_resource *rb_resource = rbug_resource(_resource);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
struct pipe_sampler_view *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_sampler_view(pipe,
resource,
templ);
pipe_mutex_unlock(rb_pipe->call_mutex);
if (result)
return rbug_sampler_view_create(rb_pipe, rb_resource, result);
return NULL;
}
@@ -1049,39 +1049,39 @@ static struct pipe_surface *
rbug_context_create_surface(struct pipe_context *_pipe,
struct pipe_resource *_resource,
const struct pipe_surface *surf_tmpl)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_resource *rb_resource = rbug_resource(_resource);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
struct pipe_surface *result;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
result = pipe->create_surface(pipe,
resource,
surf_tmpl);
pipe_mutex_unlock(rb_pipe->call_mutex);
if (result)
return rbug_surface_create(rb_pipe, rb_resource, result);
return NULL;
}
static void
rbug_context_surface_destroy(struct pipe_context *_pipe,
struct pipe_surface *_surface)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct rbug_surface *rb_surface = rbug_surface(_surface);
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
rbug_surface_destroy(rb_pipe,
rb_surface);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void *
rbug_context_transfer_map(struct pipe_context *_context,
struct pipe_resource *_resource,
@@ -1090,21 +1090,21 @@ rbug_context_transfer_map(struct pipe_context *_context,
const struct pipe_box *box,
struct pipe_transfer **transfer)
{
struct rbug_context *rb_pipe = rbug_context(_context);
struct rbug_resource *rb_resource = rbug_resource(_resource);
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
struct pipe_transfer *result;
void *map;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
map = context->transfer_map(context,
resource,
level,
usage,
box, &result);
pipe_mutex_unlock(rb_pipe->call_mutex);
*transfer = rbug_transfer_create(rb_pipe, rb_resource, result);
return *transfer ? map : NULL;
}
@@ -1112,79 +1112,79 @@ rbug_context_transfer_map(struct pipe_context *_context,
static void
rbug_context_transfer_flush_region(struct pipe_context *_context,
struct pipe_transfer *_transfer,
const struct pipe_box *box)
{
struct rbug_context *rb_pipe = rbug_context(_context);
struct rbug_transfer *rb_transfer = rbug_transfer(_transfer);
struct pipe_context *context = rb_pipe->pipe;
struct pipe_transfer *transfer = rb_transfer->transfer;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->transfer_flush_region(context,
transfer,
box);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_context_transfer_unmap(struct pipe_context *_context,
struct pipe_transfer *_transfer)
{
struct rbug_context *rb_pipe = rbug_context(_context);
struct rbug_transfer *rb_transfer = rbug_transfer(_transfer);
struct pipe_context *context = rb_pipe->pipe;
struct pipe_transfer *transfer = rb_transfer->transfer;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->transfer_unmap(context,
transfer);
rbug_transfer_destroy(rb_pipe,
rb_transfer);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_context_buffer_subdata(struct pipe_context *_context,
struct pipe_resource *_resource,
unsigned usage, unsigned offset,
unsigned size, const void *data)
{
struct rbug_context *rb_pipe = rbug_context(_context);
struct rbug_resource *rb_resource = rbug_resource(_resource);
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->buffer_subdata(context, resource, usage, offset, size, data);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
static void
rbug_context_texture_subdata(struct pipe_context *_context,
struct pipe_resource *_resource,
unsigned level,
unsigned usage,
const struct pipe_box *box,
const void *data,
unsigned stride,
unsigned layer_stride)
{
struct rbug_context *rb_pipe = rbug_context(_context);
struct rbug_resource *rb_resource = rbug_resource(_resource);
struct pipe_context *context = rb_pipe->pipe;
struct pipe_resource *resource = rb_resource->resource;
- pipe_mutex_lock(rb_pipe->call_mutex);
+ mtx_lock(&rb_pipe->call_mutex);
context->texture_subdata(context,
resource,
level,
usage,
box,
data,
stride,
layer_stride);
pipe_mutex_unlock(rb_pipe->call_mutex);
}
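
Every hunk in rbug_context.c above has the same mechanical shape: rbug is a serializing proxy, so each pipe_context entry point takes call_mutex, forwards to the wrapped context, and unlocks. Reduced to its essentials:

   #include <threads.h>

   struct real_ctx {
      void (*op)(struct real_ctx *, int);
   };

   struct proxy_ctx {
      mtx_t call_mutex;          /* assumed initialized elsewhere */
      struct real_ctx *real;
   };

   static void proxy_op(struct proxy_ctx *p, int arg)
   {
      mtx_lock(&p->call_mutex);  /* serialize all calls into the real context */
      p->real->op(p->real, arg);
      mtx_unlock(&p->call_mutex);
   }
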
diff --git a/src/gallium/drivers/rbug/rbug_core.c b/src/gallium/drivers/rbug/rbug_core.c
index 3bb781b..323fafe 100644
--- a/src/gallium/drivers/rbug/rbug_core.c
+++ b/src/gallium/drivers/rbug/rbug_core.c
@@ -175,21 +175,21 @@ rbug_shader_delete_locked(struct pipe_context *pipe,
static int
rbug_texture_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_resource *tr_tex = NULL;
struct rbug_list *ptr;
rbug_texture_t *texs;
int i = 0;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
texs = MALLOC(rb_screen->num_resources * sizeof(rbug_texture_t));
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
texs[i++] = VOID2U64(tr_tex);
}
pipe_mutex_unlock(rb_screen->list_mutex);
rbug_send_texture_list_reply(tr_rbug->con, serial, texs, i, NULL);
FREE(texs);
@@ -199,21 +199,21 @@ rbug_texture_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
static int
rbug_texture_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_resource *tr_tex = NULL;
struct rbug_proto_texture_info *gpti = (struct rbug_proto_texture_info *)header;
struct rbug_list *ptr;
struct pipe_resource *t;
unsigned num_layers;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
if (gpti->texture == VOID2U64(tr_tex))
break;
tr_tex = NULL;
}
if (!tr_tex) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
@@ -248,21 +248,21 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_resource *tr_tex = NULL;
struct rbug_list *ptr;
struct pipe_context *context = rb_screen->private_context;
struct pipe_resource *tex;
struct pipe_transfer *t;
void *map;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
foreach(ptr, &rb_screen->resources) {
tr_tex = container_of(ptr, struct rbug_resource, list);
if (gptr->texture == VOID2U64(tr_tex))
break;
tr_tex = NULL;
}
if (!tr_tex) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
@@ -294,21 +294,21 @@ rbug_texture_read(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
static int
rbug_context_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_list *ptr;
struct rbug_context *rb_context = NULL;
rbug_context_t *ctxs;
int i = 0;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
ctxs = MALLOC(rb_screen->num_contexts * sizeof(rbug_context_t));
foreach(ptr, &rb_screen->contexts) {
rb_context = container_of(ptr, struct rbug_context, list);
ctxs[i++] = VOID2U64(rb_context);
}
pipe_mutex_unlock(rb_screen->list_mutex);
rbug_send_context_list_reply(tr_rbug->con, serial, ctxs, i, NULL);
FREE(ctxs);
@@ -319,31 +319,31 @@ static int
rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_context_info *info = (struct rbug_proto_context_info *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
rbug_texture_t cbufs[PIPE_MAX_COLOR_BUFS];
rbug_texture_t texs[PIPE_MAX_SHADER_SAMPLER_VIEWS];
unsigned i;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
/* protect the pipe context */
- pipe_mutex_lock(rb_context->draw_mutex);
- pipe_mutex_lock(rb_context->call_mutex);
+ mtx_lock(&rb_context->draw_mutex);
+ mtx_lock(&rb_context->call_mutex);
for (i = 0; i < rb_context->curr.nr_cbufs; i++)
cbufs[i] = VOID2U64(rb_context->curr.cbufs[i]);
/* XXX what about vertex/geometry shader texture views? */
for (i = 0; i < rb_context->curr.num_views[PIPE_SHADER_FRAGMENT]; i++)
texs[i] = VOID2U64(rb_context->curr.texs[PIPE_SHADER_FRAGMENT][i]);
rbug_send_context_info_reply(tr_rbug->con, serial,
VOID2U64(rb_context->curr.shader[PIPE_SHADER_VERTEX]), VOID2U64(rb_context->curr.shader[PIPE_SHADER_FRAGMENT]),
@@ -360,54 +360,54 @@ rbug_context_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_
}
static int
rbug_context_draw_block(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_context_draw_block *block = (struct rbug_proto_context_draw_block *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, block->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
rb_context->draw_blocker |= block->block;
pipe_mutex_unlock(rb_context->draw_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
return 0;
}
static int
rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_context_draw_step *step = (struct rbug_proto_context_draw_step *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, step->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
if (step->step & RBUG_BLOCK_RULE)
rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
} else {
rb_context->draw_blocked &= ~step->step;
}
pipe_mutex_unlock(rb_context->draw_mutex);
cnd_broadcast(&rb_context->draw_cond);
@@ -417,29 +417,29 @@ rbug_context_draw_step(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
}
static int
rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_context_draw_unblock *unblock = (struct rbug_proto_context_draw_unblock *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, unblock->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
if (rb_context->draw_blocked & RBUG_BLOCK_RULE) {
if (unblock->unblock & RBUG_BLOCK_RULE)
rb_context->draw_blocked &= ~RBUG_BLOCK_MASK;
} else {
rb_context->draw_blocked &= ~unblock->unblock;
}
rb_context->draw_blocker &= ~unblock->unblock;
pipe_mutex_unlock(rb_context->draw_mutex);
cnd_broadcast(&rb_context->draw_cond);
@@ -450,29 +450,29 @@ rbug_context_draw_unblock(struct rbug_rbug *tr_rbug, struct rbug_header *header,
}
static int
rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_context_draw_rule *rule = (struct rbug_proto_context_draw_rule *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, rule->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->draw_mutex);
+ mtx_lock(&rb_context->draw_mutex);
rb_context->draw_rule.shader[PIPE_SHADER_VERTEX] = U642VOID(rule->vertex);
rb_context->draw_rule.shader[PIPE_SHADER_FRAGMENT] = U642VOID(rule->fragment);
rb_context->draw_rule.texture = U642VOID(rule->texture);
rb_context->draw_rule.surf = U642VOID(rule->surface);
rb_context->draw_rule.blocker = rule->block;
rb_context->draw_blocker |= RBUG_BLOCK_RULE;
pipe_mutex_unlock(rb_context->draw_mutex);
cnd_broadcast(&rb_context->draw_cond);
@@ -482,30 +482,30 @@ rbug_context_draw_rule(struct rbug_rbug *tr_rbug, struct rbug_header *header, ui
}
static int
rbug_context_flush(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_context_flush *flush = (struct rbug_proto_context_flush *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, flush->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
/* protect the pipe context */
- pipe_mutex_lock(rb_context->call_mutex);
+ mtx_lock(&rb_context->call_mutex);
rb_context->pipe->flush(rb_context->pipe, NULL, 0);
pipe_mutex_unlock(rb_context->call_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
return 0;
}
static int
@@ -513,29 +513,29 @@ rbug_shader_list(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
{
struct rbug_proto_shader_list *list = (struct rbug_proto_shader_list *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
struct rbug_shader *tr_shdr = NULL;
struct rbug_list *ptr;
rbug_shader_t *shdrs;
int i = 0;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, list->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
shdrs = MALLOC(rb_context->num_shaders * sizeof(rbug_shader_t));
foreach(ptr, &rb_context->shaders) {
tr_shdr = container_of(ptr, struct rbug_shader, list);
shdrs[i++] = VOID2U64(tr_shdr);
}
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
rbug_send_shader_list_reply(tr_rbug->con, serial, shdrs, i, NULL);
@@ -548,29 +548,29 @@ static int
rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
{
struct rbug_proto_shader_info *info = (struct rbug_proto_shader_info *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
struct rbug_shader *tr_shdr = NULL;
unsigned original_len;
unsigned replaced_len;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, info->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, info->shader);
if (!tr_shdr) {
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
/* just in case */
@@ -596,29 +596,29 @@ rbug_shader_info(struct rbug_rbug *tr_rbug, struct rbug_header *header, uint32_t
static int
rbug_shader_disable(struct rbug_rbug *tr_rbug, struct rbug_header *header)
{
struct rbug_proto_shader_disable *dis = (struct rbug_proto_shader_disable *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
struct rbug_shader *tr_shdr = NULL;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, dis->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, dis->shader);
if (!tr_shdr) {
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
tr_shdr->disabled = dis->disable;
@@ -633,40 +633,40 @@ static int
rbug_shader_replace(struct rbug_rbug *tr_rbug, struct rbug_header *header)
{
struct rbug_proto_shader_replace *rep = (struct rbug_proto_shader_replace *)header;
struct rbug_screen *rb_screen = tr_rbug->rb_screen;
struct rbug_context *rb_context = NULL;
struct rbug_shader *tr_shdr = NULL;
struct pipe_context *pipe = NULL;
void *state;
- pipe_mutex_lock(rb_screen->list_mutex);
+ mtx_lock(&rb_screen->list_mutex);
rb_context = rbug_get_context_locked(rb_screen, rep->context);
if (!rb_context) {
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
- pipe_mutex_lock(rb_context->list_mutex);
+ mtx_lock(&rb_context->list_mutex);
tr_shdr = rbug_get_shader_locked(rb_context, rep->shader);
if (!tr_shdr) {
pipe_mutex_unlock(rb_context->list_mutex);
pipe_mutex_unlock(rb_screen->list_mutex);
return -ESRCH;
}
/* protect the pipe context */
- pipe_mutex_lock(rb_context->call_mutex);
+ mtx_lock(&rb_context->call_mutex);
pipe = rb_context->pipe;
/* remove old replaced shader */
if (tr_shdr->replaced_shader) {
/* if this shader is bound rebind the original shader */
if (rb_context->curr.shader[PIPE_SHADER_FRAGMENT] == tr_shdr || rb_context->curr.shader[PIPE_SHADER_VERTEX] == tr_shdr)
rbug_shader_bind_locked(pipe, tr_shdr, tr_shdr->shader);
FREE(tr_shdr->replaced_tokens);
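
Why the mechanical conversion has to add the address-of operator: pipe_mutex_lock() took the mutex object by name and expanded to a call on its address. From memory, the removed os_thread.h wrapper looked approximately like the macro below, so the old and new spellings compile to the same thing:

#include <threads.h>

/* approximate shape of the removed os_thread.h wrapper */
#define pipe_mutex_lock(mutex) \
   (void) mtx_lock(&(mutex))

struct rb_screen_sk { mtx_t list_mutex; };

static void
example(struct rb_screen_sk *rb_screen)
{
   pipe_mutex_lock(rb_screen->list_mutex);   /* old spelling */
   mtx_unlock(&rb_screen->list_mutex);

   mtx_lock(&rb_screen->list_mutex);         /* new spelling */
   mtx_unlock(&rb_screen->list_mutex);
}
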
diff --git a/src/gallium/drivers/rbug/rbug_screen.h b/src/gallium/drivers/rbug/rbug_screen.h
index 67e2876..9e2d8ae 100644
--- a/src/gallium/drivers/rbug/rbug_screen.h
+++ b/src/gallium/drivers/rbug/rbug_screen.h
@@ -61,29 +61,29 @@ struct rbug_screen
};
static inline struct rbug_screen *
rbug_screen(struct pipe_screen *screen)
{
return (struct rbug_screen *)screen;
}
#define rbug_screen_add_to_list(scr, name, obj) \
do { \
- pipe_mutex_lock(scr->list_mutex); \
+ mtx_lock(&scr->list_mutex); \
insert_at_head(&scr->name, &obj->list); \
scr->num_##name++; \
pipe_mutex_unlock(scr->list_mutex); \
} while (0)
#define rbug_screen_remove_from_list(scr, name, obj) \
do { \
- pipe_mutex_lock(scr->list_mutex); \
+ mtx_lock(&scr->list_mutex); \
remove_from_list(&obj->list); \
scr->num_##name--; \
pipe_mutex_unlock(scr->list_mutex); \
} while (0)
/**********************************************************
* rbug_core.c
*/
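
The rbug_screen list helpers above keep the lock inside a do { } while (0) body so each macro expands to a single statement and stays safe in unbraced if/else arms. A self-contained sketch of the same construction:

#include <assert.h>
#include <threads.h>

struct counted_list {
   mtx_t mutex;
   int num;
};

/* do { } while (0) makes the expansion behave like one statement */
#define LIST_BUMP(l)               \
   do {                            \
      mtx_lock(&(l)->mutex);       \
      (l)->num++;                  \
      mtx_unlock(&(l)->mutex);     \
   } while (0)

int
main(void)
{
   struct counted_list l;
   mtx_init(&l.mutex, mtx_plain);
   l.num = 0;
   if (l.num == 0)
      LIST_BUMP(&l);               /* safe even without braces */
   assert(l.num == 1);
   mtx_destroy(&l.mutex);
   return 0;
}
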
diff --git a/src/gallium/drivers/svga/svga_resource_buffer.c b/src/gallium/drivers/svga/svga_resource_buffer.c
index 99ed1a2..05e91cb 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer.c
@@ -289,37 +289,37 @@ svga_buffer_transfer_flush_region( struct pipe_context *pipe,
{
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_buffer *sbuf = svga_buffer(transfer->resource);
unsigned offset = transfer->box.x + box->x;
unsigned length = box->width;
assert(transfer->usage & PIPE_TRANSFER_WRITE);
assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
- pipe_mutex_lock(ss->swc_mutex);
+ mtx_lock(&ss->swc_mutex);
svga_buffer_add_range(sbuf, offset, offset + length);
pipe_mutex_unlock(ss->swc_mutex);
}
static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
struct pipe_transfer *transfer )
{
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_context *svga = svga_context(pipe);
struct svga_buffer *sbuf = svga_buffer(transfer->resource);
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);
- pipe_mutex_lock(ss->swc_mutex);
+ mtx_lock(&ss->swc_mutex);
assert(sbuf->map.count);
if (sbuf->map.count) {
--sbuf->map.count;
}
if (svga_buffer_has_hw_storage(sbuf)) {
svga_buffer_hw_storage_unmap(svga, sbuf);
}
diff --git a/src/gallium/drivers/svga/svga_resource_buffer_upload.c b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
index b327a16..e41f475 100644
--- a/src/gallium/drivers/svga/svga_resource_buffer_upload.c
+++ b/src/gallium/drivers/svga/svga_resource_buffer_upload.c
@@ -634,21 +634,21 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
unsigned i;
assert(sbuf->swbuf);
if (!sbuf->swbuf)
return PIPE_ERROR;
ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf);
if (ret != PIPE_OK)
return ret;
- pipe_mutex_lock(ss->swc_mutex);
+ mtx_lock(&ss->swc_mutex);
map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
assert(map);
assert(!retry);
if (!map) {
pipe_mutex_unlock(ss->swc_mutex);
svga_buffer_destroy_hw_storage(ss, sbuf);
return PIPE_ERROR;
}
/* Copy data from malloc'd swbuf to the new hardware buffer */
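
Note how svga_buffer_update_hw() keeps its early exit honest: the failure branch unlocks swc_mutex before returning, so the conversion cannot leave a lock stuck. The shape, as a small sketch:

#include <stddef.h>
#include <threads.h>

static void *
hw_storage_map(void)
{
   return NULL;                  /* simulate a map failure */
}

static int
update_hw(mtx_t *swc_mutex)
{
   void *map;

   mtx_lock(swc_mutex);
   map = hw_storage_map();       /* stand-in for the HW map call */
   if (!map) {
      mtx_unlock(swc_mutex);     /* drop the lock on failure too */
      return -1;                 /* PIPE_ERROR in the real code */
   }
   /* ... copy swbuf contents into the mapped storage ... */
   mtx_unlock(swc_mutex);
   return 0;
}
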
diff --git a/src/gallium/drivers/svga/svga_sampler_view.c b/src/gallium/drivers/svga/svga_sampler_view.c
index d43f8cc..053cfc5 100644
--- a/src/gallium/drivers/svga/svga_sampler_view.c
+++ b/src/gallium/drivers/svga/svga_sampler_view.c
@@ -84,21 +84,21 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
if (ss->debug.no_sampler_view)
view = FALSE;
if (ss->debug.force_sampler_view)
view = TRUE;
}
/* First try the cache */
if (view) {
- pipe_mutex_lock(ss->tex_mutex);
+ mtx_lock(&ss->tex_mutex);
if (tex->cached_view &&
tex->cached_view->min_lod == min_lod &&
tex->cached_view->max_lod == max_lod) {
svga_sampler_view_reference(&sv, tex->cached_view);
pipe_mutex_unlock(ss->tex_mutex);
SVGA_DBG(DEBUG_VIEWS, "svga: Sampler view: reuse %p, %u %u, last %u\n",
pt, min_lod, max_lod, pt->last_level);
svga_validate_sampler_view(svga_context(pipe), sv);
return sv;
}
@@ -156,21 +156,21 @@ svga_get_tex_sampler_view(struct pipe_context *pipe,
if (!sv->handle) {
sv->key.cachable = 0;
sv->handle = tex->handle;
debug_reference(&sv->reference,
(debug_reference_descriptor)
svga_debug_describe_sampler_view, 0);
return sv;
}
- pipe_mutex_lock(ss->tex_mutex);
+ mtx_lock(&ss->tex_mutex);
svga_sampler_view_reference(&tex->cached_view, sv);
pipe_mutex_unlock(ss->tex_mutex);
debug_reference(&sv->reference,
(debug_reference_descriptor)
svga_debug_describe_sampler_view, 0);
return sv;
}
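
svga_get_tex_sampler_view() holds tex_mutex only around the shared cached_view state, on both the lookup and the store. A sketch of the lookup half; the real code also takes a pipe reference on the view before dropping the lock, which this sketch only notes in a comment:

#include <stddef.h>
#include <threads.h>

struct view_sk { unsigned min_lod, max_lod; };

struct tex_sk {
   mtx_t tex_mutex;
   struct view_sk *cached_view;
};

static struct view_sk *
cached_lookup(struct tex_sk *tex, unsigned min_lod, unsigned max_lod)
{
   struct view_sk *sv = NULL;

   mtx_lock(&tex->tex_mutex);
   if (tex->cached_view &&
       tex->cached_view->min_lod == min_lod &&
       tex->cached_view->max_lod == max_lod)
      sv = tex->cached_view;     /* reference it before unlocking */
   mtx_unlock(&tex->tex_mutex);
   return sv;
}
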
diff --git a/src/gallium/drivers/svga/svga_screen_cache.c b/src/gallium/drivers/svga/svga_screen_cache.c
index c14996d..55f9426 100644
--- a/src/gallium/drivers/svga/svga_screen_cache.c
+++ b/src/gallium/drivers/svga/svga_screen_cache.c
@@ -97,21 +97,21 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
struct svga_host_surface_cache_entry *entry;
struct svga_winsys_surface *handle = NULL;
struct list_head *curr, *next;
unsigned bucket;
unsigned tries = 0;
assert(key->cachable);
bucket = svga_screen_cache_bucket(key);
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
curr = cache->bucket[bucket].next;
next = curr->next;
while (curr != &cache->bucket[bucket]) {
++tries;
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, bucket_head);
assert(entry->handle);
@@ -219,21 +219,21 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
unsigned surf_size;
assert(key->cachable);
if (!handle)
return;
surf_size = surface_size(key);
*p_handle = NULL;
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
/* this surface is too large to cache, just free it */
sws->surface_reference(sws, &handle, NULL);
pipe_mutex_unlock(cache->mutex);
return;
}
if (cache->total_size + surf_size > SVGA_HOST_SURFACE_CACHE_BYTES) {
/* Adding this surface would exceed the cache size.
@@ -311,21 +311,21 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
void
svga_screen_cache_flush(struct svga_screen *svgascreen,
struct pipe_fence_handle *fence)
{
struct svga_host_surface_cache *cache = &svgascreen->cache;
struct svga_winsys_screen *sws = svgascreen->sws;
struct svga_host_surface_cache_entry *entry;
struct list_head *curr, *next;
unsigned bucket;
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
/* Loop over entries in the invalidated list */
curr = cache->invalidated.next;
next = curr->next;
while (curr != &cache->invalidated) {
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
assert(entry->handle);
if (sws->surface_is_flushed(sws, entry->handle)) {
diff --git a/src/gallium/drivers/trace/tr_dump.c b/src/gallium/drivers/trace/tr_dump.c
index b052e2a..2df4f83 100644
--- a/src/gallium/drivers/trace/tr_dump.c
+++ b/src/gallium/drivers/trace/tr_dump.c
@@ -295,21 +295,21 @@ boolean trace_dump_trace_enabled(void)
{
return stream ? TRUE : FALSE;
}
/*
* Call lock
*/
void trace_dump_call_lock(void)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
}
void trace_dump_call_unlock(void)
{
pipe_mutex_unlock(call_mutex);
}
/*
* Dumping control
*/
@@ -324,36 +324,36 @@ void trace_dumping_stop_locked(void)
dumping = FALSE;
}
boolean trace_dumping_enabled_locked(void)
{
return dumping;
}
void trace_dumping_start(void)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
trace_dumping_start_locked();
pipe_mutex_unlock(call_mutex);
}
void trace_dumping_stop(void)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
trace_dumping_stop_locked();
pipe_mutex_unlock(call_mutex);
}
boolean trace_dumping_enabled(void)
{
boolean ret;
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
ret = trace_dumping_enabled_locked();
pipe_mutex_unlock(call_mutex);
return ret;
}
/*
* Dump functions
*/
static int64_t call_start_time = 0;
@@ -388,21 +388,21 @@ void trace_dump_call_end_locked(void)
trace_dump_call_time(call_end_time - call_start_time);
trace_dump_indent(1);
trace_dump_tag_end("call");
trace_dump_newline();
fflush(stream);
}
void trace_dump_call_begin(const char *klass, const char *method)
{
- pipe_mutex_lock(call_mutex);
+ mtx_lock(&call_mutex);
trace_dump_call_begin_locked(klass, method);
}
void trace_dump_call_end(void)
{
trace_dump_call_end_locked();
pipe_mutex_unlock(call_mutex);
}
void trace_dump_arg_begin(const char *name)
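
Two conventions are visible in tr_dump.c: the _locked suffix marks functions that expect call_mutex to be held by the caller, and trace_dump_call_begin()/trace_dump_call_end() deliberately span the lock across the whole call dump, acquiring in begin and releasing in end. A sketch of the first convention:

#include <stdbool.h>
#include <threads.h>

static mtx_t call_mutex;
static bool dumping;

/* _locked variant: caller must already hold call_mutex */
static void
dumping_stop_locked(void)
{
   dumping = false;
}

static void
dumping_stop(void)
{
   mtx_lock(&call_mutex);
   dumping_stop_locked();
   mtx_unlock(&call_mutex);
}

int
main(void)
{
   mtx_init(&call_mutex, mtx_plain);
   dumping_stop();
   mtx_destroy(&call_mutex);
   return 0;
}
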
diff --git a/src/gallium/drivers/vc4/vc4_bufmgr.c b/src/gallium/drivers/vc4/vc4_bufmgr.c
index c0ff531..c46e564 100644
--- a/src/gallium/drivers/vc4/vc4_bufmgr.c
+++ b/src/gallium/drivers/vc4/vc4_bufmgr.c
@@ -90,21 +90,21 @@ vc4_bo_remove_from_cache(struct vc4_bo_cache *cache, struct vc4_bo *bo)
static struct vc4_bo *
vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
{
struct vc4_bo_cache *cache = &screen->bo_cache;
uint32_t page_index = size / 4096 - 1;
if (cache->size_list_size <= page_index)
return NULL;
struct vc4_bo *bo = NULL;
- pipe_mutex_lock(cache->lock);
+ mtx_lock(&cache->lock);
if (!list_empty(&cache->size_list[page_index])) {
bo = LIST_ENTRY(struct vc4_bo, cache->size_list[page_index].next,
size_list);
/* Check that the BO has gone idle. If not, then we want to
* allocate something new instead, since we assume that the
* user will proceed to CPU map it and fill it with stuff.
*/
if (!vc4_bo_wait(bo, 0, NULL)) {
pipe_mutex_unlock(cache->lock);
@@ -181,21 +181,21 @@ vc4_bo_alloc(struct vc4_screen *screen, uint32_t size, const char *name)
return bo;
}
void
vc4_bo_last_unreference(struct vc4_bo *bo)
{
struct vc4_screen *screen = bo->screen;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
- pipe_mutex_lock(screen->bo_cache.lock);
+ mtx_lock(&screen->bo_cache.lock);
vc4_bo_last_unreference_locked_timed(bo, time.tv_sec);
pipe_mutex_unlock(screen->bo_cache.lock);
}
static void
vc4_bo_free(struct vc4_bo *bo)
{
struct vc4_screen *screen = bo->screen;
if (bo->map) {
@@ -254,21 +254,21 @@ free_stale_bos(struct vc4_screen *screen, time_t time)
if (dump_stats && freed_any) {
fprintf(stderr, "Freed stale BOs:\n");
vc4_bo_dump_stats(screen);
}
}
static void
vc4_bo_cache_free_all(struct vc4_bo_cache *cache)
{
- pipe_mutex_lock(cache->lock);
+ mtx_lock(&cache->lock);
list_for_each_entry_safe(struct vc4_bo, bo, &cache->time_list,
time_list) {
vc4_bo_remove_from_cache(cache, bo);
vc4_bo_free(bo);
}
pipe_mutex_unlock(cache->lock);
}
void
vc4_bo_last_unreference_locked_timed(struct vc4_bo *bo, time_t time)
@@ -315,21 +315,21 @@ vc4_bo_last_unreference_locked_timed(struct vc4_bo *bo, time_t time)
static struct vc4_bo *
vc4_bo_open_handle(struct vc4_screen *screen,
uint32_t winsys_stride,
uint32_t handle, uint32_t size)
{
struct vc4_bo *bo;
assert(size);
- pipe_mutex_lock(screen->bo_handles_mutex);
+ mtx_lock(&screen->bo_handles_mutex);
bo = util_hash_table_get(screen->bo_handles, (void*)(uintptr_t)handle);
if (bo) {
pipe_reference(NULL, &bo->reference);
goto done;
}
bo = CALLOC_STRUCT(vc4_bo);
pipe_reference_init(&bo->reference, 1);
bo->screen = screen;
@@ -394,21 +394,21 @@ vc4_bo_get_dmabuf(struct vc4_bo *bo)
{
int fd;
int ret = drmPrimeHandleToFD(bo->screen->fd, bo->handle,
O_CLOEXEC, &fd);
if (ret != 0) {
fprintf(stderr, "Failed to export gem bo %d to dmabuf\n",
bo->handle);
return -1;
}
- pipe_mutex_lock(bo->screen->bo_handles_mutex);
+ mtx_lock(&bo->screen->bo_handles_mutex);
bo->private = false;
util_hash_table_set(bo->screen->bo_handles, (void *)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(bo->screen->bo_handles_mutex);
return fd;
}
struct vc4_bo *
vc4_bo_alloc_shader(struct vc4_screen *screen, const void *data, uint32_t size)
{
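
vc4_bo_open_handle() is the lookup-or-create shape: everything between the handle-table lookup and the insert happens under bo_handles_mutex, with a single unlock at the done label. Sketched below with a trivial array standing in for util_hash_table:

#include <stdint.h>
#include <stdlib.h>
#include <threads.h>

struct bo_sk { int refcount; uint32_t handle; };

struct screen_sk {
   mtx_t bo_handles_mutex;
   struct bo_sk *table[64];      /* stand-in for util_hash_table */
};

static struct bo_sk *
open_handle(struct screen_sk *s, uint32_t handle)
{
   struct bo_sk *bo;

   mtx_lock(&s->bo_handles_mutex);
   bo = s->table[handle % 64];
   if (bo) {
      bo->refcount++;            /* pipe_reference() in the real code */
      goto done;
   }
   bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->refcount = 1;
      bo->handle = handle;
      s->table[handle % 64] = bo;
   }
done:
   mtx_unlock(&s->bo_handles_mutex);
   return bo;
}
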
diff --git a/src/gallium/drivers/vc4/vc4_bufmgr.h b/src/gallium/drivers/vc4/vc4_bufmgr.h
index bcabfd2..e996d0c 100644
--- a/src/gallium/drivers/vc4/vc4_bufmgr.h
+++ b/src/gallium/drivers/vc4/vc4_bufmgr.h
@@ -86,21 +86,21 @@ vc4_bo_unreference(struct vc4_bo **bo)
struct vc4_screen *screen;
if (!*bo)
return;
if ((*bo)->private) {
/* Avoid the mutex for private BOs */
if (pipe_reference(&(*bo)->reference, NULL))
vc4_bo_last_unreference(*bo);
} else {
screen = (*bo)->screen;
- pipe_mutex_lock(screen->bo_handles_mutex);
+ mtx_lock(&screen->bo_handles_mutex);
if (pipe_reference(&(*bo)->reference, NULL)) {
util_hash_table_remove(screen->bo_handles,
(void *)(uintptr_t)(*bo)->handle);
vc4_bo_last_unreference(*bo);
}
pipe_mutex_unlock(screen->bo_handles_mutex);
}
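
This hunk also shows the one place vc4 deliberately skips the mutex: private BOs can never be reached through the handle table, so only shared BOs pay for bo_handles_mutex on unreference. A simplified sketch; the real code uses pipe_reference(), which is an atomic count, where this sketch uses a plain integer and so only models the single-threaded shape:

#include <stdbool.h>
#include <threads.h>

struct bo_sk2 {
   int refcount;                 /* atomic in the real code */
   bool private;                 /* never in the handle table */
};

static mtx_t bo_handles_mutex;

static void
last_unreference(struct bo_sk2 *bo)
{
   (void)bo;                     /* return to cache or free */
}

static void
bo_unreference(struct bo_sk2 *bo)
{
   if (bo->private) {
      /* no handle-table race is possible: skip the mutex */
      if (--bo->refcount == 0)
         last_unreference(bo);
   } else {
      mtx_lock(&bo_handles_mutex);
      if (--bo->refcount == 0) {
         /* remove from the handle table, then release */
         last_unreference(bo);
      }
      mtx_unlock(&bo_handles_mutex);
   }
}
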
diff --git a/src/gallium/state_trackers/dri/dri2.c b/src/gallium/state_trackers/dri/dri2.c
index 1ad15b5..da663b2 100644
--- a/src/gallium/state_trackers/dri/dri2.c
+++ b/src/gallium/state_trackers/dri/dri2.c
@@ -1424,21 +1424,21 @@ dri2_is_opencl_interop_loaded_locked(struct dri_screen *screen)
screen->opencl_dri_event_wait &&
screen->opencl_dri_event_get_fence;
}
static bool
dri2_load_opencl_interop(struct dri_screen *screen)
{
#if defined(RTLD_DEFAULT)
bool success;
- pipe_mutex_lock(screen->opencl_func_mutex);
+ mtx_lock(&screen->opencl_func_mutex);
if (dri2_is_opencl_interop_loaded_locked(screen)) {
pipe_mutex_unlock(screen->opencl_func_mutex);
return true;
}
screen->opencl_dri_event_add_ref =
dlsym(RTLD_DEFAULT, "opencl_dri_event_add_ref");
screen->opencl_dri_event_release =
dlsym(RTLD_DEFAULT, "opencl_dri_event_release");
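
dri2_load_opencl_interop() is lazy one-shot initialization done the simple way: both the already-loaded check and the dlsym() calls sit under opencl_func_mutex, so no double-checked-locking subtleties arise. A compilable sketch reduced to a single symbol (the symbol name is taken from the surrounding code; error handling trimmed):

#define _GNU_SOURCE               /* for RTLD_DEFAULT on glibc */
#include <dlfcn.h>
#include <stdbool.h>
#include <threads.h>

static mtx_t opencl_func_mutex;
static void *event_wait_fn;

static bool
loaded_locked(void)
{
   return event_wait_fn != NULL;
}

static bool
load_interop(void)
{
   bool success;

   mtx_lock(&opencl_func_mutex);
   if (loaded_locked()) {
      mtx_unlock(&opencl_func_mutex);
      return true;
   }
   event_wait_fn = dlsym(RTLD_DEFAULT, "opencl_dri_event_wait");
   success = loaded_locked();
   mtx_unlock(&opencl_func_mutex);
   return success;
}
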
diff --git a/src/gallium/state_trackers/glx/xlib/xm_api.c b/src/gallium/state_trackers/glx/xlib/xm_api.c
index 92d2cec..86bb1c4 100644
--- a/src/gallium/state_trackers/glx/xlib/xm_api.c
+++ b/src/gallium/state_trackers/glx/xlib/xm_api.c
@@ -190,21 +190,21 @@ static XMesaDisplay
xmesa_init_display( Display *display )
{
static mtx_t init_mutex = _MTX_INITIALIZER_NP;
XMesaDisplay xmdpy;
XMesaExtDisplayInfo *info;
if (display == NULL) {
return NULL;
}
- pipe_mutex_lock(init_mutex);
+ mtx_lock(&init_mutex);
/* Look for XMesaDisplay which corresponds to this display */
info = MesaExtInfo.head;
while(info) {
if (info->display == display) {
/* Found it */
pipe_mutex_unlock(init_mutex);
return &info->mesaDisplay;
}
info = info->next;
@@ -365,21 +365,21 @@ get_drawable_size( Display *dpy, Drawable d, uint *width, uint *height )
* \param width returns width in pixels
* \param height returns height in pixels
*/
void
xmesa_get_window_size(Display *dpy, XMesaBuffer b,
GLuint *width, GLuint *height)
{
XMesaDisplay xmdpy = xmesa_init_display(dpy);
Status stat;
- pipe_mutex_lock(xmdpy->mutex);
+ mtx_lock(&xmdpy->mutex);
stat = get_drawable_size(dpy, b->ws.drawable, width, height);
pipe_mutex_unlock(xmdpy->mutex);
if (!stat) {
/* probably querying a window that's recently been destroyed */
_mesa_warning(NULL, "XGetGeometry failed!\n");
*width = *height = 1;
}
}
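
xmesa_init_display() depends on _MTX_INITIALIZER_NP, a non-portable static initializer Mesa layers over C11 threads, because the standard defines no static initializer for mtx_t. The strictly portable equivalent is call_once(), sketched here:

#include <threads.h>

static mtx_t init_mutex;
static once_flag init_once = ONCE_FLAG_INIT;

static void
init_mutex_once(void)
{
   mtx_init(&init_mutex, mtx_plain);
}

static void
enter_init(void)
{
   call_once(&init_once, init_mutex_once);  /* portable one-time init */
   mtx_lock(&init_mutex);
   /* ... look up or create the per-display state ... */
   mtx_unlock(&init_mutex);
}
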
diff --git a/src/gallium/state_trackers/hgl/hgl.c b/src/gallium/state_trackers/hgl/hgl.c
index 0e122fe..1b70281 100644
--- a/src/gallium/state_trackers/hgl/hgl.c
+++ b/src/gallium/state_trackers/hgl/hgl.c
@@ -59,21 +59,21 @@ static boolean
hgl_st_framebuffer_flush_front(struct st_context_iface *stctxi,
struct st_framebuffer_iface* stfbi, enum st_attachment_type statt)
{
CALLED();
//struct hgl_context* context = hgl_st_context(stctxi);
//struct hgl_buffer* buffer = hgl_st_context(stfbi);
#if 0
struct stw_st_framebuffer *stwfb = stw_st_framebuffer(stfb);
- pipe_mutex_lock(stwfb->fb->mutex);
+ mtx_lock(&stwfb->fb->mutex);
struct pipe_resource* resource = textures[statt];
if (resource)
stw_framebuffer_present_locked(...);
#endif
return TRUE;
}
diff --git a/src/gallium/state_trackers/nine/nine_lock.c b/src/gallium/state_trackers/nine/nine_lock.c
index 5b53559..0ac0cd7 100644
--- a/src/gallium/state_trackers/nine/nine_lock.c
+++ b/src/gallium/state_trackers/nine/nine_lock.c
@@ -46,86 +46,86 @@
#include "nine_lock.h"
#include "os/os_thread.h"
/* Global mutex as described by MSDN */
static mtx_t d3dlock_global = _MTX_INITIALIZER_NP;
void
NineLockGlobalMutex()
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
}
void
NineUnlockGlobalMutex()
{
pipe_mutex_unlock(d3dlock_global);
}
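
Everything below in nine_lock.c is one pattern repeated per D3D9 entry point: take the single global lock, forward to the real implementation, unlock, return the result. This is the global serialization MSDN describes for thread-safe devices. In sketch form, with a plain long standing in for HRESULT:

#include <threads.h>

static mtx_t d3dlock;            /* stands in for d3dlock_global */

static long
real_begin_scene(void *device)
{
   (void)device;
   return 0;                     /* D3D_OK */
}

static long
lock_begin_scene(void *device)
{
   long r;

   mtx_lock(&d3dlock);           /* serialize the whole API */
   r = real_begin_scene(device);
   mtx_unlock(&d3dlock);
   return r;
}
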
static HRESULT NINE_WINAPI
LockAuthenticatedChannel9_GetCertificateSize( struct NineAuthenticatedChannel9 *This,
UINT *pCertificateSize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_GetCertificateSize(This, pCertificateSize);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockAuthenticatedChannel9_GetCertificate( struct NineAuthenticatedChannel9 *This,
UINT CertifacteSize,
BYTE *ppCertificate )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_GetCertificate(This, CertifacteSize, ppCertificate);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockAuthenticatedChannel9_NegotiateKeyExchange( struct NineAuthenticatedChannel9 *This,
UINT DataSize,
void *pData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_NegotiateKeyExchange(This, DataSize, pData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockAuthenticatedChannel9_Query( struct NineAuthenticatedChannel9 *This,
UINT InputSize,
const void *pInput,
UINT OutputSize,
void *pOutput )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_Query(This, InputSize, pInput, OutputSize, pOutput);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockAuthenticatedChannel9_Configure( struct NineAuthenticatedChannel9 *This,
UINT InputSize,
const void *pInput,
D3DAUTHENTICATEDCHANNEL_CONFIGURE_OUTPUT *pOutput )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineAuthenticatedChannel9_Configure(This, InputSize, pInput, pOutput);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DAuthenticatedChannel9Vtbl LockAuthenticatedChannel9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockAuthenticatedChannel9_GetCertificateSize,
@@ -136,280 +136,280 @@ IDirect3DAuthenticatedChannel9Vtbl LockAuthenticatedChannel9_vtable = {
};
static HRESULT NINE_WINAPI
LockUnknown_SetPrivateData( struct NineUnknown *This,
REFGUID refguid,
const void *pData,
DWORD SizeOfData,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_SetPrivateData(This, refguid, pData, SizeOfData, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockUnknown_GetPrivateData( struct NineUnknown *This,
REFGUID refguid,
void *pData,
DWORD *pSizeOfData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetPrivateData(This, refguid, pData, pSizeOfData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockUnknown_FreePrivateData( struct NineUnknown *This,
REFGUID refguid )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_FreePrivateData(This, refguid);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockResource9_GetDevice( struct NineResource9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static DWORD NINE_WINAPI
LockResource9_SetPriority( struct NineResource9 *This,
DWORD PriorityNew )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineResource9_SetPriority(This, PriorityNew);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static DWORD NINE_WINAPI
LockResource9_GetPriority( struct NineResource9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineResource9_GetPriority(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static void NINE_WINAPI
LockResource9_PreLoad( struct NineResource9 *This )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineResource9_PreLoad(This);
pipe_mutex_unlock(d3dlock_global);
}
#endif
#if 0
static D3DRESOURCETYPE NINE_WINAPI
LockResource9_GetType( struct NineResource9 *This )
{
D3DRESOURCETYPE r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineResource9_GetType(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static DWORD NINE_WINAPI
LockBaseTexture9_SetLOD( struct NineBaseTexture9 *This,
DWORD LODNew )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_SetLOD(This, LODNew);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static DWORD NINE_WINAPI
LockBaseTexture9_GetLOD( struct NineBaseTexture9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_GetLOD(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static DWORD NINE_WINAPI
LockBaseTexture9_GetLevelCount( struct NineBaseTexture9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_GetLevelCount(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockBaseTexture9_SetAutoGenFilterType( struct NineBaseTexture9 *This,
D3DTEXTUREFILTERTYPE FilterType )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_SetAutoGenFilterType(This, FilterType);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static D3DTEXTUREFILTERTYPE NINE_WINAPI
LockBaseTexture9_GetAutoGenFilterType( struct NineBaseTexture9 *This )
{
D3DTEXTUREFILTERTYPE r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineBaseTexture9_GetAutoGenFilterType(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static void NINE_WINAPI
LockBaseTexture9_PreLoad( struct NineBaseTexture9 *This )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineBaseTexture9_PreLoad(This);
pipe_mutex_unlock(d3dlock_global);
}
static void NINE_WINAPI
LockBaseTexture9_GenerateMipSubLevels( struct NineBaseTexture9 *This )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineBaseTexture9_GenerateMipSubLevels(This);
pipe_mutex_unlock(d3dlock_global);
}
static HRESULT NINE_WINAPI
LockCryptoSession9_GetCertificateSize( struct NineCryptoSession9 *This,
UINT *pCertificateSize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetCertificateSize(This, pCertificateSize);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_GetCertificate( struct NineCryptoSession9 *This,
UINT CertifacteSize,
BYTE *ppCertificate )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetCertificate(This, CertifacteSize, ppCertificate);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_NegotiateKeyExchange( struct NineCryptoSession9 *This,
UINT DataSize,
void *pData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_NegotiateKeyExchange(This, DataSize, pData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_EncryptionBlt( struct NineCryptoSession9 *This,
IDirect3DSurface9 *pSrcSurface,
IDirect3DSurface9 *pDstSurface,
UINT DstSurfaceSize,
void *pIV )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_EncryptionBlt(This, pSrcSurface, pDstSurface, DstSurfaceSize, pIV);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_DecryptionBlt( struct NineCryptoSession9 *This,
IDirect3DSurface9 *pSrcSurface,
IDirect3DSurface9 *pDstSurface,
UINT SrcSurfaceSize,
D3DENCRYPTED_BLOCK_INFO *pEncryptedBlockInfo,
void *pContentKey,
void *pIV )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_DecryptionBlt(This, pSrcSurface, pDstSurface, SrcSurfaceSize, pEncryptedBlockInfo, pContentKey, pIV);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_GetSurfacePitch( struct NineCryptoSession9 *This,
IDirect3DSurface9 *pSrcSurface,
UINT *pSurfacePitch )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetSurfacePitch(This, pSrcSurface, pSurfacePitch);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_StartSessionKeyRefresh( struct NineCryptoSession9 *This,
void *pRandomNumber,
UINT RandomNumberSize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_StartSessionKeyRefresh(This, pRandomNumber, RandomNumberSize);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_FinishSessionKeyRefresh( struct NineCryptoSession9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_FinishSessionKeyRefresh(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCryptoSession9_GetEncryptionBltKey( struct NineCryptoSession9 *This,
void *pReadbackKey,
UINT KeySize )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCryptoSession9_GetEncryptionBltKey(This, pReadbackKey, KeySize);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DCryptoSession9Vtbl LockCryptoSession9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockCryptoSession9_GetCertificateSize,
@@ -423,76 +423,76 @@ IDirect3DCryptoSession9Vtbl LockCryptoSession9_vtable = {
(void *)LockCryptoSession9_GetEncryptionBltKey
};
#if 0
static HRESULT NINE_WINAPI
LockCubeTexture9_GetLevelDesc( struct NineCubeTexture9 *This,
UINT Level,
D3DSURFACE_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_GetLevelDesc(This, Level, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
#if 0
static HRESULT NINE_WINAPI
LockCubeTexture9_GetCubeMapSurface( struct NineCubeTexture9 *This,
D3DCUBEMAP_FACES FaceType,
UINT Level,
IDirect3DSurface9 **ppCubeMapSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_GetCubeMapSurface(This, FaceType, Level, ppCubeMapSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockCubeTexture9_LockRect( struct NineCubeTexture9 *This,
D3DCUBEMAP_FACES FaceType,
UINT Level,
D3DLOCKED_RECT *pLockedRect,
const RECT *pRect,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_LockRect(This, FaceType, Level, pLockedRect, pRect, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCubeTexture9_UnlockRect( struct NineCubeTexture9 *This,
D3DCUBEMAP_FACES FaceType,
UINT Level )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_UnlockRect(This, FaceType, Level);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockCubeTexture9_AddDirtyRect( struct NineCubeTexture9 *This,
D3DCUBEMAP_FACES FaceType,
const RECT *pDirtyRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineCubeTexture9_AddDirtyRect(This, FaceType, pDirtyRect);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DCubeTexture9Vtbl LockCubeTexture9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
@@ -513,1448 +513,1448 @@ IDirect3DCubeTexture9Vtbl LockCubeTexture9_vtable = {
(void *)NineCubeTexture9_GetCubeMapSurface, /* AddRef */
(void *)LockCubeTexture9_LockRect,
(void *)LockCubeTexture9_UnlockRect,
(void *)LockCubeTexture9_AddDirtyRect
};
static HRESULT NINE_WINAPI
LockDevice9_TestCooperativeLevel( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_TestCooperativeLevel(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static UINT NINE_WINAPI
LockDevice9_GetAvailableTextureMem( struct NineDevice9 *This )
{
UINT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetAvailableTextureMem(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_EvictManagedResources( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_EvictManagedResources(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetDirect3D( struct NineDevice9 *This,
IDirect3D9 **ppD3D9 )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDirect3D(This, ppD3D9);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockDevice9_GetDeviceCaps( struct NineDevice9 *This,
D3DCAPS9 *pCaps )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDeviceCaps(This, pCaps);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockDevice9_GetDisplayMode( struct NineDevice9 *This,
UINT iSwapChain,
D3DDISPLAYMODE *pMode )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDisplayMode(This, iSwapChain, pMode);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockDevice9_GetCreationParameters( struct NineDevice9 *This,
D3DDEVICE_CREATION_PARAMETERS *pParameters )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetCreationParameters(This, pParameters);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockDevice9_SetCursorProperties( struct NineDevice9 *This,
UINT XHotSpot,
UINT YHotSpot,
IDirect3DSurface9 *pCursorBitmap )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetCursorProperties(This, XHotSpot, YHotSpot, pCursorBitmap);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static void NINE_WINAPI
LockDevice9_SetCursorPosition( struct NineDevice9 *This,
int X,
int Y,
DWORD Flags )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineDevice9_SetCursorPosition(This, X, Y, Flags);
pipe_mutex_unlock(d3dlock_global);
}
static BOOL NINE_WINAPI
LockDevice9_ShowCursor( struct NineDevice9 *This,
BOOL bShow )
{
BOOL r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ShowCursor(This, bShow);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateAdditionalSwapChain( struct NineDevice9 *This,
D3DPRESENT_PARAMETERS *pPresentationParameters,
IDirect3DSwapChain9 **pSwapChain )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateAdditionalSwapChain(This, pPresentationParameters, pSwapChain);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetSwapChain( struct NineDevice9 *This,
UINT iSwapChain,
IDirect3DSwapChain9 **pSwapChain )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetSwapChain(This, iSwapChain, pSwapChain);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static UINT NINE_WINAPI
LockDevice9_GetNumberOfSwapChains( struct NineDevice9 *This )
{
UINT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetNumberOfSwapChains(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_Reset( struct NineDevice9 *This,
D3DPRESENT_PARAMETERS *pPresentationParameters )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_Reset(This, pPresentationParameters);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_Present( struct NineDevice9 *This,
const RECT *pSourceRect,
const RECT *pDestRect,
HWND hDestWindowOverride,
const RGNDATA *pDirtyRegion )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_Present(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetBackBuffer( struct NineDevice9 *This,
UINT iSwapChain,
UINT iBackBuffer,
D3DBACKBUFFER_TYPE Type,
IDirect3DSurface9 **ppBackBuffer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetBackBuffer(This, iSwapChain, iBackBuffer, Type, ppBackBuffer);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetRasterStatus( struct NineDevice9 *This,
UINT iSwapChain,
D3DRASTER_STATUS *pRasterStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRasterStatus(This, iSwapChain, pRasterStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetDialogBoxMode( struct NineDevice9 *This,
BOOL bEnableDialogs )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetDialogBoxMode(This, bEnableDialogs);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static void NINE_WINAPI
LockDevice9_SetGammaRamp( struct NineDevice9 *This,
UINT iSwapChain,
DWORD Flags,
const D3DGAMMARAMP *pRamp )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineDevice9_SetGammaRamp(This, iSwapChain, Flags, pRamp);
pipe_mutex_unlock(d3dlock_global);
}
static void NINE_WINAPI
LockDevice9_GetGammaRamp( struct NineDevice9 *This,
UINT iSwapChain,
D3DGAMMARAMP *pRamp )
{
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
NineDevice9_GetGammaRamp(This, iSwapChain, pRamp);
pipe_mutex_unlock(d3dlock_global);
}
static HRESULT NINE_WINAPI
LockDevice9_CreateTexture( struct NineDevice9 *This,
UINT Width,
UINT Height,
UINT Levels,
DWORD Usage,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DTexture9 **ppTexture,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateTexture(This, Width, Height, Levels, Usage, Format, Pool, ppTexture, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateVolumeTexture( struct NineDevice9 *This,
UINT Width,
UINT Height,
UINT Depth,
UINT Levels,
DWORD Usage,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DVolumeTexture9 **ppVolumeTexture,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVolumeTexture(This, Width, Height, Depth, Levels, Usage, Format, Pool, ppVolumeTexture, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateCubeTexture( struct NineDevice9 *This,
UINT EdgeLength,
UINT Levels,
DWORD Usage,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DCubeTexture9 **ppCubeTexture,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateCubeTexture(This, EdgeLength, Levels, Usage, Format, Pool, ppCubeTexture, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateVertexBuffer( struct NineDevice9 *This,
UINT Length,
DWORD Usage,
DWORD FVF,
D3DPOOL Pool,
IDirect3DVertexBuffer9 **ppVertexBuffer,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVertexBuffer(This, Length, Usage, FVF, Pool, ppVertexBuffer, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateIndexBuffer( struct NineDevice9 *This,
UINT Length,
DWORD Usage,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DIndexBuffer9 **ppIndexBuffer,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateIndexBuffer(This, Length, Usage, Format, Pool, ppIndexBuffer, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateRenderTarget( struct NineDevice9 *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DMULTISAMPLE_TYPE MultiSample,
DWORD MultisampleQuality,
BOOL Lockable,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateRenderTarget(This, Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateDepthStencilSurface( struct NineDevice9 *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DMULTISAMPLE_TYPE MultiSample,
DWORD MultisampleQuality,
BOOL Discard,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateDepthStencilSurface(This, Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_UpdateSurface( struct NineDevice9 *This,
IDirect3DSurface9 *pSourceSurface,
const RECT *pSourceRect,
IDirect3DSurface9 *pDestinationSurface,
const POINT *pDestPoint )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_UpdateSurface(This, pSourceSurface, pSourceRect, pDestinationSurface, pDestPoint);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_UpdateTexture( struct NineDevice9 *This,
IDirect3DBaseTexture9 *pSourceTexture,
IDirect3DBaseTexture9 *pDestinationTexture )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_UpdateTexture(This, pSourceTexture, pDestinationTexture);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetRenderTargetData( struct NineDevice9 *This,
IDirect3DSurface9 *pRenderTarget,
IDirect3DSurface9 *pDestSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRenderTargetData(This, pRenderTarget, pDestSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetFrontBufferData( struct NineDevice9 *This,
UINT iSwapChain,
IDirect3DSurface9 *pDestSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetFrontBufferData(This, iSwapChain, pDestSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_StretchRect( struct NineDevice9 *This,
IDirect3DSurface9 *pSourceSurface,
const RECT *pSourceRect,
IDirect3DSurface9 *pDestSurface,
const RECT *pDestRect,
D3DTEXTUREFILTERTYPE Filter )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_StretchRect(This, pSourceSurface, pSourceRect, pDestSurface, pDestRect, Filter);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_ColorFill( struct NineDevice9 *This,
IDirect3DSurface9 *pSurface,
const RECT *pRect,
D3DCOLOR color )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ColorFill(This, pSurface, pRect, color);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateOffscreenPlainSurface( struct NineDevice9 *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateOffscreenPlainSurface(This, Width, Height, Format, Pool, ppSurface, pSharedHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetRenderTarget( struct NineDevice9 *This,
DWORD RenderTargetIndex,
IDirect3DSurface9 *pRenderTarget )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetRenderTarget(This, RenderTargetIndex, pRenderTarget);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetRenderTarget( struct NineDevice9 *This,
DWORD RenderTargetIndex,
IDirect3DSurface9 **ppRenderTarget )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRenderTarget(This, RenderTargetIndex, ppRenderTarget);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetDepthStencilSurface( struct NineDevice9 *This,
IDirect3DSurface9 *pNewZStencil )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetDepthStencilSurface(This, pNewZStencil);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetDepthStencilSurface( struct NineDevice9 *This,
IDirect3DSurface9 **ppZStencilSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetDepthStencilSurface(This, ppZStencilSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_BeginScene( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_BeginScene(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_EndScene( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_EndScene(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_Clear( struct NineDevice9 *This,
DWORD Count,
const D3DRECT *pRects,
DWORD Flags,
D3DCOLOR Color,
float Z,
DWORD Stencil )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_Clear(This, Count, pRects, Flags, Color, Z, Stencil);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetTransform( struct NineDevice9 *This,
D3DTRANSFORMSTATETYPE State,
const D3DMATRIX *pMatrix )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetTransform(This, State, pMatrix);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetTransform( struct NineDevice9 *This,
D3DTRANSFORMSTATETYPE State,
D3DMATRIX *pMatrix )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetTransform(This, State, pMatrix);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_MultiplyTransform( struct NineDevice9 *This,
D3DTRANSFORMSTATETYPE State,
const D3DMATRIX *pMatrix )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_MultiplyTransform(This, State, pMatrix);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetViewport( struct NineDevice9 *This,
const D3DVIEWPORT9 *pViewport )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetViewport(This, pViewport);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetViewport( struct NineDevice9 *This,
D3DVIEWPORT9 *pViewport )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetViewport(This, pViewport);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetMaterial( struct NineDevice9 *This,
const D3DMATERIAL9 *pMaterial )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetMaterial(This, pMaterial);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetMaterial( struct NineDevice9 *This,
D3DMATERIAL9 *pMaterial )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetMaterial(This, pMaterial);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetLight( struct NineDevice9 *This,
DWORD Index,
const D3DLIGHT9 *pLight )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetLight(This, Index, pLight);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetLight( struct NineDevice9 *This,
DWORD Index,
D3DLIGHT9 *pLight )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetLight(This, Index, pLight);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_LightEnable( struct NineDevice9 *This,
DWORD Index,
BOOL Enable )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_LightEnable(This, Index, Enable);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetLightEnable( struct NineDevice9 *This,
DWORD Index,
BOOL *pEnable )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetLightEnable(This, Index, pEnable);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetClipPlane( struct NineDevice9 *This,
DWORD Index,
const float *pPlane )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetClipPlane(This, Index, pPlane);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetClipPlane( struct NineDevice9 *This,
DWORD Index,
float *pPlane )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetClipPlane(This, Index, pPlane);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetRenderState( struct NineDevice9 *This,
D3DRENDERSTATETYPE State,
DWORD Value )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetRenderState(This, State, Value);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetRenderState( struct NineDevice9 *This,
D3DRENDERSTATETYPE State,
DWORD *pValue )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetRenderState(This, State, pValue);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateStateBlock( struct NineDevice9 *This,
D3DSTATEBLOCKTYPE Type,
IDirect3DStateBlock9 **ppSB )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateStateBlock(This, Type, ppSB);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_BeginStateBlock( struct NineDevice9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_BeginStateBlock(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_EndStateBlock( struct NineDevice9 *This,
IDirect3DStateBlock9 **ppSB )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_EndStateBlock(This, ppSB);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetClipStatus( struct NineDevice9 *This,
const D3DCLIPSTATUS9 *pClipStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetClipStatus(This, pClipStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetClipStatus( struct NineDevice9 *This,
D3DCLIPSTATUS9 *pClipStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetClipStatus(This, pClipStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetTexture( struct NineDevice9 *This,
DWORD Stage,
IDirect3DBaseTexture9 **ppTexture )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetTexture(This, Stage, ppTexture);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetTexture( struct NineDevice9 *This,
DWORD Stage,
IDirect3DBaseTexture9 *pTexture )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetTexture(This, Stage, pTexture);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetTextureStageState( struct NineDevice9 *This,
DWORD Stage,
D3DTEXTURESTAGESTATETYPE Type,
DWORD *pValue )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetTextureStageState(This, Stage, Type, pValue);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetTextureStageState( struct NineDevice9 *This,
DWORD Stage,
D3DTEXTURESTAGESTATETYPE Type,
DWORD Value )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetTextureStageState(This, Stage, Type, Value);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetSamplerState( struct NineDevice9 *This,
DWORD Sampler,
D3DSAMPLERSTATETYPE Type,
DWORD *pValue )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetSamplerState(This, Sampler, Type, pValue);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetSamplerState( struct NineDevice9 *This,
DWORD Sampler,
D3DSAMPLERSTATETYPE Type,
DWORD Value )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetSamplerState(This, Sampler, Type, Value);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_ValidateDevice( struct NineDevice9 *This,
DWORD *pNumPasses )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ValidateDevice(This, pNumPasses);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetPaletteEntries( struct NineDevice9 *This,
UINT PaletteNumber,
const PALETTEENTRY *pEntries )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPaletteEntries(This, PaletteNumber, pEntries);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetPaletteEntries( struct NineDevice9 *This,
UINT PaletteNumber,
PALETTEENTRY *pEntries )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPaletteEntries(This, PaletteNumber, pEntries);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetCurrentTexturePalette( struct NineDevice9 *This,
UINT PaletteNumber )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetCurrentTexturePalette(This, PaletteNumber);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetCurrentTexturePalette( struct NineDevice9 *This,
UINT *PaletteNumber )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetCurrentTexturePalette(This, PaletteNumber);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetScissorRect( struct NineDevice9 *This,
const RECT *pRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetScissorRect(This, pRect);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetScissorRect( struct NineDevice9 *This,
RECT *pRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetScissorRect(This, pRect);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetSoftwareVertexProcessing( struct NineDevice9 *This,
BOOL bSoftware )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetSoftwareVertexProcessing(This, bSoftware);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static BOOL NINE_WINAPI
LockDevice9_GetSoftwareVertexProcessing( struct NineDevice9 *This )
{
BOOL r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetSoftwareVertexProcessing(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetNPatchMode( struct NineDevice9 *This,
float nSegments )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetNPatchMode(This, nSegments);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static float NINE_WINAPI
LockDevice9_GetNPatchMode( struct NineDevice9 *This )
{
float r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetNPatchMode(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DrawPrimitive( struct NineDevice9 *This,
D3DPRIMITIVETYPE PrimitiveType,
UINT StartVertex,
UINT PrimitiveCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawPrimitive(This, PrimitiveType, StartVertex, PrimitiveCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DrawIndexedPrimitive( struct NineDevice9 *This,
D3DPRIMITIVETYPE PrimitiveType,
INT BaseVertexIndex,
UINT MinVertexIndex,
UINT NumVertices,
UINT startIndex,
UINT primCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawIndexedPrimitive(This, PrimitiveType, BaseVertexIndex, MinVertexIndex, NumVertices, startIndex, primCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DrawPrimitiveUP( struct NineDevice9 *This,
D3DPRIMITIVETYPE PrimitiveType,
UINT PrimitiveCount,
const void *pVertexStreamZeroData,
UINT VertexStreamZeroStride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawPrimitiveUP(This, PrimitiveType, PrimitiveCount, pVertexStreamZeroData, VertexStreamZeroStride);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DrawIndexedPrimitiveUP( struct NineDevice9 *This,
D3DPRIMITIVETYPE PrimitiveType,
UINT MinVertexIndex,
UINT NumVertices,
UINT PrimitiveCount,
const void *pIndexData,
D3DFORMAT IndexDataFormat,
const void *pVertexStreamZeroData,
UINT VertexStreamZeroStride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawIndexedPrimitiveUP(This, PrimitiveType, MinVertexIndex, NumVertices, PrimitiveCount, pIndexData, IndexDataFormat, pVertexStreamZeroData, VertexStreamZeroStride);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_ProcessVertices( struct NineDevice9 *This,
UINT SrcStartIndex,
UINT DestIndex,
UINT VertexCount,
IDirect3DVertexBuffer9 *pDestBuffer,
IDirect3DVertexDeclaration9 *pVertexDecl,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_ProcessVertices(This, SrcStartIndex, DestIndex, VertexCount, pDestBuffer, pVertexDecl, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateVertexDeclaration( struct NineDevice9 *This,
const D3DVERTEXELEMENT9 *pVertexElements,
IDirect3DVertexDeclaration9 **ppDecl )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVertexDeclaration(This, pVertexElements, ppDecl);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetVertexDeclaration( struct NineDevice9 *This,
IDirect3DVertexDeclaration9 *pDecl )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexDeclaration(This, pDecl);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetVertexDeclaration( struct NineDevice9 *This,
IDirect3DVertexDeclaration9 **ppDecl )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexDeclaration(This, ppDecl);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetFVF( struct NineDevice9 *This,
DWORD FVF )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetFVF(This, FVF);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetFVF( struct NineDevice9 *This,
DWORD *pFVF )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetFVF(This, pFVF);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateVertexShader( struct NineDevice9 *This,
const DWORD *pFunction,
IDirect3DVertexShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateVertexShader(This, pFunction, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetVertexShader( struct NineDevice9 *This,
IDirect3DVertexShader9 *pShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShader(This, pShader);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetVertexShader( struct NineDevice9 *This,
IDirect3DVertexShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShader(This, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetVertexShaderConstantF( struct NineDevice9 *This,
UINT StartRegister,
const float *pConstantData,
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetVertexShaderConstantF( struct NineDevice9 *This,
UINT StartRegister,
float *pConstantData,
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetVertexShaderConstantI( struct NineDevice9 *This,
UINT StartRegister,
const int *pConstantData,
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetVertexShaderConstantI( struct NineDevice9 *This,
UINT StartRegister,
int *pConstantData,
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetVertexShaderConstantB( struct NineDevice9 *This,
UINT StartRegister,
const BOOL *pConstantData,
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetVertexShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetVertexShaderConstantB( struct NineDevice9 *This,
UINT StartRegister,
BOOL *pConstantData,
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetVertexShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetStreamSource( struct NineDevice9 *This,
UINT StreamNumber,
IDirect3DVertexBuffer9 *pStreamData,
UINT OffsetInBytes,
UINT Stride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetStreamSource(This, StreamNumber, pStreamData, OffsetInBytes, Stride);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetStreamSource( struct NineDevice9 *This,
UINT StreamNumber,
IDirect3DVertexBuffer9 **ppStreamData,
UINT *pOffsetInBytes,
UINT *pStride )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetStreamSource(This, StreamNumber, ppStreamData, pOffsetInBytes, pStride);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetStreamSourceFreq( struct NineDevice9 *This,
UINT StreamNumber,
UINT Setting )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetStreamSourceFreq(This, StreamNumber, Setting);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetStreamSourceFreq( struct NineDevice9 *This,
UINT StreamNumber,
UINT *pSetting )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetStreamSourceFreq(This, StreamNumber, pSetting);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetIndices( struct NineDevice9 *This,
IDirect3DIndexBuffer9 *pIndexData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetIndices(This, pIndexData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetIndices( struct NineDevice9 *This,
IDirect3DIndexBuffer9 **ppIndexData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetIndices(This, ppIndexData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreatePixelShader( struct NineDevice9 *This,
const DWORD *pFunction,
IDirect3DPixelShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreatePixelShader(This, pFunction, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetPixelShader( struct NineDevice9 *This,
IDirect3DPixelShader9 *pShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShader(This, pShader);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetPixelShader( struct NineDevice9 *This,
IDirect3DPixelShader9 **ppShader )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShader(This, ppShader);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetPixelShaderConstantF( struct NineDevice9 *This,
UINT StartRegister,
const float *pConstantData,
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetPixelShaderConstantF( struct NineDevice9 *This,
UINT StartRegister,
float *pConstantData,
UINT Vector4fCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShaderConstantF(This, StartRegister, pConstantData, Vector4fCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetPixelShaderConstantI( struct NineDevice9 *This,
UINT StartRegister,
const int *pConstantData,
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetPixelShaderConstantI( struct NineDevice9 *This,
UINT StartRegister,
int *pConstantData,
UINT Vector4iCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShaderConstantI(This, StartRegister, pConstantData, Vector4iCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_SetPixelShaderConstantB( struct NineDevice9 *This,
UINT StartRegister,
const BOOL *pConstantData,
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_SetPixelShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_GetPixelShaderConstantB( struct NineDevice9 *This,
UINT StartRegister,
BOOL *pConstantData,
UINT BoolCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_GetPixelShaderConstantB(This, StartRegister, pConstantData, BoolCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DrawRectPatch( struct NineDevice9 *This,
UINT Handle,
const float *pNumSegs,
const D3DRECTPATCH_INFO *pRectPatchInfo )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawRectPatch(This, Handle, pNumSegs, pRectPatchInfo);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DrawTriPatch( struct NineDevice9 *This,
UINT Handle,
const float *pNumSegs,
const D3DTRIPATCH_INFO *pTriPatchInfo )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DrawTriPatch(This, Handle, pNumSegs, pTriPatchInfo);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_DeletePatch( struct NineDevice9 *This,
UINT Handle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_DeletePatch(This, Handle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9_CreateQuery( struct NineDevice9 *This,
D3DQUERYTYPE Type,
IDirect3DQuery9 **ppQuery )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9_CreateQuery(This, Type, ppQuery);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DDevice9Vtbl LockDevice9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockDevice9_TestCooperativeLevel,
@@ -2076,212 +2076,212 @@ IDirect3DDevice9Vtbl LockDevice9_vtable = {
};
static HRESULT NINE_WINAPI
LockDevice9Ex_SetConvolutionMonoKernel( struct NineDevice9Ex *This,
UINT width,
UINT height,
float *rows,
float *columns )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_SetConvolutionMonoKernel(This, width, height, rows, columns);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_ComposeRects( struct NineDevice9Ex *This,
IDirect3DSurface9 *pSrc,
IDirect3DSurface9 *pDst,
IDirect3DVertexBuffer9 *pSrcRectDescs,
UINT NumRects,
IDirect3DVertexBuffer9 *pDstRectDescs,
D3DCOMPOSERECTSOP Operation,
int Xoffset,
int Yoffset )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_ComposeRects(This, pSrc, pDst, pSrcRectDescs, NumRects, pDstRectDescs, Operation, Xoffset, Yoffset);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_PresentEx( struct NineDevice9Ex *This,
const RECT *pSourceRect,
const RECT *pDestRect,
HWND hDestWindowOverride,
const RGNDATA *pDirtyRegion,
DWORD dwFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_PresentEx(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_GetGPUThreadPriority( struct NineDevice9Ex *This,
INT *pPriority )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_GetGPUThreadPriority(This, pPriority);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_SetGPUThreadPriority( struct NineDevice9Ex *This,
INT Priority )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_SetGPUThreadPriority(This, Priority);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_WaitForVBlank( struct NineDevice9Ex *This,
UINT iSwapChain )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_WaitForVBlank(This, iSwapChain);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_CheckResourceResidency( struct NineDevice9Ex *This,
IDirect3DResource9 **pResourceArray,
UINT32 NumResources )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CheckResourceResidency(This, pResourceArray, NumResources);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_SetMaximumFrameLatency( struct NineDevice9Ex *This,
UINT MaxLatency )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_SetMaximumFrameLatency(This, MaxLatency);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_GetMaximumFrameLatency( struct NineDevice9Ex *This,
UINT *pMaxLatency )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_GetMaximumFrameLatency(This, pMaxLatency);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_CheckDeviceState( struct NineDevice9Ex *This,
HWND hDestinationWindow )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CheckDeviceState(This, hDestinationWindow);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_CreateRenderTargetEx( struct NineDevice9Ex *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DMULTISAMPLE_TYPE MultiSample,
DWORD MultisampleQuality,
BOOL Lockable,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle,
DWORD Usage )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CreateRenderTargetEx(This, Width, Height, Format, MultiSample, MultisampleQuality, Lockable, ppSurface, pSharedHandle, Usage);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_CreateOffscreenPlainSurfaceEx( struct NineDevice9Ex *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DPOOL Pool,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle,
DWORD Usage )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CreateOffscreenPlainSurfaceEx(This, Width, Height, Format, Pool, ppSurface, pSharedHandle, Usage);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_CreateDepthStencilSurfaceEx( struct NineDevice9Ex *This,
UINT Width,
UINT Height,
D3DFORMAT Format,
D3DMULTISAMPLE_TYPE MultiSample,
DWORD MultisampleQuality,
BOOL Discard,
IDirect3DSurface9 **ppSurface,
HANDLE *pSharedHandle,
DWORD Usage )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_CreateDepthStencilSurfaceEx(This, Width, Height, Format, MultiSample, MultisampleQuality, Discard, ppSurface, pSharedHandle, Usage);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_ResetEx( struct NineDevice9Ex *This,
D3DPRESENT_PARAMETERS *pPresentationParameters,
D3DDISPLAYMODEEX *pFullscreenDisplayMode )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_ResetEx(This, pPresentationParameters, pFullscreenDisplayMode);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Ex_GetDisplayModeEx( struct NineDevice9Ex *This,
UINT iSwapChain,
D3DDISPLAYMODEEX *pMode,
D3DDISPLAYROTATION *pRotation )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Ex_GetDisplayModeEx(This, iSwapChain, pMode, pRotation);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DDevice9ExVtbl LockDevice9Ex_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockDevice9_TestCooperativeLevel,
@@ -2417,48 +2417,48 @@ IDirect3DDevice9ExVtbl LockDevice9Ex_vtable = {
(void *)LockDevice9Ex_GetDisplayModeEx
};
static HRESULT NINE_WINAPI
LockDevice9Video_GetContentProtectionCaps( struct NineDevice9Video *This,
const GUID *pCryptoType,
const GUID *pDecodeProfile,
D3DCONTENTPROTECTIONCAPS *pCaps )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Video_GetContentProtectionCaps(This, pCryptoType, pDecodeProfile, pCaps);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Video_CreateAuthenticatedChannel( struct NineDevice9Video *This,
D3DAUTHENTICATEDCHANNELTYPE ChannelType,
IDirect3DAuthenticatedChannel9 **ppAuthenticatedChannel,
HANDLE *pChannelHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Video_CreateAuthenticatedChannel(This, ChannelType, ppAuthenticatedChannel, pChannelHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockDevice9Video_CreateCryptoSession( struct NineDevice9Video *This,
const GUID *pCryptoType,
const GUID *pDecodeProfile,
IDirect3DCryptoSession9 **ppCryptoSession,
HANDLE *pCryptoHandle )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineDevice9Video_CreateCryptoSession(This, pCryptoType, pDecodeProfile, ppCryptoSession, pCryptoHandle);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DDevice9VideoVtbl LockDevice9Video_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockDevice9Video_GetContentProtectionCaps,
@@ -2467,43 +2467,43 @@ IDirect3DDevice9VideoVtbl LockDevice9Video_vtable = {
};
static HRESULT NINE_WINAPI
LockIndexBuffer9_Lock( struct NineIndexBuffer9 *This,
UINT OffsetToLock,
UINT SizeToLock,
void **ppbData,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineIndexBuffer9_Lock(This, OffsetToLock, SizeToLock, ppbData, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockIndexBuffer9_Unlock( struct NineIndexBuffer9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineIndexBuffer9_Unlock(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockIndexBuffer9_GetDesc( struct NineIndexBuffer9 *This,
D3DINDEXBUFFER_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineIndexBuffer9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
IDirect3DIndexBuffer9Vtbl LockIndexBuffer9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
@@ -2519,103 +2519,103 @@ IDirect3DIndexBuffer9Vtbl LockIndexBuffer9_vtable = {
(void *)LockIndexBuffer9_Unlock,
(void *)NineIndexBuffer9_GetDesc /* immutable */
};
#if 0
static HRESULT NINE_WINAPI
LockPixelShader9_GetDevice( struct NinePixelShader9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockPixelShader9_GetFunction( struct NinePixelShader9 *This,
void *pData,
UINT *pSizeOfData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NinePixelShader9_GetFunction(This, pData, pSizeOfData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DPixelShader9Vtbl LockPixelShader9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice,
(void *)LockPixelShader9_GetFunction
};
#if 0
static HRESULT NINE_WINAPI
LockQuery9_GetDevice( struct NineQuery9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
#if 0
static D3DQUERYTYPE NINE_WINAPI
LockQuery9_GetType( struct NineQuery9 *This )
{
D3DQUERYTYPE r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_GetType(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
#if 0
static DWORD NINE_WINAPI
LockQuery9_GetDataSize( struct NineQuery9 *This )
{
DWORD r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_GetDataSize(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockQuery9_Issue( struct NineQuery9 *This,
DWORD dwIssueFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_Issue(This, dwIssueFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockQuery9_GetData( struct NineQuery9 *This,
void *pData,
DWORD dwSize,
DWORD dwGetDataFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineQuery9_GetData(This, pData, dwSize, dwGetDataFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DQuery9Vtbl LockQuery9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of Query9 iface */
@@ -2624,121 +2624,121 @@ IDirect3DQuery9Vtbl LockQuery9_vtable = {
(void *)LockQuery9_Issue,
(void *)LockQuery9_GetData
};
#if 0
static HRESULT NINE_WINAPI
LockStateBlock9_GetDevice( struct NineStateBlock9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockStateBlock9_Capture( struct NineStateBlock9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineStateBlock9_Capture(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockStateBlock9_Apply( struct NineStateBlock9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineStateBlock9_Apply(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DStateBlock9Vtbl LockStateBlock9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of StateBlock9 iface */
(void *)LockStateBlock9_Capture,
(void *)LockStateBlock9_Apply
};
static HRESULT NINE_WINAPI
LockSurface9_GetContainer( struct NineSurface9 *This,
REFIID riid,
void **ppContainer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_GetContainer(This, riid, ppContainer);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockSurface9_GetDesc( struct NineSurface9 *This,
D3DSURFACE_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockSurface9_LockRect( struct NineSurface9 *This,
D3DLOCKED_RECT *pLockedRect,
const RECT *pRect,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_LockRect(This, pLockedRect, pRect, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSurface9_UnlockRect( struct NineSurface9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_UnlockRect(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSurface9_GetDC( struct NineSurface9 *This,
HDC *phdc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_GetDC(This, phdc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSurface9_ReleaseDC( struct NineSurface9 *This,
HDC hdc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSurface9_ReleaseDC(This, hdc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DSurface9Vtbl LockSurface9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
@@ -2759,91 +2759,91 @@ IDirect3DSurface9Vtbl LockSurface9_vtable = {
static HRESULT NINE_WINAPI
LockSwapChain9_Present( struct NineSwapChain9 *This,
const RECT *pSourceRect,
const RECT *pDestRect,
HWND hDestWindowOverride,
const RGNDATA *pDirtyRegion,
DWORD dwFlags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_Present(This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion, dwFlags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSwapChain9_GetFrontBufferData( struct NineSwapChain9 *This,
IDirect3DSurface9 *pDestSurface )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetFrontBufferData(This, pDestSurface);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSwapChain9_GetBackBuffer( struct NineSwapChain9 *This,
UINT iBackBuffer,
D3DBACKBUFFER_TYPE Type,
IDirect3DSurface9 **ppBackBuffer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetBackBuffer(This, iBackBuffer, Type, ppBackBuffer);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSwapChain9_GetRasterStatus( struct NineSwapChain9 *This,
D3DRASTER_STATUS *pRasterStatus )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetRasterStatus(This, pRasterStatus);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSwapChain9_GetDisplayMode( struct NineSwapChain9 *This,
D3DDISPLAYMODE *pMode )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetDisplayMode(This, pMode);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockSwapChain9_GetDevice( struct NineSwapChain9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockSwapChain9_GetPresentParameters( struct NineSwapChain9 *This,
D3DPRESENT_PARAMETERS *pPresentationParameters )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9_GetPresentParameters(This, pPresentationParameters);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DSwapChain9Vtbl LockSwapChain9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockSwapChain9_Present,
@@ -2853,44 +2853,44 @@ IDirect3DSwapChain9Vtbl LockSwapChain9_vtable = {
(void *)LockSwapChain9_GetDisplayMode,
(void *)NineUnknown_GetDevice, /* actually part of SwapChain9 iface */
(void *)LockSwapChain9_GetPresentParameters
};
static HRESULT NINE_WINAPI
LockSwapChain9Ex_GetLastPresentCount( struct NineSwapChain9Ex *This,
UINT *pLastPresentCount )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9Ex_GetLastPresentCount(This, pLastPresentCount);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSwapChain9Ex_GetPresentStats( struct NineSwapChain9Ex *This,
D3DPRESENTSTATS *pPresentationStatistics )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9Ex_GetPresentStats(This, pPresentationStatistics);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockSwapChain9Ex_GetDisplayModeEx( struct NineSwapChain9Ex *This,
D3DDISPLAYMODEEX *pMode,
D3DDISPLAYROTATION *pRotation )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineSwapChain9Ex_GetDisplayModeEx(This, pMode, pRotation);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DSwapChain9ExVtbl LockSwapChain9Ex_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)LockSwapChain9_Present,
@@ -2905,72 +2905,72 @@ IDirect3DSwapChain9ExVtbl LockSwapChain9Ex_vtable = {
(void *)LockSwapChain9Ex_GetDisplayModeEx
};
#if 0
static HRESULT NINE_WINAPI
LockTexture9_GetLevelDesc( struct NineTexture9 *This,
UINT Level,
D3DSURFACE_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_GetLevelDesc(This, Level, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
#if 0
static HRESULT NINE_WINAPI
LockTexture9_GetSurfaceLevel( struct NineTexture9 *This,
UINT Level,
IDirect3DSurface9 **ppSurfaceLevel )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_GetSurfaceLevel(This, Level, ppSurfaceLevel);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockTexture9_LockRect( struct NineTexture9 *This,
UINT Level,
D3DLOCKED_RECT *pLockedRect,
const RECT *pRect,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_LockRect(This, Level, pLockedRect, pRect, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockTexture9_UnlockRect( struct NineTexture9 *This,
UINT Level )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_UnlockRect(This, Level);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockTexture9_AddDirtyRect( struct NineTexture9 *This,
const RECT *pDirtyRect )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineTexture9_AddDirtyRect(This, pDirtyRect);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DTexture9Vtbl LockTexture9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
@@ -2995,43 +2995,43 @@ IDirect3DTexture9Vtbl LockTexture9_vtable = {
};
static HRESULT NINE_WINAPI
LockVertexBuffer9_Lock( struct NineVertexBuffer9 *This,
UINT OffsetToLock,
UINT SizeToLock,
void **ppbData,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexBuffer9_Lock(This, OffsetToLock, SizeToLock, ppbData, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockVertexBuffer9_Unlock( struct NineVertexBuffer9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexBuffer9_Unlock(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockVertexBuffer9_GetDesc( struct NineVertexBuffer9 *This,
D3DVERTEXBUFFER_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexBuffer9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
IDirect3DVertexBuffer9Vtbl LockVertexBuffer9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
@@ -3047,136 +3047,136 @@ IDirect3DVertexBuffer9Vtbl LockVertexBuffer9_vtable = {
(void *)LockVertexBuffer9_Unlock,
(void *)NineVertexBuffer9_GetDesc /* immutable */
};
#if 0
static HRESULT NINE_WINAPI
LockVertexDeclaration9_GetDevice( struct NineVertexDeclaration9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockVertexDeclaration9_GetDeclaration( struct NineVertexDeclaration9 *This,
D3DVERTEXELEMENT9 *pElement,
UINT *pNumElements )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexDeclaration9_GetDeclaration(This, pElement, pNumElements);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DVertexDeclaration9Vtbl LockVertexDeclaration9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of VertexDecl9 iface */
(void *)LockVertexDeclaration9_GetDeclaration
};
#if 0
static HRESULT NINE_WINAPI
LockVertexShader9_GetDevice( struct NineVertexShader9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockVertexShader9_GetFunction( struct NineVertexShader9 *This,
void *pData,
UINT *pSizeOfData )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVertexShader9_GetFunction(This, pData, pSizeOfData);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DVertexShader9Vtbl LockVertexShader9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice,
(void *)LockVertexShader9_GetFunction
};
#if 0
static HRESULT NINE_WINAPI
LockVolume9_GetDevice( struct NineVolume9 *This,
IDirect3DDevice9 **ppDevice )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineUnknown_GetDevice(NineUnknown(This), ppDevice);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockVolume9_GetContainer( struct NineVolume9 *This,
REFIID riid,
void **ppContainer )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_GetContainer(This, riid, ppContainer);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#if 0
static HRESULT NINE_WINAPI
LockVolume9_GetDesc( struct NineVolume9 *This,
D3DVOLUME_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_GetDesc(This, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockVolume9_LockBox( struct NineVolume9 *This,
D3DLOCKED_BOX *pLockedVolume,
const D3DBOX *pBox,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_LockBox(This, pLockedVolume, pBox, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockVolume9_UnlockBox( struct NineVolume9 *This )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolume9_UnlockBox(This);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DVolume9Vtbl LockVolume9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of Volume9 iface */
@@ -3189,72 +3189,72 @@ IDirect3DVolume9Vtbl LockVolume9_vtable = {
(void *)LockVolume9_UnlockBox
};
#if 0
static HRESULT NINE_WINAPI
LockVolumeTexture9_GetLevelDesc( struct NineVolumeTexture9 *This,
UINT Level,
D3DVOLUME_DESC *pDesc )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_GetLevelDesc(This, Level, pDesc);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
#if 0
static HRESULT NINE_WINAPI
LockVolumeTexture9_GetVolumeLevel( struct NineVolumeTexture9 *This,
UINT Level,
IDirect3DVolume9 **ppVolumeLevel )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_GetVolumeLevel(This, Level, ppVolumeLevel);
pipe_mutex_unlock(d3dlock_global);
return r;
}
#endif
static HRESULT NINE_WINAPI
LockVolumeTexture9_LockBox( struct NineVolumeTexture9 *This,
UINT Level,
D3DLOCKED_BOX *pLockedVolume,
const D3DBOX *pBox,
DWORD Flags )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_LockBox(This, Level, pLockedVolume, pBox, Flags);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockVolumeTexture9_UnlockBox( struct NineVolumeTexture9 *This,
UINT Level )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_UnlockBox(This, Level);
pipe_mutex_unlock(d3dlock_global);
return r;
}
static HRESULT NINE_WINAPI
LockVolumeTexture9_AddDirtyBox( struct NineVolumeTexture9 *This,
const D3DBOX *pDirtyBox )
{
HRESULT r;
- pipe_mutex_lock(d3dlock_global);
+ mtx_lock(&d3dlock_global);
r = NineVolumeTexture9_AddDirtyBox(This, pDirtyBox);
pipe_mutex_unlock(d3dlock_global);
return r;
}
IDirect3DVolumeTexture9Vtbl LockVolumeTexture9_vtable = {
(void *)NineUnknown_QueryInterface,
(void *)NineUnknown_AddRef,
(void *)NineUnknown_ReleaseWithDtorLock,
(void *)NineUnknown_GetDevice, /* actually part of Resource9 iface */
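The nine_lock.c hunks above are mechanical: every LockFoo_* entry point takes the global Direct3D lock, forwards to the real NineFoo9_* implementation, and releases the lock. A reduced, self-contained sketch of one such wrapper after the conversion (HRESULT, the device type and the wrapped call are faked so it compiles on its own; the real file still pairs mtx_lock() with pipe_mutex_unlock(), mtx_unlock() appears below only to keep the sketch standalone C11):

#include <threads.h>

/* Stand-alone illustration of the wrapper shape used throughout
 * nine_lock.c; the types and the wrapped call are stand-ins. */
typedef long HRESULT;
struct NineDevice9 { int dummy; };

static mtx_t d3dlock_global;      /* the real lock lives in nine_lock.c */

static HRESULT NineDevice9_BeginScene(struct NineDevice9 *This)
{
    (void)This;
    return 0;                     /* D3D_OK */
}

static HRESULT LockDevice9_BeginScene(struct NineDevice9 *This)
{
    HRESULT r;
    mtx_lock(&d3dlock_global);    /* was pipe_mutex_lock(d3dlock_global) */
    r = NineDevice9_BeginScene(This);
    mtx_unlock(&d3dlock_global);
    return r;
}

int main(void)
{
    struct NineDevice9 dev = { 0 };
    mtx_init(&d3dlock_global, mtx_plain);
    return (int)LockDevice9_BeginScene(&dev);
}

Note the added '&' in every hunk: pipe_mutex_lock() was a macro taking the mutex by name, while C11 mtx_lock() takes a pointer.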
diff --git a/src/gallium/state_trackers/nine/nine_queue.c b/src/gallium/state_trackers/nine/nine_queue.c
index 07dfee5..39de0ca 100644
--- a/src/gallium/state_trackers/nine/nine_queue.c
+++ b/src/gallium/state_trackers/nine/nine_queue.c
@@ -78,21 +78,21 @@ struct nine_queue_pool {
mtx_t mutex_push;
};
/* Consumer functions: */
void
nine_queue_wait_flush(struct nine_queue_pool* ctx)
{
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
/* wait for cmdbuf full */
- pipe_mutex_lock(ctx->mutex_push);
+ mtx_lock(&ctx->mutex_push);
while (!cmdbuf->full)
{
DBG("waiting for full cmdbuf\n");
cnd_wait(&ctx->event_push, &ctx->mutex_push);
}
DBG("got cmdbuf=%p\n", cmdbuf);
pipe_mutex_unlock(ctx->mutex_push);
cmdbuf->offset = 0;
ctx->cur_instr = 0;
@@ -104,21 +104,21 @@ nine_queue_wait_flush(struct nine_queue_pool* ctx)
void *
nine_queue_get(struct nine_queue_pool* ctx)
{
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
unsigned offset;
/* At this point there's always a cmdbuf. */
if (ctx->cur_instr == cmdbuf->num_instr) {
/* signal waiting producer */
- pipe_mutex_lock(ctx->mutex_pop);
+ mtx_lock(&ctx->mutex_pop);
DBG("freeing cmdbuf=%p\n", cmdbuf);
cmdbuf->full = 0;
cnd_signal(&ctx->event_pop);
pipe_mutex_unlock(ctx->mutex_pop);
ctx->tail = (ctx->tail + 1) & NINE_CMD_BUFS_MASK;
return NULL;
}
@@ -141,31 +141,31 @@ nine_queue_flush(struct nine_queue_pool* ctx)
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->head];
DBG("flushing cmdbuf=%p instr=%d size=%d\n",
cmdbuf, cmdbuf->num_instr, cmdbuf->offset);
/* Nothing to flush */
if (!cmdbuf->num_instr)
return;
/* signal waiting worker */
- pipe_mutex_lock(ctx->mutex_push);
+ mtx_lock(&ctx->mutex_push);
cmdbuf->full = 1;
cnd_signal(&ctx->event_push);
pipe_mutex_unlock(ctx->mutex_push);
ctx->head = (ctx->head + 1) & NINE_CMD_BUFS_MASK;
cmdbuf = &ctx->pool[ctx->head];
/* wait for queue empty */
- pipe_mutex_lock(ctx->mutex_pop);
+ mtx_lock(&ctx->mutex_pop);
while (cmdbuf->full)
{
DBG("waiting for empty cmdbuf\n");
cnd_wait(&ctx->event_pop, &ctx->mutex_pop);
}
DBG("got empty cmdbuf=%p\n", cmdbuf);
pipe_mutex_unlock(ctx->mutex_pop);
cmdbuf->offset = 0;
cmdbuf->num_instr = 0;
}
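The nine_queue waits above are the classic condition-variable pattern: the predicate (cmdbuf->full) is re-checked in a loop because cnd_wait() can wake spuriously, and the mutex protects the flag while it is tested. A minimal stand-alone sketch of that pattern with the C11 names now in use (the flag and the producer thread are invented for illustration):

#include <stdio.h>
#include <threads.h>

static mtx_t lock;
static cnd_t cond;
static int full = 0;              /* stands in for cmdbuf->full */

static int producer(void *arg)
{
    (void)arg;
    mtx_lock(&lock);
    full = 1;                     /* publish the state change ... */
    cnd_signal(&cond);            /* ... and wake the waiter */
    mtx_unlock(&lock);
    return 0;
}

int main(void)
{
    thrd_t t;
    mtx_init(&lock, mtx_plain);
    cnd_init(&cond);
    thrd_create(&t, producer, NULL);

    mtx_lock(&lock);
    while (!full)                 /* loop guards against spurious wakeups */
        cnd_wait(&cond, &lock);
    mtx_unlock(&lock);

    thrd_join(t, NULL);
    puts("consumer saw full == 1");
    return 0;
}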
diff --git a/src/gallium/state_trackers/nine/nine_state.c b/src/gallium/state_trackers/nine/nine_state.c
index 978fa01..2f65414 100644
--- a/src/gallium/state_trackers/nine/nine_state.c
+++ b/src/gallium/state_trackers/nine/nine_state.c
@@ -72,64 +72,64 @@ struct csmt_context {
mtx_t thread_running;
mtx_t thread_resume;
};
/* Wait for instruction to be processed.
* Caller has to ensure that only one thread waits at a time.
*/
static void
nine_csmt_wait_processed(struct csmt_context *ctx)
{
- pipe_mutex_lock(ctx->mutex_processed);
+ mtx_lock(&ctx->mutex_processed);
while (!p_atomic_read(&ctx->processed)) {
cnd_wait(&ctx->event_processed, &ctx->mutex_processed);
}
pipe_mutex_unlock(ctx->mutex_processed);
}
/* CSMT worker thread */
static
PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
{
struct csmt_context *ctx = arg;
struct csmt_instruction *instr;
DBG("CSMT worker spawned\n");
pipe_thread_setname("CSMT-Worker");
while (1) {
nine_queue_wait_flush(ctx->pool);
- pipe_mutex_lock(ctx->thread_running);
+ mtx_lock(&ctx->thread_running);
/* Get instruction. NULL on empty cmdbuf. */
while (!p_atomic_read(&ctx->terminate) &&
(instr = (struct csmt_instruction *)nine_queue_get(ctx->pool))) {
/* decode */
if (instr->func(ctx->device, instr)) {
- pipe_mutex_lock(ctx->mutex_processed);
+ mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
}
if (p_atomic_read(&ctx->toPause)) {
pipe_mutex_unlock(ctx->thread_running);
/* will wait here until the thread can be resumed */
- pipe_mutex_lock(ctx->thread_resume);
- pipe_mutex_lock(ctx->thread_running);
+ mtx_lock(&ctx->thread_resume);
+ mtx_lock(&ctx->thread_running);
pipe_mutex_unlock(ctx->thread_resume);
}
}
pipe_mutex_unlock(ctx->thread_running);
if (p_atomic_read(&ctx->terminate)) {
- pipe_mutex_lock(ctx->mutex_processed);
+ mtx_lock(&ctx->mutex_processed);
p_atomic_set(&ctx->processed, TRUE);
cnd_signal(&ctx->event_processed);
pipe_mutex_unlock(ctx->mutex_processed);
break;
}
}
DBG("CSMT worker destroyed\n");
return 0;
}
@@ -245,25 +245,25 @@ nine_csmt_pause( struct NineDevice9 *device )
{
struct csmt_context *ctx = device->csmt_ctx;
if (!device->csmt_active)
return;
/* No need to pause the thread */
if (nine_queue_no_flushed_work(ctx->pool))
return;
- pipe_mutex_lock(ctx->thread_resume);
+ mtx_lock(&ctx->thread_resume);
p_atomic_set(&ctx->toPause, TRUE);
/* Wait until the thread is paused */
- pipe_mutex_lock(ctx->thread_running);
+ mtx_lock(&ctx->thread_running);
ctx->hasPaused = TRUE;
p_atomic_set(&ctx->toPause, FALSE);
}
static void
nine_csmt_resume( struct NineDevice9 *device )
{
struct csmt_context *ctx = device->csmt_ctx;
if (!device->csmt_active)
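The pause/resume handshake above uses two mutexes as gates rather than condition variables: nine_csmt_pause() acquires thread_resume before raising toPause, the worker parks on mtx_lock(&thread_resume), and acquiring thread_running tells the pauser that the worker has actually stopped. A compressed, hypothetical stand-alone sketch of that two-mutex gate (names and the work loop are invented; the real code keeps the gate held across pause/resume):

#include <stdio.h>
#include <stdatomic.h>
#include <threads.h>

static mtx_t running;             /* held by the worker while it executes */
static mtx_t resume_gate;         /* held by the controller while paused */
static atomic_int to_pause = 0;
static atomic_int done = 0;

static int worker(void *arg)
{
    (void)arg;
    mtx_lock(&running);
    while (!atomic_load(&done)) {
        if (atomic_load(&to_pause)) {
            mtx_unlock(&running); /* tell the pauser we stopped ... */
            mtx_lock(&resume_gate); /* ... and park until it reopens */
            mtx_lock(&running);
            mtx_unlock(&resume_gate);
        }
        /* ... process one instruction ... */
        thrd_yield();
    }
    mtx_unlock(&running);
    return 0;
}

int main(void)
{
    thrd_t t;
    mtx_init(&running, mtx_plain);
    mtx_init(&resume_gate, mtx_plain);
    thrd_create(&t, worker, NULL);

    mtx_lock(&resume_gate);       /* pause: close the gate ... */
    atomic_store(&to_pause, 1);
    mtx_lock(&running);           /* ... and wait for the worker to park */
    atomic_store(&to_pause, 0);
    puts("worker is paused");
    mtx_unlock(&running);
    mtx_unlock(&resume_gate);     /* resume */

    atomic_store(&done, 1);
    thrd_join(t, NULL);
    return 0;
}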
diff --git a/src/gallium/state_trackers/omx/entrypoint.c b/src/gallium/state_trackers/omx/entrypoint.c
index c12eb20..0274caa 100644
--- a/src/gallium/state_trackers/omx/entrypoint.c
+++ b/src/gallium/state_trackers/omx/entrypoint.c
@@ -68,21 +68,21 @@ int omx_component_library_Setup(stLoaderComponentType **stComponents)
r = vid_enc_LoaderComponent(stComponents[1]);
if (r != OMX_ErrorNone)
return OMX_ErrorInsufficientResources;
return 2;
}
struct vl_screen *omx_get_screen(void)
{
static bool first_time = true;
- pipe_mutex_lock(omx_lock);
+ mtx_lock(&omx_lock);
if (!omx_screen) {
if (first_time) {
omx_render_node = debug_get_option("OMX_RENDER_NODE", NULL);
first_time = false;
}
if (omx_render_node) {
drm_fd = loader_open_device(omx_render_node);
if (drm_fd < 0)
goto error;
@@ -110,21 +110,21 @@ struct vl_screen *omx_get_screen(void)
pipe_mutex_unlock(omx_lock);
return omx_screen;
error:
pipe_mutex_unlock(omx_lock);
return NULL;
}
void omx_put_screen(void)
{
- pipe_mutex_lock(omx_lock);
+ mtx_lock(&omx_lock);
if ((--omx_usecount) == 0) {
omx_screen->destroy(omx_screen);
omx_screen = NULL;
if (omx_render_node)
close(drm_fd);
else
XCloseDisplay(omx_display);
}
pipe_mutex_unlock(omx_lock);
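omx_get_screen()/omx_put_screen() above are a lock-protected, reference-counted singleton: the first caller creates the screen under omx_lock, the last one destroys it. A compressed hypothetical sketch of that shape (plain allocation stands in for the real screen setup, and the names are invented):

#include <stdlib.h>
#include <threads.h>

static mtx_t lock;                /* plays the role of omx_lock */
static struct screen { int dummy; } *screen;
static int usecount;

struct screen *get_screen(void)
{
    mtx_lock(&lock);
    if (!screen) {
        screen = calloc(1, sizeof(*screen));
        if (!screen) {
            mtx_unlock(&lock);    /* drop the lock on the error path too */
            return NULL;
        }
    }
    usecount++;
    mtx_unlock(&lock);
    return screen;
}

void put_screen(void)
{
    mtx_lock(&lock);
    if (--usecount == 0) {        /* last user tears the singleton down */
        free(screen);
        screen = NULL;
    }
    mtx_unlock(&lock);
}

int main(void)
{
    mtx_init(&lock, mtx_plain);
    struct screen *s = get_screen();
    if (s)
        put_screen();
    return 0;
}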
diff --git a/src/gallium/state_trackers/va/buffer.c b/src/gallium/state_trackers/va/buffer.c
index 93f012c..b9bf6f0 100644
--- a/src/gallium/state_trackers/va/buffer.c
+++ b/src/gallium/state_trackers/va/buffer.c
@@ -57,39 +57,39 @@ vlVaCreateBuffer(VADriverContextP ctx, VAContextID context, VABufferType type,
if (!buf->data) {
FREE(buf);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
if (data)
memcpy(buf->data, data, size * num_elements);
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*buf_id = handle_table_add(drv->htab, buf);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
}
VAStatus
vlVaBufferSetNumElements(VADriverContextP ctx, VABufferID buf_id,
unsigned int num_elements)
{
vlVaDriver *drv;
vlVaBuffer *buf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
if (buf->derived_surface.resource)
return VA_STATUS_ERROR_INVALID_BUFFER;
buf->data = REALLOC(buf->data, buf->size * buf->num_elements,
buf->size * num_elements);
@@ -110,21 +110,21 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!pbuff)
return VA_STATUS_ERROR_INVALID_PARAMETER;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
if (buf->derived_surface.resource) {
*pbuff = pipe_buffer_map(drv->pipe, buf->derived_surface.resource,
PIPE_TRANSFER_WRITE,
&buf->derived_surface.transfer);
@@ -153,21 +153,21 @@ vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
vlVaDriver *drv;
vlVaBuffer *buf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf || buf->export_refcount > 0) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
if (buf->derived_surface.resource) {
if (!buf->derived_surface.transfer) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
@@ -184,21 +184,21 @@ vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
VAStatus
vlVaDestroyBuffer(VADriverContextP ctx, VABufferID buf_id)
{
vlVaDriver *drv;
vlVaBuffer *buf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
if (!buf) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
}
if (buf->derived_surface.resource)
pipe_resource_reference(&buf->derived_surface.resource, NULL);
FREE(buf->data);
@@ -213,21 +213,21 @@ VAStatus
vlVaBufferInfo(VADriverContextP ctx, VABufferID buf_id, VABufferType *type,
unsigned int *size, unsigned int *num_elements)
{
vlVaDriver *drv;
vlVaBuffer *buf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
*type = buf->type;
*size = buf->size;
*num_elements = buf->num_elements;
return VA_STATUS_SUCCESS;
@@ -247,21 +247,21 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
static const uint32_t mem_types[] = {
VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME,
0
};
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
screen = VL_VA_PSCREEN(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(VL_VA_DRIVER(ctx)->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
/* Only VA surface|image-like buffers are supported for now. */
if (buf->type != VAImageBufferType)
return VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE;
@@ -288,21 +288,21 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
if (buf->export_refcount > 0) {
if (buf->export_state.mem_type != mem_type)
return VA_STATUS_ERROR_INVALID_PARAMETER;
} else {
VABufferInfo * const buf_info = &buf->export_state;
switch (mem_type) {
case VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME: {
struct winsys_handle whandle;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
drv->pipe->flush(drv->pipe, NULL, 0);
memset(&whandle, 0, sizeof(whandle));
whandle.type = DRM_API_HANDLE_TYPE_FD;
if (!screen->resource_get_handle(screen, drv->pipe,
buf->derived_surface.resource,
&whandle, PIPE_HANDLE_USAGE_READ_WRITE)) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_BUFFER;
@@ -332,21 +332,21 @@ vlVaAcquireBufferHandle(VADriverContextP ctx, VABufferID buf_id,
VAStatus
vlVaReleaseBufferHandle(VADriverContextP ctx, VABufferID buf_id)
{
vlVaDriver *drv;
vlVaBuffer *buf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
buf = handle_table_get(drv->htab, buf_id);
pipe_mutex_unlock(drv->mutex);
if (!buf)
return VA_STATUS_ERROR_INVALID_BUFFER;
if (buf->export_refcount == 0)
return VA_STATUS_ERROR_INVALID_BUFFER;
if (--buf->export_refcount == 0) {
diff --git a/src/gallium/state_trackers/va/config.c b/src/gallium/state_trackers/va/config.c
index da52a58..3d4e24b 100644
--- a/src/gallium/state_trackers/va/config.c
+++ b/src/gallium/state_trackers/va/config.c
@@ -193,21 +193,21 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
FREE(config);
return VA_STATUS_ERROR_UNSUPPORTED_RT_FORMAT;
}
}
}
/* Default value if not specified in the input attributes. */
if (!config->rt_format)
config->rt_format = VA_RT_FORMAT_YUV420 | VA_RT_FORMAT_RGB32;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
}
p = ProfileToPipe(profile);
if (p == PIPE_VIDEO_PROFILE_UNKNOWN) {
FREE(config);
return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
}
@@ -258,42 +258,42 @@ vlVaCreateConfig(VADriverContextP ctx, VAProfile profile, VAEntrypoint entrypoin
FREE(config);
return VA_STATUS_ERROR_UNSUPPORTED_RT_FORMAT;
}
}
}
/* Default value if not specified in the input attributes. */
if (!config->rt_format)
config->rt_format = VA_RT_FORMAT_YUV420;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*config_id = handle_table_add(drv->htab, config);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
}
VAStatus
vlVaDestroyConfig(VADriverContextP ctx, VAConfigID config_id)
{
vlVaDriver *drv;
vlVaConfig *config;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
if (!config)
return VA_STATUS_ERROR_INVALID_CONFIG;
FREE(config);
handle_table_remove(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
@@ -307,21 +307,21 @@ vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile
vlVaConfig *config;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
if (!config)
return VA_STATUS_ERROR_INVALID_CONFIG;
*profile = PipeToProfile(config->profile);
if (config->profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
*entrypoint = VAEntrypointVideoProc;
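The VA hunks all follow one shape: take drv->mutex, look the handle up in drv->htab, and release the mutex before (or right after) acting on the result. One spot worth noting in the quoted code: the !config early return in vlVaDestroyConfig above comes back with drv->mutex still held. A hypothetical sketch of the same shape with the lock dropped on every path (the handle table is reduced to a single slot, and all names are stand-ins):

#include <stddef.h>
#include <threads.h>

typedef int VAStatus;
enum { VA_STATUS_SUCCESS = 0, VA_STATUS_ERROR_INVALID_CONFIG = 1 };

static mtx_t mutex;               /* plays the role of drv->mutex */
static void *slot;                /* one-entry stand-in for drv->htab */

static void *handle_get(int id)    { return id == 0 ? slot : NULL; }
static void  handle_remove(int id) { if (id == 0) slot = NULL; }

VAStatus destroy_config(int config_id)
{
    void *config;

    mtx_lock(&mutex);
    config = handle_get(config_id);
    if (!config) {
        mtx_unlock(&mutex);       /* drop the lock on the error path too */
        return VA_STATUS_ERROR_INVALID_CONFIG;
    }
    handle_remove(config_id);
    mtx_unlock(&mutex);
    return VA_STATUS_SUCCESS;
}

int main(void)
{
    mtx_init(&mutex, mtx_plain);
    return destroy_config(0);     /* slot is empty, so this reports the error */
}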
diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
index 36e314f..a345247 100644
--- a/src/gallium/state_trackers/va/context.c
+++ b/src/gallium/state_trackers/va/context.c
@@ -207,21 +207,21 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
{
vlVaDriver *drv;
vlVaContext *context;
vlVaConfig *config;
int is_vpp;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
is_vpp = config->profile == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
!picture_height && !flag && !render_targets && !num_render_targets;
if (!(picture_width && picture_height) && !is_vpp)
return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
context = CALLOC(1, sizeof(vlVaContext));
@@ -280,38 +280,38 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
default:
break;
}
}
context->desc.base.profile = config->profile;
context->desc.base.entry_point = config->entrypoint;
if (config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
context->desc.h264enc.rate_ctrl.rate_ctrl_method = config->rc;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
*context_id = handle_table_add(drv->htab, context);
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_SUCCESS;
}
VAStatus
vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
{
vlVaDriver *drv;
vlVaContext *context;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
if (context->decoder) {
if (context->desc.base.entry_point != PIPE_VIDEO_ENTRYPOINT_ENCODE) {
if (u_reduce_video_profile(context->decoder->profile) ==
PIPE_VIDEO_FORMAT_MPEG4_AVC) {
diff --git a/src/gallium/state_trackers/va/image.c b/src/gallium/state_trackers/va/image.c
index 47d31de..2c7afe2 100644
--- a/src/gallium/state_trackers/va/image.c
+++ b/src/gallium/state_trackers/va/image.c
@@ -107,21 +107,21 @@ vlVaCreateImage(VADriverContextP ctx, VAImageFormat *format, int width, int heig
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!(format && image && width && height))
return VA_STATUS_ERROR_INVALID_PARAMETER;
drv = VL_VA_DRIVER(ctx);
img = CALLOC(1, sizeof(VAImage));
if (!img)
return VA_STATUS_ERROR_ALLOCATION_FAILED;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
pipe_mutex_unlock(drv->mutex);
img->format = *format;
img->width = width;
img->height = height;
w = align(width, 2);
h = align(height, 2);
switch (format->fourcc) {
@@ -251,21 +251,21 @@ vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
FREE(img);
return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
}
img_buf = CALLOC(1, sizeof(vlVaBuffer));
if (!img_buf) {
FREE(img);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
}
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img->image_id = handle_table_add(drv->htab, img);
img_buf->type = VAImageBufferType;
img_buf->size = img->data_size;
img_buf->num_elements = 1;
pipe_resource_reference(&img_buf->derived_surface.resource, surfaces[0]->texture);
img->buf = handle_table_add(VL_VA_DRIVER(ctx)->htab, img_buf);
pipe_mutex_unlock(drv->mutex);
@@ -279,21 +279,21 @@ VAStatus
vlVaDestroyImage(VADriverContextP ctx, VAImageID image)
{
vlVaDriver *drv;
VAImage *vaimage;
VAStatus status;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
handle_table_remove(VL_VA_DRIVER(ctx)->htab, image);
pipe_mutex_unlock(drv->mutex);
status = vlVaDestroyBuffer(ctx, vaimage->buf);
FREE(vaimage);
@@ -321,21 +321,21 @@ vlVaGetImage(VADriverContextP ctx, VASurfaceID surface, int x, int y,
enum pipe_format format;
bool convert = false;
void *data[3];
unsigned pitches[3], i, j;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
@@ -431,21 +431,21 @@ vlVaPutImage(VADriverContextP ctx, VASurfaceID surface, VAImageID image,
VAImage *vaimage;
struct pipe_sampler_view **views;
enum pipe_format format;
void *data[3];
unsigned pitches[3], i, j;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
vaimage = handle_table_get(drv->htab, image);
if (!vaimage) {
pipe_mutex_unlock(drv->mutex);
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
index 82584ea..5ff178a 100644
--- a/src/gallium/state_trackers/va/picture.c
+++ b/src/gallium/state_trackers/va/picture.c
@@ -43,21 +43,21 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
vlVaContext *context;
vlVaSurface *surf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
surf = handle_table_get(drv->htab, render_target);
pipe_mutex_unlock(drv->mutex);
if (!surf || !surf->buffer)
return VA_STATUS_ERROR_INVALID_SURFACE;
@@ -474,21 +474,21 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
unsigned i;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
if (!context) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_CONTEXT;
}
for (i = 0; i < num_buffers; ++i) {
vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
if (!buf) {
pipe_mutex_unlock(drv->mutex);
@@ -549,35 +549,35 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
vlVaSurface *surf;
void *feedback;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
context = handle_table_get(drv->htab, context_id);
pipe_mutex_unlock(drv->mutex);
if (!context)
return VA_STATUS_ERROR_INVALID_CONTEXT;
if (!context->decoder) {
if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
return VA_STATUS_ERROR_INVALID_CONTEXT;
/* VPP */
return VA_STATUS_SUCCESS;
}
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, context->target_id);
context->mpeg4.frame_num++;
if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
coded_buf = context->coded_buf;
getEncParamPreset(context);
context->desc.h264enc.frame_num_cnt++;
context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
context->decoder->encode_bitstream(context->decoder, context->target,
coded_buf->derived_surface.resource, &feedback);
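
The vlVaEndPicture() hunk above is worth a second look: the mutex is dropped after the context lookup and retaken for the decode/encode work, so the second mtx_lock() is intentional, not a conversion artifact. A sketch of that shape, with illustrative names only:

#include <stdbool.h>
#include <threads.h>

/* Sketch of the unlock/relock shape in vlVaEndPicture: the context is
 * validated outside the lock, so the VPP early return never leaves the
 * mutex held, and the real work retakes it. */
static int end_picture(mtx_t *mutex, bool have_decoder)
{
   mtx_lock(mutex);
   /* ... handle_table_get(context_id) ... */
   mtx_unlock(mutex);

   if (!have_decoder)
      return 0;          /* VPP-only contexts return unlocked */

   mtx_lock(mutex);      /* second lock covers the decode/encode work */
   /* ... begin_frame / encode_bitstream / end_frame ... */
   mtx_unlock(mutex);
   return 0;
}
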
diff --git a/src/gallium/state_trackers/va/subpicture.c b/src/gallium/state_trackers/va/subpicture.c
index f546e56..0d90758 100644
--- a/src/gallium/state_trackers/va/subpicture.c
+++ b/src/gallium/state_trackers/va/subpicture.c
@@ -66,21 +66,21 @@ vlVaCreateSubpicture(VADriverContextP ctx, VAImageID image,
VASubpictureID *subpicture)
{
vlVaDriver *drv;
vlVaSubpicture *sub;
VAImage *img;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
sub = CALLOC(1, sizeof(*sub));
if (!sub) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_ALLOCATION_FAILED;
@@ -96,21 +96,21 @@ vlVaCreateSubpicture(VADriverContextP ctx, VAImageID image,
VAStatus
vlVaDestroySubpicture(VADriverContextP ctx, VASubpictureID subpicture)
{
vlVaDriver *drv;
vlVaSubpicture *sub;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
}
FREE(sub);
handle_table_remove(drv->htab, subpicture);
pipe_mutex_unlock(drv->mutex);
@@ -122,21 +122,21 @@ VAStatus
vlVaSubpictureImage(VADriverContextP ctx, VASubpictureID subpicture, VAImageID image)
{
vlVaDriver *drv;
vlVaSubpicture *sub;
VAImage *img;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
img = handle_table_get(drv->htab, image);
if (!img) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_IMAGE;
}
sub = handle_table_get(drv->htab, subpicture);
pipe_mutex_unlock(drv->mutex);
if (!sub)
@@ -179,21 +179,21 @@ vlVaAssociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
struct pipe_sampler_view sampler_templ;
vlVaDriver *drv;
vlVaSurface *surf;
int i;
struct u_rect src_rect = {src_x, src_x + src_width, src_y, src_y + src_height};
struct u_rect dst_rect = {dest_x, dest_x + dest_width, dest_y, dest_y + dest_height};
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
}
for (i = 0; i < num_surfaces; i++) {
surf = handle_table_get(drv->htab, target_surfaces[i]);
if (!surf) {
@@ -249,21 +249,21 @@ vlVaDeassociateSubpicture(VADriverContextP ctx, VASubpictureID subpicture,
{
int i;
int j;
vlVaSurface *surf;
vlVaSubpicture *sub, **array;
vlVaDriver *drv;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
sub = handle_table_get(drv->htab, subpicture);
if (!sub) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SUBPICTURE;
}
for (i = 0; i < num_surfaces; i++) {
surf = handle_table_get(drv->htab, target_surfaces[i]);
if (!surf) {
diff --git a/src/gallium/state_trackers/va/surface.c b/src/gallium/state_trackers/va/surface.c
index 0e1dbe0..6a1736b 100644
--- a/src/gallium/state_trackers/va/surface.c
+++ b/src/gallium/state_trackers/va/surface.c
@@ -63,21 +63,21 @@ vlVaCreateSurfaces(VADriverContextP ctx, int width, int height, int format,
VAStatus
vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_surfaces)
{
vlVaDriver *drv;
int i;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
for (i = 0; i < num_surfaces; ++i) {
vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
if (surf->buffer)
surf->buffer->destroy(surf->buffer);
util_dynarray_fini(&surf->subpics);
FREE(surf);
@@ -95,21 +95,21 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
vlVaContext *context;
vlVaSurface *surf;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, render_target);
if (!surf || !surf->buffer) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
if (!surf->feedback) {
// No outstanding operation: nothing to do.
pipe_mutex_unlock(drv->mutex);
@@ -281,21 +281,21 @@ vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw, short s
struct pipe_surface surf_templ, *surf_draw;
struct vl_screen *vscreen;
struct u_rect src_rect, *dirty_area;
struct u_rect dst_rect = {destx, destx + destw, desty, desty + desth};
VAStatus status;
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
surf = handle_table_get(drv->htab, surface_id);
if (!surf) {
pipe_mutex_unlock(drv->mutex);
return VA_STATUS_ERROR_INVALID_SURFACE;
}
screen = drv->pipe->screen;
vscreen = drv->vscreen;
tex = vscreen->texture_from_drawable(vscreen, draw);
@@ -392,21 +392,21 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config_id,
}
if (!ctx)
return VA_STATUS_ERROR_INVALID_CONTEXT;
drv = VL_VA_DRIVER(ctx);
if (!drv)
return VA_STATUS_ERROR_INVALID_CONTEXT;
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
config = handle_table_get(drv->htab, config_id);
pipe_mutex_unlock(drv->mutex);
if (!config)
return VA_STATUS_ERROR_INVALID_CONFIG;
pscreen = VL_VA_PSCREEN(ctx);
if (!pscreen)
return VA_STATUS_ERROR_INVALID_CONTEXT;
@@ -679,21 +679,21 @@ vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format,
templat.chroma_format = ChromaToPipe(format);
templat.width = width;
templat.height = height;
if (debug_get_option_nointerlace())
templat.interlaced = false;
memset(surfaces, VA_INVALID_ID, num_surfaces * sizeof(VASurfaceID));
- pipe_mutex_lock(drv->mutex);
+ mtx_lock(&drv->mutex);
for (i = 0; i < num_surfaces; i++) {
vlVaSurface *surf = CALLOC(1, sizeof(vlVaSurface));
if (!surf)
goto no_res;
surf->templat = templat;
switch (memory_type) {
case VA_SURFACE_ATTRIB_MEM_TYPE_VA:
/* The application will clear the TILING flag when the surface is
diff --git a/src/gallium/state_trackers/vdpau/bitmap.c b/src/gallium/state_trackers/vdpau/bitmap.c
index d9ec60d..14f6c36 100644
--- a/src/gallium/state_trackers/vdpau/bitmap.c
+++ b/src/gallium/state_trackers/vdpau/bitmap.c
@@ -72,21 +72,21 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
memset(&res_tmpl, 0, sizeof(res_tmpl));
res_tmpl.target = PIPE_TEXTURE_2D;
res_tmpl.format = VdpFormatRGBAToPipe(rgba_format);
res_tmpl.width0 = width;
res_tmpl.height0 = height;
res_tmpl.depth0 = 1;
res_tmpl.array_size = 1;
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
res_tmpl.usage = frequently_accessed ? PIPE_USAGE_DYNAMIC : PIPE_USAGE_DEFAULT;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!CheckSurfaceParams(pipe->screen, &res_tmpl)) {
ret = VDP_STATUS_RESOURCES;
goto err_unlock;
}
res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
if (!res) {
ret = VDP_STATUS_RESOURCES;
goto err_unlock;
@@ -99,21 +99,21 @@ vlVdpBitmapSurfaceCreate(VdpDevice device,
if (!vlsurface->sampler_view) {
ret = VDP_STATUS_RESOURCES;
goto err_unlock;
}
pipe_mutex_unlock(dev->mutex);
*surface = vlAddDataHTAB(vlsurface);
if (*surface == 0) {
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
ret = VDP_STATUS_ERROR;
goto err_sampler;
}
return VDP_STATUS_OK;
err_sampler:
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
err_unlock:
pipe_mutex_unlock(dev->mutex);
@@ -127,21 +127,21 @@ err_unlock:
*/
VdpStatus
vlVdpBitmapSurfaceDestroy(VdpBitmapSurface surface)
{
vlVdpBitmapSurface *vlsurface;
vlsurface = vlGetDataHTAB(surface);
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
pipe_mutex_unlock(vlsurface->device->mutex);
vlRemoveDataHTAB(surface);
DeviceReference(&vlsurface->device, NULL);
FREE(vlsurface);
return VDP_STATUS_OK;
}
@@ -189,21 +189,21 @@ vlVdpBitmapSurfacePutBitsNative(VdpBitmapSurface surface,
vlsurface = vlGetDataHTAB(surface);
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
if (!(source_data && source_pitches))
return VDP_STATUS_INVALID_POINTER;
pipe = vlsurface->device->context;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
PIPE_TRANSFER_WRITE, &dst_box, *source_data,
*source_pitches, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
return VDP_STATUS_OK;
}
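
The bitmap-surface hunks above all lean on the err_unlock/err_sampler labels, so the conversion only had to touch the single lock call. A sketch of that single-exit discipline, with a placeholder failure condition:

#include <threads.h>

/* Sketch of the err_unlock pattern used by the VDPAU constructors:
 * once the mutex is taken, every failure funnels through one unlock
 * label so no error path can leak the lock. */
static int create_locked(mtx_t *mutex, int simulate_failure)
{
   int ret = -1;

   mtx_lock(mutex);
   if (simulate_failure)
      goto err_unlock;
   /* ... resource_create / create_sampler_view ... */
   ret = 0;
err_unlock:
   mtx_unlock(mutex);    /* the single exit keeps lock/unlock balanced */
   return ret;
}
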
diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c
index 387371b..0f8b8ff 100644
--- a/src/gallium/state_trackers/vdpau/decode.c
+++ b/src/gallium/state_trackers/vdpau/decode.c
@@ -64,21 +64,21 @@ vlVdpDecoderCreate(VdpDevice device,
if (templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN)
return VDP_STATUS_INVALID_DECODER_PROFILE;
dev = vlGetDataHTAB(device);
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
pipe = dev->context;
screen = dev->vscreen->pscreen;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
supported = screen->get_video_param
(
screen,
templat.profile,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_SUPPORTED
);
if (!supported) {
pipe_mutex_unlock(dev->mutex);
@@ -156,21 +156,21 @@ error_decoder:
*/
VdpStatus
vlVdpDecoderDestroy(VdpDecoder decoder)
{
vlVdpDecoder *vldecoder;
vldecoder = (vlVdpDecoder *)vlGetDataHTAB(decoder);
if (!vldecoder)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vldecoder->mutex);
+ mtx_lock(&vldecoder->mutex);
vldecoder->decoder->destroy(vldecoder->decoder);
pipe_mutex_unlock(vldecoder->mutex);
mtx_destroy(&vldecoder->mutex);
vlRemoveDataHTAB(decoder);
DeviceReference(&vldecoder->device, NULL);
FREE(vldecoder);
return VDP_STATUS_OK;
}
@@ -607,21 +607,21 @@ vlVdpDecoderRender(VdpDecoder decoder,
buffer_support[0] = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);
buffer_support[1] = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_SUPPORTS_INTERLACED);
if (vlsurf->video_buffer == NULL ||
!screen->is_video_format_supported(screen, vlsurf->video_buffer->buffer_format,
dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
!buffer_support[vlsurf->video_buffer->interlaced]) {
- pipe_mutex_lock(vlsurf->device->mutex);
+ mtx_lock(&vlsurf->device->mutex);
/* destroy the old one */
if (vlsurf->video_buffer)
vlsurf->video_buffer->destroy(vlsurf->video_buffer);
/* set the buffer format to the preferred one */
vlsurf->templat.buffer_format = screen->get_video_param(screen, dec->profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_PREFERED_FORMAT);
/* also set interlacing to the decoder's preferences */
@@ -667,17 +667,17 @@ vlVdpDecoderRender(VdpDecoder decoder,
desc.h265.pps = &pps_h265;
ret = vlVdpDecoderRenderH265(&desc.h265, (VdpPictureInfoHEVC *)picture_info);
break;
default:
return VDP_STATUS_INVALID_DECODER_PROFILE;
}
if (ret != VDP_STATUS_OK)
return ret;
- pipe_mutex_lock(vldecoder->mutex);
+ mtx_lock(&vldecoder->mutex);
dec->begin_frame(dec, vlsurf->video_buffer, &desc.base);
dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base, bitstream_buffer_count, buffers, sizes);
dec->end_frame(dec, vlsurf->video_buffer, &desc.base);
pipe_mutex_unlock(vldecoder->mutex);
return ret;
}
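
One ordering detail in the decoder hunk above: the mutex is unlocked before mtx_destroy() runs, which C11 requires, since destroying a locked mutex is undefined behaviour. A sketch under illustrative types:

#include <threads.h>

/* Sketch of the teardown order in vlVdpDecoderDestroy. */
struct decoder {
   mtx_t mutex;
};

static void decoder_destroy(struct decoder *dec)
{
   mtx_lock(&dec->mutex);
   /* ... destroy the hardware decoder under the lock ... */
   mtx_unlock(&dec->mutex);
   mtx_destroy(&dec->mutex);   /* only after the final unlock */
}
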
diff --git a/src/gallium/state_trackers/vdpau/htab.c b/src/gallium/state_trackers/vdpau/htab.c
index 277ea0c..f938a19 100644
--- a/src/gallium/state_trackers/vdpau/htab.c
+++ b/src/gallium/state_trackers/vdpau/htab.c
@@ -31,59 +31,59 @@
static struct handle_table *htab = NULL;
static mtx_t htab_lock = _MTX_INITIALIZER_NP;
boolean vlCreateHTAB(void)
{
boolean ret;
/* Make sure handle table handles match VDPAU handles. */
assert(sizeof(unsigned) <= sizeof(vlHandle));
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (!htab)
htab = handle_table_create();
ret = htab != NULL;
pipe_mutex_unlock(htab_lock);
return ret;
}
void vlDestroyHTAB(void)
{
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab && !handle_table_get_first_handle(htab)) {
handle_table_destroy(htab);
htab = NULL;
}
pipe_mutex_unlock(htab_lock);
}
vlHandle vlAddDataHTAB(void *data)
{
vlHandle handle = 0;
assert(data);
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab)
handle = handle_table_add(htab, data);
pipe_mutex_unlock(htab_lock);
return handle;
}
void* vlGetDataHTAB(vlHandle handle)
{
void *data = NULL;
assert(handle);
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab)
data = handle_table_get(htab, handle);
pipe_mutex_unlock(htab_lock);
return data;
}
void vlRemoveDataHTAB(vlHandle handle)
{
- pipe_mutex_lock(htab_lock);
+ mtx_lock(&htab_lock);
if (htab)
handle_table_remove(htab, handle);
pipe_mutex_unlock(htab_lock);
}
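
htab.c above is the one spot in this file set that relies on a statically initialized mutex (_MTX_INITIALIZER_NP) guarding lazy creation of a global table. A portable approximation of the same scheme, since standard C11 offers no static mtx_t initializer; handle_table_create() is stubbed out with calloc():

#include <stdlib.h>
#include <threads.h>

static mtx_t htab_lock;
static once_flag htab_once = ONCE_FLAG_INIT;
static void *htab;               /* lazily created under htab_lock */

static void htab_lock_init(void)
{
   mtx_init(&htab_lock, mtx_plain);
}

static int ensure_htab(void)
{
   int ok;

   call_once(&htab_once, htab_lock_init);
   mtx_lock(&htab_lock);
   if (!htab)
      htab = calloc(64, sizeof(void *));   /* handle_table_create() stub */
   ok = htab != NULL;
   mtx_unlock(&htab_lock);
   return ok;
}
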
diff --git a/src/gallium/state_trackers/vdpau/mixer.c b/src/gallium/state_trackers/vdpau/mixer.c
index 37a6fcd..a1c0377 100644
--- a/src/gallium/state_trackers/vdpau/mixer.c
+++ b/src/gallium/state_trackers/vdpau/mixer.c
@@ -56,21 +56,21 @@ vlVdpVideoMixerCreate(VdpDevice device,
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
screen = dev->vscreen->pscreen;
vmixer = CALLOC(1, sizeof(vlVdpVideoMixer));
if (!vmixer)
return VDP_STATUS_RESOURCES;
DeviceReference(&vmixer->device, dev);
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&vmixer->cstate, dev->context)) {
ret = VDP_STATUS_ERROR;
goto no_compositor_state;
}
vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_BT_601, NULL, true, &vmixer->csc);
if (!debug_get_bool_option("G3DVL_NO_CSC", FALSE)) {
if (!vl_compositor_set_csc_matrix(&vmixer->cstate, (const vl_csc_matrix *)&vmixer->csc, 1.0f, 0.0f)) {
ret = VDP_STATUS_ERROR;
@@ -184,21 +184,21 @@ no_compositor_state:
*/
VdpStatus
vlVdpVideoMixerDestroy(VdpVideoMixer mixer)
{
vlVdpVideoMixer *vmixer;
vmixer = vlGetDataHTAB(mixer);
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
vlRemoveDataHTAB(mixer);
vl_compositor_cleanup_state(&vmixer->cstate);
if (vmixer->deint.filter) {
vl_deint_filter_cleanup(vmixer->deint.filter);
FREE(vmixer->deint.filter);
}
@@ -283,21 +283,21 @@ VdpStatus vlVdpVideoMixerRender(VdpVideoMixer mixer,
dst = vlGetDataHTAB(destination_surface);
if (!dst)
return VDP_STATUS_INVALID_HANDLE;
if (background_surface != VDP_INVALID_HANDLE) {
bg = vlGetDataHTAB(background_surface);
if (!bg)
return VDP_STATUS_INVALID_HANDLE;
}
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
vl_compositor_clear_layers(&vmixer->cstate);
if (bg)
vl_compositor_set_rgba_layer(&vmixer->cstate, compositor, layer++, bg->sampler_view,
RectToPipe(background_source_rect, &rect), NULL, NULL);
switch (current_picture_structure) {
case VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD:
deinterlace = VL_COMPOSITOR_BOB_TOP;
@@ -651,21 +651,21 @@ vlVdpVideoMixerSetFeatureEnables(VdpVideoMixer mixer,
vlVdpVideoMixer *vmixer;
unsigned i;
if (!(features && feature_enables))
return VDP_STATUS_INVALID_POINTER;
vmixer = vlGetDataHTAB(mixer);
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
for (i = 0; i < feature_count; ++i) {
switch (features[i]) {
/* they are valid, but we don't support them */
case VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL:
case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L2:
case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L3:
case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L4:
case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L5:
case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L6:
case VDP_VIDEO_MIXER_FEATURE_HIGH_QUALITY_SCALING_L7:
@@ -789,21 +789,21 @@ vlVdpVideoMixerSetAttributeValues(VdpVideoMixer mixer,
unsigned i;
VdpStatus ret;
if (!(attributes && attribute_values))
return VDP_STATUS_INVALID_POINTER;
vlVdpVideoMixer *vmixer = vlGetDataHTAB(mixer);
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
for (i = 0; i < attribute_count; ++i) {
switch (attributes[i]) {
case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:
background_color = attribute_values[i];
color.f[0] = background_color->red;
color.f[1] = background_color->green;
color.f[2] = background_color->blue;
color.f[3] = background_color->alpha;
vl_compositor_set_clear_color(&vmixer->cstate, &color);
break;
@@ -948,21 +948,21 @@ vlVdpVideoMixerGetAttributeValues(VdpVideoMixer mixer,
unsigned i;
VdpCSCMatrix **vdp_csc;
if (!(attributes && attribute_values))
return VDP_STATUS_INVALID_POINTER;
vlVdpVideoMixer *vmixer = vlGetDataHTAB(mixer);
if (!vmixer)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vmixer->device->mutex);
+ mtx_lock(&vmixer->device->mutex);
for (i = 0; i < attribute_count; ++i) {
switch (attributes[i]) {
case VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR:
vl_compositor_get_clear_color(&vmixer->cstate, attribute_values[i]);
break;
case VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX:
vdp_csc = attribute_values[i];
if (!vmixer->custom_csc) {
*vdp_csc = NULL;
break;
diff --git a/src/gallium/state_trackers/vdpau/output.c b/src/gallium/state_trackers/vdpau/output.c
index 6506280..5836395 100644
--- a/src/gallium/state_trackers/vdpau/output.c
+++ b/src/gallium/state_trackers/vdpau/output.c
@@ -85,21 +85,21 @@ vlVdpOutputSurfaceCreate(VdpDevice device,
res_tmpl.target = PIPE_TEXTURE_2D;
res_tmpl.format = VdpFormatRGBAToPipe(rgba_format);
res_tmpl.width0 = width;
res_tmpl.height0 = height;
res_tmpl.depth0 = 1;
res_tmpl.array_size = 1;
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET |
PIPE_BIND_SHARED | PIPE_BIND_SCANOUT;
res_tmpl.usage = PIPE_USAGE_DEFAULT;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!CheckSurfaceParams(pipe->screen, &res_tmpl))
goto err_unlock;
res = pipe->screen->resource_create(pipe->screen, &res_tmpl);
if (!res)
goto err_unlock;
vlVdpDefaultSamplerViewTemplate(&sv_templ, res);
vlsurface->sampler_view = pipe->create_sampler_view(pipe, res, &sv_templ);
@@ -145,21 +145,21 @@ vlVdpOutputSurfaceDestroy(VdpOutputSurface surface)
{
vlVdpOutputSurface *vlsurface;
struct pipe_context *pipe;
vlsurface = vlGetDataHTAB(surface);
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
pipe = vlsurface->device->context;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
pipe_surface_reference(&vlsurface->surface, NULL);
pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
pipe->screen->fence_reference(pipe->screen, &vlsurface->fence, NULL);
vl_compositor_cleanup_state(&vlsurface->cstate);
pipe_mutex_unlock(vlsurface->device->mutex);
vlRemoveDataHTAB(surface);
DeviceReference(&vlsurface->device, NULL);
FREE(vlsurface);
@@ -209,21 +209,21 @@ vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
pipe = vlsurface->device->context;
if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
if (!destination_data || !destination_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
res = vlsurface->sampler_view->texture;
box = RectToPipeBox(source_rect, res);
map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box, &transfer);
if (!map) {
pipe_mutex_unlock(vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
util_copy_rect(*destination_data, res->format, *destination_pitches, 0, 0,
@@ -253,21 +253,21 @@ vlVdpOutputSurfacePutBitsNative(VdpOutputSurface surface,
if (!vlsurface)
return VDP_STATUS_INVALID_HANDLE;
pipe = vlsurface->device->context;
if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
dst_box = RectToPipeBox(destination_rect, vlsurface->sampler_view->texture);
/* Check for a no-op. (application bug?) */
if (!dst_box.width || !dst_box.height) {
pipe_mutex_unlock(vlsurface->device->mutex);
return VDP_STATUS_OK;
}
pipe->texture_subdata(pipe, vlsurface->sampler_view->texture, 0,
@@ -337,21 +337,21 @@ vlVdpOutputSurfacePutBitsIndexed(VdpOutputSurface surface,
res_tmpl.height0 = abs(destination_rect->y0-destination_rect->y1);
} else {
res_tmpl.width0 = vlsurface->surface->texture->width0;
res_tmpl.height0 = vlsurface->surface->texture->height0;
}
res_tmpl.depth0 = 1;
res_tmpl.array_size = 1;
res_tmpl.usage = PIPE_USAGE_STAGING;
res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
if (!CheckSurfaceParams(context->screen, &res_tmpl))
goto error_resource;
res = context->screen->resource_create(context->screen, &res_tmpl);
if (!res)
goto error_resource;
box.x = box.y = box.z = 0;
box.width = res->width0;
@@ -454,21 +454,21 @@ vlVdpOutputSurfacePutBitsYCbCr(VdpOutputSurface surface,
compositor = &vlsurface->device->compositor;
cstate = &vlsurface->cstate;
format = FormatYCBCRToPipe(source_ycbcr_format);
if (format == PIPE_FORMAT_NONE)
return VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
memset(&vtmpl, 0, sizeof(vtmpl));
vtmpl.buffer_format = format;
vtmpl.chroma_format = FormatYCBCRToPipeChroma(source_ycbcr_format);
if (destination_rect) {
vtmpl.width = abs(destination_rect->x0-destination_rect->x1);
vtmpl.height = abs(destination_rect->y0-destination_rect->y1);
} else {
vtmpl.width = vlsurface->surface->texture->width0;
vtmpl.height = vlsurface->surface->texture->height0;
@@ -672,21 +672,21 @@ vlVdpOutputSurfaceRenderOutputSurface(VdpOutputSurface destination_surface,
vlVdpOutputSurface *src_vlsurface = vlGetDataHTAB(source_surface);
if (!src_vlsurface)
return VDP_STATUS_INVALID_HANDLE;
if (dst_vlsurface->device != src_vlsurface->device)
return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
src_sv = src_vlsurface->sampler_view;
}
- pipe_mutex_lock(dst_vlsurface->device->mutex);
+ mtx_lock(&dst_vlsurface->device->mutex);
context = dst_vlsurface->device->context;
compositor = &dst_vlsurface->device->compositor;
cstate = &dst_vlsurface->cstate;
blend = BlenderToPipe(context, blend_state);
vl_compositor_clear_layers(cstate);
vl_compositor_set_layer_blend(cstate, 0, blend, false);
vl_compositor_set_rgba_layer(cstate, compositor, 0, src_sv,
@@ -746,21 +746,21 @@ vlVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
if (dst_vlsurface->device != src_vlsurface->device)
return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
src_sv = src_vlsurface->sampler_view;
}
context = dst_vlsurface->device->context;
compositor = &dst_vlsurface->device->compositor;
cstate = &dst_vlsurface->cstate;
- pipe_mutex_lock(dst_vlsurface->device->mutex);
+ mtx_lock(&dst_vlsurface->device->mutex);
blend = BlenderToPipe(context, blend_state);
vl_compositor_clear_layers(cstate);
vl_compositor_set_layer_blend(cstate, 0, blend, false);
vl_compositor_set_rgba_layer(cstate, compositor, 0, src_sv,
RectToPipe(source_rect, &src_rect), NULL,
ColorsToPipe(colors, flags, vlcolors));
vl_compositor_set_layer_rotation(cstate, 0, flags & 3);
vl_compositor_set_layer_dst_area(cstate, 0, RectToPipe(destination_rect, &dst_rect));
@@ -773,42 +773,42 @@ vlVdpOutputSurfaceRenderBitmapSurface(VdpOutputSurface destination_surface,
}
struct pipe_resource *vlVdpOutputSurfaceGallium(VdpOutputSurface surface)
{
vlVdpOutputSurface *vlsurface;
vlsurface = vlGetDataHTAB(surface);
if (!vlsurface || !vlsurface->surface)
return NULL;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
pipe_mutex_unlock(vlsurface->device->mutex);
return vlsurface->surface->texture;
}
VdpStatus vlVdpOutputSurfaceDMABuf(VdpOutputSurface surface,
struct VdpSurfaceDMABufDesc *result)
{
vlVdpOutputSurface *vlsurface;
struct pipe_screen *pscreen;
struct winsys_handle whandle;
memset(result, 0, sizeof(*result));
result->handle = -1;
vlsurface = vlGetDataHTAB(surface);
if (!vlsurface || !vlsurface->surface)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
vlsurface->device->context->flush(vlsurface->device->context, NULL, 0);
memset(&whandle, 0, sizeof(struct winsys_handle));
whandle.type = DRM_API_HANDLE_TYPE_FD;
pscreen = vlsurface->surface->texture->screen;
if (!pscreen->resource_get_handle(pscreen, vlsurface->device->context,
vlsurface->surface->texture, &whandle,
PIPE_HANDLE_USAGE_READ_WRITE)) {
pipe_mutex_unlock(vlsurface->device->mutex);
diff --git a/src/gallium/state_trackers/vdpau/presentation.c b/src/gallium/state_trackers/vdpau/presentation.c
index 78cafc8..ee32bac 100644
--- a/src/gallium/state_trackers/vdpau/presentation.c
+++ b/src/gallium/state_trackers/vdpau/presentation.c
@@ -58,21 +58,21 @@ vlVdpPresentationQueueCreate(VdpDevice device,
if (dev != pqt->device)
return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
pq = CALLOC(1, sizeof(vlVdpPresentationQueue));
if (!pq)
return VDP_STATUS_RESOURCES;
DeviceReference(&pq->device, dev);
pq->drawable = pqt->drawable;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
if (!vl_compositor_init_state(&pq->cstate, dev->context)) {
pipe_mutex_unlock(dev->mutex);
ret = VDP_STATUS_ERROR;
goto no_compositor;
}
pipe_mutex_unlock(dev->mutex);
*presentation_queue = vlAddDataHTAB(pq);
if (*presentation_queue == 0) {
ret = VDP_STATUS_ERROR;
@@ -93,21 +93,21 @@ no_compositor:
*/
VdpStatus
vlVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
{
vlVdpPresentationQueue *pq;
pq = vlGetDataHTAB(presentation_queue);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
vl_compositor_cleanup_state(&pq->cstate);
pipe_mutex_unlock(pq->device->mutex);
vlRemoveDataHTAB(presentation_queue);
DeviceReference(&pq->device, NULL);
FREE(pq);
return VDP_STATUS_OK;
}
@@ -126,21 +126,21 @@ vlVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue
pq = vlGetDataHTAB(presentation_queue);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
color.f[0] = background_color->red;
color.f[1] = background_color->green;
color.f[2] = background_color->blue;
color.f[3] = background_color->alpha;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
vl_compositor_set_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
return VDP_STATUS_OK;
}
/**
* Retrieve the current background color setting.
*/
VdpStatus
@@ -150,21 +150,21 @@ vlVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue
vlVdpPresentationQueue *pq;
union pipe_color_union color;
if (!background_color)
return VDP_STATUS_INVALID_POINTER;
pq = vlGetDataHTAB(presentation_queue);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
vl_compositor_get_clear_color(&pq->cstate, &color);
pipe_mutex_unlock(pq->device->mutex);
background_color->red = color.f[0];
background_color->green = color.f[1];
background_color->blue = color.f[2];
background_color->alpha = color.f[3];
return VDP_STATUS_OK;
}
@@ -178,21 +178,21 @@ vlVdpPresentationQueueGetTime(VdpPresentationQueue presentation_queue,
{
vlVdpPresentationQueue *pq;
if (!current_time)
return VDP_STATUS_INVALID_POINTER;
pq = vlGetDataHTAB(presentation_queue);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
*current_time = pq->device->vscreen->get_timestamp(pq->device->vscreen,
(void *)pq->drawable);
pipe_mutex_unlock(pq->device->mutex);
return VDP_STATUS_OK;
}
/**
* Enter a surface into the presentation queue.
*/
@@ -223,21 +223,21 @@ vlVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue,
surf = vlGetDataHTAB(surface);
if (!surf)
return VDP_STATUS_INVALID_HANDLE;
pipe = pq->device->context;
compositor = &pq->device->compositor;
cstate = &pq->cstate;
vscreen = pq->device->vscreen;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
if (vscreen->set_back_texture_from_output && surf->send_to_X)
vscreen->set_back_texture_from_output(vscreen, surf->surface->texture, clip_width, clip_height);
tex = vscreen->texture_from_drawable(vscreen, (void *)pq->drawable);
if (!tex) {
pipe_mutex_unlock(pq->device->mutex);
return VDP_STATUS_INVALID_HANDLE;
}
if (!vscreen->set_back_texture_from_output || !surf->send_to_X) {
dirty_area = vscreen->get_dirty_area(vscreen);
@@ -314,21 +314,21 @@ vlVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_qu
return VDP_STATUS_INVALID_POINTER;
pq = vlGetDataHTAB(presentation_queue);
if (!pq)
return VDP_STATUS_INVALID_HANDLE;
surf = vlGetDataHTAB(surface);
if (!surf)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
if (surf->fence) {
screen = pq->device->vscreen->pscreen;
screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
screen->fence_reference(screen, &surf->fence, NULL);
}
pipe_mutex_unlock(pq->device->mutex);
return vlVdpPresentationQueueGetTime(presentation_queue, first_presentation_time);
}
@@ -357,21 +357,21 @@ vlVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue
return VDP_STATUS_INVALID_HANDLE;
*first_presentation_time = 0;
if (!surf->fence) {
if (pq->last_surf == surf)
*status = VDP_PRESENTATION_QUEUE_STATUS_VISIBLE;
else
*status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;
} else {
- pipe_mutex_lock(pq->device->mutex);
+ mtx_lock(&pq->device->mutex);
screen = pq->device->vscreen->pscreen;
if (screen->fence_finish(screen, NULL, surf->fence, 0)) {
screen->fence_reference(screen, &surf->fence, NULL);
*status = VDP_PRESENTATION_QUEUE_STATUS_VISIBLE;
pipe_mutex_unlock(pq->device->mutex);
// We actually need to query the timestamp of the last VSYNC event from the hardware
vlVdpPresentationQueueGetTime(presentation_queue, first_presentation_time);
*first_presentation_time += 1;
} else {
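
The presentation-queue hunks above use the same fence in two modes: BlockUntilSurfaceIdle waits with PIPE_TIMEOUT_INFINITE, while QuerySurfaceStatus polls with a timeout of zero. A sketch of that split; fence_finish() here is a stub, not the real screen vfunc:

#include <stdbool.h>
#include <stdint.h>
#include <threads.h>

static bool fence_finish(void *fence, uint64_t timeout_ns)
{
   (void)fence; (void)timeout_ns;
   return true;      /* stub: pretend the GPU is already idle */
}

static bool surface_idle(mtx_t *mutex, void **fence, bool block)
{
   bool idle = true;

   mtx_lock(mutex);
   if (*fence) {
      idle = fence_finish(*fence, block ? UINT64_MAX : 0);
      if (idle)
         *fence = NULL;   /* drop the fence once it has signalled */
   }
   mtx_unlock(mutex);
   return idle;
}
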
diff --git a/src/gallium/state_trackers/vdpau/query.c b/src/gallium/state_trackers/vdpau/query.c
index 435cafd..87011cb 100644
--- a/src/gallium/state_trackers/vdpau/query.c
+++ b/src/gallium/state_trackers/vdpau/query.c
@@ -75,21 +75,21 @@ vlVdpVideoSurfaceQueryCapabilities(VdpDevice device, VdpChromaType surface_chrom
return VDP_STATUS_INVALID_POINTER;
dev = vlGetDataHTAB(device);
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
pscreen = dev->vscreen->pscreen;
if (!pscreen)
return VDP_STATUS_RESOURCES;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
/* XXX: Current limits */
*is_supported = true;
max_2d_texture_level = pscreen->get_param(pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
pipe_mutex_unlock(dev->mutex);
if (!max_2d_texture_level)
return VDP_STATUS_RESOURCES;
/* It is not quite clear whether the limit is max_2d_texture_level-1 or just max_2d_texture_level */
*max_width = *max_height = pow(2,max_2d_texture_level-1);
@@ -112,21 +112,21 @@ vlVdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities(VdpDevice device, VdpChromaTyp
return VDP_STATUS_INVALID_POINTER;
dev = vlGetDataHTAB(device);
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
pscreen = dev->vscreen->pscreen;
if (!pscreen)
return VDP_STATUS_RESOURCES;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
switch(bits_ycbcr_format) {
case VDP_YCBCR_FORMAT_NV12:
*is_supported = surface_chroma_type == VDP_CHROMA_TYPE_420;
break;
case VDP_YCBCR_FORMAT_YV12:
*is_supported = surface_chroma_type == VDP_CHROMA_TYPE_420;
/* We can convert YV12 to NV12 on the fly! */
@@ -189,21 +189,21 @@ vlVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile,
pscreen = dev->vscreen->pscreen;
if (!pscreen)
return VDP_STATUS_RESOURCES;
p_profile = ProfileToPipe(profile);
if (p_profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
*is_supported = false;
return VDP_STATUS_OK;
}
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_SUPPORTED);
if (*is_supported) {
*max_width = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_MAX_WIDTH);
*max_height = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_MAX_HEIGHT);
*max_level = pscreen->get_video_param(pscreen, p_profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_MAX_LEVEL);
*max_macroblocks = (*max_width/16)*(*max_height/16);
@@ -237,21 +237,21 @@ vlVdpOutputSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
if (!pscreen)
return VDP_STATUS_RESOURCES;
format = VdpFormatRGBAToPipe(surface_rgba_format);
if (format == PIPE_FORMAT_NONE || format == PIPE_FORMAT_A8_UNORM)
return VDP_STATUS_INVALID_RGBA_FORMAT;
if (!(is_supported && max_width && max_height))
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_3D, 1,
PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
);
if (*is_supported) {
uint32_t max_2d_texture_level = pscreen->get_param(
pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
if (!max_2d_texture_level) {
@@ -289,21 +289,21 @@ vlVdpOutputSurfaceQueryGetPutBitsNativeCapabilities(VdpDevice device, VdpRGBAFor
if (!pscreen)
return VDP_STATUS_ERROR;
format = VdpFormatRGBAToPipe(surface_rgba_format);
if (format == PIPE_FORMAT_NONE || format == PIPE_FORMAT_A8_UNORM)
return VDP_STATUS_INVALID_RGBA_FORMAT;
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_2D, 1,
PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
);
pipe_mutex_unlock(dev->mutex);
return VDP_STATUS_OK;
}
@@ -338,21 +338,21 @@ vlVdpOutputSurfaceQueryPutBitsIndexedCapabilities(VdpDevice device,
if (index_format == PIPE_FORMAT_NONE)
return VDP_STATUS_INVALID_INDEXED_FORMAT;
colortbl_format = FormatColorTableToPipe(color_table_format);
if (colortbl_format == PIPE_FORMAT_NONE)
return VDP_STATUS_INVALID_COLOR_TABLE_FORMAT;
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
);
*is_supported &= pscreen->is_format_supported
(
pscreen, index_format, PIPE_TEXTURE_2D, 1,
PIPE_BIND_SAMPLER_VIEW
@@ -393,21 +393,21 @@ vlVdpOutputSurfaceQueryPutBitsYCbCrCapabilities(VdpDevice device, VdpRGBAFormat
if (rgba_format == PIPE_FORMAT_NONE || rgba_format == PIPE_FORMAT_A8_UNORM)
return VDP_STATUS_INVALID_RGBA_FORMAT;
ycbcr_format = FormatYCBCRToPipe(bits_ycbcr_format);
if (ycbcr_format == PIPE_FORMAT_NONE)
return VDP_STATUS_INVALID_INDEXED_FORMAT;
if (!is_supported)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, rgba_format, PIPE_TEXTURE_2D, 1,
PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
);
*is_supported &= pscreen->is_video_format_supported
(
pscreen, ycbcr_format,
PIPE_VIDEO_PROFILE_UNKNOWN,
@@ -437,21 +437,21 @@ vlVdpBitmapSurfaceQueryCapabilities(VdpDevice device, VdpRGBAFormat surface_rgba
if (!pscreen)
return VDP_STATUS_RESOURCES;
format = VdpFormatRGBAToPipe(surface_rgba_format);
if (format == PIPE_FORMAT_NONE)
return VDP_STATUS_INVALID_RGBA_FORMAT;
if (!(is_supported && max_width && max_height))
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
*is_supported = pscreen->is_format_supported
(
pscreen, format, PIPE_TEXTURE_3D, 1,
PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET
);
if (*is_supported) {
uint32_t max_2d_texture_level = pscreen->get_param(
pscreen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
if (!max_2d_texture_level) {
@@ -526,21 +526,21 @@ vlVdpVideoMixerQueryParameterValueRange(VdpDevice device, VdpVideoMixerParameter
void *min_value, void *max_value)
{
vlVdpDevice *dev = vlGetDataHTAB(device);
struct pipe_screen *screen;
if (!dev)
return VDP_STATUS_INVALID_HANDLE;
if (!(min_value && max_value))
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
screen = dev->vscreen->pscreen;
switch (parameter) {
case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH:
*(uint32_t*)min_value = 48;
*(uint32_t*)max_value = screen->get_video_param(screen, PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_MAX_WIDTH);
break;
case VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT:
*(uint32_t*)min_value = 48;
diff --git a/src/gallium/state_trackers/vdpau/surface.c b/src/gallium/state_trackers/vdpau/surface.c
index e0dff4e..39d5849 100644
--- a/src/gallium/state_trackers/vdpau/surface.c
+++ b/src/gallium/state_trackers/vdpau/surface.c
@@ -73,21 +73,21 @@ vlVdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type,
vlVdpDevice *dev = vlGetDataHTAB(device);
if (!dev) {
ret = VDP_STATUS_INVALID_HANDLE;
goto inv_device;
}
DeviceReference(&p_surf->device, dev);
pipe = dev->context;
- pipe_mutex_lock(dev->mutex);
+ mtx_lock(&dev->mutex);
memset(&p_surf->templat, 0, sizeof(p_surf->templat));
p_surf->templat.buffer_format = pipe->screen->get_video_param
(
pipe->screen,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
PIPE_VIDEO_CAP_PREFERED_FORMAT
);
p_surf->templat.chroma_format = ChromaToPipe(chroma_type);
p_surf->templat.width = width;
@@ -131,21 +131,21 @@ inv_size:
*/
VdpStatus
vlVdpVideoSurfaceDestroy(VdpVideoSurface surface)
{
vlVdpSurface *p_surf;
p_surf = (vlVdpSurface *)vlGetDataHTAB((vlHandle)surface);
if (!p_surf)
return VDP_STATUS_INVALID_HANDLE;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer)
p_surf->video_buffer->destroy(p_surf->video_buffer);
pipe_mutex_unlock(p_surf->device->mutex);
vlRemoveDataHTAB(surface);
DeviceReference(&p_surf->device, NULL);
FREE(p_surf);
return VDP_STATUS_OK;
}
@@ -231,21 +231,21 @@ vlVdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface,
conversion = CONVERSION_NV12_TO_YV12;
else if (format == PIPE_FORMAT_NV12 && buffer_format == PIPE_FORMAT_YV12)
conversion = CONVERSION_YV12_TO_NV12;
else if ((format == PIPE_FORMAT_YUYV && buffer_format == PIPE_FORMAT_UYVY) ||
(format == PIPE_FORMAT_UYVY && buffer_format == PIPE_FORMAT_YUYV))
conversion = CONVERSION_SWAP_YUYV_UYVY;
else
return VDP_STATUS_NO_IMPLEMENTATION;
}
- pipe_mutex_lock(vlsurface->device->mutex);
+ mtx_lock(&vlsurface->device->mutex);
sampler_views = vlsurface->video_buffer->get_sampler_view_planes(vlsurface->video_buffer);
if (!sampler_views) {
pipe_mutex_unlock(vlsurface->device->mutex);
return VDP_STATUS_RESOURCES;
}
for (i = 0; i < 3; ++i) {
unsigned width, height;
struct pipe_sampler_view *sv = sampler_views[i];
if (!sv) continue;
@@ -314,21 +314,21 @@ vlVdpVideoSurfacePutBitsYCbCr(VdpVideoSurface surface,
if (!p_surf)
return VDP_STATUS_INVALID_HANDLE;
pipe = p_surf->device->context;
if (!pipe)
return VDP_STATUS_INVALID_HANDLE;
if (!source_data || !source_pitches)
return VDP_STATUS_INVALID_POINTER;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL ||
((pformat != p_surf->video_buffer->buffer_format))) {
enum pipe_format nformat = pformat;
struct pipe_screen *screen = pipe->screen;
/* Determine the most suitable format for the new surface */
if (!screen->is_video_format_supported(screen, nformat,
PIPE_VIDEO_PROFILE_UNKNOWN,
PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
@@ -458,21 +458,21 @@ vlVdpVideoSurfaceClear(vlVdpSurface *vlsurf)
/**
* Interop to mesa state tracker
*/
struct pipe_video_buffer *vlVdpVideoSurfaceGallium(VdpVideoSurface surface)
{
vlVdpSurface *p_surf = vlGetDataHTAB(surface);
if (!p_surf)
return NULL;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL) {
struct pipe_context *pipe = p_surf->device->context;
/* try to create a video buffer if we don't already have one */
p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
}
pipe_mutex_unlock(p_surf->device->mutex);
return p_surf->video_buffer;
}
@@ -493,21 +493,21 @@ VdpStatus vlVdpVideoSurfaceDMABuf(VdpVideoSurface surface,
if (plane > 3)
return VDP_STATUS_INVALID_VALUE;
if (!result)
return VDP_STATUS_INVALID_POINTER;
memset(result, 0, sizeof(*result));
result->handle = -1;
- pipe_mutex_lock(p_surf->device->mutex);
+ mtx_lock(&p_surf->device->mutex);
if (p_surf->video_buffer == NULL) {
struct pipe_context *pipe = p_surf->device->context;
/* try to create a video buffer if we don't already have one */
p_surf->video_buffer = pipe->create_video_buffer(pipe, &p_surf->templat);
}
/* Check if the surface matches interop requirements */
if (p_surf->video_buffer == NULL || !p_surf->video_buffer->interlaced ||
p_surf->video_buffer->buffer_format != PIPE_FORMAT_NV12) {
diff --git a/src/gallium/targets/haiku-softpipe/GalliumContext.cpp b/src/gallium/targets/haiku-softpipe/GalliumContext.cpp
index 9662149..02ffd01 100644
--- a/src/gallium/targets/haiku-softpipe/GalliumContext.cpp
+++ b/src/gallium/targets/haiku-softpipe/GalliumContext.cpp
@@ -399,21 +399,21 @@ GalliumContext::Invalidate(uint32 width, uint32 height)
// Is this the best way to invalidate?
p_atomic_inc(&fContext[fCurrentContext]->read->stfbi->stamp);
p_atomic_inc(&fContext[fCurrentContext]->draw->stfbi->stamp);
}
void
GalliumContext::Lock()
{
CALLED();
- pipe_mutex_lock(fMutex);
+ mtx_lock(&fMutex);
}
void
GalliumContext::Unlock()
{
CALLED();
pipe_mutex_unlock(fMutex);
}
/* vim: set tabstop=4: */
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 5b9bd8c..2f0dcb6 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -76,21 +76,21 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
if (r)
fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
r);
return !buffer_busy;
}
if (timeout == 0) {
unsigned idle_fences;
bool buffer_idle;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
break;
}
/* Release the idle fences to avoid checking them again later. */
for (unsigned i = 0; i < idle_fences; ++i)
amdgpu_fence_reference(&bo->fences[i], NULL);
@@ -98,34 +98,34 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
(bo->num_fences - idle_fences) * sizeof(*bo->fences));
bo->num_fences -= idle_fences;
buffer_idle = !bo->num_fences;
pipe_mutex_unlock(ws->bo_fence_lock);
return buffer_idle;
} else {
bool buffer_idle = true;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
while (bo->num_fences && buffer_idle) {
struct pipe_fence_handle *fence = NULL;
bool fence_idle = false;
amdgpu_fence_reference(&fence, bo->fences[0]);
/* Wait for the fence. */
pipe_mutex_unlock(ws->bo_fence_lock);
if (amdgpu_fence_wait(fence, abs_timeout, true))
fence_idle = true;
else
buffer_idle = false;
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
/* Release an idle fence to avoid checking it again later, keeping in
* mind that the fence array may have been modified by other threads.
*/
if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
amdgpu_fence_reference(&bo->fences[0], NULL);
memmove(&bo->fences[0], &bo->fences[1],
(bo->num_fences - 1) * sizeof(*bo->fences));
bo->num_fences--;
}
@@ -153,21 +153,21 @@ static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
bo->num_fences = 0;
bo->max_fences = 0;
}
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
assert(bo->bo && "must not be called for slab entries");
- pipe_mutex_lock(bo->ws->global_bo_list_lock);
+ mtx_lock(&bo->ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
bo->ws->num_buffers--;
pipe_mutex_unlock(bo->ws->global_bo_list_lock);
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->u.real.va_handle);
amdgpu_bo_free(bo->bo);
amdgpu_bo_remove_fences(bo);
@@ -342,21 +342,21 @@ static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
amdgpu_bo_destroy_or_cache
/* other functions are never called */
};
static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
struct amdgpu_winsys *ws = bo->ws;
assert(bo->bo);
- pipe_mutex_lock(ws->global_bo_list_lock);
+ mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pipe_mutex_unlock(ws->global_bo_list_lock);
}
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
uint64_t size,
unsigned alignment,
unsigned usage,
enum radeon_bo_domain initial_domain,
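
The amdgpu_bo_wait() hunk above is the one place in this patch where lock placement is load-bearing: the fence lock is dropped around the blocking wait, and the fence array is revalidated after relocking, exactly as the in-tree comment says. A compressed sketch of that discipline; fence_wait() is a stub, and the real code also holds a reference so the fence cannot be freed while unlocked:

#include <stdbool.h>
#include <string.h>
#include <threads.h>

struct bo {
   void *fences[8];
   unsigned num_fences;
};

static bool fence_wait(void *fence)
{
   (void)fence;
   return true;   /* stub for amdgpu_fence_wait(..., abs_timeout, ...) */
}

static bool bo_wait_idle(struct bo *bo, mtx_t *fence_lock)
{
   bool idle = true;

   mtx_lock(fence_lock);
   while (bo->num_fences && idle) {
      void *fence = bo->fences[0];
      mtx_unlock(fence_lock);      /* never block while holding the lock */
      idle = fence_wait(fence);
      mtx_lock(fence_lock);
      /* Revalidate: only pop the fence if it is still the head. */
      if (idle && bo->num_fences && bo->fences[0] == fence) {
         memmove(&bo->fences[0], &bo->fences[1],
                 (bo->num_fences - 1) * sizeof(bo->fences[0]));
         bo->num_fences--;
      }
   }
   mtx_unlock(fence_lock);
   return idle;
}
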
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 01f38d5..bb255f2 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1030,21 +1030,21 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
}
/* Create the buffer list.
* Use a buffer list containing all allocated buffers if requested.
*/
if (debug_get_option_all_bos()) {
struct amdgpu_winsys_bo *bo;
amdgpu_bo_handle *handles;
unsigned num = 0;
- pipe_mutex_lock(ws->global_bo_list_lock);
+ mtx_lock(&ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
pipe_mutex_unlock(ws->global_bo_list_lock);
amdgpu_cs_context_cleanup(cs);
cs->error_code = -ENOMEM;
return;
}
LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
@@ -1204,21 +1204,21 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
amdgpu_fence_reference(fence, cur->fence);
amdgpu_cs_sync_flush(rcs);
/* Prepare buffers.
*
* This fence lock must be held until the submission is queued to ensure
* that the order of fence dependency updates matches the order of
* submissions.
*/
- pipe_mutex_lock(ws->bo_fence_lock);
+ mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fence_dependencies(cs);
/* Swap command streams. "cst" is going to be submitted. */
cs->csc = cs->cst;
cs->cst = cur;
/* Submit. */
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
amdgpu_cs_submit_ib, NULL);
/* The submission has been queued, unlock the fence now. */
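
The comment in the flush hunk above carries the real constraint: fence-dependency updates and job submission must happen under one hold of bo_fence_lock so queue order matches dependency order. A toy sketch of that invariant, with a sequence counter standing in for the winsys state; assume the mutex was initialized at startup:

#include <threads.h>

static mtx_t bo_fence_lock;      /* mtx_init(..., mtx_plain) assumed */
static unsigned next_seq;

static unsigned flush_one(void)
{
   unsigned seq;

   mtx_lock(&bo_fence_lock);
   seq = next_seq++;   /* record fence dependencies for this submission */
   /* ... util_queue_add_job() would run here, still locked, so two
    * flushing threads cannot queue in an order that disagrees with
    * their fence updates ... */
   mtx_unlock(&bo_fence_lock);
   return seq;
}
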
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index 9d1075c..bcb466f 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -501,21 +501,21 @@ static int compare_dev(void *key1, void *key2)
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
bool destroy;
/* When the reference counter drops to zero, remove the device pointer
* from the table.
* This must happen while the mutex is locked, so that
* amdgpu_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- pipe_mutex_lock(dev_tab_mutex);
+ mtx_lock(&dev_tab_mutex);
destroy = pipe_reference(&ws->reference, NULL);
if (destroy && dev_tab)
util_hash_table_remove(dev_tab, ws->dev);
pipe_mutex_unlock(dev_tab_mutex);
return destroy;
}
PUBLIC struct radeon_winsys *
@@ -527,21 +527,21 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
uint32_t drm_major, drm_minor, r;
/* The DRM driver version of amdgpu is 3.x.x. */
if (version->version_major != 3) {
drmFreeVersion(version);
return NULL;
}
drmFreeVersion(version);
/* Look up the winsys from the dev table. */
- pipe_mutex_lock(dev_tab_mutex);
+ mtx_lock(&dev_tab_mutex);
if (!dev_tab)
dev_tab = util_hash_table_create(hash_dev, compare_dev);
/* Initialize the amdgpu device. This should always return the same pointer
* for the same fd. */
r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
pipe_mutex_unlock(dev_tab_mutex);
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
return NULL;
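
The unref hunk above spells out the race it prevents; a sketch of the same shape, with illustrative types, the table removal elided, and the mutex assumed to be initialized elsewhere:

#include <stdbool.h>
#include <threads.h>

struct winsys {
   int refcount;
};

static mtx_t dev_tab_mutex;      /* mtx_init(..., mtx_plain) assumed */

/* The refcount must reach zero and the table entry must be removed
 * under the same hold of dev_tab_mutex, or a concurrent create()
 * could fetch a winsys that is mid-destruction. */
static bool winsys_unref(struct winsys *ws)
{
   bool destroy;

   mtx_lock(&dev_tab_mutex);
   destroy = --ws->refcount == 0;
   if (destroy) {
      /* ... util_hash_table_remove(dev_tab, ws->dev) ... */
   }
   mtx_unlock(&dev_tab_mutex);
   return destroy;
}
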
diff --git a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
index 141191f..dc48934 100644
--- a/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
+++ b/src/gallium/winsys/etnaviv/drm/etnaviv_drm_winsys.c
@@ -70,21 +70,21 @@ screen_create(struct renderonly *ro)
static struct util_hash_table *etna_tab = NULL;
static mtx_t etna_screen_mutex = _MTX_INITIALIZER_NP;
static void
etna_drm_screen_destroy(struct pipe_screen *pscreen)
{
struct etna_screen *screen = etna_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(etna_screen_mutex);
+ mtx_lock(&etna_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = etna_device_fd(screen->dev);
util_hash_table_remove(etna_tab, intptr_to_pointer(fd));
}
pipe_mutex_unlock(etna_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
pscreen->destroy(pscreen);
@@ -113,21 +113,21 @@ static int compare_fd(void *key1, void *key2)
return stat1.st_dev != stat2.st_dev ||
stat1.st_ino != stat2.st_ino ||
stat1.st_rdev != stat2.st_rdev;
}
struct pipe_screen *
etna_drm_screen_create_renderonly(struct renderonly *ro)
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(etna_screen_mutex);
+ mtx_lock(&etna_screen_mutex);
if (!etna_tab) {
etna_tab = util_hash_table_create(hash_fd, compare_fd);
if (!etna_tab)
goto unlock;
}
pscreen = util_hash_table_get(etna_tab, intptr_to_pointer(ro->gpu_fd));
if (pscreen) {
etna_screen(pscreen)->refcnt++;
} else {
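
The compare_fd() helper above, and its twins in the freedreno and nouveau winsyses below, treats two descriptors as the same DRM device when their stat identity matches, so dup()ed fds resolve to one cached screen. A self-contained sketch of that test:

#include <stdbool.h>
#include <sys/stat.h>

static bool same_device(int fd1, int fd2)
{
   struct stat s1, s2;

   if (fstat(fd1, &s1) || fstat(fd2, &s2))
      return false;
   return s1.st_dev == s2.st_dev &&
          s1.st_ino == s2.st_ino &&
          s1.st_rdev == s2.st_rdev;
}
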
diff --git a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
index 9ccbce1..2de429e 100644
--- a/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
+++ b/src/gallium/winsys/freedreno/drm/freedreno_drm_winsys.c
@@ -43,21 +43,21 @@
static struct util_hash_table *fd_tab = NULL;
static mtx_t fd_screen_mutex = _MTX_INITIALIZER_NP;
static void
fd_drm_screen_destroy(struct pipe_screen *pscreen)
{
struct fd_screen *screen = fd_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(fd_screen_mutex);
+ mtx_lock(&fd_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = fd_device_fd(screen->dev);
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
}
pipe_mutex_unlock(fd_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
pscreen->destroy(pscreen);
@@ -84,21 +84,21 @@ static int compare_fd(void *key1, void *key2)
return stat1.st_dev != stat2.st_dev ||
stat1.st_ino != stat2.st_ino ||
stat1.st_rdev != stat2.st_rdev;
}
struct pipe_screen *
fd_drm_screen_create(int fd)
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(fd_screen_mutex);
+ mtx_lock(&fd_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
goto unlock;
}
pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (pscreen) {
fd_screen(pscreen)->refcnt++;
} else {
diff --git a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
index f7b1e5e..a2a9fd6 100644
--- a/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
+++ b/src/gallium/winsys/nouveau/drm/nouveau_drm_winsys.c
@@ -20,21 +20,21 @@
static struct util_hash_table *fd_tab = NULL;
static mtx_t nouveau_screen_mutex = _MTX_INITIALIZER_NP;
bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
{
int ret;
if (screen->refcount == -1)
return true;
- pipe_mutex_lock(nouveau_screen_mutex);
+ mtx_lock(&nouveau_screen_mutex);
ret = --screen->refcount;
assert(ret >= 0);
if (ret == 0)
util_hash_table_remove(fd_tab, intptr_to_pointer(screen->drm->fd));
pipe_mutex_unlock(nouveau_screen_mutex);
return ret == 0;
}
static unsigned hash_fd(void *key)
{
@@ -60,21 +60,21 @@ static int compare_fd(void *key1, void *key2)
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
struct nouveau_drm *drm = NULL;
struct nouveau_device *dev = NULL;
struct nouveau_screen *(*init)(struct nouveau_device *);
struct nouveau_screen *screen = NULL;
int ret, dupfd;
- pipe_mutex_lock(nouveau_screen_mutex);
+ mtx_lock(&nouveau_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab) {
pipe_mutex_unlock(nouveau_screen_mutex);
return NULL;
}
}
screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (screen) {
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index 786b1f6..e302273 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -70,21 +70,21 @@ static bool radeon_real_bo_is_busy(struct radeon_bo *bo)
}
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
unsigned num_idle;
bool busy = false;
if (bo->handle)
return radeon_real_bo_is_busy(bo);
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
busy = true;
break;
}
radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);
}
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
(bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
bo->u.slab.num_fences -= num_idle;
@@ -100,30 +100,30 @@ static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
args.handle = bo->handle;
while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
&args, sizeof(args)) == -EBUSY);
}
static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
if (bo->handle) {
radeon_real_bo_wait_idle(bo);
} else {
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
while (bo->u.slab.num_fences) {
struct radeon_bo *fence = NULL;
radeon_bo_reference(&fence, bo->u.slab.fences[0]);
pipe_mutex_unlock(bo->rws->bo_fence_lock);
/* Wait without holding the fence lock. */
radeon_real_bo_wait_idle(fence);
- pipe_mutex_lock(bo->rws->bo_fence_lock);
+ mtx_lock(&bo->rws->bo_fence_lock);
if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
radeon_bo_reference(&bo->u.slab.fences[0], NULL);
memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
(bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
bo->u.slab.num_fences--;
}
radeon_bo_reference(&fence, NULL);
}
pipe_mutex_unlock(bo->rws->bo_fence_lock);
}
@@ -197,21 +197,21 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
uint64_t size, uint64_t alignment)
{
struct radeon_bo_va_hole *hole, *n;
uint64_t offset = 0, waste = 0;
/* All VM address space holes will implicitly start aligned to the
* size alignment, so we don't need to sanitize the alignment here
*/
size = align(size, rws->info.gart_page_size);
- pipe_mutex_lock(rws->bo_va_mutex);
+ mtx_lock(&rws->bo_va_mutex);
/* first look for a hole */
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
offset = hole->offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
offset += waste;
if (offset >= (hole->offset + hole->size)) {
continue;
}
if (!waste && hole->size == size) {
@@ -255,21 +255,21 @@ static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
return offset;
}
static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
uint64_t va, uint64_t size)
{
struct radeon_bo_va_hole *hole;
size = align(size, rws->info.gart_page_size);
- pipe_mutex_lock(rws->bo_va_mutex);
+ mtx_lock(&rws->bo_va_mutex);
if ((va + size) == rws->va_offset) {
rws->va_offset = va;
/* Delete uppermost hole if it reaches the new top */
if (!LIST_IS_EMPTY(&rws->va_holes)) {
hole = container_of(rws->va_holes.next, hole, list);
if ((hole->offset + hole->size) == va) {
rws->va_offset = hole->offset;
list_del(&hole->list);
FREE(hole);
}
@@ -324,21 +324,21 @@ out:
void radeon_bo_destroy(struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
struct radeon_drm_winsys *rws = bo->rws;
struct drm_gem_close args;
assert(bo->handle && "must not be called for slab entries");
memset(&args, 0, sizeof(args));
- pipe_mutex_lock(rws->bo_handles_mutex);
+ mtx_lock(&rws->bo_handles_mutex);
util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
if (bo->flink_name) {
util_hash_table_remove(rws->bo_names,
(void*)(uintptr_t)bo->flink_name);
}
pipe_mutex_unlock(rws->bo_handles_mutex);
if (bo->u.real.ptr)
os_munmap(bo->u.real.ptr, bo->base.size);
@@ -411,21 +411,21 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
return bo->user_ptr;
if (bo->handle) {
offset = 0;
} else {
offset = bo->va - bo->u.slab.real->va;
bo = bo->u.slab.real;
}
/* Map the buffer. */
- pipe_mutex_lock(bo->u.real.map_mutex);
+ mtx_lock(&bo->u.real.map_mutex);
/* Return the pointer if it's already mapped. */
if (bo->u.real.ptr) {
bo->u.real.map_count++;
pipe_mutex_unlock(bo->u.real.map_mutex);
return (uint8_t*)bo->u.real.ptr + offset;
}
args.handle = bo->handle;
args.offset = 0;
args.size = (uint64_t)bo->base.size;
if (drmCommandWriteRead(bo->rws->fd,
@@ -546,21 +546,21 @@ static void *radeon_bo_map(struct pb_buffer *buf,
static void radeon_bo_unmap(struct pb_buffer *_buf)
{
struct radeon_bo *bo = (struct radeon_bo*)_buf;
if (bo->user_ptr)
return;
if (!bo->handle)
bo = bo->u.slab.real;
- pipe_mutex_lock(bo->u.real.map_mutex);
+ mtx_lock(&bo->u.real.map_mutex);
if (!bo->u.real.ptr) {
pipe_mutex_unlock(bo->u.real.map_mutex);
return; /* it's not been mapped */
}
assert(bo->u.real.map_count);
if (--bo->u.real.map_count) {
pipe_mutex_unlock(bo->u.real.map_mutex);
return; /* it's been mapped multiple times */
}
@@ -658,21 +658,21 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(rws->bo_handles_mutex);
+ mtx_lock(&rws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);
pipe_mutex_unlock(rws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return radeon_bo(b);
}
@@ -1023,21 +1023,21 @@ no_slab:
pb_slabs_reclaim(&ws->bo_slabs);
pb_cache_release_all_buffers(&ws->bo_cache);
bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
pb_cache_bucket);
if (!bo)
return NULL;
}
bo->u.real.use_reusable_pool = true;
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
return &bo->base;
}
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
void *pointer, uint64_t size)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
@@ -1056,21 +1056,21 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
RADEON_GEM_USERPTR_VALIDATE |
RADEON_GEM_USERPTR_REGISTER;
if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
&args, sizeof(args))) {
FREE(bo);
return NULL;
}
assert(args.handle != 0);
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->handle = args.handle;
bo->base.alignment = 0;
bo->base.size = size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->rws = ws;
bo->user_ptr = pointer;
bo->va = 0;
@@ -1094,21 +1094,21 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to assign virtual address space\n");
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
pipe_mutex_unlock(ws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
@@ -1137,21 +1137,21 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
whandle->offset);
return NULL;
}
/* We must maintain a list of pairs <handle, bo>, so that we always return
* the same BO for one particular handle. If we didn't do that and created
* more than one BO for the same handle and then relocated them in a CS,
* we would hit a deadlock in the kernel.
*
* The list of pairs is guarded by a mutex, of course. */
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
/* First check if there already is an existing bo for the handle. */
bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
/* We must first get the GEM handle, as fds are unreliable keys */
r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
if (r)
goto fail;
bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
@@ -1237,21 +1237,21 @@ done:
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to assign virtual address space\n");
radeon_bo_destroy(&bo->base);
return NULL;
}
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
pipe_mutex_unlock(ws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
@@ -1294,21 +1294,21 @@ static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
if (!bo->flink_name) {
flink.handle = bo->handle;
if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
return false;
}
bo->flink_name = flink.name;
- pipe_mutex_lock(ws->bo_handles_mutex);
+ mtx_lock(&ws->bo_handles_mutex);
util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(ws->bo_handles_mutex);
}
whandle->handle = bo->flink_name;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
whandle->handle = bo->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
return false;
}
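
[radeon_bo_wait_idle() above is a textbook instance of waiting without
holding the lock: it drops bo_fence_lock around the blocking wait,
retakes it, and re-checks that the head of the fence list is still the
fence it waited on before popping it. Here is a condensed sketch of the
same idiom with a hypothetical fence type and a stub wait standing in
for radeon_real_bo_wait_idle(); the real code additionally pins each
fence with radeon_bo_reference() so it cannot be freed while the lock
is dropped.]

#include <threads.h>

struct fence { int id; };

#define MAX_FENCES 16
static mtx_t fence_lock;   /* assume initialized with mtx_init() */
static struct fence *fences[MAX_FENCES];
static unsigned num_fences;

static void fence_wait(struct fence *f)   /* stand-in for the blocking ioctl loop */
{
   (void)f;
}

static void wait_all_fences(void)
{
   mtx_lock(&fence_lock);
   while (num_fences) {
      struct fence *f = fences[0];

      /* Wait without holding the fence lock, so other threads can
       * append or retire fences while we block. */
      mtx_unlock(&fence_lock);
      fence_wait(f);
      mtx_lock(&fence_lock);

      /* The list may have changed while the lock was dropped: only
       * pop the head if it is still the fence we waited on. */
      if (num_fences && fences[0] == f) {
         for (unsigned i = 1; i < num_fences; i++)
            fences[i - 1] = fences[i];
         num_fences--;
      }
   }
   mtx_unlock(&fence_lock);
}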
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index fb6a6bb..3f615f8 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -589,21 +589,21 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
if (cs->next_fence) {
fence = cs->next_fence;
cs->next_fence = NULL;
} else {
fence = radeon_cs_create_fence(rcs);
}
if (pfence)
radeon_fence_reference(pfence, fence);
- pipe_mutex_lock(cs->ws->bo_fence_lock);
+ mtx_lock(&cs->ws->bo_fence_lock);
for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
p_atomic_inc(&bo->num_active_ioctls);
radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
}
pipe_mutex_unlock(cs->ws->bo_fence_lock);
radeon_fence_reference(&fence, NULL);
} else {
radeon_fence_reference(&cs->next_fence, NULL);
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index bbcf7a2..562d15e 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -59,21 +59,21 @@ static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
struct radeon_drm_cs **owner,
mtx_t *mutex,
unsigned request, const char *request_name,
bool enable)
{
struct drm_radeon_info info;
unsigned value = enable ? 1 : 0;
memset(&info, 0, sizeof(info));
- pipe_mutex_lock(*mutex);
+ mtx_lock(&*mutex);
/* Early exit if we are sure the request will fail. */
if (enable) {
if (*owner) {
pipe_mutex_unlock(*mutex);
return false;
}
} else {
if (*owner != applier) {
pipe_mutex_unlock(*mutex);
@@ -702,21 +702,21 @@ DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", true)
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{
struct radeon_drm_winsys *rws = (struct radeon_drm_winsys*)ws;
bool destroy;
/* When the reference counter drops to zero, remove the fd from the table.
* This must happen while the mutex is locked, so that
* radeon_drm_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
- pipe_mutex_lock(fd_tab_mutex);
+ mtx_lock(&fd_tab_mutex);
destroy = pipe_reference(&rws->reference, NULL);
if (destroy && fd_tab)
util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
pipe_mutex_unlock(fd_tab_mutex);
return destroy;
}
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
@@ -729,21 +729,21 @@ static unsigned handle_hash(void *key)
static int handle_compare(void *key1, void *key2)
{
return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
struct radeon_drm_winsys *ws;
- pipe_mutex_lock(fd_tab_mutex);
+ mtx_lock(&fd_tab_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
}
ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (ws) {
pipe_reference(NULL, &ws->reference);
pipe_mutex_unlock(fd_tab_mutex);
return &ws->base;
}
diff --git a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
index 293fe7e..85d2afc 100644
--- a/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
+++ b/src/gallium/winsys/svga/drm/pb_buffer_simple_fenced.c
@@ -308,21 +308,21 @@ fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
struct pipe_fence_handle *fence = NULL;
int finished;
boolean proceed;
ops->fence_reference(ops, &fence, fenced_buf->fence);
pipe_mutex_unlock(fenced_mgr->mutex);
finished = ops->fence_finish(ops, fenced_buf->fence, 0);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
/*
* Only proceed if the fence object didn't change in the meanwhile.
* Otherwise assume the work has been already carried out by another
* thread that re-acquired the lock before us.
*/
proceed = fence == fenced_buf->fence ? TRUE : FALSE;
@@ -501,38 +501,38 @@ fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
assert(!pipe_is_referenced(&fenced_buf->base.reference));
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
pipe_mutex_unlock(fenced_mgr->mutex);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
unsigned flags, void *flush_ctx)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
void *map = NULL;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(!(flags & PB_USAGE_GPU_READ_WRITE));
/*
* Serialize writes.
*/
while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
((fenced_buf->flags & PB_USAGE_GPU_READ) &&
(flags & PB_USAGE_CPU_WRITE))) {
@@ -569,21 +569,21 @@ done:
return map;
}
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->mapcount);
if(fenced_buf->mapcount) {
if (fenced_buf->buffer)
pb_unmap(fenced_buf->buffer);
--fenced_buf->mapcount;
if(!fenced_buf->mapcount)
fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
}
@@ -593,21 +593,21 @@ fenced_buffer_unmap(struct pb_buffer *buf)
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
unsigned flags)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
enum pipe_error ret;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
if(!vl) {
/* invalidate */
fenced_buf->vl = NULL;
fenced_buf->validation_flags = 0;
ret = PIPE_OK;
goto done;
}
assert(flags & PB_USAGE_GPU_READ_WRITE);
@@ -642,21 +642,21 @@ done:
static void
fenced_buffer_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
struct pb_fence_ops *ops = fenced_mgr->ops;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(pipe_is_referenced(&fenced_buf->base.reference));
assert(fenced_buf->buffer);
if(fence != fenced_buf->fence) {
assert(fenced_buf->vl);
assert(fenced_buf->validation_flags);
if (fenced_buf->fence) {
boolean destroyed;
@@ -681,21 +681,21 @@ fenced_buffer_fence(struct pb_buffer *buf,
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
assert(fenced_buf->buffer);
if(fenced_buf->buffer)
pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
else {
*base_buf = buf;
*offset = 0;
}
@@ -732,21 +732,21 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,
pipe_reference_init(&fenced_buf->base.reference, 1);
fenced_buf->base.alignment = desc->alignment;
fenced_buf->base.usage = desc->usage;
fenced_buf->base.size = size;
fenced_buf->size = size;
fenced_buf->base.vtbl = &fenced_buffer_vtbl;
fenced_buf->mgr = fenced_mgr;
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/*
* Try to create GPU storage without stalling,
*/
ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
desc, TRUE);
/*
* Give up.
*/
@@ -768,45 +768,45 @@ no_storage:
no_buffer:
return NULL;
}
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
pipe_mutex_unlock(fenced_mgr->mutex);
assert(fenced_mgr->provider->flush);
if(fenced_mgr->provider->flush)
fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
struct fenced_manager *fenced_mgr = fenced_manager(mgr);
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
/* Wait on outstanding fences */
while (fenced_mgr->num_fenced) {
pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
sched_yield();
#endif
- pipe_mutex_lock(fenced_mgr->mutex);
+ mtx_lock(&fenced_mgr->mutex);
while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
;
}
#ifdef DEBUG
/*assert(!fenced_mgr->num_unfenced);*/
#endif
pipe_mutex_unlock(fenced_mgr->mutex);
mtx_destroy(&fenced_mgr->mutex);
diff --git a/src/gallium/winsys/svga/drm/vmw_context.c b/src/gallium/winsys/svga/drm/vmw_context.c
index 8d23bff..a5dd66f 100644
--- a/src/gallium/winsys/svga/drm/vmw_context.c
+++ b/src/gallium/winsys/svga/drm/vmw_context.c
@@ -521,21 +521,21 @@ vmw_swc_surface_relocation(struct svga_winsys_context *swc,
vsurf = vmw_svga_winsys_surface(surface);
vmw_swc_surface_only_relocation(swc, where, vsurf, flags);
if (swc->have_gb_objects && vsurf->buf != NULL) {
/*
* Make sure backup buffer ends up fenced.
*/
- pipe_mutex_lock(vsurf->mutex);
+ mtx_lock(&vsurf->mutex);
assert(vsurf->buf != NULL);
vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
vsurf->buf, 0, flags);
pipe_mutex_unlock(vsurf->mutex);
}
}
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
diff --git a/src/gallium/winsys/svga/drm/vmw_fence.c b/src/gallium/winsys/svga/drm/vmw_fence.c
index b18d5bf..23713fc 100644
--- a/src/gallium/winsys/svga/drm/vmw_fence.c
+++ b/src/gallium/winsys/svga/drm/vmw_fence.c
@@ -94,21 +94,21 @@ vmw_fence_ops(struct pb_fence_ops *ops)
* list.
*
* @ops: Pointer to a struct vmw_fence_ops.
*
*/
static void
vmw_fences_release(struct vmw_fence_ops *ops)
{
struct vmw_fence *fence, *n;
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
LIST_DELINIT(&fence->ops_list);
pipe_mutex_unlock(ops->mutex);
}
/**
* vmw_fences_signal - Traverse the not_signaled list and try to
* signal unsignaled fences.
*
* @ops: Pointer to a struct pb_fence_ops.
@@ -123,21 +123,21 @@ vmw_fences_signal(struct pb_fence_ops *fence_ops,
uint32_t emitted,
boolean has_emitted)
{
struct vmw_fence_ops *ops = NULL;
struct vmw_fence *fence, *n;
if (fence_ops == NULL)
return;
ops = vmw_fence_ops(fence_ops);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
if (!has_emitted) {
emitted = ops->last_emitted;
if (emitted - signaled > (1 << 30))
emitted = signaled;
}
if (signaled == ops->last_signaled && emitted == ops->last_emitted)
goto out_unlock;
@@ -186,21 +186,21 @@ vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
struct vmw_fence_ops *ops = vmw_fence_ops(fence_ops);
if (!fence)
return NULL;
p_atomic_set(&fence->refcount, 1);
fence->handle = handle;
fence->mask = mask;
fence->seqno = seqno;
p_atomic_set(&fence->signalled, 0);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
p_atomic_set(&fence->signalled, 1);
LIST_INITHEAD(&fence->ops_list);
} else {
p_atomic_set(&fence->signalled, 0);
LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
}
pipe_mutex_unlock(ops->mutex);
@@ -222,21 +222,21 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
struct pipe_fence_handle *fence)
{
if (*ptr) {
struct vmw_fence *vfence = vmw_fence(*ptr);
if (p_atomic_dec_zero(&vfence->refcount)) {
struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);
vmw_ioctl_fence_unref(vws, vfence->handle);
- pipe_mutex_lock(ops->mutex);
+ mtx_lock(&ops->mutex);
LIST_DELINIT(&vfence->ops_list);
pipe_mutex_unlock(ops->mutex);
FREE(vfence);
}
}
if (fence) {
struct vmw_fence *vfence = vmw_fence(fence);
diff --git a/src/gallium/winsys/svga/drm/vmw_surface.c b/src/gallium/winsys/svga/drm/vmw_surface.c
index 9fadbf9..460949d 100644
--- a/src/gallium/winsys/svga/drm/vmw_surface.c
+++ b/src/gallium/winsys/svga/drm/vmw_surface.c
@@ -41,21 +41,21 @@ vmw_svga_winsys_surface_map(struct svga_winsys_context *swc,
unsigned flags, boolean *retry)
{
struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
void *data = NULL;
struct pb_buffer *pb_buf;
uint32_t pb_flags;
struct vmw_winsys_screen *vws = vsrf->screen;
*retry = FALSE;
assert((flags & (PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE)) != 0);
- pipe_mutex_lock(vsrf->mutex);
+ mtx_lock(&vsrf->mutex);
if (vsrf->mapcount) {
/*
* Only allow multiple readers to map.
*/
if ((flags & PIPE_TRANSFER_WRITE) ||
(vsrf->map_mode & PIPE_TRANSFER_WRITE))
goto out_unlock;
data = vsrf->data;
@@ -158,21 +158,21 @@ out_unlock:
return data;
}
void
vmw_svga_winsys_surface_unmap(struct svga_winsys_context *swc,
struct svga_winsys_surface *srf,
boolean *rebind)
{
struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
- pipe_mutex_lock(vsrf->mutex);
+ mtx_lock(&vsrf->mutex);
if (--vsrf->mapcount == 0) {
*rebind = vsrf->rebind;
vsrf->rebind = FALSE;
vmw_svga_winsys_buffer_unmap(&vsrf->screen->base, vsrf->buf);
} else {
*rebind = FALSE;
}
pipe_mutex_unlock(vsrf->mutex);
}
diff --git a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
index 4f3fa4d..3986305 100644
--- a/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
+++ b/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
@@ -47,28 +47,28 @@ static inline boolean can_cache_resource(struct virgl_hw_res *res)
{
return res->cacheable == TRUE;
}
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
struct virgl_hw_res *res)
{
struct drm_gem_close args;
if (res->flinked) {
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_names,
(void *)(uintptr_t)res->flink);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
if (res->bo_handle) {
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_remove(qdws->bo_handles,
(void *)(uintptr_t)res->bo_handle);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
if (res->ptr)
os_munmap(res->ptr, res->size);
memset(&args, 0, sizeof(args));
args.handle = res->bo_handle;
@@ -91,21 +91,21 @@ static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
return TRUE;
return FALSE;
}
static void
virgl_cache_flush(struct virgl_drm_winsys *qdws)
{
struct list_head *curr, *next;
struct virgl_hw_res *res;
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
curr = qdws->delayed.next;
next = curr->next;
while (curr != &qdws->delayed) {
res = LIST_ENTRY(struct virgl_hw_res, curr, head);
LIST_DEL(&res->head);
virgl_hw_res_destroy(qdws, res);
curr = next;
next = curr->next;
}
@@ -151,21 +151,21 @@ virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
struct virgl_hw_res **dres,
struct virgl_hw_res *sres)
{
struct virgl_hw_res *old = *dres;
if (pipe_reference(&(*dres)->reference, &sres->reference)) {
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(qdws, old);
} else {
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
virgl_cache_list_check_free(qdws);
old->start = os_time_get();
old->end = old->start + qdws->usecs;
LIST_ADDTAIL(&old->head, &qdws->delayed);
qdws->num_delayed++;
pipe_mutex_unlock(qdws->mutex);
}
}
*dres = sres;
@@ -303,21 +303,21 @@ virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
struct virgl_hw_res *res, *curr_res;
struct list_head *curr, *next;
int64_t now;
int ret;
/* only store binds for vertex/index/const buffers */
if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
- pipe_mutex_lock(qdws->mutex);
+ mtx_lock(&qdws->mutex);
res = NULL;
curr = qdws->delayed.next;
next = curr->next;
now = os_time_get();
while (curr != &qdws->delayed) {
curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
@@ -379,21 +379,21 @@ virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
struct drm_virtgpu_resource_info info_arg = {};
struct virgl_hw_res *res;
uint32_t handle = whandle->handle;
if (whandle->offset != 0) {
fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
whandle->offset);
return NULL;
}
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
if (res) {
struct virgl_hw_res *r = NULL;
virgl_drm_resource_reference(qdws, &r, res);
goto done;
}
}
@@ -472,31 +472,31 @@ static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
if (!res->flinked) {
memset(&flink, 0, sizeof(flink));
flink.handle = res->bo_handle;
if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
return FALSE;
}
res->flinked = TRUE;
res->flink = flink.name;
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
whandle->handle = res->flink;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
whandle->handle = res->bo_handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
- pipe_mutex_lock(qdws->bo_handles_mutex);
+ mtx_lock(&qdws->bo_handles_mutex);
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
pipe_mutex_unlock(qdws->bo_handles_mutex);
}
whandle->stride = stride;
return TRUE;
}
static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
struct virgl_hw_res *hres)
{
@@ -807,21 +807,21 @@ virgl_drm_winsys_create(int drmFD)
static struct util_hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
struct virgl_screen *screen = virgl_screen(pscreen);
boolean destroy;
- pipe_mutex_lock(virgl_screen_mutex);
+ mtx_lock(&virgl_screen_mutex);
destroy = --screen->refcnt == 0;
if (destroy) {
int fd = virgl_drm_winsys(screen->vws)->fd;
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
}
pipe_mutex_unlock(virgl_screen_mutex);
if (destroy) {
pscreen->destroy = screen->winsys_priv;
pscreen->destroy(pscreen);
@@ -848,21 +848,21 @@ static int compare_fd(void *key1, void *key2)
return stat1.st_dev != stat2.st_dev ||
stat1.st_ino != stat2.st_ino ||
stat1.st_rdev != stat2.st_rdev;
}
struct pipe_screen *
virgl_drm_screen_create(int fd)
{
struct pipe_screen *pscreen = NULL;
- pipe_mutex_lock(virgl_screen_mutex);
+ mtx_lock(&virgl_screen_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
if (!fd_tab)
goto unlock;
}
pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (pscreen) {
virgl_screen(pscreen)->refcnt++;
} else {
diff --git a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
index dde53e7..70bd6af 100644
--- a/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
+++ b/src/gallium/winsys/virgl/vtest/virgl_vtest_winsys.c
@@ -137,21 +137,21 @@ static boolean virgl_vtest_resource_is_busy(struct virgl_vtest_winsys *vtws,
return ret == 1 ? TRUE : FALSE;
}
static void
virgl_cache_flush(struct virgl_vtest_winsys *vtws)
{
struct list_head *curr, *next;
struct virgl_hw_res *res;
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
curr = vtws->delayed.next;
next = curr->next;
while (curr != &vtws->delayed) {
res = LIST_ENTRY(struct virgl_hw_res, curr, head);
LIST_DEL(&res->head);
virgl_hw_res_destroy(vtws, res);
curr = next;
next = curr->next;
}
@@ -182,21 +182,21 @@ virgl_cache_list_check_free(struct virgl_vtest_winsys *vtws)
static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
struct virgl_hw_res **dres,
struct virgl_hw_res *sres)
{
struct virgl_hw_res *old = *dres;
if (pipe_reference(&(*dres)->reference, &sres->reference)) {
if (!can_cache_resource(old)) {
virgl_hw_res_destroy(vtws, old);
} else {
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
virgl_cache_list_check_free(vtws);
old->start = os_time_get();
old->end = old->start + vtws->usecs;
LIST_ADDTAIL(&old->head, &vtws->delayed);
vtws->num_delayed++;
pipe_mutex_unlock(vtws->mutex);
}
}
*dres = sres;
@@ -326,21 +326,21 @@ virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
struct virgl_hw_res *res, *curr_res;
struct list_head *curr, *next;
int64_t now;
int ret;
/* only store binds for vertex/index/const buffers */
if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
goto alloc;
- pipe_mutex_lock(vtws->mutex);
+ mtx_lock(&vtws->mutex);
res = NULL;
curr = vtws->delayed.next;
next = curr->next;
now = os_time_get();
while (curr != &vtws->delayed) {
curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
if (!res && ((ret = virgl_is_res_compat(vtws, curr_res, size, bind, format)) > 0))
--
2.9.3
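
[For reference, a before/after sketch of what the sed expression in the
commit message does at a call site. The old wrapper definition is
paraphrased from src/gallium/auxiliary/os/os_thread.h and may not match
it character for character.]

#include <threads.h>

static mtx_t example_mutex;   /* Mesa initializes statics with _MTX_INITIALIZER_NP;
                               * plain C11 would use mtx_init(&example_mutex, mtx_plain). */

/* Before: the macro took the mutex object by name and added the '&'
 * itself, roughly:
 *
 *    #define pipe_mutex_lock(mutex) (void) mtx_lock(&(mutex))
 *
 *    pipe_mutex_lock(example_mutex);
 *
 * After: call the C11 function directly on a pointer: */
static void example(void)
{
   mtx_lock(&example_mutex);
   /* ... critical section ... */
   mtx_unlock(&example_mutex);   /* the patch leaves pipe_mutex_unlock()
                                  * in place; presumably a later patch in
                                  * the series converts it the same way. */
}

[Because the replacement re-adds the '&' textually, a caller that
already passed a pointer, such as radeon_set_fd_access() with its
mtx_t *mutex parameter, comes out as mtx_lock(&*mutex); the &* cancels
out, so it is equivalent to mtx_lock(mutex), merely redundant.]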