[PATCH 3/4] amdgpu: add mutex for cross-process synchronization
Chunming Zhou
David1.Zhou at amd.com
Thu Aug 18 07:55:58 UTC 2016
Change-Id: I69b5c8d86f9e1ed32bb20e899b74ad4146c0e988
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
amdgpu/amdgpu_cs.c | 36 +++++++++++++++++++++++++++++-------
amdgpu/amdgpu_internal.h | 2 ++
2 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index a3ff34e..ecbd4d7 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -541,6 +541,12 @@ int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
}
(*sem)->buf_handle = buf_handle;
atomic_set(&(*sem)->refcount, 1);
+ r = sem_init(&(*sem)->mutex, 1, 1);
+ if (r) {
+ amdgpu_bo_cpu_unmap(buf_handle);
+ amdgpu_bo_free(buf_handle);
+ return r;
+ }
(*sem)->version = 2;
return 0;
@@ -549,6 +555,7 @@ int amdgpu_cs_create_semaphore_object(amdgpu_device_handle device_handle,
int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
struct amdgpu_semaphore *gpu_semaphore;
+ int r;
if (NULL == sem)
return -EINVAL;
@@ -559,6 +566,11 @@ int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
gpu_semaphore->version = 1;
atomic_set(&gpu_semaphore->refcount, 1);
+ r = sem_init(&gpu_semaphore->mutex, 1, 1);
+ if (r) {
+ free(gpu_semaphore);
+ return r;
+ }
*sem = gpu_semaphore;
return 0;
@@ -581,6 +593,7 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
/* sem has been signaled */
if (sem->signal_fence.ctx_id)
return -EINVAL;
+ sem_wait(&sem->mutex);
pthread_mutex_lock(&ctx->sequence_mutex);
sem->signal_fence.ctx_id = ctx->id;
sem->signal_fence.ip_type = ip_type;
@@ -589,6 +602,7 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
sem->signal_fence.seq_no = ctx->last_seq[ip_type][ip_instance][ring];
update_references(NULL, &sem->refcount);
pthread_mutex_unlock(&ctx->sequence_mutex);
+ sem_post(&sem->mutex);
return 0;
}
@@ -609,10 +623,11 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
/* must signal first */
if (0 == sem->signal_fence.ctx_id)
return -EINVAL;
-
+ sem_wait(&sem->mutex);
pthread_mutex_lock(&ctx->sequence_mutex);
list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
pthread_mutex_unlock(&ctx->sequence_mutex);
+ sem_post(&sem->mutex);
return 0;
}
@@ -622,13 +637,13 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
return -EINVAL;
if (0 == sem->signal_fence.ctx_id)
return -EINVAL;
-
+ sem_wait(&sem->mutex);
sem->signal_fence.ctx_id = 0;
sem->signal_fence.ip_type = 0;
sem->signal_fence.ip_instance = 0;
sem->signal_fence.ring = 0;
sem->signal_fence.seq_no = 0;
-
+ sem_post(&sem->mutex);
return 0;
}
@@ -636,8 +651,9 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
{
if (NULL == sem)
return -EINVAL;
-
+ sem_wait(&sem->mutex);
if (update_references(&sem->refcount, NULL)) {
+ sem_post(&sem->mutex);
if (sem->version == 1)
free(sem);
else if (sem->version == 2) {
@@ -645,7 +661,9 @@ static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
amdgpu_bo_cpu_unmap(buf_handle);
amdgpu_bo_free(buf_handle);
}
+ return 0;
}
+ sem_post(&sem->mutex);
return 0;
}
@@ -657,10 +675,14 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
int amdgpu_cs_export_semaphore(amdgpu_semaphore_handle sem,
uint32_t *shared_handle)
{
- return amdgpu_bo_export(sem->buf_handle,
- amdgpu_bo_handle_type_dma_buf_fd,
- shared_handle);
+ int r;
+ sem_wait(&sem->mutex);
+ r = amdgpu_bo_export(sem->buf_handle,
+ amdgpu_bo_handle_type_dma_buf_fd,
+ shared_handle);
+ sem_post(&sem->mutex);
+ return r;
}
int amdgpu_cs_import_semaphore(amdgpu_semaphore_handle *sem,
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 7c422da..f035786 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -31,6 +31,7 @@
#include <assert.h>
#include <pthread.h>
+#include <semaphore.h>
#include "libdrm_macros.h"
#include "xf86atomic.h"
@@ -135,6 +136,7 @@ struct amdgpu_semaphore {
struct list_head list;
struct drm_amdgpu_fence signal_fence;
amdgpu_bo_handle buf_handle;
+ sem_t mutex;
uint32_t version;
};
--
1.9.1
More information about the amd-gfx
mailing list