[Mesa-dev] [PATCH] winsys/radeon: cleanup CS offloading

Christian König deathsimple at vodafone.de
Sat Oct 19 12:09:15 CEST 2013


From: Christian König <christian.koenig at amd.com>

Using atomic functions for ncs is superfluous since it is
protected by a mutex anyway. Also lock the mutex only once
while retrieving the next CS for submission.
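
For illustration, a minimal standalone sketch of the resulting queue
handling (not the actual winsys code): pthread mutex/semaphore stand in
for pipe_mutex_lock()/pipe_semaphore_*, MAX_RINGS stands in for
RING_LAST, and struct cs_queue / queue_cs / next_cs are hypothetical
names used only for this sketch.

#include <pthread.h>
#include <semaphore.h>

#define MAX_RINGS 8        /* stands in for RING_LAST */

struct cs;                 /* opaque command stream, placeholder */

struct cs_queue {
    pthread_mutex_t lock;  /* protects ncs and stack */
    sem_t queued;          /* counts queued command streams */
    unsigned ncs;          /* plain counter: only touched with lock held */
    struct cs *stack[MAX_RINGS];
};

/* Producer: queue a CS for submission. */
void queue_cs(struct cs_queue *q, struct cs *cs)
{
    for (;;) {
        pthread_mutex_lock(&q->lock);
        if (q->ncs < MAX_RINGS) {
            q->stack[q->ncs++] = cs;
            pthread_mutex_unlock(&q->lock);
            sem_post(&q->queued);
            return;
        }
        /* no room left for a flush, retry */
        pthread_mutex_unlock(&q->lock);
    }
}

/* Consumer: take the next CS, locking the mutex only once. */
struct cs *next_cs(struct cs_queue *q)
{
    struct cs *cs;
    unsigned i;

    sem_wait(&q->queued);      /* at least one CS has been queued */

    pthread_mutex_lock(&q->lock);
    cs = q->stack[0];
    for (i = 1; i < q->ncs; i++)
        q->stack[i - 1] = q->stack[i];
    q->stack[--q->ncs] = NULL;
    pthread_mutex_unlock(&q->lock);

    return cs;
}

Since ncs is only ever read or written with the mutex held, the plain
increment and decrement are already race-free, which is why the
p_atomic_* wrappers add nothing; and the consumer now pops the head
entry under a single lock/unlock pair instead of two.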

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 31 ++++++++---------------
 1 file changed, 10 insertions(+), 21 deletions(-)

diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 4f43093..f8aeb96 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -542,13 +542,12 @@ void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *
 {
 retry:
     pipe_mutex_lock(ws->cs_stack_lock);
-    if (p_atomic_read(&ws->ncs) >= RING_LAST) {
+    if (ws->ncs >= RING_LAST) {
         /* no room left for a flush */
         pipe_mutex_unlock(ws->cs_stack_lock);
         goto retry;
     }
-    ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
-    p_atomic_inc(&ws->ncs);
+    ws->cs_stack[ws->ncs++] = cs;
     pipe_mutex_unlock(ws->cs_stack_lock);
     pipe_semaphore_signal(&ws->cs_queued);
 }
@@ -557,41 +556,31 @@ static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
 {
     struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
     struct radeon_drm_cs *cs;
-    unsigned i, empty_stack;
+    unsigned i;
 
     while (1) {
         pipe_semaphore_wait(&ws->cs_queued);
         if (ws->kill_thread)
             break;
-next:
+
         pipe_mutex_lock(ws->cs_stack_lock);
         cs = ws->cs_stack[0];
+        for (i = 1; i < ws->ncs; i++)
+            ws->cs_stack[i - 1] = ws->cs_stack[i];
+        ws->cs_stack[--ws->ncs] = NULL;
         pipe_mutex_unlock(ws->cs_stack_lock);
 
         if (cs) {
             radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
-
-            pipe_mutex_lock(ws->cs_stack_lock);
-            for (i = 1; i < p_atomic_read(&ws->ncs); i++) {
-                ws->cs_stack[i - 1] = ws->cs_stack[i];
-            }
-            ws->cs_stack[p_atomic_read(&ws->ncs) - 1] = NULL;
-            empty_stack = p_atomic_dec_zero(&ws->ncs);
-            pipe_mutex_unlock(ws->cs_stack_lock);
-
             pipe_semaphore_signal(&cs->flush_completed);
-
-            if (!empty_stack) {
-                goto next;
-            }
         }
     }
     pipe_mutex_lock(ws->cs_stack_lock);
-    for (i = 0; i < p_atomic_read(&ws->ncs); i++) {
+    for (i = 0; i < ws->ncs; i++) {
         pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
         ws->cs_stack[i] = NULL;
     }
-    p_atomic_set(&ws->ncs, 0);
+    ws->ncs = 0;
     pipe_mutex_unlock(ws->cs_stack_lock);
     return NULL;
 }
@@ -655,7 +644,7 @@ struct radeon_winsys *radeon_drm_winsys_create(int fd)
     pipe_mutex_init(ws->cmask_owner_mutex);
     pipe_mutex_init(ws->cs_stack_lock);
 
-    p_atomic_set(&ws->ncs, 0);
+    ws->ncs = 0;
     pipe_semaphore_init(&ws->cs_queued, 0);
     if (ws->num_cpus > 1 && debug_get_option_thread())
         ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);
-- 
1.8.1.2


