[Mesa-dev] [PATCH] winsys/radeon: Create async thread only once
Christian König
deathsimple at vodafone.de
Mon Aug 29 01:29:46 PDT 2011
I haven't really tested this, so please speak up if anybody finds any
regressions, but I'm going to push this if there are no objections.
Besides being a real killer for performance, the whole "[New Thread
0x7fffe60fb700 (LWP 5212)]/[Thread 0x7fffe60fb700 (LWP 5212) exited]" in
gdb is quite annoying.
Christian.
Am Samstag, den 27.08.2011, 22:57 +0200 schrieb Maarten Lankhorst:
> I noticed that a thread was created every time async flush was called, so I moved it and used some semaphores to sync.
>
> Signed-off-by: Maarten Lankhorst <m.b.lankhorst at gmail.com>
>
> ---
> diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
> index c309354..dd3a4a3 100644
> --- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
> +++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
> @@ -130,6 +130,9 @@ static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
> FREE(csc->relocs);
> }
>
> +DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
> +static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);
> +
> static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
> {
> struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
> @@ -139,6 +142,8 @@ static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
> if (!cs) {
> return NULL;
> }
> + pipe_semaphore_init(&cs->flush_queued, 0);
> + pipe_semaphore_init(&cs->flush_completed, 0);
>
> cs->ws = ws;
>
> @@ -158,6 +163,8 @@ static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
> cs->base.buf = cs->csc->buf;
>
> p_atomic_inc(&ws->num_cs);
> + if (cs->ws->num_cpus > 1 && debug_get_option_thread())
> + cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs);
> return &cs->base;
> }
>
> @@ -357,9 +364,8 @@ static void radeon_drm_cs_write_reloc(struct radeon_winsys_cs *rcs,
> OUT_CS(&cs->base, index * RELOC_DWORDS);
> }
>
> -static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
> +static void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
> {
> - struct radeon_cs_context *csc = (struct radeon_cs_context*)param;
> unsigned i;
>
> if (drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
> @@ -381,20 +387,32 @@ static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
> p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);
>
> radeon_cs_context_cleanup(csc);
> +}
> +
> +static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
> +{
> + struct radeon_drm_cs *cs = (struct radeon_drm_cs*)param;
> +
> + while (1) {
> + pipe_semaphore_wait(&cs->flush_queued);
> + if (cs->kill_thread)
> + break;
> + radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
> + pipe_semaphore_signal(&cs->flush_completed);
> + }
> + pipe_semaphore_signal(&cs->flush_completed);
> return NULL;
> }
>
> void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs)
> {
> /* Wait for any pending ioctl to complete. */
> - if (cs->thread) {
> - pipe_thread_wait(cs->thread);
> - cs->thread = 0;
> + if (cs->thread && cs->flush_started) {
> + pipe_semaphore_wait(&cs->flush_completed);
> + cs->flush_started = 0;
> }
> }
>
> -DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
> -
> static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
> {
> struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
> @@ -402,33 +420,33 @@ static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
>
> radeon_drm_cs_sync_flush(cs);
>
> + /* Flip command streams. */
> + tmp = cs->csc;
> + cs->csc = cs->cst;
> + cs->cst = tmp;
> +
> /* If the CS is not empty, emit it in a newly-spawned thread. */
> if (cs->base.cdw) {
> - unsigned i, crelocs = cs->csc->crelocs;
> + unsigned i, crelocs = cs->cst->crelocs;
>
> - cs->csc->chunks[0].length_dw = cs->base.cdw;
> + cs->cst->chunks[0].length_dw = cs->base.cdw;
>
> for (i = 0; i < crelocs; i++) {
> /* Update the number of active asynchronous CS ioctls for the buffer. */
> - p_atomic_inc(&cs->csc->relocs_bo[i]->num_active_ioctls);
> + p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
> }
>
> - if (cs->ws->num_cpus > 1 && debug_get_option_thread() &&
> + if (cs->thread &&
> (flags & RADEON_FLUSH_ASYNC)) {
> - cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs->csc);
> - assert(cs->thread);
> + cs->flush_started = 1;
> + pipe_semaphore_signal(&cs->flush_queued);
> } else {
> - radeon_drm_cs_emit_ioctl(cs->csc);
> + radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
> }
> } else {
> - radeon_cs_context_cleanup(cs->csc);
> + radeon_cs_context_cleanup(cs->cst);
> }
>
> - /* Flip command streams. */
> - tmp = cs->csc;
> - cs->csc = cs->cst;
> - cs->cst = tmp;
> -
> /* Prepare a new CS. */
> cs->base.buf = cs->csc->buf;
> cs->base.cdw = 0;
> @@ -438,6 +456,15 @@ static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
> {
> struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
> radeon_drm_cs_sync_flush(cs);
> + if (cs->thread) {
> + cs->kill_thread = 1;
> + pipe_semaphore_signal(&cs->flush_queued);
> + pipe_semaphore_wait(&cs->flush_completed);
> + pipe_thread_wait(cs->thread);
> + pipe_thread_destroy(cs->thread);
> + }
> + pipe_semaphore_destroy(&cs->flush_queued);
> + pipe_semaphore_destroy(&cs->flush_completed);
> radeon_cs_context_cleanup(&cs->csc1);
> radeon_cs_context_cleanup(&cs->csc2);
> p_atomic_dec(&cs->ws->num_cs);
> diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.h b/src/gallium/winsys/radeon/drm/radeon_drm_cs.h
> index fe28532..e8e34c2 100644
> --- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.h
> +++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.h
> @@ -75,6 +75,8 @@ struct radeon_drm_cs {
> void *flush_data;
>
> pipe_thread thread;
> + int flush_started, kill_thread;
> + pipe_semaphore flush_queued, flush_completed;
> };
>
> int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo);
>
>
> _______________________________________________
> mesa-dev mailing list
> mesa-dev at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/mesa-dev
More information about the mesa-dev
mailing list