[Mesa-dev] [PATCHv3 05/16] util: add a generic thread pool data structure

Chia-I Wu olvaffe at gmail.com
Tue Aug 19 23:40:26 PDT 2014


It can be used to implement, for example, threaded glCompileShader and
glLinkProgram.  Two tests are included: one verifies the basic functions
and the other gives us some confidence in its thread-safety.

v2: allow tasks to "complete" other tasks

Signed-off-by: Chia-I Wu <olv at lunarg.com>
Reviewed-by: Brian Paul <brianp at vmware.com>

v3: move to src/util/ and mention the tests
---
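Note for reviewers (not part of the patch): a minimal usage sketch of the new
API, assuming the caller links against libmesautil and has src/util on the
include path.  The compile_cb callback and the results array are made up for
illustration, and error handling is omitted.

#include <stdio.h>
#include "threadpool.h"

static void
compile_cb(void *data)
{
   /* stand-in for real work, e.g. compiling a shader */
   int *result = (int *) data;
   *result = 1;
}

int
main(void)
{
   /* up to 4 worker threads, spawned lazily as tasks are queued */
   struct _mesa_threadpool *pool = _mesa_threadpool_create(4);
   struct _mesa_threadpool_task *tasks[2];
   int results[2] = { 0, 0 };

   tasks[0] = _mesa_threadpool_queue_task(pool, compile_cb, &results[0]);
   tasks[1] = _mesa_threadpool_queue_task(pool, compile_cb, &results[1]);

   /* block until both callbacks have run, then destroy the task objects */
   _mesa_threadpool_complete_tasks(pool, tasks, 2);
   printf("%d %d\n", results[0], results[1]);

   /* drop the last reference: joins the workers and destroys the pool */
   _mesa_threadpool_unref(pool);
   return 0;
}

The pool is reference-counted, so a long-lived user (e.g. a GL context) would
take a reference with _mesa_threadpool_ref() and drop it with
_mesa_threadpool_unref() on teardown.
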
 configure.ac                                  |   3 +-
 src/util/Makefile.am                          |   5 +-
 src/util/Makefile.sources                     |   3 +-
 src/util/tests/threadpool/Makefile.am         |  36 ++
 src/util/tests/threadpool/threadpool_test.cpp | 137 ++++++++
 src/util/threadpool.c                         | 476 ++++++++++++++++++++++++++
 src/util/threadpool.h                         |  67 ++++
 7 files changed, 724 insertions(+), 3 deletions(-)
 create mode 100644 src/util/tests/threadpool/Makefile.am
 create mode 100644 src/util/tests/threadpool/threadpool_test.cpp
 create mode 100644 src/util/threadpool.c
 create mode 100644 src/util/threadpool.h

diff --git a/configure.ac b/configure.ac
index 57e9f7d..2f7268f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2261,7 +2261,8 @@ AC_CONFIG_FILES([Makefile
 		src/mesa/drivers/x11/Makefile
 		src/mesa/main/tests/Makefile
 		src/util/Makefile
-		src/util/tests/hash_table/Makefile])
+		src/util/tests/hash_table/Makefile
+		src/util/tests/threadpool/Makefile])
 
 dnl Sort the dirs alphabetically
 GALLIUM_TARGET_DIRS=`echo $GALLIUM_TARGET_DIRS|tr " " "\n"|sort -u|tr "\n" " "`
diff --git a/src/util/Makefile.am b/src/util/Makefile.am
index 4733a1a..da6815e 100644
--- a/src/util/Makefile.am
+++ b/src/util/Makefile.am
@@ -19,7 +19,7 @@
 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 # IN THE SOFTWARE.
 
-SUBDIRS = . tests/hash_table
+SUBDIRS = . tests/hash_table tests/threadpool
 
 include Makefile.sources
 
@@ -34,6 +34,9 @@ libmesautil_la_SOURCES = \
 	$(MESA_UTIL_FILES) \
 	$(MESA_UTIL_GENERATED_FILES)
 
+libmesautil_la_LIBADD = \
+	$(PTHREAD_LIBS)
+
 BUILT_SOURCES = $(MESA_UTIL_GENERATED_FILES)
 CLEANFILES = $(BUILT_SOURCES)
 
diff --git a/src/util/Makefile.sources b/src/util/Makefile.sources
index 86466dc..65f98f7 100644
--- a/src/util/Makefile.sources
+++ b/src/util/Makefile.sources
@@ -1,7 +1,8 @@
 MESA_UTIL_FILES :=	\
 	hash_table.c	\
 	ralloc.c \
-	strtod.cpp
+	strtod.cpp \
+	threadpool.c
 
 MESA_UTIL_GENERATED_FILES = \
 	format_srgb.c
diff --git a/src/util/tests/threadpool/Makefile.am b/src/util/tests/threadpool/Makefile.am
new file mode 100644
index 0000000..2aa010c
--- /dev/null
+++ b/src/util/tests/threadpool/Makefile.am
@@ -0,0 +1,36 @@
+# Copyright © 2009 Intel Corporation
+#
+#  Permission is hereby granted, free of charge, to any person obtaining a
+#  copy of this software and associated documentation files (the "Software"),
+#  to deal in the Software without restriction, including without limitation
+#  on the rights to use, copy, modify, merge, publish, distribute, sub
+#  license, and/or sell copies of the Software, and to permit persons to whom
+#  the Software is furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice (including the next
+#  paragraph) shall be included in all copies or substantial portions of the
+#  Software.
+#
+#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+#  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+#  FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+#  ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+#  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+#  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+AM_CPPFLAGS = \
+	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/src/gtest/include \
+	-I$(top_srcdir)/src/util \
+	$(DEFINES)
+
+TESTS = threadpool-test
+
+check_PROGRAMS = threadpool-test
+
+threadpool_test_SOURCES = threadpool_test.cpp
+threadpool_test_CXXFLAGS = $(PTHREAD_CFLAGS)
+threadpool_test_LDADD =				\
+	$(top_builddir)/src/util/libmesautil.la \
+	$(top_builddir)/src/gtest/libgtest.la	\
+	$(PTHREAD_LIBS)
diff --git a/src/util/tests/threadpool/threadpool_test.cpp b/src/util/tests/threadpool/threadpool_test.cpp
new file mode 100644
index 0000000..63f55c5
--- /dev/null
+++ b/src/util/tests/threadpool/threadpool_test.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2014 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <gtest/gtest.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include "c11/threads.h"
+
+#include "threadpool.h"
+
+#define NUM_THREADS 10
+#define OPS_PER_THREAD 100
+#define MAX_TASKS 10
+
+static void
+race_cb(void *data)
+{
+   usleep(1000 * 5);
+}
+
+static int
+race_random_op(void *data)
+{
+   struct _mesa_threadpool *pool = (struct _mesa_threadpool *) data;
+   struct _mesa_threadpool_task *tasks[MAX_TASKS];
+   int num_tasks = 0;
+   int num_ops = 0;
+   int i;
+
+   while (num_ops < OPS_PER_THREAD) {
+      int op = (random() % 1000);
+
+      if (op < 480) { /* 48% */
+         if (num_tasks < MAX_TASKS) {
+            tasks[num_tasks++] =
+               _mesa_threadpool_queue_task(pool, race_cb, NULL);
+         }
+      }
+      else if (op < 980) { /* 50% */
+         if (num_tasks)
+            _mesa_threadpool_complete_task(pool, tasks[--num_tasks]);
+      }
+      else if (op < 995) { /* 1.5% */
+         for (i = 0; i < num_tasks; i++)
+            _mesa_threadpool_complete_task(pool, tasks[i]);
+         num_tasks = 0;
+      }
+      else { /* 0.5% */
+         _mesa_threadpool_join(pool, (op < 998));
+      }
+
+      num_ops++;
+   }
+
+   for (i = 0; i < num_tasks; i++)
+      _mesa_threadpool_complete_task(pool, tasks[i]);
+
+   _mesa_threadpool_unref(pool);
+
+   return 0;
+}
+
+/**
+ * \name Thread safety
+ */
+/*@{*/
+TEST(threadpool_test, race)
+{
+   struct _mesa_threadpool *pool;
+   thrd_t threads[NUM_THREADS];
+   int i;
+
+   srandom(time(NULL));
+   pool = _mesa_threadpool_create(4);
+   for (i = 0; i < NUM_THREADS; i++) {
+      thrd_create(&threads[i], race_random_op,
+            (void *) _mesa_threadpool_ref(pool));
+   }
+   _mesa_threadpool_unref(pool);
+
+   for (i = 0; i < NUM_THREADS; i++)
+      thrd_join(threads[i], NULL);
+
+   /* this is not really a unit test */
+   EXPECT_TRUE(true);
+}
+
+static void
+basic_cb(void *data)
+{
+   int *val = (int *) data;
+
+   usleep(1000 * 5);
+   *val = 1;
+}
+
+TEST(threadpool_test, basic)
+{
+   struct _mesa_threadpool *pool;
+   struct _mesa_threadpool_task *tasks[2];
+   int vals[2];
+
+   pool = _mesa_threadpool_create(2);
+
+   vals[0] = vals[1] = 0;
+   tasks[0] = _mesa_threadpool_queue_task(pool, basic_cb, (void *) &vals[0]);
+   tasks[1] = _mesa_threadpool_queue_task(pool, basic_cb, (void *) &vals[1]);
+   _mesa_threadpool_complete_task(pool, tasks[0]);
+   _mesa_threadpool_complete_task(pool, tasks[1]);
+   EXPECT_TRUE(vals[0] == 1 && vals[1] == 1);
+
+   _mesa_threadpool_unref(pool);
+}
+
+/*@}*/
diff --git a/src/util/threadpool.c b/src/util/threadpool.c
new file mode 100644
index 0000000..224f411
--- /dev/null
+++ b/src/util/threadpool.c
@@ -0,0 +1,476 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2014  LunarG, Inc.   All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include "c11/threads.h"
+#include "macros.h"
+#include "simple_list.h"
+#include "threadpool.h"
+
+enum _mesa_threadpool_control {
+   MESA_THREADPOOL_NORMAL,    /* threads wait when there is no task */
+   MESA_THREADPOOL_QUIT,      /* threads quit when there is no task */
+   MESA_THREADPOOL_QUIT_NOW   /* threads quit as soon as possible */
+};
+
+enum _mesa_threadpool_task_state {
+   MESA_THREADPOOL_TASK_PENDING,    /* task is on the pending list */
+   MESA_THREADPOOL_TASK_ACTIVE,     /* task is being worked on */
+   MESA_THREADPOOL_TASK_COMPLETED,  /* task has been completed */
+   MESA_THREADPOOL_TASK_CANCELLED   /* task is cancelled */
+};
+
+struct _mesa_threadpool_task {
+   /* these are protected by the pool's mutex */
+   struct simple_node link; /* must be the first */
+   enum _mesa_threadpool_task_state state;
+   cnd_t completed;
+
+   void (*func)(void *);
+   void *data;
+};
+
+struct _mesa_threadpool {
+   mtx_t mutex;
+   int refcnt;
+
+   enum _mesa_threadpool_control thread_control;
+   thrd_t *threads;
+   int num_threads, max_threads;
+   int idle_threads; /* number of threads that are idle */
+   cnd_t thread_wakeup;
+   cnd_t thread_joined;
+
+   struct simple_node pending_tasks;
+   int num_pending_tasks;
+   int num_tasks;
+};
+
+static struct _mesa_threadpool_task *
+task_create(void)
+{
+   struct _mesa_threadpool_task *task;
+
+   task = malloc(sizeof(*task));
+   if (!task)
+      return NULL;
+
+   if (cnd_init(&task->completed)) {
+      free(task);
+      return NULL;
+   }
+
+   task->state = MESA_THREADPOOL_TASK_PENDING;
+
+   return task;
+}
+
+static void
+task_destroy(struct _mesa_threadpool_task *task)
+{
+   cnd_destroy(&task->completed);
+   free(task);
+}
+
+static void
+pool_exec_task(struct _mesa_threadpool *pool,
+               struct _mesa_threadpool_task *task)
+{
+   assert(task->state == MESA_THREADPOOL_TASK_PENDING);
+
+   remove_from_list(&task->link);
+   pool->num_pending_tasks--;
+
+   task->state = MESA_THREADPOOL_TASK_ACTIVE;
+
+   /* do the work! */
+   mtx_unlock(&pool->mutex);
+   task->func(task->data);
+   mtx_lock(&pool->mutex);
+
+   task->state = MESA_THREADPOOL_TASK_COMPLETED;
+}
+
+static int
+_mesa_threadpool_worker(void *arg)
+{
+   struct _mesa_threadpool *pool = (struct _mesa_threadpool *) arg;
+
+   mtx_lock(&pool->mutex);
+
+   while (true) {
+      struct _mesa_threadpool_task *task;
+
+      /* wait until there are tasks */
+      while (is_empty_list(&pool->pending_tasks) &&
+             pool->thread_control == MESA_THREADPOOL_NORMAL) {
+         pool->idle_threads++;
+         cnd_wait(&pool->thread_wakeup, &pool->mutex);
+         pool->idle_threads--;
+      }
+
+      if (pool->thread_control != MESA_THREADPOOL_NORMAL) {
+         if (pool->thread_control == MESA_THREADPOOL_QUIT_NOW ||
+             is_empty_list(&pool->pending_tasks))
+            break;
+      }
+
+      assert(!is_empty_list(&pool->pending_tasks));
+      task = (struct _mesa_threadpool_task *)
+         first_elem(&pool->pending_tasks);
+
+      pool_exec_task(pool, task);
+      cnd_signal(&task->completed);
+   }
+
+   mtx_unlock(&pool->mutex);
+
+   return 0;
+}
+
+/**
+ * Queue a new task.  Return the new task, or NULL on failure.
+ */
+struct _mesa_threadpool_task *
+_mesa_threadpool_queue_task(struct _mesa_threadpool *pool,
+                            void (*func)(void *), void *data)
+{
+   struct _mesa_threadpool_task *task;
+
+   task = task_create();
+   if (!task)
+      return NULL;
+
+   task->func = func;
+   task->data = data;
+
+   mtx_lock(&pool->mutex);
+
+   /* someone is joining with the threads */
+   while (unlikely(pool->thread_control != MESA_THREADPOOL_NORMAL))
+      cnd_wait(&pool->thread_joined, &pool->mutex);
+
+   /* spawn threads as needed */
+   if (pool->idle_threads <= pool->num_pending_tasks &&
+       pool->num_threads < pool->max_threads) {
+      int err;
+
+      err = thrd_create(&pool->threads[pool->num_threads],
+                        _mesa_threadpool_worker, (void *) pool);
+      if (!err)
+         pool->num_threads++;
+
+      if (!pool->num_threads) {
+         mtx_unlock(&pool->mutex);
+         task_destroy(task);
+         return NULL;
+      }
+   }
+
+   insert_at_tail(&pool->pending_tasks, &task->link);
+   pool->num_tasks++;
+   pool->num_pending_tasks++;
+   cnd_signal(&pool->thread_wakeup);
+
+   mtx_unlock(&pool->mutex);
+
+   return task;
+}
+
+/**
+ * Wait for the tasks to complete or be cancelled, then destroy them.
+ */
+static bool
+pool_wait_tasks(struct _mesa_threadpool *pool,
+                struct _mesa_threadpool_task **tasks,
+                int num_tasks)
+{
+   bool all_completed = true;
+   int i;
+
+   for (i = 0; i < num_tasks; i++) {
+      struct _mesa_threadpool_task *task = tasks[i];
+
+      while (task->state != MESA_THREADPOOL_TASK_COMPLETED &&
+             task->state != MESA_THREADPOOL_TASK_CANCELLED)
+         cnd_wait(&task->completed, &pool->mutex);
+
+      if (task->state != MESA_THREADPOOL_TASK_COMPLETED)
+         all_completed = false;
+
+      task_destroy(task);
+   }
+
+   pool->num_tasks -= num_tasks;
+
+   return all_completed;
+}
+
+/**
+ * Wait for \p tasks to complete, and destroy them.  If some of \p tasks
+ * cannot be completed, return false.
+ *
+ * This function can be called from within the worker threads.
+ */
+bool
+_mesa_threadpool_complete_tasks(struct _mesa_threadpool *pool,
+                                struct _mesa_threadpool_task **tasks,
+                                int num_tasks)
+{
+   bool prioritized = false, completed;
+   int i;
+
+   mtx_lock(&pool->mutex);
+
+   /* we need to do something about tasks that are pending */
+   for (i = 0; i < num_tasks; i++) {
+      struct _mesa_threadpool_task *task = tasks[i];
+
+      if (task->state != MESA_THREADPOOL_TASK_PENDING)
+         continue;
+
+      /* move them to the head so that they are executed next */
+      if (!prioritized) {
+         int j;
+
+         for (j = i + 1; j < num_tasks; j++) {
+            if (tasks[j]->state == MESA_THREADPOOL_TASK_PENDING)
+               move_to_head(&pool->pending_tasks, &tasks[j]->link);
+         }
+         prioritized = true;
+      }
+
+      /*
+       * Execute the task right away: otherwise we would have to wait for a
+       * worker thread to pick it up, which is no faster.  More importantly,
+       * when this is called from within a worker thread, there may be no
+       * idle thread available to execute it.
+       */
+      pool_exec_task(pool, task);
+   }
+
+   completed = pool_wait_tasks(pool, tasks, num_tasks);
+
+   mtx_unlock(&pool->mutex);
+
+   return completed;
+}
+
+/**
+ * This is equivalent to calling \p _mesa_threadpool_complete_tasks with one
+ * task.
+ */
+bool
+_mesa_threadpool_complete_task(struct _mesa_threadpool *pool,
+                               struct _mesa_threadpool_task *task)
+{
+   bool completed;
+
+   mtx_lock(&pool->mutex);
+
+   if (task->state == MESA_THREADPOOL_TASK_PENDING)
+      pool_exec_task(pool, task);
+
+   completed = pool_wait_tasks(pool, &task, 1);
+
+   mtx_unlock(&pool->mutex);
+
+   return completed;
+}
+
+static void
+pool_cancel_pending_tasks(struct _mesa_threadpool *pool)
+{
+   struct simple_node *node, *temp;
+
+   if (is_empty_list(&pool->pending_tasks))
+      return;
+
+   foreach_s(node, temp, &pool->pending_tasks) {
+      struct _mesa_threadpool_task *task =
+         (struct _mesa_threadpool_task *) node;
+
+      remove_from_list(&task->link);
+      task->state = MESA_THREADPOOL_TASK_CANCELLED;
+
+      /* in case some thread is already waiting */
+      cnd_signal(&task->completed);
+   }
+
+   pool->num_pending_tasks = 0;
+}
+
+static void
+pool_join_threads(struct _mesa_threadpool *pool, bool graceful)
+{
+   int joined_threads = 0;
+
+   if (!pool->num_threads)
+      return;
+
+   pool->thread_control = (graceful) ?
+      MESA_THREADPOOL_QUIT : MESA_THREADPOOL_QUIT_NOW;
+
+   while (joined_threads < pool->num_threads) {
+      int i = joined_threads, num_threads = pool->num_threads;
+
+      cnd_broadcast(&pool->thread_wakeup);
+      mtx_unlock(&pool->mutex);
+      while (i < num_threads)
+         thrd_join(pool->threads[i++], NULL);
+      mtx_lock(&pool->mutex);
+
+      joined_threads = num_threads;
+   }
+
+   pool->thread_control = MESA_THREADPOOL_NORMAL;
+   pool->num_threads = 0;
+   assert(!pool->idle_threads);
+}
+
+/**
+ * Join with all pool threads.  When \p graceful is true, wait for the pending
+ * tasks to be completed; otherwise, cancel them.
+ */
+void
+_mesa_threadpool_join(struct _mesa_threadpool *pool, bool graceful)
+{
+   mtx_lock(&pool->mutex);
+
+   /* someone is already joining with the threads */
+   while (unlikely(pool->thread_control != MESA_THREADPOOL_NORMAL))
+      cnd_wait(&pool->thread_joined, &pool->mutex);
+
+   if (pool->num_threads) {
+      pool_join_threads(pool, graceful);
+      /* wake up whoever is waiting */
+      cnd_broadcast(&pool->thread_joined);
+   }
+
+   if (!graceful)
+      pool_cancel_pending_tasks(pool);
+
+   assert(pool->num_threads == 0);
+   assert(is_empty_list(&pool->pending_tasks) && !pool->num_pending_tasks);
+
+   mtx_unlock(&pool->mutex);
+}
+
+/**
+ * Decrease the reference count.  Destroy \p pool when the reference count
+ * reaches zero.
+ */
+void
+_mesa_threadpool_unref(struct _mesa_threadpool *pool)
+{
+   bool destroy = false;
+
+   mtx_lock(&pool->mutex);
+   pool->refcnt--;
+   destroy = (pool->refcnt == 0);
+   mtx_unlock(&pool->mutex);
+
+   if (destroy) {
+      _mesa_threadpool_join(pool, false);
+
+      if (pool->num_tasks) {
+         fprintf(stderr, "thread pool destroyed with %d tasks\n",
+               pool->num_tasks);
+      }
+
+      free(pool->threads);
+      cnd_destroy(&pool->thread_joined);
+      cnd_destroy(&pool->thread_wakeup);
+      mtx_destroy(&pool->mutex);
+      free(pool);
+   }
+}
+
+/**
+ * Increase the reference count.
+ */
+struct _mesa_threadpool *
+_mesa_threadpool_ref(struct _mesa_threadpool *pool)
+{
+   mtx_lock(&pool->mutex);
+   pool->refcnt++;
+   mtx_unlock(&pool->mutex);
+
+   return pool;
+}
+
+/**
+ * Create a thread pool.  Threads are spawned only when tasks are queued,
+ * so creating a pool is inexpensive.
+ */
+struct _mesa_threadpool *
+_mesa_threadpool_create(int max_threads)
+{
+   struct _mesa_threadpool *pool;
+
+   if (max_threads < 1)
+      return NULL;
+
+   pool = calloc(1, sizeof(*pool));
+   if (!pool)
+      return NULL;
+
+   if (mtx_init(&pool->mutex, mtx_plain)) {
+      free(pool);
+      return NULL;
+   }
+
+   pool->refcnt = 1;
+
+   if (cnd_init(&pool->thread_wakeup)) {
+      mtx_destroy(&pool->mutex);
+      free(pool);
+      return NULL;
+   }
+
+   if (cnd_init(&pool->thread_joined)) {
+      cnd_destroy(&pool->thread_wakeup);
+      mtx_destroy(&pool->mutex);
+      free(pool);
+      return NULL;
+   }
+
+   pool->thread_control = MESA_THREADPOOL_NORMAL;
+
+   pool->threads = malloc(sizeof(pool->threads[0]) * max_threads);
+   if (!pool->threads) {
+      cnd_destroy(&pool->thread_joined);
+      cnd_destroy(&pool->thread_wakeup);
+      mtx_destroy(&pool->mutex);
+      free(pool);
+      return NULL;
+   }
+
+   pool->max_threads = max_threads;
+
+   make_empty_list(&pool->pending_tasks);
+
+   return pool;
+}
diff --git a/src/util/threadpool.h b/src/util/threadpool.h
new file mode 100644
index 0000000..48e4a47
--- /dev/null
+++ b/src/util/threadpool.h
@@ -0,0 +1,67 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2014  LunarG, Inc.   All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef THREADPOOL_H
+#define THREADPOOL_H
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _mesa_threadpool;
+struct _mesa_threadpool_task;
+
+struct _mesa_threadpool *
+_mesa_threadpool_create(int max_threads);
+
+struct _mesa_threadpool *
+_mesa_threadpool_ref(struct _mesa_threadpool *pool);
+
+void
+_mesa_threadpool_unref(struct _mesa_threadpool *pool);
+
+void
+_mesa_threadpool_join(struct _mesa_threadpool *pool, bool graceful);
+
+struct _mesa_threadpool_task *
+_mesa_threadpool_queue_task(struct _mesa_threadpool *pool,
+                            void (*func)(void *), void *data);
+
+bool
+_mesa_threadpool_complete_tasks(struct _mesa_threadpool *pool,
+                                struct _mesa_threadpool_task **tasks,
+                                int num_tasks);
+
+bool
+_mesa_threadpool_complete_task(struct _mesa_threadpool *pool,
+                               struct _mesa_threadpool_task *task);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* THREADPOOL_H */
-- 
2.0.0.rc2


