[Mesa-dev] [PATCH 3/6] u_queue: add util_queue_finish for waiting for previously added jobs
Nicolai Hähnle
nhaehnle at gmail.com
Sun Oct 22 18:45:37 UTC 2017
From: Nicolai Hähnle <nicolai.haehnle at amd.com>
Schedule one job for every thread, and wait on a barrier inside the job
execution function. Because the queue is FIFO, each worker thread reaches
the barrier only after finishing its share of the previously queued jobs,
so the barrier releases only once all previously added jobs have completed.
---
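A minimal caller-side sketch of how the new helper is meant to be used.
The names queue, jobs, NUM_JOBS and compile_job_execute are illustrative
only (not part of this patch), and the queue is assumed to have been set
up with util_queue_init() already:

   struct util_queue_fence fences[NUM_JOBS];

   for (unsigned i = 0; i < NUM_JOBS; i++) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &jobs[i], &fences[i],
                         compile_job_execute, NULL);
   }

   /* Returns only once every job added above has finished executing,
    * so there is no need to wait on the individual fences first.
    */
   util_queue_finish(queue);

   for (unsigned i = 0; i < NUM_JOBS; i++)
      util_queue_fence_destroy(&fences[i]);
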
src/util/u_queue.c | 35 +++++++++++++++++++++++++++++++++++
src/util/u_queue.h | 2 ++
2 files changed, 37 insertions(+)
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index 8293ec661b0..7008391d210 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -18,21 +18,25 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
#include "u_queue.h"
+
+#include <alloca.h>
+
#include "util/u_string.h"
+#include "util/u_thread.h"
static void util_queue_killall_and_wait(struct util_queue *queue);
/****************************************************************************
* Wait for all queues to assert idle when exit() is called.
*
* Otherwise, C++ static variable destructors can be called while threads
* are using the static variables.
*/
@@ -422,19 +426,50 @@ util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
}
}
mtx_unlock(&queue->lock);
if (removed)
util_queue_fence_signal(fence);
else
util_queue_fence_wait(fence);
}
+static void
+util_queue_finish_execute(void *data, int num_thread)
+{
+ util_barrier *barrier = data;
+ util_barrier_wait(barrier);
+}
+
+/**
+ * Wait until all previously added jobs have completed.
+ */
+void
+util_queue_finish(struct util_queue *queue)
+{
+ util_barrier barrier;
+ struct util_queue_fence *fences = alloca(queue->num_threads * sizeof(*fences));
+
+ util_barrier_init(&barrier, queue->num_threads);
+
+ for (unsigned i = 0; i < queue->num_threads; ++i) {
+ util_queue_fence_init(&fences[i]);
+ util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
+ }
+
+ for (unsigned i = 0; i < queue->num_threads; ++i) {
+ util_queue_fence_wait(&fences[i]);
+ util_queue_fence_destroy(&fences[i]);
+ }
+
+ util_barrier_destroy(&barrier);
+}
+
int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
/* Allow some flexibility by not raising an error. */
if (thread_index >= queue->num_threads)
return 0;
return u_thread_get_time_nano(queue->threads[thread_index]);
}
diff --git a/src/util/u_queue.h b/src/util/u_queue.h
index 3d9f19f4e6c..f92797ba5aa 100644
--- a/src/util/u_queue.h
+++ b/src/util/u_queue.h
@@ -203,20 +203,22 @@ void util_queue_destroy(struct util_queue *queue);
/* optional cleanup callback is called after fence is signaled: */
void util_queue_add_job(struct util_queue *queue,
void *job,
struct util_queue_fence *fence,
util_queue_execute_func execute,
util_queue_execute_func cleanup);
void util_queue_drop_job(struct util_queue *queue,
struct util_queue_fence *fence);
+void util_queue_finish(struct util_queue *queue);
+
int64_t util_queue_get_thread_time_nano(struct util_queue *queue,
unsigned thread_index);
/* util_queue needs to be cleared to zeroes for this to work */
static inline bool
util_queue_is_initialized(struct util_queue *queue)
{
return queue->threads != NULL;
}
--
2.11.0