[Mesa-dev] [PATCH 1/3] radv: use a winsys context per-queue, instead of per device v2

Andres Rodriguez andresx7 at gmail.com
Fri Jan 13 23:44:15 UTC 2017


Queues are independent execution streams. The vulkan spec provides no
ordering guarantees for different queues.

By using a single context for all queues, we are forcing all commands
into an unnecessary FIFO ordering.

This change is a preparation step to allow out-of-order scheduling of
certain work tasks.

v2: Fix a rebase error with radv_QueueSubmit() and trace_bo
Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
---
 src/amd/vulkan/radv_device.c  | 39 ++++++++++++++++++++-------------------
 src/amd/vulkan/radv_private.h |  2 +-
 src/amd/vulkan/radv_wsi.c     |  2 +-
 3 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 64fbce8..99c56a4 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -662,7 +662,7 @@ void radv_GetPhysicalDeviceMemoryProperties(
 	};
 }
 
-static void
+static int
 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
 		int queue_family_index, int idx)
 {
@@ -670,11 +670,19 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue,
 	queue->device = device;
 	queue->queue_family_index = queue_family_index;
 	queue->queue_idx = idx;
+
+	queue->hw_ctx = device->ws->ctx_create(device->ws);
+	if (!queue->hw_ctx)
+		return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+	return VK_SUCCESS;
 }
 
 static void
 radv_queue_finish(struct radv_queue *queue)
 {
+	if (queue->hw_ctx)
+		queue->device->ws->ctx_destroy(queue->hw_ctx);
 }
 
 VkResult radv_CreateDevice(
@@ -730,23 +738,20 @@ VkResult radv_CreateDevice(
 			goto fail;
 		}
 
-		device->queue_count[qfi] = queue_create->queueCount;
+		memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
 
-		for (unsigned q = 0; q < queue_create->queueCount; q++)
-			radv_queue_init(device, &device->queues[qfi][q], qfi, q);
-	}
+		device->queue_count[qfi] = queue_create->queueCount;
 
-	device->hw_ctx = device->ws->ctx_create(device->ws);
-	if (!device->hw_ctx) {
-		result = VK_ERROR_OUT_OF_HOST_MEMORY;
-		goto fail;
+		for (unsigned q = 0; q < queue_create->queueCount; q++) {
+			result = radv_queue_init(device, &device->queues[qfi][q], qfi, q);
+			if (result != VK_SUCCESS)
+				goto fail;
+		}
 	}
 
 	result = radv_device_init_meta(device);
-	if (result != VK_SUCCESS) {
-		device->ws->ctx_destroy(device->hw_ctx);
+	if (result != VK_SUCCESS)
 		goto fail;
-	}
 
 	radv_device_init_msaa(device);
 
@@ -791,9 +796,6 @@ fail:
 			vk_free(&device->alloc, device->queues[i]);
 	}
 
-	if (device->hw_ctx)
-		device->ws->ctx_destroy(device->hw_ctx);
-
 	vk_free(&device->alloc, device);
 	return result;
 }
@@ -807,7 +809,6 @@ void radv_DestroyDevice(
 	if (device->trace_bo)
 		device->ws->buffer_destroy(device->trace_bo);
 
-	device->ws->ctx_destroy(device->hw_ctx);
 	for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
 		for (unsigned q = 0; q < device->queue_count[i]; q++)
 			radv_queue_finish(&device->queues[i][q]);
@@ -920,7 +921,7 @@ VkResult radv_QueueSubmit(
 	RADV_FROM_HANDLE(radv_queue, queue, _queue);
 	RADV_FROM_HANDLE(radv_fence, fence, _fence);
 	struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
-	struct radeon_winsys_ctx *ctx = queue->device->hw_ctx;
+	struct radeon_winsys_ctx *ctx = queue->hw_ctx;
 	int ret;
 	uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
 
@@ -968,7 +969,7 @@ VkResult radv_QueueSubmit(
 			}
 			if (queue->device->trace_bo) {
 				bool success = queue->device->ws->ctx_wait_idle(
-							queue->device->hw_ctx,
+							queue->hw_ctx,
 							radv_queue_family_to_ring(
 								queue->queue_family_index),
 							queue->queue_idx);
@@ -999,7 +1000,7 @@ VkResult radv_QueueWaitIdle(
 {
 	RADV_FROM_HANDLE(radv_queue, queue, _queue);
 
-	queue->device->ws->ctx_wait_idle(queue->device->hw_ctx,
+	queue->device->ws->ctx_wait_idle(queue->hw_ctx,
 	                                 radv_queue_family_to_ring(queue->queue_family_index),
 	                                 queue->queue_idx);
 	return VK_SUCCESS;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index fc3cbca..ab4ede6 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -459,6 +459,7 @@ enum ring_type radv_queue_family_to_ring(int f);
 struct radv_queue {
 	VK_LOADER_DATA                              _loader_data;
 	struct radv_device *                         device;
+	struct radeon_winsys_ctx                    *hw_ctx;
 	int queue_family_index;
 	int queue_idx;
 };
@@ -470,7 +471,6 @@ struct radv_device {
 
 	struct radv_instance *                       instance;
 	struct radeon_winsys *ws;
-	struct radeon_winsys_ctx *hw_ctx;
 
 	struct radv_meta_state                       meta_state;
 
diff --git a/src/amd/vulkan/radv_wsi.c b/src/amd/vulkan/radv_wsi.c
index 952f2c3..002b3a8 100644
--- a/src/amd/vulkan/radv_wsi.c
+++ b/src/amd/vulkan/radv_wsi.c
@@ -364,7 +364,7 @@ VkResult radv_QueuePresentKHR(
 
 		RADV_FROM_HANDLE(radv_fence, fence, swapchain->fences[0]);
 		struct radeon_winsys_fence *base_fence = fence->fence;
-		struct radeon_winsys_ctx *ctx = queue->device->hw_ctx;
+		struct radeon_winsys_ctx *ctx = queue->hw_ctx;
 		queue->device->ws->cs_submit(ctx, queue->queue_idx,
 					     &queue->device->empty_cs[queue->queue_family_index],
 					     1,
-- 
2.9.3



More information about the mesa-dev mailing list