[Mesa-dev] [PATCH 4/4] radv: initial support for shared semaphores
Dave Airlie
airlied at gmail.com
Thu Jul 20 03:38:14 UTC 2017
From: Dave Airlie <airlied at redhat.com>
This adds support for permanent semaphore import/export only.
It converts all semaphores to use syncobjs when the kernel supports
them, and exposes the extensions to userspace.
Signed-off-by: Dave Airlie <airlied at redhat.com>
---
src/amd/vulkan/radv_device.c | 174 ++++++++++++++++++++++++--
src/amd/vulkan/radv_entrypoints_gen.py | 3 +
src/amd/vulkan/radv_private.h | 10 +-
src/amd/vulkan/radv_radeon_winsys.h | 6 +-
src/amd/vulkan/radv_wsi.c | 24 +++-
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c | 54 ++++++--
6 files changed, 245 insertions(+), 26 deletions(-)
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index a91b366..2dfac87 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -102,6 +102,10 @@ static const VkExtensionProperties instance_extensions[] = {
.extensionName = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
.specVersion = 1,
},
+ {
+ .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME,
+ .specVersion = 1,
+ },
};
static const VkExtensionProperties common_device_extensions[] = {
@@ -154,6 +158,16 @@ static const VkExtensionProperties common_device_extensions[] = {
.specVersion = 1,
},
};
/* Extensions exposed only when the kernel supports DRM syncobjs
 * (rad_info.has_syncobj) — see radv_physical_device_init — since
 * semaphore import/export is implemented on top of syncobjs. */
static const VkExtensionProperties ext_sema_device_extensions[] = {
	{
		.extensionName = VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
		.specVersion = 1,
	},
	{
		.extensionName = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
		.specVersion = 1,
	},
};
static VkResult
radv_extensions_register(struct radv_instance *instance,
@@ -304,6 +318,15 @@ radv_physical_device_init(struct radv_physical_device *device,
if (result != VK_SUCCESS)
goto fail;
+ if (device->rad_info.has_syncobj) {
+ result = radv_extensions_register(instance,
+ &device->extensions,
+ ext_sema_device_extensions,
+ ARRAY_SIZE(ext_sema_device_extensions));
+ if (result != VK_SUCCESS)
+ goto fail;
+ }
+
fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
device->name = get_chip_name(device->rad_info.family);
@@ -1865,6 +1888,25 @@ fail:
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
+void *radv_alloc_sem_array(int num_sems, const VkSemaphore *sems,
+ bool has_syncobj)
+{
+ const uint32_t sem_size = has_syncobj ? sizeof(uint32_t) : sizeof(struct radeon_winsys_sem *);
+ void *sem_array = malloc(sem_size * num_sems);
+ if (!sem_array)
+ return NULL;
+
+ for (uint32_t j = 0; j < num_sems; j++) {
+ RADV_FROM_HANDLE(radv_semaphore, sem, sems[j]);
+ if (has_syncobj)
+ ((uint32_t *)sem_array)[j] = sem->syncobj;
+ else
+ ((struct radeon_winsys_sem **)sem_array)[j] = sem->sem;
+ }
+
+ return sem_array;
+}
+
VkResult radv_QueueSubmit(
VkQueue _queue,
uint32_t submitCount,
@@ -1885,6 +1927,7 @@ VkResult radv_QueueSubmit(
bool fence_emitted = false;
bool tess_rings_needed = false;
bool sample_positions_needed = false;
+ bool has_syncobj = queue->device->physical_device->rad_info.has_syncobj;
/* Do this first so failing to allocate scratch buffers can't result in
* partially executed submissions. */
@@ -1915,15 +1958,32 @@ VkResult radv_QueueSubmit(
bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
bool can_patch = !do_flush;
uint32_t advance;
+ void *wait_sem_array = NULL, *signal_sem_array = NULL;
+
+ if (pSubmits[i].waitSemaphoreCount) {
+ wait_sem_array = radv_alloc_sem_array(pSubmits[i].waitSemaphoreCount,
+ pSubmits[i].pWaitSemaphores,
+ has_syncobj);
+ if (!wait_sem_array)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ if (pSubmits[i].signalSemaphoreCount) {
+ signal_sem_array = radv_alloc_sem_array(pSubmits[i].signalSemaphoreCount,
+ pSubmits[i].pSignalSemaphores,
+ has_syncobj);
+ if (!signal_sem_array)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
if (!pSubmits[i].commandBufferCount) {
if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
&queue->device->empty_cs[queue->queue_family_index],
1, NULL, NULL,
- (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
+ wait_sem_array,
pSubmits[i].waitSemaphoreCount,
- (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
+ signal_sem_array,
pSubmits[i].signalSemaphoreCount,
false, base_fence);
if (ret) {
@@ -1964,9 +2024,9 @@ VkResult radv_QueueSubmit(
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
advance, initial_preamble_cs, continue_preamble_cs,
- (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
+ wait_sem_array,
b ? pSubmits[i].waitSemaphoreCount : 0,
- (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
+ signal_sem_array,
e ? pSubmits[i].signalSemaphoreCount : 0,
can_patch, base_fence);
@@ -1988,6 +2048,8 @@ VkResult radv_QueueSubmit(
}
}
}
+ free(wait_sem_array);
+ free(signal_sem_array);
free(cs_array);
}
@@ -2422,9 +2484,12 @@ radv_sparse_image_opaque_bind_memory(struct radv_device *device,
RADV_FROM_HANDLE(radv_fence, fence, _fence);
RADV_FROM_HANDLE(radv_queue, queue, _queue);
struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
+ bool has_syncobj = queue->device->physical_device->rad_info.has_syncobj;
+
bool fence_emitted = false;
for (uint32_t i = 0; i < bindInfoCount; ++i) {
+ void *wait_sem_array = NULL, *signal_sem_array = NULL;
for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
radv_sparse_buffer_bind_memory(queue->device,
pBindInfo[i].pBufferBinds + j);
@@ -2435,19 +2500,37 @@ radv_sparse_image_opaque_bind_memory(struct radv_device *device,
pBindInfo[i].pImageOpaqueBinds + j);
}
+ if (pBindInfo[i].waitSemaphoreCount) {
+ wait_sem_array = radv_alloc_sem_array(pBindInfo[i].waitSemaphoreCount,
+ pBindInfo[i].pWaitSemaphores,
+ has_syncobj);
+ if (!wait_sem_array)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ if (pBindInfo[i].signalSemaphoreCount) {
+ signal_sem_array = radv_alloc_sem_array(pBindInfo[i].signalSemaphoreCount,
+ pBindInfo[i].pSignalSemaphores,
+ has_syncobj);
+ if (!signal_sem_array)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
&queue->device->empty_cs[queue->queue_family_index],
1, NULL, NULL,
- (struct radeon_winsys_sem **)pBindInfo[i].pWaitSemaphores,
+ wait_sem_array,
pBindInfo[i].waitSemaphoreCount,
- (struct radeon_winsys_sem **)pBindInfo[i].pSignalSemaphores,
+ signal_sem_array,
pBindInfo[i].signalSemaphoreCount,
false, base_fence);
fence_emitted = true;
if (fence)
fence->submitted = true;
+ free(wait_sem_array);
+ free(signal_sem_array);
}
+
}
if (fence && !fence_emitted) {
@@ -2584,13 +2667,35 @@ VkResult radv_CreateSemaphore(
VkSemaphore* pSemaphore)
{
RADV_FROM_HANDLE(radv_device, device, _device);
- struct radeon_winsys_sem *sem;
+ const VkExportSemaphoreCreateInfoKHR *export =
+ vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
+ VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
+ export ? export->handleTypes : 0;
- sem = device->ws->create_sem(device->ws);
+ struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
+ sizeof(*sem), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!sem)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- *pSemaphore = radeon_winsys_sem_to_handle(sem);
+ int ret;
+ if (device->physical_device->rad_info.has_syncobj) {
+ int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
+ if (ret) {
+ vk_free2(&device->alloc, pAllocator, sem);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ sem->sem = NULL;
+ } else {
+ sem->sem = device->ws->create_sem(device->ws);
+ if (!sem->sem) {
+ vk_free2(&device->alloc, pAllocator, sem);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ sem->syncobj = 0;
+ }
+
+ *pSemaphore = radv_semaphore_to_handle(sem);
return VK_SUCCESS;
}
@@ -2600,11 +2705,15 @@ void radv_DestroySemaphore(
const VkAllocationCallbacks* pAllocator)
{
RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radeon_winsys_sem, sem, _semaphore);
+ RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
if (!_semaphore)
return;
- device->ws->destroy_sem(sem);
+ if (sem->syncobj)
+ device->ws->destroy_syncobj(device->ws, sem->syncobj);
+ else
+ device->ws->destroy_sem(sem->sem);
+ vk_free2(&device->alloc, pAllocator, sem);
}
VkResult radv_CreateEvent(
@@ -3389,3 +3498,46 @@ VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
*/
return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
}
+
+VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
+ const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
+
+ assert(pImportSemaphoreFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
+
+ int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &sem->syncobj);
+ if (ret != 0)
+ return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
+
+ close(pImportSemaphoreFdInfo->fd);
+ return VK_SUCCESS;
+}
+
+VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
+ const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
+ int *pFd)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
+ int ret;
+
+ assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
+ ret = device->ws->export_syncobj(device->ws, sem->syncobj, pFd);
+ if (ret)
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return VK_SUCCESS;
+}
+
+void radv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
+ VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
+{
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+ pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+ pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+
+}
diff --git a/src/amd/vulkan/radv_entrypoints_gen.py b/src/amd/vulkan/radv_entrypoints_gen.py
index 22068a5..7a48c8e 100644
--- a/src/amd/vulkan/radv_entrypoints_gen.py
+++ b/src/amd/vulkan/radv_entrypoints_gen.py
@@ -47,6 +47,9 @@ supported_extensions = [
'VK_KHR_external_memory_capabilities',
'VK_KHR_external_memory',
'VK_KHR_external_memory_fd',
+ 'VK_KHR_external_semaphore_capabilities',
+ 'VK_KHR_external_semaphore',
+ 'VK_KHR_external_semaphore_fd'
]
# We generate a static hash table for entry point lookup
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 891b34e..5a498a7 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -1470,6 +1470,14 @@ struct radv_query_pool {
uint32_t pipeline_stats_mask;
};
/* A semaphore is backed either by a kernel DRM syncobj (when
 * rad_info.has_syncobj) or by a legacy winsys semaphore; exactly one
 * member is in use (radv_CreateSemaphore zeroes/NULLs the other). */
struct radv_semaphore {
	uint32_t syncobj;              /* DRM syncobj handle; 0 when unused */
	struct radeon_winsys_sem *sem; /* legacy winsys sem; NULL when unused */
};
/* Convert VkSemaphore handles into the array form cs_submit expects
 * (uint32_t handles or radeon_winsys_sem pointers, per has_syncobj).
 * Caller owns and must free() the result; NULL on allocation failure. */
void *radv_alloc_sem_array(int num_sems, const VkSemaphore *sems,
			   bool has_syncobj);
+
+
void
radv_update_descriptor_sets(struct radv_device *device,
struct radv_cmd_buffer *cmd_buffer,
@@ -1563,6 +1571,6 @@ RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_query_pool, VkQueryPool)
RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_render_pass, VkRenderPass)
RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler, VkSampler)
RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_shader_module, VkShaderModule)
-RADV_DEFINE_NONDISP_HANDLE_CASTS(radeon_winsys_sem, VkSemaphore)
+RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_semaphore, VkSemaphore)
#endif /* RADV_PRIVATE_H */
diff --git a/src/amd/vulkan/radv_radeon_winsys.h b/src/amd/vulkan/radv_radeon_winsys.h
index 2f3990c..6e7e51b 100644
--- a/src/amd/vulkan/radv_radeon_winsys.h
+++ b/src/amd/vulkan/radv_radeon_winsys.h
@@ -131,9 +131,9 @@ struct radeon_bo_metadata {
uint32_t metadata[64];
};
/* NOTE(review): removed stray file-scope "uint32_t syncobj_handle;" —
 * a tentative variable definition in a shared header is emitted in every
 * translation unit that includes it (multiple-definition hazard), and
 * nothing in the patch references it. */
struct radeon_winsys_bo;
struct radeon_winsys_fence;
-struct radeon_winsys_sem;
struct radeon_winsys {
void (*destroy)(struct radeon_winsys *ws);
@@ -191,9 +191,9 @@ struct radeon_winsys {
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
struct radeon_winsys_cs *continue_preamble_cs,
- struct radeon_winsys_sem **wait_sem,
+ void *wait_sem,
unsigned wait_sem_count,
- struct radeon_winsys_sem **signal_sem,
+ void *signal_sem,
unsigned signal_sem_count,
bool can_patch,
struct radeon_winsys_fence *fence);
diff --git a/src/amd/vulkan/radv_wsi.c b/src/amd/vulkan/radv_wsi.c
index 38338d2..f813624 100644
--- a/src/amd/vulkan/radv_wsi.c
+++ b/src/amd/vulkan/radv_wsi.c
@@ -435,7 +435,7 @@ VkResult radv_AcquireNextImageKHR(
RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
RADV_FROM_HANDLE(radv_fence, fence, _fence);
RADV_FROM_HANDLE(radv_device, device, _device);
- RADV_FROM_HANDLE(radeon_winsys_sem, semaphore, _semaphore);
+ RADV_FROM_HANDLE(radv_semaphore, semaphore, _semaphore);
VkResult result = swapchain->acquire_next_image(swapchain, timeout, _semaphore,
pImageIndex);
@@ -449,12 +449,18 @@ VkResult radv_AcquireNextImageKHR(
struct radv_queue *queue = device->queues[RADV_QUEUE_GENERAL];
struct radeon_winsys_cs *cs = queue->device->empty_cs[queue->queue_family_index];
struct radeon_winsys_ctx *ctx = queue->hw_ctx;
+ void *signal_ptr;
+
+ if (semaphore->syncobj)
+ signal_ptr = &semaphore->syncobj;
+ else
+ signal_ptr = &semaphore->sem;
queue->device->ws->cs_submit(ctx, queue->queue_idx,
&cs,
1, NULL, NULL,
NULL, 0,
- &semaphore, 1, false, NULL);
+ signal_ptr, 1, false, NULL);
}
return result;
@@ -466,7 +472,7 @@ VkResult radv_QueuePresentKHR(
{
RADV_FROM_HANDLE(radv_queue, queue, _queue);
VkResult result = VK_SUCCESS;
-
+ bool has_syncobj = queue->device->physical_device->rad_info.has_syncobj;
const VkPresentRegionsKHR *regions =
vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
@@ -475,6 +481,15 @@ VkResult radv_QueuePresentKHR(
struct radeon_winsys_cs *cs;
const VkPresentRegionKHR *region = NULL;
VkResult item_result;
+ void *wait_sem_array = NULL;
+
+ if (pPresentInfo->waitSemaphoreCount) {
+ wait_sem_array = radv_alloc_sem_array(pPresentInfo->waitSemaphoreCount,
+ pPresentInfo->pWaitSemaphores,
+ has_syncobj);
+ if (!wait_sem_array)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
assert(radv_device_from_handle(swapchain->device) == queue->device);
if (swapchain->fences[0] == VK_NULL_HANDLE) {
@@ -504,7 +519,7 @@ VkResult radv_QueuePresentKHR(
queue->device->ws->cs_submit(ctx, queue->queue_idx,
&cs,
1, NULL, NULL,
- (struct radeon_winsys_sem **)pPresentInfo->pWaitSemaphores,
+ wait_sem_array,
pPresentInfo->waitSemaphoreCount, NULL, 0, false, base_fence);
fence->submitted = true;
@@ -531,6 +546,7 @@ VkResult radv_QueuePresentKHR(
1, &last, true, 1);
}
+ free(wait_sem_array);
}
return VK_SUCCESS;
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index 6ed8f32..092d77a 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -77,9 +77,10 @@ radv_amdgpu_cs(struct radeon_winsys_cs *base)
struct radv_amdgpu_sem_info {
int wait_sem_count;
- struct radeon_winsys_sem **wait_sems;
+ void *wait_sems;
int signal_sem_count;
- struct radeon_winsys_sem **signal_sems;
+ void *signal_sems;
+ bool emit_signal_sem;
};
static int ring_to_hw_ip(enum ring_type ring)
@@ -719,6 +720,7 @@ static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
}
+ sem_info->emit_signal_sem = true;
r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
@@ -792,6 +794,7 @@ static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
}
}
+ sem_info->emit_signal_sem = (i == cs_count - cnt);
r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
@@ -898,6 +901,7 @@ static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
request.ibs = &ib;
request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
+ sem_info->emit_signal_sem = (i == cs_count - cnt);
r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
@@ -929,9 +933,9 @@ static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
struct radeon_winsys_cs *continue_preamble_cs,
- struct radeon_winsys_sem **wait_sem,
+ void *wait_sem,
unsigned wait_sem_count,
- struct radeon_winsys_sem **signal_sem,
+ void *signal_sem,
unsigned signal_sem_count,
bool can_patch,
struct radeon_winsys_fence *_fence)
@@ -1074,8 +1078,10 @@ static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
uint32_t ring,
struct radv_amdgpu_sem_info *sem_info)
{
+ if (ctx->ws->info.has_syncobj)
+ return 0;
for (unsigned i = 0; i < sem_info->signal_sem_count; i++) {
- struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)sem_info->signal_sems[i];
+ struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)((struct radeon_winsys_sem **)sem_info->signal_sems)[i];
if (sem->context)
return -EINVAL;
@@ -1096,6 +1102,7 @@ static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
struct drm_amdgpu_cs_chunk *chunks;
struct drm_amdgpu_cs_chunk_data *chunk_data;
struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+ struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
int i;
struct amdgpu_cs_fence *sem;
user_fence = (request->fence_info.handle != NULL);
@@ -1136,7 +1143,22 @@ static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
&chunk_data[i]);
}
- if (sem_info->wait_sem_count) {
+ if (ctx->ws->info.has_syncobj && sem_info->wait_sem_count) {
+ wait_syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * sem_info->wait_sem_count);
+ if (!wait_syncobj) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+ for (unsigned j = 0; j < sem_info->wait_sem_count; j++) {
+ struct drm_amdgpu_cs_chunk_sem *sem = &wait_syncobj[j];
+ sem->handle = ((uint32_t *)sem_info->wait_sems)[j];
+ }
+ i = num_chunks++;
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * sem_info->wait_sem_count;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)wait_syncobj;
+ }
+ else if (sem_info->wait_sem_count) {
sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait_sem_count);
if (!sem_dependencies) {
r = -ENOMEM;
@@ -1144,7 +1166,7 @@ static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
}
int sem_count = 0;
for (unsigned j = 0; j < sem_info->wait_sem_count; j++) {
- sem = (struct amdgpu_cs_fence *)sem_info->wait_sems[j];
+ sem = (struct amdgpu_cs_fence *)((struct radeon_winsys_sem **)sem_info->wait_sems)[j];
if (!sem->context)
continue;
struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
@@ -1161,6 +1183,22 @@ static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
sem_info->wait_sem_count = 0;
}
+ if (ctx->ws->info.has_syncobj && sem_info->signal_sem_count && sem_info->emit_signal_sem) {
+ signal_syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * sem_info->signal_sem_count);
+ if (!signal_syncobj) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+ for (unsigned j = 0; j < sem_info->signal_sem_count; j++) {
+ struct drm_amdgpu_cs_chunk_sem *sem = &signal_syncobj[j];
+ sem->handle = ((uint32_t *)sem_info->signal_sems)[j];
+ }
+ i = num_chunks++;
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * sem_info->signal_sem_count;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)signal_syncobj;
+ }
+
r = amdgpu_cs_submit_raw(ctx->ws->dev,
ctx->ctx,
request->resources,
@@ -1169,6 +1207,8 @@ static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
&request->seq_no);
error_out:
free(sem_dependencies);
+ free(wait_syncobj);
+ free(signal_syncobj);
return r;
}
--
2.9.4
More information about the mesa-dev
mailing list