/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_queue.h"

#include "util/libsync.h"

#include "venus-protocol/vn_protocol_driver_event.h"
#include "venus-protocol/vn_protocol_driver_fence.h"
#include "venus-protocol/vn_protocol_driver_queue.h"
#include "venus-protocol/vn_protocol_driver_semaphore.h"

#include "vn_device.h"
#include "vn_device_memory.h"
#include "vn_renderer.h"
#include "vn_wsi.h"

/* queue commands */

void
vn_GetDeviceQueue2(VkDevice device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < dev->queue_count; i++) {
      struct vn_queue *queue = &dev->queues[i];
      if (queue->family == pQueueInfo->queueFamilyIndex &&
          queue->index == pQueueInfo->queueIndex &&
          queue->flags == pQueueInfo->flags) {
         *pQueue = vn_queue_to_handle(queue);
         return;
      }
   }

   unreachable("bad queue family/index");
}

static void
vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem);
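
/* Transient state for a vkQueueSubmit or vkQueueBindSparse call.  The
 * batches are scanned for wait semaphores whose payload is
 * VN_SYNC_TYPE_WSI_SIGNALED; such waits were already satisfied on the
 * driver side and must not be forwarded to the renderer, so a filtered
 * copy of the batches is built in temp storage when any are found.
 */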
struct vn_queue_submission {
   VkStructureType batch_type;
   VkQueue queue;
   uint32_t batch_count;
   union {
      const void *batches;
      const VkSubmitInfo *submit_batches;
      const VkBindSparseInfo *bind_sparse_batches;
   };
   VkFence fence;

   uint32_t wait_semaphore_count;
   uint32_t wait_wsi_count;

   struct {
      void *storage;

      union {
         void *batches;
         VkSubmitInfo *submit_batches;
         VkBindSparseInfo *bind_sparse_batches;
      };
      VkSemaphore *semaphores;
   } temp;
};

static void
vn_queue_submission_count_batch_semaphores(struct vn_queue_submission *submit,
                                           uint32_t batch_index)
{
   union {
      const VkSubmitInfo *submit_batch;
      const VkBindSparseInfo *bind_sparse_batch;
   } u;
   const VkSemaphore *wait_sems;
   uint32_t wait_count;
   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch = &submit->submit_batches[batch_index];
      wait_sems = u.submit_batch->pWaitSemaphores;
      wait_count = u.submit_batch->waitSemaphoreCount;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch = &submit->bind_sparse_batches[batch_index];
      wait_sems = u.bind_sparse_batch->pWaitSemaphores;
      wait_count = u.bind_sparse_batch->waitSemaphoreCount;
      break;
   default:
      unreachable("unexpected batch type");
      break;
   }

   submit->wait_semaphore_count += wait_count;
   for (uint32_t i = 0; i < wait_count; i++) {
      struct vn_semaphore *sem = vn_semaphore_from_handle(wait_sems[i]);
      const struct vn_sync_payload *payload = sem->payload;

      if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
         submit->wait_wsi_count++;
   }
}

static void
vn_queue_submission_count_semaphores(struct vn_queue_submission *submit)
{
   submit->wait_semaphore_count = 0;
   submit->wait_wsi_count = 0;

   for (uint32_t i = 0; i < submit->batch_count; i++)
      vn_queue_submission_count_batch_semaphores(submit, i);
}
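
/* A single allocation backs both the shadow copy of the batches and the
 * filtered wait-semaphore array: the batches live at offset 0 and the
 * semaphores right after them, which is why only semaphores_offset needs
 * to be remembered.
 */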
static VkResult
vn_queue_submission_alloc_storage(struct vn_queue_submission *submit)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);
   const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;
   size_t alloc_size = 0;
   size_t semaphores_offset = 0;

   /* we want to filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
   if (submit->wait_wsi_count) {
      switch (submit->batch_type) {
      case VK_STRUCTURE_TYPE_SUBMIT_INFO:
         alloc_size += sizeof(VkSubmitInfo) * submit->batch_count;
         break;
      case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
         alloc_size += sizeof(VkBindSparseInfo) * submit->batch_count;
         break;
      default:
         unreachable("unexpected batch type");
         break;
      }

      semaphores_offset = alloc_size;
      alloc_size += sizeof(*submit->temp.semaphores) *
                    (submit->wait_semaphore_count - submit->wait_wsi_count);
   }

   if (!alloc_size) {
      submit->temp.storage = NULL;
      return VK_SUCCESS;
   }

   submit->temp.storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!submit->temp.storage)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   submit->temp.batches = submit->temp.storage;
   submit->temp.semaphores = submit->temp.storage + semaphores_offset;

   return VK_SUCCESS;
}
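
/* Rewrites one batch to drop VN_SYNC_TYPE_WSI_SIGNALED waits.  Such a
 * semaphore has effectively been waited on by the WSI code already; its
 * temporary payload is reset back to the permanent one, and the remaining
 * waits are packed into the temp.semaphores array starting at sem_base.
 */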
static uint32_t
vn_queue_submission_filter_batch_wsi_semaphores(
   struct vn_queue_submission *submit,
   uint32_t batch_index,
   uint32_t sem_base)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);

   union {
      VkSubmitInfo *submit_batch;
      VkBindSparseInfo *bind_sparse_batch;
   } u;
   const VkSemaphore *src_sems;
   uint32_t src_count;
   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch = &submit->temp.submit_batches[batch_index];
      src_sems = u.submit_batch->pWaitSemaphores;
      src_count = u.submit_batch->waitSemaphoreCount;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch = &submit->temp.bind_sparse_batches[batch_index];
      src_sems = u.bind_sparse_batch->pWaitSemaphores;
      src_count = u.bind_sparse_batch->waitSemaphoreCount;
      break;
   default:
      unreachable("unexpected batch type");
      break;
   }

   VkSemaphore *dst_sems = &submit->temp.semaphores[sem_base];
   uint32_t dst_count = 0;

   /* filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
   for (uint32_t i = 0; i < src_count; i++) {
      struct vn_semaphore *sem = vn_semaphore_from_handle(src_sems[i]);
      const struct vn_sync_payload *payload = sem->payload;

      if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
         vn_semaphore_reset_wsi(queue->device, sem);
      else
         dst_sems[dst_count++] = src_sems[i];
   }

   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch->pWaitSemaphores = dst_sems;
      u.submit_batch->waitSemaphoreCount = dst_count;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch->pWaitSemaphores = dst_sems;
      u.bind_sparse_batch->waitSemaphoreCount = dst_count;
      break;
   default:
      break;
   }

   return dst_count;
}

static void
vn_queue_submission_setup_batches(struct vn_queue_submission *submit)
{
   if (!submit->temp.storage)
      return;

   /* make a copy because we need to filter out WSI semaphores */
   if (submit->wait_wsi_count) {
      switch (submit->batch_type) {
      case VK_STRUCTURE_TYPE_SUBMIT_INFO:
         memcpy(submit->temp.submit_batches, submit->submit_batches,
                sizeof(submit->submit_batches[0]) * submit->batch_count);
         submit->submit_batches = submit->temp.submit_batches;
         break;
      case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
         memcpy(submit->temp.bind_sparse_batches, submit->bind_sparse_batches,
                sizeof(submit->bind_sparse_batches[0]) * submit->batch_count);
         submit->bind_sparse_batches = submit->temp.bind_sparse_batches;
         break;
      default:
         unreachable("unexpected batch type");
         break;
      }
   }

   uint32_t wait_sem_base = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      if (submit->wait_wsi_count) {
         wait_sem_base += vn_queue_submission_filter_batch_wsi_semaphores(
            submit, i, wait_sem_base);
      }
   }
}
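
/* Common prepare path for both submit flavors: record the batches, count
 * the wait semaphores, and set up filtered copies when any WSI-signaled
 * waits were found.  On success the caller must eventually call
 * vn_queue_submission_cleanup().
 */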
static VkResult
vn_queue_submission_prepare_submit(struct vn_queue_submission *submit,
                                   VkQueue queue,
                                   uint32_t batch_count,
                                   const VkSubmitInfo *submit_batches,
                                   VkFence fence)
{
   submit->batch_type = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   submit->queue = queue;
   submit->batch_count = batch_count;
   submit->submit_batches = submit_batches;
   submit->fence = fence;

   vn_queue_submission_count_semaphores(submit);

   VkResult result = vn_queue_submission_alloc_storage(submit);
   if (result != VK_SUCCESS)
      return result;

   vn_queue_submission_setup_batches(submit);

   return VK_SUCCESS;
}

static VkResult
vn_queue_submission_prepare_bind_sparse(
   struct vn_queue_submission *submit,
   VkQueue queue,
   uint32_t batch_count,
   const VkBindSparseInfo *bind_sparse_batches,
   VkFence fence)
{
   submit->batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
   submit->queue = queue;
   submit->batch_count = batch_count;
   submit->bind_sparse_batches = bind_sparse_batches;
   submit->fence = fence;

   vn_queue_submission_count_semaphores(submit);

   VkResult result = vn_queue_submission_alloc_storage(submit);
   if (result != VK_SUCCESS)
      return result;

   vn_queue_submission_setup_batches(submit);

   return VK_SUCCESS;
}

static void
vn_queue_submission_cleanup(struct vn_queue_submission *submit)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);
   const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;

   vk_free(alloc, submit->temp.storage);
}
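
/* vkQueueSubmit is normally forwarded to the renderer asynchronously.
 * Two cases force a synchronous call instead: a batch carrying a WSI
 * memory signal and an external fence.  In both, another party can
 * observe the result of the submission without a venus roundtrip in
 * between, so the renderer must see the work first.
 */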
VkResult
vn_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   struct vn_device *dev = queue->device;
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   const bool is_fence_external = fence && fence->is_external;

   struct vn_queue_submission submit;
   VkResult result = vn_queue_submission_prepare_submit(
      &submit, _queue, submitCount, pSubmits, _fence);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const struct vn_device_memory *wsi_mem = NULL;
   if (submit.batch_count == 1) {
      const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(
         submit.submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      if (info) {
         wsi_mem = vn_device_memory_from_handle(info->memory);
         assert(!wsi_mem->base_memory && wsi_mem->base_bo);
      }
   }

   /* TODO defer roundtrip for external fence until the next sync operation */
   if (!wsi_mem && !is_fence_external) {
      vn_async_vkQueueSubmit(dev->instance, submit.queue, submit.batch_count,
                             submit.submit_batches, submit.fence);
      vn_queue_submission_cleanup(&submit);
      return VK_SUCCESS;
   }

   result =
      vn_call_vkQueueSubmit(dev->instance, submit.queue, submit.batch_count,
                            submit.submit_batches, submit.fence);
   if (result != VK_SUCCESS) {
      vn_queue_submission_cleanup(&submit);
      return vn_error(dev->instance, result);
   }

   if (wsi_mem) {
      /* XXX this is always false and kills the performance */
      if (dev->instance->renderer->info.has_implicit_fencing) {
         vn_renderer_submit(dev->renderer, &(const struct vn_renderer_submit){
                               .bos = &wsi_mem->base_bo,
                               .bo_count = 1,
                            });
      } else {
         static uint32_t ratelimit;
         if (ratelimit < 10) {
            vn_log(dev->instance,
                   "forcing vkQueueWaitIdle before presenting");
            ratelimit++;
         }

         vn_QueueWaitIdle(submit.queue);
      }
   }

   vn_queue_submission_cleanup(&submit);

   return VK_SUCCESS;
}

VkResult
vn_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence fence)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   struct vn_device *dev = queue->device;

   struct vn_queue_submission submit;
   VkResult result = vn_queue_submission_prepare_bind_sparse(
      &submit, _queue, bindInfoCount, pBindInfo, fence);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = vn_call_vkQueueBindSparse(
      dev->instance, submit.queue, submit.batch_count,
      submit.bind_sparse_batches, submit.fence);
   if (result != VK_SUCCESS) {
      vn_queue_submission_cleanup(&submit);
      return vn_error(dev->instance, result);
   }

   vn_queue_submission_cleanup(&submit);

   return VK_SUCCESS;
}

VkResult
vn_QueueWaitIdle(VkQueue _queue)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   VkDevice device = vn_device_to_handle(queue->device);

   VkResult result = vn_QueueSubmit(_queue, 0, NULL, queue->wait_fence);
   if (result != VK_SUCCESS)
      return result;

   result = vn_WaitForFences(device, 1, &queue->wait_fence, true, UINT64_MAX);
   vn_ResetFences(device, 1, &queue->wait_fence);

   return vn_result(queue->device->instance, result);
}
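
/* A fence or semaphore has a permanent payload and an optional temporary
 * payload.  The temporary payload, when set, takes effect for the next
 * wait and is then discarded, matching the temporary/permanent import
 * semantics of the Vulkan external sync extensions.
 */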

/* fence commands */

static void
vn_sync_payload_release(struct vn_device *dev,
                        struct vn_sync_payload *payload)
{
   payload->type = VN_SYNC_TYPE_INVALID;
}

static VkResult
vn_fence_init_payloads(struct vn_device *dev,
                       struct vn_fence *fence,
                       bool signaled,
                       const VkAllocationCallbacks *alloc)
{
   fence->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
   fence->temporary.type = VN_SYNC_TYPE_INVALID;
   fence->payload = &fence->permanent;

   return VK_SUCCESS;
}

void
vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence)
{
   struct vn_sync_payload *temp = &fence->temporary;

   vn_sync_payload_release(dev, temp);
   temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
   fence->payload = temp;
}

VkResult
vn_CreateFence(VkDevice device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_fence *fence = vk_zalloc(alloc, sizeof(*fence), VN_DEFAULT_ALIGN,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!fence)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&fence->base, VK_OBJECT_TYPE_FENCE, &dev->base);

   const struct VkExportFenceCreateInfo *export_info =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
   VkFenceCreateInfo local_create_info;
   if (export_info) {
      local_create_info = *pCreateInfo;
      local_create_info.pNext = NULL;
      pCreateInfo = &local_create_info;

      fence->is_external = !!export_info->handleTypes;
   }

   VkResult result = vn_fence_init_payloads(
      dev, fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT, alloc);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&fence->base);
      vk_free(alloc, fence);
      return vn_error(dev->instance, result);
   }

   VkFence fence_handle = vn_fence_to_handle(fence);
   vn_async_vkCreateFence(dev->instance, device, pCreateInfo, NULL,
                          &fence_handle);

   *pFence = fence_handle;

   return VK_SUCCESS;
}

void
vn_DestroyFence(VkDevice device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!fence)
      return;

   vn_async_vkDestroyFence(dev->instance, device, _fence, NULL);

   vn_sync_payload_release(dev, &fence->permanent);
   vn_sync_payload_release(dev, &fence->temporary);

   vn_object_base_fini(&fence->base);
   vk_free(alloc, fence);
}

VkResult
vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO if the fence is shared-by-ref, this needs to be synchronous */
   if (false)
      vn_call_vkResetFences(dev->instance, device, fenceCount, pFences);
   else
      vn_async_vkResetFences(dev->instance, device, fenceCount, pFences);

   for (uint32_t i = 0; i < fenceCount; i++) {
      struct vn_fence *fence = vn_fence_from_handle(pFences[i]);
      struct vn_sync_payload *perm = &fence->permanent;

      vn_sync_payload_release(dev, &fence->temporary);

      assert(perm->type == VN_SYNC_TYPE_DEVICE_ONLY);
      fence->payload = perm;
   }

   return VK_SUCCESS;
}

VkResult
vn_GetFenceStatus(VkDevice device, VkFence _fence)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   struct vn_sync_payload *payload = fence->payload;

   VkResult result;
   switch (payload->type) {
   case VN_SYNC_TYPE_DEVICE_ONLY:
      result = vn_call_vkGetFenceStatus(dev->instance, device, _fence);
      break;
   case VN_SYNC_TYPE_WSI_SIGNALED:
      result = VK_SUCCESS;
      break;
   default:
      unreachable("unexpected fence payload type");
      break;
   }

   return vn_result(dev->instance, result);
}

static VkResult
vn_find_first_signaled_fence(VkDevice device,
                             const VkFence *fences,
                             uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      VkResult result = vn_GetFenceStatus(device, fences[i]);
      if (result == VK_SUCCESS || result < 0)
         return result;
   }
   return VK_NOT_READY;
}

static VkResult
vn_remove_signaled_fences(VkDevice device, VkFence *fences, uint32_t *count)
{
   uint32_t cur = 0;
   for (uint32_t i = 0; i < *count; i++) {
      VkResult result = vn_GetFenceStatus(device, fences[i]);
      if (result != VK_SUCCESS) {
         if (result < 0)
            return result;
         fences[cur++] = fences[i];
      }
   }

   *count = cur;
   return cur ? VK_NOT_READY : VK_SUCCESS;
}
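
/* Advances one iteration of a client-side wait loop: on VK_NOT_READY it
 * either times out against abs_timeout or backs off via vn_relax() and
 * keeps polling; any other result ends the loop.
 */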
static VkResult
vn_update_sync_result(VkResult result, int64_t abs_timeout, uint32_t *iter)
{
   switch (result) {
   case VK_NOT_READY:
      if (abs_timeout != OS_TIMEOUT_INFINITE &&
          os_time_get_nano() >= abs_timeout)
         result = VK_TIMEOUT;
      else
         vn_relax(iter, "client");
      break;
   default:
      assert(result == VK_SUCCESS || result < 0);
      break;
   }

   return result;
}

VkResult
vn_WaitForFences(VkDevice device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
   VkResult result = VK_NOT_READY;
   uint32_t iter = 0;
   if (fenceCount > 1 && waitAll) {
      VkFence local_fences[8];
      VkFence *fences = local_fences;
      if (fenceCount > ARRAY_SIZE(local_fences)) {
         fences =
            vk_alloc(alloc, sizeof(*fences) * fenceCount, VN_DEFAULT_ALIGN,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!fences)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(fences, pFences, sizeof(*fences) * fenceCount);

      while (result == VK_NOT_READY) {
         result = vn_remove_signaled_fences(device, fences, &fenceCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }

      if (fences != local_fences)
         vk_free(alloc, fences);
   } else {
      while (result == VK_NOT_READY) {
         result = vn_find_first_signaled_fence(device, pFences, fenceCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }
   }

   return vn_result(dev->instance, result);
}
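
/* Creates a signaled sync_file fd by creating a binary renderer sync,
 * submitting a batch that signals it to value 1, and exporting the sync
 * as a syncobj/sync_file.  The renderer sync itself is destroyed once
 * the fd has been exported.
 */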
static VkResult
vn_create_sync_file(struct vn_device *dev, int *out_fd)
{
   struct vn_renderer_sync *sync;
   VkResult result = vn_renderer_sync_create(dev->renderer, 0,
                                             VN_RENDERER_SYNC_BINARY, &sync);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .syncs = &sync,
            .sync_values = &(const uint64_t){ 1 },
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   result = vn_renderer_submit(dev->renderer, &submit);
   if (result != VK_SUCCESS) {
      vn_renderer_sync_destroy(dev->renderer, sync);
      return vn_error(dev->instance, result);
   }

   *out_fd = vn_renderer_sync_export_syncobj(dev->renderer, sync, true);
   vn_renderer_sync_destroy(dev->renderer, sync);

   return *out_fd >= 0 ? VK_SUCCESS : VK_ERROR_TOO_MANY_OBJECTS;
}

VkResult
vn_ImportFenceFdKHR(VkDevice device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(pImportFenceFdInfo->fence);
   ASSERTED const bool sync_file = pImportFenceFdInfo->handleType ==
                                   VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   const int fd = pImportFenceFdInfo->fd;

   /* TODO update fence->is_external after we support opaque fd import */
   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (fd >= 0) {
      if (sync_wait(fd, -1))
         return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

      close(fd);
   }

   /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
   vn_fence_signal_wsi(dev, fence);

   return VK_SUCCESS;
}

VkResult
vn_GetFenceFdKHR(VkDevice device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(pGetFdInfo->fence);
   const bool sync_file =
      pGetFdInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   struct vn_sync_payload *payload = fence->payload;

   int fd = -1;
   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
      VkResult result = vn_create_sync_file(dev, &fd);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   if (sync_file) {
      vn_sync_payload_release(dev, &fence->temporary);
      fence->payload = &fence->permanent;

      /* XXX implies reset operation on the host fence */
   }

   *pFd = fd;

   return VK_SUCCESS;
}

/* semaphore commands */

static VkResult
vn_semaphore_init_payloads(struct vn_device *dev,
                           struct vn_semaphore *sem,
                           uint64_t initial_val,
                           const VkAllocationCallbacks *alloc)
{
   sem->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
   sem->temporary.type = VN_SYNC_TYPE_INVALID;
   sem->payload = &sem->permanent;

   return VK_SUCCESS;
}

static void
vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem)
{
   struct vn_sync_payload *perm = &sem->permanent;

   vn_sync_payload_release(dev, &sem->temporary);

   sem->payload = perm;
}

void
vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem)
{
   struct vn_sync_payload *temp = &sem->temporary;

   vn_sync_payload_release(dev, temp);
   temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
   sem->payload = temp;
}
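
/* The semaphore type and, for timelines, the initial value are recorded
 * driver-side, but the unmodified create info is what gets forwarded;
 * the renderer presumably backs both binary and timeline semaphores
 * directly.
 */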
VkResult
vn_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_semaphore *sem = vk_zalloc(alloc, sizeof(*sem), VN_DEFAULT_ALIGN,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&sem->base, VK_OBJECT_TYPE_SEMAPHORE, &dev->base);

   const VkSemaphoreTypeCreateInfo *type_info =
      vk_find_struct_const(pCreateInfo->pNext, SEMAPHORE_TYPE_CREATE_INFO);
   uint64_t initial_val = 0;
   if (type_info && type_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE) {
      sem->type = VK_SEMAPHORE_TYPE_TIMELINE;
      initial_val = type_info->initialValue;
   } else {
      sem->type = VK_SEMAPHORE_TYPE_BINARY;
   }

   VkResult result = vn_semaphore_init_payloads(dev, sem, initial_val, alloc);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&sem->base);
      vk_free(alloc, sem);
      return vn_error(dev->instance, result);
   }

   VkSemaphore sem_handle = vn_semaphore_to_handle(sem);
   vn_async_vkCreateSemaphore(dev->instance, device, pCreateInfo, NULL,
                              &sem_handle);

   *pSemaphore = sem_handle;

   return VK_SUCCESS;
}

void
vn_DestroySemaphore(VkDevice device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!sem)
      return;

   vn_async_vkDestroySemaphore(dev->instance, device, semaphore, NULL);

   vn_sync_payload_release(dev, &sem->permanent);
   vn_sync_payload_release(dev, &sem->temporary);

   vn_object_base_fini(&sem->base);
   vk_free(alloc, sem);
}

VkResult
vn_GetSemaphoreCounterValue(VkDevice device,
                            VkSemaphore semaphore,
                            uint64_t *pValue)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   ASSERTED struct vn_sync_payload *payload = sem->payload;

   assert(payload->type == VN_SYNC_TYPE_DEVICE_ONLY);
   return vn_call_vkGetSemaphoreCounterValue(dev->instance, device, semaphore,
                                             pValue);
}

VkResult
vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO if the semaphore is shared-by-ref, this needs to be synchronous */
   if (false)
      vn_call_vkSignalSemaphore(dev->instance, device, pSignalInfo);
   else
      vn_async_vkSignalSemaphore(dev->instance, device, pSignalInfo);

   return VK_SUCCESS;
}

static VkResult
vn_find_first_signaled_semaphore(VkDevice device,
                                 const VkSemaphore *semaphores,
                                 const uint64_t *values,
                                 uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      uint64_t val = 0;
      VkResult result =
         vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
      if (result != VK_SUCCESS || val >= values[i])
         return result;
   }
   return VK_NOT_READY;
}

static VkResult
vn_remove_signaled_semaphores(VkDevice device,
                              VkSemaphore *semaphores,
                              uint64_t *values,
                              uint32_t *count)
{
   uint32_t cur = 0;
   for (uint32_t i = 0; i < *count; i++) {
      uint64_t val = 0;
      VkResult result =
         vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
      if (result != VK_SUCCESS)
         return result;
      if (val < values[i]) {
         values[cur] = values[i];
         semaphores[cur++] = semaphores[i];
      }
   }

   *count = cur;
   return cur ? VK_NOT_READY : VK_SUCCESS;
}
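
/* Like vn_WaitForFences: the wait-all path compacts the not-yet-signaled
 * semaphores in a local (or heap) copy each iteration, while the wait-any
 * path rescans the caller's arrays for the first satisfied wait.
 */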
VkResult
vn_WaitSemaphores(VkDevice device,
                  const VkSemaphoreWaitInfo *pWaitInfo,
                  uint64_t timeout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
   VkResult result = VK_NOT_READY;
   uint32_t iter = 0;
   if (pWaitInfo->semaphoreCount > 1 &&
       !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT)) {
      uint32_t semaphore_count = pWaitInfo->semaphoreCount;
      VkSemaphore local_semaphores[8];
      uint64_t local_values[8];
      VkSemaphore *semaphores = local_semaphores;
      uint64_t *values = local_values;
      if (semaphore_count > ARRAY_SIZE(local_semaphores)) {
         semaphores = vk_alloc(
            alloc, (sizeof(*semaphores) + sizeof(*values)) * semaphore_count,
            VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!semaphores)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

         values = (uint64_t *)&semaphores[semaphore_count];
      }
      memcpy(semaphores, pWaitInfo->pSemaphores,
             sizeof(*semaphores) * semaphore_count);
      memcpy(values, pWaitInfo->pValues, sizeof(*values) * semaphore_count);

      while (result == VK_NOT_READY) {
         result = vn_remove_signaled_semaphores(device, semaphores, values,
                                                &semaphore_count);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }

      if (semaphores != local_semaphores)
         vk_free(alloc, semaphores);
   } else {
      while (result == VK_NOT_READY) {
         result = vn_find_first_signaled_semaphore(
            device, pWaitInfo->pSemaphores, pWaitInfo->pValues,
            pWaitInfo->semaphoreCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }
   }

   return vn_result(dev->instance, result);
}
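
/* Semaphore sync_file import/export relies on the experimental global
 * fencing support in the renderer.  Import waits on the fd on the driver
 * side and marks the temporary payload WSI-signaled; export materializes
 * an fd with vn_create_sync_file().
 */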
VkResult
vn_ImportSemaphoreFdKHR(
   VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem =
      vn_semaphore_from_handle(pImportSemaphoreFdInfo->semaphore);
   ASSERTED const bool sync_file =
      pImportSemaphoreFdInfo->handleType ==
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
   const int fd = pImportSemaphoreFdInfo->fd;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (fd >= 0) {
      if (sync_wait(fd, -1))
         return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

      close(fd);
   }

   /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
   vn_semaphore_signal_wsi(dev, sem);

   return VK_SUCCESS;
}

VkResult
vn_GetSemaphoreFdKHR(VkDevice device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(pGetFdInfo->semaphore);
   const bool sync_file =
      pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
   struct vn_sync_payload *payload = sem->payload;

   int fd = -1;
   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
      VkResult result = vn_create_sync_file(dev, &fd);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   if (sync_file) {
      vn_sync_payload_release(dev, &sem->temporary);
      sem->payload = &sem->permanent;

      /* XXX implies wait operation on the host semaphore */
   }

   *pFd = fd;

   return VK_SUCCESS;
}
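
/* Unlike fences and semaphores, events carry no driver-side payload;
 * every status query and host-side set/reset is a synchronous call into
 * the renderer until a coherent memory backing is available (see the
 * TODO in vn_GetEventStatus).
 */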
/* event commands */

VkResult
vn_CreateEvent(VkDevice device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_event *ev = vk_zalloc(alloc, sizeof(*ev), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!ev)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&ev->base, VK_OBJECT_TYPE_EVENT, &dev->base);

   VkEvent ev_handle = vn_event_to_handle(ev);
   vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL,
                          &ev_handle);

   *pEvent = ev_handle;

   return VK_SUCCESS;
}

void
vn_DestroyEvent(VkDevice device,
                VkEvent event,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_event *ev = vn_event_from_handle(event);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!ev)
      return;

   vn_async_vkDestroyEvent(dev->instance, device, event, NULL);

   vn_object_base_fini(&ev->base);
   vk_free(alloc, ev);
}

VkResult
vn_GetEventStatus(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO When the renderer supports it (requires a new vk extension), there
    * should be a coherent memory backing the event.
    */
   VkResult result = vn_call_vkGetEventStatus(dev->instance, device, event);

   return vn_result(dev->instance, result);
}

VkResult
vn_SetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = vn_call_vkSetEvent(dev->instance, device, event);

   return vn_result(dev->instance, result);
}

VkResult
vn_ResetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = vn_call_vkResetEvent(dev->instance, device, event);

   return vn_result(dev->instance, result);
}