~mmach/netext73/mesa-haswell

Viewing changes to src/virtio/vulkan/vn_queue.c

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i

/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_queue.h"

#include "util/libsync.h"
#include "venus-protocol/vn_protocol_driver_event.h"
#include "venus-protocol/vn_protocol_driver_fence.h"
#include "venus-protocol/vn_protocol_driver_queue.h"
#include "venus-protocol/vn_protocol_driver_semaphore.h"

#include "vn_device.h"
#include "vn_device_memory.h"
#include "vn_renderer.h"
#include "vn_wsi.h"

/* queue commands */

void
vn_GetDeviceQueue2(VkDevice device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < dev->queue_count; i++) {
      struct vn_queue *queue = &dev->queues[i];
      if (queue->family == pQueueInfo->queueFamilyIndex &&
          queue->index == pQueueInfo->queueIndex &&
          queue->flags == pQueueInfo->flags) {
         *pQueue = vn_queue_to_handle(queue);
         return;
      }
   }
   unreachable("bad queue family/index");
}

static void
vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem);
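
/*
 * Gathers the state of one vkQueueSubmit/vkQueueBindSparse call.  "temp"
 * is a single allocation holding shallow copies of the batches plus the
 * filtered wait-semaphore arrays, used when WSI-signaled waits must be
 * dropped before the submission is forwarded to the renderer.
 */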
struct vn_queue_submission {
   VkStructureType batch_type;
   VkQueue queue;
   uint32_t batch_count;
   union {
      const void *batches;
      const VkSubmitInfo *submit_batches;
      const VkBindSparseInfo *bind_sparse_batches;
   };
   VkFence fence;

   uint32_t wait_semaphore_count;
   uint32_t wait_wsi_count;

   struct {
      void *storage;

      union {
         void *batches;
         VkSubmitInfo *submit_batches;
         VkBindSparseInfo *bind_sparse_batches;
      };
      VkSemaphore *semaphores;
   } temp;
};

static void
vn_queue_submission_count_batch_semaphores(struct vn_queue_submission *submit,
                                           uint32_t batch_index)
{
   union {
      const VkSubmitInfo *submit_batch;
      const VkBindSparseInfo *bind_sparse_batch;
   } u;
   const VkSemaphore *wait_sems;
   uint32_t wait_count;
   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch = &submit->submit_batches[batch_index];
      wait_sems = u.submit_batch->pWaitSemaphores;
      wait_count = u.submit_batch->waitSemaphoreCount;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch = &submit->bind_sparse_batches[batch_index];
      wait_sems = u.bind_sparse_batch->pWaitSemaphores;
      wait_count = u.bind_sparse_batch->waitSemaphoreCount;
      break;
   default:
      unreachable("unexpected batch type");
      break;
   }

   submit->wait_semaphore_count += wait_count;
   for (uint32_t i = 0; i < wait_count; i++) {
      struct vn_semaphore *sem = vn_semaphore_from_handle(wait_sems[i]);
      const struct vn_sync_payload *payload = sem->payload;

      if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
         submit->wait_wsi_count++;
   }
}

static void
vn_queue_submission_count_semaphores(struct vn_queue_submission *submit)
{
   submit->wait_semaphore_count = 0;
   submit->wait_wsi_count = 0;

   for (uint32_t i = 0; i < submit->batch_count; i++)
      vn_queue_submission_count_batch_semaphores(submit, i);
}
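
/*
 * Packs everything vn_queue_submission_setup_batches needs into one
 * vk_alloc: the batch copies come first, with the filtered semaphore
 * array following at semaphores_offset.  Nothing is allocated when no
 * WSI-signaled wait is present.
 */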
static VkResult
vn_queue_submission_alloc_storage(struct vn_queue_submission *submit)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);
   const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;
   size_t alloc_size = 0;
   size_t semaphores_offset = 0;

   /* we want to filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
   if (submit->wait_wsi_count) {
      switch (submit->batch_type) {
      case VK_STRUCTURE_TYPE_SUBMIT_INFO:
         alloc_size += sizeof(VkSubmitInfo) * submit->batch_count;
         break;
      case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
         alloc_size += sizeof(VkBindSparseInfo) * submit->batch_count;
         break;
      default:
         unreachable("unexpected batch type");
         break;
      }

      semaphores_offset = alloc_size;
      alloc_size += sizeof(*submit->temp.semaphores) *
                    (submit->wait_semaphore_count - submit->wait_wsi_count);
   }

   if (!alloc_size) {
      submit->temp.storage = NULL;
      return VK_SUCCESS;
   }

   submit->temp.storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!submit->temp.storage)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   submit->temp.batches = submit->temp.storage;
   submit->temp.semaphores = submit->temp.storage + semaphores_offset;

   return VK_SUCCESS;
}
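
/*
 * Rewrites one batch so that its pWaitSemaphores points into
 * temp.semaphores with all VN_SYNC_TYPE_WSI_SIGNALED waits removed; the
 * dropped semaphores are reset back to their permanent payloads.  Returns
 * the number of semaphores kept so the caller can advance sem_base.
 */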
static uint32_t
vn_queue_submission_filter_batch_wsi_semaphores(
   struct vn_queue_submission *submit,
   uint32_t batch_index,
   uint32_t sem_base)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);

   union {
      VkSubmitInfo *submit_batch;
      VkBindSparseInfo *bind_sparse_batch;
   } u;
   const VkSemaphore *src_sems;
   uint32_t src_count;
   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch = &submit->temp.submit_batches[batch_index];
      src_sems = u.submit_batch->pWaitSemaphores;
      src_count = u.submit_batch->waitSemaphoreCount;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch = &submit->temp.bind_sparse_batches[batch_index];
      src_sems = u.bind_sparse_batch->pWaitSemaphores;
      src_count = u.bind_sparse_batch->waitSemaphoreCount;
      break;
   default:
      unreachable("unexpected batch type");
      break;
   }

   VkSemaphore *dst_sems = &submit->temp.semaphores[sem_base];
   uint32_t dst_count = 0;

   /* filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
   for (uint32_t i = 0; i < src_count; i++) {
      struct vn_semaphore *sem = vn_semaphore_from_handle(src_sems[i]);
      const struct vn_sync_payload *payload = sem->payload;

      if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
         vn_semaphore_reset_wsi(queue->device, sem);
      else
         dst_sems[dst_count++] = src_sems[i];
   }

   switch (submit->batch_type) {
   case VK_STRUCTURE_TYPE_SUBMIT_INFO:
      u.submit_batch->pWaitSemaphores = dst_sems;
      u.submit_batch->waitSemaphoreCount = dst_count;
      break;
   case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
      u.bind_sparse_batch->pWaitSemaphores = dst_sems;
      u.bind_sparse_batch->waitSemaphoreCount = dst_count;
      break;
   default:
      break;
   }

   return dst_count;
}

static void
vn_queue_submission_setup_batches(struct vn_queue_submission *submit)
{
   if (!submit->temp.storage)
      return;

   /* make a copy because we need to filter out WSI semaphores */
   if (submit->wait_wsi_count) {
      switch (submit->batch_type) {
      case VK_STRUCTURE_TYPE_SUBMIT_INFO:
         memcpy(submit->temp.submit_batches, submit->submit_batches,
                sizeof(submit->submit_batches[0]) * submit->batch_count);
         submit->submit_batches = submit->temp.submit_batches;
         break;
      case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
         memcpy(submit->temp.bind_sparse_batches, submit->bind_sparse_batches,
                sizeof(submit->bind_sparse_batches[0]) * submit->batch_count);
         submit->bind_sparse_batches = submit->temp.bind_sparse_batches;
         break;
      default:
         unreachable("unexpected batch type");
         break;
      }
   }

   uint32_t wait_sem_base = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      if (submit->wait_wsi_count) {
         wait_sem_base += vn_queue_submission_filter_batch_wsi_semaphores(
            submit, i, wait_sem_base);
      }
   }
}

static VkResult
vn_queue_submission_prepare_submit(struct vn_queue_submission *submit,
                                   VkQueue queue,
                                   uint32_t batch_count,
                                   const VkSubmitInfo *submit_batches,
                                   VkFence fence)
{
   submit->batch_type = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   submit->queue = queue;
   submit->batch_count = batch_count;
   submit->submit_batches = submit_batches;
   submit->fence = fence;

   vn_queue_submission_count_semaphores(submit);

   VkResult result = vn_queue_submission_alloc_storage(submit);
   if (result != VK_SUCCESS)
      return result;

   vn_queue_submission_setup_batches(submit);

   return VK_SUCCESS;
}

static VkResult
vn_queue_submission_prepare_bind_sparse(
   struct vn_queue_submission *submit,
   VkQueue queue,
   uint32_t batch_count,
   const VkBindSparseInfo *bind_sparse_batches,
   VkFence fence)
{
   submit->batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
   submit->queue = queue;
   submit->batch_count = batch_count;
   submit->bind_sparse_batches = bind_sparse_batches;
   submit->fence = fence;

   vn_queue_submission_count_semaphores(submit);

   VkResult result = vn_queue_submission_alloc_storage(submit);
   if (result != VK_SUCCESS)
      return result;

   vn_queue_submission_setup_batches(submit);

   return VK_SUCCESS;
}

static void
vn_queue_submission_cleanup(struct vn_queue_submission *submit)
{
   struct vn_queue *queue = vn_queue_from_handle(submit->queue);
   const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;

   vk_free(alloc, submit->temp.storage);
}
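
/*
 * Submissions that neither signal a WSI image (wsi_mem) nor an external
 * fence are forwarded asynchronously; otherwise a synchronous roundtrip
 * via vn_call_vkQueueSubmit is made (see the TODO below about deferring
 * the external-fence roundtrip).
 */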
VkResult
vn_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   VN_TRACE_FUNC();
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   struct vn_device *dev = queue->device;
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   const bool is_fence_external = fence && fence->is_external;

   struct vn_queue_submission submit;
   VkResult result = vn_queue_submission_prepare_submit(
      &submit, _queue, submitCount, pSubmits, _fence);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const struct vn_device_memory *wsi_mem = NULL;
   if (submit.batch_count == 1) {
      const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(
         submit.submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      if (info) {
         wsi_mem = vn_device_memory_from_handle(info->memory);
         assert(!wsi_mem->base_memory && wsi_mem->base_bo);
      }
   }

   /* TODO defer roundtrip for external fence until the next sync operation */
   if (!wsi_mem && !is_fence_external) {
      vn_async_vkQueueSubmit(dev->instance, submit.queue, submit.batch_count,
                             submit.submit_batches, submit.fence);
      vn_queue_submission_cleanup(&submit);
      return VK_SUCCESS;
   }

   result =
      vn_call_vkQueueSubmit(dev->instance, submit.queue, submit.batch_count,
                            submit.submit_batches, submit.fence);
   if (result != VK_SUCCESS) {
      vn_queue_submission_cleanup(&submit);
      return vn_error(dev->instance, result);
   }

   if (wsi_mem) {
      /* XXX this is always false and kills the performance */
      if (dev->instance->renderer->info.has_implicit_fencing) {
         vn_renderer_submit(dev->renderer, &(const struct vn_renderer_submit){
                                              .bos = &wsi_mem->base_bo,
                                              .bo_count = 1,
                                           });
      } else {
         if (VN_DEBUG(WSI)) {
            static uint32_t ratelimit;
            if (ratelimit < 10) {
               vn_log(dev->instance,
                      "forcing vkQueueWaitIdle before presenting");
               ratelimit++;
            }
         }

         vn_QueueWaitIdle(submit.queue);
      }
   }

   vn_queue_submission_cleanup(&submit);

   return VK_SUCCESS;
}

VkResult
vn_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence fence)
{
   VN_TRACE_FUNC();
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   struct vn_device *dev = queue->device;

   struct vn_queue_submission submit;
   VkResult result = vn_queue_submission_prepare_bind_sparse(
      &submit, _queue, bindInfoCount, pBindInfo, fence);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = vn_call_vkQueueBindSparse(
      dev->instance, submit.queue, submit.batch_count,
      submit.bind_sparse_batches, submit.fence);
   if (result != VK_SUCCESS) {
      vn_queue_submission_cleanup(&submit);
      return vn_error(dev->instance, result);
   }

   vn_queue_submission_cleanup(&submit);

   return VK_SUCCESS;
}
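
/*
 * Emulated with the queue's internal wait_fence: an empty submission
 * signals the fence, and waiting on that fence makes the queue idle from
 * the driver's point of view.
 */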
VkResult
vn_QueueWaitIdle(VkQueue _queue)
{
   VN_TRACE_FUNC();
   struct vn_queue *queue = vn_queue_from_handle(_queue);
   VkDevice device = vn_device_to_handle(queue->device);

   VkResult result = vn_QueueSubmit(_queue, 0, NULL, queue->wait_fence);
   if (result != VK_SUCCESS)
      return result;

   result = vn_WaitForFences(device, 1, &queue->wait_fence, true, UINT64_MAX);
   vn_ResetFences(device, 1, &queue->wait_fence);

   return vn_result(queue->device->instance, result);
}

/* fence commands */

static void
vn_sync_payload_release(struct vn_device *dev,
                        struct vn_sync_payload *payload)
{
   payload->type = VN_SYNC_TYPE_INVALID;
}

static VkResult
vn_fence_init_payloads(struct vn_device *dev,
                       struct vn_fence *fence,
                       bool signaled,
                       const VkAllocationCallbacks *alloc)
{
   fence->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
   fence->temporary.type = VN_SYNC_TYPE_INVALID;
   fence->payload = &fence->permanent;

   return VK_SUCCESS;
}

void
vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence)
{
   struct vn_sync_payload *temp = &fence->temporary;

   vn_sync_payload_release(dev, temp);
   temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
   fence->payload = temp;
}

VkResult
vn_CreateFence(VkDevice device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_fence *fence = vk_zalloc(alloc, sizeof(*fence), VN_DEFAULT_ALIGN,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!fence)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&fence->base, VK_OBJECT_TYPE_FENCE, &dev->base);

   const struct VkExportFenceCreateInfo *export_info =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
   VkFenceCreateInfo local_create_info;
   if (export_info) {
      local_create_info = *pCreateInfo;
      local_create_info.pNext = NULL;
      pCreateInfo = &local_create_info;

      fence->is_external = !!export_info->handleTypes;
   }

   VkResult result = vn_fence_init_payloads(
      dev, fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT, alloc);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&fence->base);
      vk_free(alloc, fence);
      return vn_error(dev->instance, result);
   }

   VkFence fence_handle = vn_fence_to_handle(fence);
   vn_async_vkCreateFence(dev->instance, device, pCreateInfo, NULL,
                          &fence_handle);

   *pFence = fence_handle;

   return VK_SUCCESS;
}

void
vn_DestroyFence(VkDevice device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!fence)
      return;

   vn_async_vkDestroyFence(dev->instance, device, _fence, NULL);

   vn_sync_payload_release(dev, &fence->permanent);
   vn_sync_payload_release(dev, &fence->temporary);

   vn_object_base_fini(&fence->base);
   vk_free(alloc, fence);
}

VkResult
vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO if the fence is shared-by-ref, this needs to be synchronous */
   if (false)
      vn_call_vkResetFences(dev->instance, device, fenceCount, pFences);
   else
      vn_async_vkResetFences(dev->instance, device, fenceCount, pFences);

   for (uint32_t i = 0; i < fenceCount; i++) {
      struct vn_fence *fence = vn_fence_from_handle(pFences[i]);
      struct vn_sync_payload *perm = &fence->permanent;

      vn_sync_payload_release(dev, &fence->temporary);

      assert(perm->type == VN_SYNC_TYPE_DEVICE_ONLY);
      fence->payload = perm;
   }

   return VK_SUCCESS;
}

VkResult
vn_GetFenceStatus(VkDevice device, VkFence _fence)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(_fence);
   struct vn_sync_payload *payload = fence->payload;

   VkResult result;
   switch (payload->type) {
   case VN_SYNC_TYPE_DEVICE_ONLY:
      result = vn_call_vkGetFenceStatus(dev->instance, device, _fence);
      break;
   case VN_SYNC_TYPE_WSI_SIGNALED:
      result = VK_SUCCESS;
      break;
   default:
      unreachable("unexpected fence payload type");
      break;
   }

   return vn_result(dev->instance, result);
}

static VkResult
vn_find_first_signaled_fence(VkDevice device,
                             const VkFence *fences,
                             uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      VkResult result = vn_GetFenceStatus(device, fences[i]);
      if (result == VK_SUCCESS || result < 0)
         return result;
   }
   return VK_NOT_READY;
}

static VkResult
vn_remove_signaled_fences(VkDevice device, VkFence *fences, uint32_t *count)
{
   uint32_t cur = 0;
   for (uint32_t i = 0; i < *count; i++) {
      VkResult result = vn_GetFenceStatus(device, fences[i]);
      if (result != VK_SUCCESS) {
         if (result < 0)
            return result;
         fences[cur++] = fences[i];
      }
   }

   *count = cur;
   return cur ? VK_NOT_READY : VK_SUCCESS;
}
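
/*
 * Shared polling helper for the wait loops below: keeps the result at
 * VK_NOT_READY while backing off via vn_relax, and turns it into
 * VK_TIMEOUT once abs_timeout has passed.
 */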
static VkResult
vn_update_sync_result(VkResult result, int64_t abs_timeout, uint32_t *iter)
{
   switch (result) {
   case VK_NOT_READY:
      if (abs_timeout != OS_TIMEOUT_INFINITE &&
          os_time_get_nano() >= abs_timeout)
         result = VK_TIMEOUT;
      else
         vn_relax(iter, "client");
      break;
   default:
      assert(result == VK_SUCCESS || result < 0);
      break;
   }

   return result;
}
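
/*
 * Waits are client-side polling loops.  The wait-all path copies the
 * fences (a small stack array covers up to 8) so signaled ones can be
 * removed between iterations; the wait-any path just scans for the first
 * signaled fence.
 */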
VkResult
vn_WaitForFences(VkDevice device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
   VkResult result = VK_NOT_READY;
   uint32_t iter = 0;
   if (fenceCount > 1 && waitAll) {
      VkFence local_fences[8];
      VkFence *fences = local_fences;
      if (fenceCount > ARRAY_SIZE(local_fences)) {
         fences =
            vk_alloc(alloc, sizeof(*fences) * fenceCount, VN_DEFAULT_ALIGN,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!fences)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(fences, pFences, sizeof(*fences) * fenceCount);

      while (result == VK_NOT_READY) {
         result = vn_remove_signaled_fences(device, fences, &fenceCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }

      if (fences != local_fences)
         vk_free(alloc, fences);
   } else {
      while (result == VK_NOT_READY) {
         result = vn_find_first_signaled_fence(device, pFences, fenceCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }
   }

   return vn_result(dev->instance, result);
}
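
/*
 * Builds a sync_file fd by creating a binary renderer sync, submitting a
 * signal on it, and exporting it as a syncobj.  The renderer sync itself
 * is destroyed once the fd has been extracted.
 */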
static VkResult
vn_create_sync_file(struct vn_device *dev, int *out_fd)
{
   struct vn_renderer_sync *sync;
   VkResult result = vn_renderer_sync_create(dev->renderer, 0,
                                             VN_RENDERER_SYNC_BINARY, &sync);
   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .syncs = &sync,
            .sync_values = &(const uint64_t){ 1 },
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   result = vn_renderer_submit(dev->renderer, &submit);
   if (result != VK_SUCCESS) {
      vn_renderer_sync_destroy(dev->renderer, sync);
      return vn_error(dev->instance, result);
   }

   *out_fd = vn_renderer_sync_export_syncobj(dev->renderer, sync, true);
   vn_renderer_sync_destroy(dev->renderer, sync);

   return *out_fd >= 0 ? VK_SUCCESS : VK_ERROR_TOO_MANY_OBJECTS;
}
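
/*
 * Sync-file import is emulated: the fd is waited on synchronously and
 * closed, after which the fence is marked signaled by reusing the
 * VN_SYNC_TYPE_WSI_SIGNALED temporary payload.
 */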
VkResult
vn_ImportFenceFdKHR(VkDevice device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(pImportFenceFdInfo->fence);
   ASSERTED const bool sync_file = pImportFenceFdInfo->handleType ==
                                   VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   const int fd = pImportFenceFdInfo->fd;

   /* TODO update fence->is_external after we support opaque fd import */
   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (fd >= 0) {
      if (sync_wait(fd, -1))
         return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

      close(fd);
   }

   /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
   vn_fence_signal_wsi(dev, fence);

   return VK_SUCCESS;
}
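
/*
 * Sync-file export: a device-only payload gets a freshly signaled
 * sync_file from vn_create_sync_file, and the export drops the temporary
 * payload so the permanent one is active again (see the XXX note below
 * about the implied reset on the host fence).
 */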
VkResult
vn_GetFenceFdKHR(VkDevice device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_fence *fence = vn_fence_from_handle(pGetFdInfo->fence);
   const bool sync_file =
      pGetFdInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   struct vn_sync_payload *payload = fence->payload;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   int fd = -1;
   if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
      VkResult result = vn_create_sync_file(dev, &fd);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   if (sync_file) {
      vn_sync_payload_release(dev, &fence->temporary);
      fence->payload = &fence->permanent;

      /* XXX implies reset operation on the host fence */
   }

   *pFd = fd;
   return VK_SUCCESS;
}

/* semaphore commands */

static VkResult
vn_semaphore_init_payloads(struct vn_device *dev,
                           struct vn_semaphore *sem,
                           uint64_t initial_val,
                           const VkAllocationCallbacks *alloc)
{
   sem->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
   sem->temporary.type = VN_SYNC_TYPE_INVALID;
   sem->payload = &sem->permanent;

   return VK_SUCCESS;
}

static void
vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem)
{
   struct vn_sync_payload *perm = &sem->permanent;

   vn_sync_payload_release(dev, &sem->temporary);

   sem->payload = perm;
}

void
vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem)
{
   struct vn_sync_payload *temp = &sem->temporary;

   vn_sync_payload_release(dev, temp);
   temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
   sem->payload = temp;
}

VkResult
vn_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_semaphore *sem = vk_zalloc(alloc, sizeof(*sem), VN_DEFAULT_ALIGN,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&sem->base, VK_OBJECT_TYPE_SEMAPHORE, &dev->base);

   const VkSemaphoreTypeCreateInfo *type_info =
      vk_find_struct_const(pCreateInfo->pNext, SEMAPHORE_TYPE_CREATE_INFO);
   uint64_t initial_val = 0;
   if (type_info && type_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE) {
      sem->type = VK_SEMAPHORE_TYPE_TIMELINE;
      initial_val = type_info->initialValue;
   } else {
      sem->type = VK_SEMAPHORE_TYPE_BINARY;
   }

   VkResult result = vn_semaphore_init_payloads(dev, sem, initial_val, alloc);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&sem->base);
      vk_free(alloc, sem);
      return vn_error(dev->instance, result);
   }

   VkSemaphore sem_handle = vn_semaphore_to_handle(sem);
   vn_async_vkCreateSemaphore(dev->instance, device, pCreateInfo, NULL,
                              &sem_handle);

   *pSemaphore = sem_handle;

   return VK_SUCCESS;
}

void
vn_DestroySemaphore(VkDevice device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!sem)
      return;

   vn_async_vkDestroySemaphore(dev->instance, device, semaphore, NULL);

   vn_sync_payload_release(dev, &sem->permanent);
   vn_sync_payload_release(dev, &sem->temporary);

   vn_object_base_fini(&sem->base);
   vk_free(alloc, sem);
}

VkResult
vn_GetSemaphoreCounterValue(VkDevice device,
                            VkSemaphore semaphore,
                            uint64_t *pValue)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
   ASSERTED struct vn_sync_payload *payload = sem->payload;

   assert(payload->type == VN_SYNC_TYPE_DEVICE_ONLY);
   return vn_call_vkGetSemaphoreCounterValue(dev->instance, device, semaphore,
                                             pValue);
}

VkResult
vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO if the semaphore is shared-by-ref, this needs to be synchronous */
   if (false)
      vn_call_vkSignalSemaphore(dev->instance, device, pSignalInfo);
   else
      vn_async_vkSignalSemaphore(dev->instance, device, pSignalInfo);

   return VK_SUCCESS;
}

static VkResult
vn_find_first_signaled_semaphore(VkDevice device,
                                 const VkSemaphore *semaphores,
                                 const uint64_t *values,
                                 uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      uint64_t val = 0;
      VkResult result =
         vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
      if (result != VK_SUCCESS || val >= values[i])
         return result;
   }
   return VK_NOT_READY;
}

static VkResult
vn_remove_signaled_semaphores(VkDevice device,
                              VkSemaphore *semaphores,
                              uint64_t *values,
                              uint32_t *count)
{
   uint32_t cur = 0;
   for (uint32_t i = 0; i < *count; i++) {
      uint64_t val = 0;
      VkResult result =
         vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
      if (result != VK_SUCCESS)
         return result;
      if (val < values[i])
         semaphores[cur++] = semaphores[i];
   }

   *count = cur;
   return cur ? VK_NOT_READY : VK_SUCCESS;
}
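
/*
 * Same polling structure as vn_WaitForFences, with the semaphore values
 * packed into the same allocation right after the handles.
 */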
VkResult
vn_WaitSemaphores(VkDevice device,
                  const VkSemaphoreWaitInfo *pWaitInfo,
                  uint64_t timeout)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
   VkResult result = VK_NOT_READY;
   uint32_t iter = 0;
   if (pWaitInfo->semaphoreCount > 1 &&
       !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT)) {
      uint32_t semaphore_count = pWaitInfo->semaphoreCount;
      VkSemaphore local_semaphores[8];
      uint64_t local_values[8];
      VkSemaphore *semaphores = local_semaphores;
      uint64_t *values = local_values;
      if (semaphore_count > ARRAY_SIZE(local_semaphores)) {
         semaphores = vk_alloc(
            alloc, (sizeof(*semaphores) + sizeof(*values)) * semaphore_count,
            VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!semaphores)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

         values = (uint64_t *)&semaphores[semaphore_count];
      }
      memcpy(semaphores, pWaitInfo->pSemaphores,
             sizeof(*semaphores) * semaphore_count);
      memcpy(values, pWaitInfo->pValues, sizeof(*values) * semaphore_count);

      while (result == VK_NOT_READY) {
         result = vn_remove_signaled_semaphores(device, semaphores, values,
                                                &semaphore_count);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }

      if (semaphores != local_semaphores)
         vk_free(alloc, semaphores);
   } else {
      while (result == VK_NOT_READY) {
         result = vn_find_first_signaled_semaphore(
            device, pWaitInfo->pSemaphores, pWaitInfo->pValues,
            pWaitInfo->semaphoreCount);
         result = vn_update_sync_result(result, abs_timeout, &iter);
      }
   }

   return vn_result(dev->instance, result);
}

VkResult
vn_ImportSemaphoreFdKHR(
   VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem =
      vn_semaphore_from_handle(pImportSemaphoreFdInfo->semaphore);
   ASSERTED const bool sync_file =
      pImportSemaphoreFdInfo->handleType ==
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
   const int fd = pImportSemaphoreFdInfo->fd;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   if (fd >= 0) {
      if (sync_wait(fd, -1))
         return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

      close(fd);
   }

   /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
   vn_semaphore_signal_wsi(dev, sem);

   return VK_SUCCESS;
}

VkResult
vn_GetSemaphoreFdKHR(VkDevice device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_semaphore *sem = vn_semaphore_from_handle(pGetFdInfo->semaphore);
   const bool sync_file =
      pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
   struct vn_sync_payload *payload = sem->payload;

   assert(dev->instance->experimental.globalFencing);
   assert(sync_file);
   int fd = -1;
   if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
      VkResult result = vn_create_sync_file(dev, &fd);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   if (sync_file) {
      vn_sync_payload_release(dev, &sem->temporary);
      sem->payload = &sem->permanent;

      /* XXX implies wait operation on the host semaphore */
   }

   *pFd = fd;
   return VK_SUCCESS;
}

/* event commands */

VkResult
vn_CreateEvent(VkDevice device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_event *ev = vk_zalloc(alloc, sizeof(*ev), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!ev)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&ev->base, VK_OBJECT_TYPE_EVENT, &dev->base);

   VkEvent ev_handle = vn_event_to_handle(ev);
   vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL,
                          &ev_handle);

   *pEvent = ev_handle;

   return VK_SUCCESS;
}

void
vn_DestroyEvent(VkDevice device,
                VkEvent event,
                const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_event *ev = vn_event_from_handle(event);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!ev)
      return;

   vn_async_vkDestroyEvent(dev->instance, device, event, NULL);

   vn_object_base_fini(&ev->base);
   vk_free(alloc, ev);
}

VkResult
vn_GetEventStatus(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO When the renderer supports it (requires a new vk extension), there
    * should be a coherent memory backing the event.
    */
   VkResult result = vn_call_vkGetEventStatus(dev->instance, device, event);

   return vn_result(dev->instance, result);
}

VkResult
vn_SetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = vn_call_vkSetEvent(dev->instance, device, event);

   return vn_result(dev->instance, result);
}

VkResult
vn_ResetEvent(VkDevice device, VkEvent event)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = vn_call_vkResetEvent(dev->instance, device, event);

   return vn_result(dev->instance, result);
}