#define __STDC_LIMIT_MACROS

#include <cassert>
#include <cstdio>
#include <cstring>
#include <iostream>

#include "base/basictypes.h"
#include "VulkanContext.h"

#ifdef _MSC_VER
#pragma warning(disable:4996)
#endif

#include "ext/glslang/SPIRV/GlslangToSpv.h"

static const char *validationLayers[] = {
	"VK_LAYER_LUNARG_standard_validation",
	"VK_LAYER_GOOGLE_threading",
	"VK_LAYER_LUNARG_draw_state",
	"VK_LAYER_LUNARG_image",
	"VK_LAYER_LUNARG_mem_tracker",
	"VK_LAYER_LUNARG_object_tracker",
	"VK_LAYER_LUNARG_param_checker",
};

static VkBool32 CheckLayers(const std::vector<layer_properties> &layer_props, const std::vector<const char *> &layer_names);

VulkanContext::VulkanContext(const char *app_name, int app_ver, uint32_t flags)
	: gfx_queue_(VK_NULL_HANDLE),
#ifdef _WIN32
	connection(nullptr),
	window(nullptr),
#elif defined(ANDROID)
	native_window(nullptr),
#endif
	graphics_queue_family_index_(-1),
	surface_(VK_NULL_HANDLE),
	instance_(VK_NULL_HANDLE),
	width_(0),
	height_(0),
	flags_(flags),
	swapchain_format(VK_FORMAT_UNDEFINED),
	swapchainImageCount(0),
	swap_chain_(VK_NULL_HANDLE),
	cmd_pool_(VK_NULL_HANDLE),
	curFrame_(0) {
	if (!VulkanLoad()) {  // Assumed loader entry point, cf. VulkanLoadInstanceFunctions below.
		init_error_ = "Failed to load Vulkan driver library";
		return;
	}
	// List extensions to try to enable.
	instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
#ifdef _WIN32
	instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#elif defined(ANDROID)
	instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
#endif
	device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);

	if (flags & VULKAN_FLAG_VALIDATE) {
		for (size_t i = 0; i < ARRAY_SIZE(validationLayers); i++) {
			instance_layer_names.push_back(validationLayers[i]);
			device_layer_names.push_back(validationLayers[i]);
		}
		instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
	}
	VkApplicationInfo app_info = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
	app_info.pApplicationName = app_name;
	app_info.applicationVersion = app_ver;
	app_info.pEngineName = app_name;
	// Let's increment this when we make major engine/context changes.
	app_info.engineVersion = 1;
	app_info.apiVersion = VK_API_VERSION_1_0;

	VkInstanceCreateInfo inst_info = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO };
	inst_info.pApplicationInfo = &app_info;
	inst_info.enabledLayerCount = (uint32_t)instance_layer_names.size();
	inst_info.ppEnabledLayerNames = instance_layer_names.size() ? instance_layer_names.data() : NULL;
	inst_info.enabledExtensionCount = (uint32_t)instance_extension_names.size();
	inst_info.ppEnabledExtensionNames = instance_extension_names.size() ? instance_extension_names.data() : NULL;
	VkResult res = vkCreateInstance(&inst_info, NULL, &instance_);
	if (res != VK_SUCCESS) {
		if (res == VK_ERROR_LAYER_NOT_PRESENT) {
			WLOG("Validation on but layers not available - dropping layers");
			// Drop the validation layers and try again.
			instance_layer_names.clear();
			device_layer_names.clear();
			inst_info.enabledLayerCount = 0;
			inst_info.ppEnabledLayerNames = NULL;
			res = vkCreateInstance(&inst_info, NULL, &instance_);
			if (res != VK_SUCCESS)
				ELOG("Failed to create instance even without validation: %d", res);
		} else {
			ELOG("Failed to create instance: %d", res);
		}
	}

	if (res != VK_SUCCESS) {
		init_error_ = "Failed to create Vulkan instance";
		return;
	}
	VulkanLoadInstanceFunctions(instance_);

	uint32_t gpu_count = 1;
	res = vkEnumeratePhysicalDevices(instance_, &gpu_count, NULL);
	physical_devices_.resize(gpu_count);
	res = vkEnumeratePhysicalDevices(instance_, &gpu_count, physical_devices_.data());
	if (res != VK_SUCCESS) {
		init_error_ = "Failed to enumerate physical devices";
		return;
	}

	InitGlobalLayerProperties();
	InitGlobalExtensionProperties();

	if (!CheckLayers(instance_layer_properties, instance_layer_names)) {
		ELOG("CheckLayers failed");
		init_error_ = "Failed to validate instance layers";
		return;
	}

	InitDeviceLayerProperties();
	if (!CheckLayers(device_layer_properties, device_layer_names)) {
		ELOG("CheckLayers failed (2)");
		init_error_ = "Failed to validate device layers";
		return;
	}
}

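// Usage sketch (hypothetical; the call order is inferred from this file): construct
// the context, create the device, create an OS surface, then build the rest.
//
//   VulkanContext ctx("MyApp", 1, VULKAN_FLAG_VALIDATE);
//   ctx.CreateDevice(0);
// #ifdef _WIN32
//   ctx.InitSurfaceWin32(hInstance, hWnd);
// #endif
//   ctx.InitObjects(true);  // true = include a depth/stencil buffer.
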
VulkanContext::~VulkanContext() {
	vkDestroyInstance(instance_, NULL);
}

void TransitionToPresent(VkCommandBuffer cmd, VkImage image) {
	VkImageMemoryBarrier prePresentBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
	prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
	prePresentBarrier.dstAccessMask = 0;
	prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
	prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	prePresentBarrier.subresourceRange.baseMipLevel = 0;
	prePresentBarrier.subresourceRange.levelCount = 1;
	prePresentBarrier.subresourceRange.baseArrayLayer = 0;
	prePresentBarrier.subresourceRange.layerCount = 1;
	prePresentBarrier.image = image;
	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
		0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier);
}

void TransitionFromPresent(VkCommandBuffer cmd, VkImage image) {
	VkImageMemoryBarrier prePresentBarrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
	prePresentBarrier.srcAccessMask = 0;
	prePresentBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
	prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
	prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	prePresentBarrier.subresourceRange.baseMipLevel = 0;
	prePresentBarrier.subresourceRange.levelCount = 1;
	prePresentBarrier.subresourceRange.baseArrayLayer = 0;
	prePresentBarrier.subresourceRange.layerCount = 1;
	prePresentBarrier.image = image;
	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
		0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier);
}

VkCommandBuffer VulkanContext::GetInitCommandBuffer() {
	FrameData *frame = &frame_[curFrame_];
	if (!frame->hasInitCommands) {
		VulkanBeginCommandBuffer(frame->cmdInit);
		frame->hasInitCommands = true;
	}
	return frame_[curFrame_].cmdInit;
}

void VulkanContext::QueueBeforeSurfaceRender(VkCommandBuffer cmd) {
	cmdQueue_.push_back(cmd);
}

VkCommandBuffer VulkanContext::BeginSurfaceRenderPass(VkClearValue clear_values[2]) {
	FrameData *frame = &frame_[curFrame_];

	// Get the index of the next available swapchain image, and a semaphore to block command buffer execution on.
	// Now, I wonder if we should do this early in the frame or late? Right now we do it early, which should be fine.
	VkResult res = vkAcquireNextImageKHR(device_, swap_chain_, UINT64_MAX, acquireSemaphore, VK_NULL_HANDLE, &current_buffer);

	// TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR return values.
	assert(res == VK_SUCCESS);

	// Make sure the very last command buffer from the frame before the previous has been fully executed.
	WaitAndResetFence(frame->fence);

	// Process pending deletes.
	frame->deleteList.PerformDeletes(device_);

	VkCommandBufferBeginInfo begin = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
	begin.flags = 0;
	begin.pInheritanceInfo = nullptr;
	res = vkBeginCommandBuffer(frame->cmdBuf, &begin);

	TransitionFromPresent(frame->cmdBuf, swapChainBuffers[current_buffer].image);

	VkRenderPassBeginInfo rp_begin = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO };
	rp_begin.renderPass = surface_render_pass_;
	rp_begin.framebuffer = framebuffers_[current_buffer];
	rp_begin.renderArea.offset.x = 0;
	rp_begin.renderArea.offset.y = 0;
	rp_begin.renderArea.extent.width = width_;
	rp_begin.renderArea.extent.height = height_;
	rp_begin.clearValueCount = 2;
	rp_begin.pClearValues = clear_values;

	// We don't really need to record this at this point in time, but at some point we'll start
	// this pass anyway, so we might as well do it now (although you could imagine getting away with
	// just a stretchblt and not even starting a final render pass if there's nothing to overlay...
	// hm. Uncommon on mobile, though).
	vkCmdBeginRenderPass(frame->cmdBuf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
	return frame->cmdBuf;
}

void VulkanContext::EndSurfaceRenderPass() {
	FrameData *frame = &frame_[curFrame_];
	vkCmdEndRenderPass(frame->cmdBuf);

	TransitionToPresent(frame->cmdBuf, swapChainBuffers[current_buffer].image);

	VkResult res = vkEndCommandBuffer(frame->cmdBuf);
	assert(res == VK_SUCCESS);

	// So the sequence will be: cmdInit, [cmdQueue_], frame->cmdBuf.
	// This way we bunch up all the initialization needed for the frame, render to
	// other buffers before the backbuffer, and then finally render to the backbuffer.

	std::vector<VkCommandBuffer> cmdBufs;
	if (frame->hasInitCommands) {
		vkEndCommandBuffer(frame->cmdInit);
		cmdBufs.push_back(frame->cmdInit);
		frame->hasInitCommands = false;
	}
	for (auto cmd : cmdQueue_) {
		cmdBufs.push_back(cmd);
	}
	cmdQueue_.clear();
	cmdBufs.push_back(frame->cmdBuf);

	VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
	submit_info.waitSemaphoreCount = 1;
	submit_info.pWaitSemaphores = &acquireSemaphore;
	VkPipelineStageFlags waitStage[1] = { VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT };
	submit_info.pWaitDstStageMask = waitStage;
	submit_info.commandBufferCount = (uint32_t)cmdBufs.size();
	submit_info.pCommandBuffers = cmdBufs.data();
	submit_info.signalSemaphoreCount = 0;
	submit_info.pSignalSemaphores = NULL;
	res = vkQueueSubmit(gfx_queue_, 1, &submit_info, frame->fence);
	assert(res == VK_SUCCESS);

	VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
	present.swapchainCount = 1;
	present.pSwapchains = &swap_chain_;
	present.pImageIndices = &current_buffer;
	present.pWaitSemaphores = NULL;
	present.waitSemaphoreCount = 0;
	present.pResults = NULL;

	res = vkQueuePresentKHR(gfx_queue_, &present);
	// TODO: Deal with VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR here too.

	frame->deleteList.Take(globalDeleteList_);
	curFrame_ ^= 1;
}

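// Per-frame usage sketch (hypothetical) of the render pass API above:
//
//   VkClearValue clears[2] = {};
//   clears[0].color.float32[3] = 1.0f;              // Opaque black.
//   clears[1].depthStencil = { 1.0f, 0 };
//   VkCommandBuffer cmd = vulkan->BeginSurfaceRenderPass(clears);
//   // ... record draw commands into cmd ...
//   vulkan->EndSurfaceRenderPass();                 // Submits and presents.
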
void VulkanContext::WaitUntilQueueIdle() {
	// Should almost never be used.
	vkQueueWaitIdle(gfx_queue_);
}

bool VulkanContext::MemoryTypeFromProperties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex) {
	// Search memtypes to find the first index with the required properties.
	for (uint32_t i = 0; i < 32; i++) {
		if ((typeBits & 1) == 1) {
			// Type is available, does it match user properties?
			if ((memory_properties.memoryTypes[i].propertyFlags & requirements_mask) == requirements_mask) {
				*typeIndex = i;
				return true;
			}
		}
		typeBits >>= 1;
	}
	// No memory types matched, return failure.
	return false;
}

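// Usage sketch (hypothetical) for allocating host-visible buffer memory:
//
//   VkMemoryRequirements reqs;
//   vkGetBufferMemoryRequirements(device_, buffer, &reqs);
//   VkMemoryAllocateInfo alloc = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
//   alloc.allocationSize = reqs.size;
//   if (!MemoryTypeFromProperties(reqs.memoryTypeBits,
//       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
//       &alloc.memoryTypeIndex)) {
//     // No suitable memory type - handle the error.
//   }
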
void VulkanBeginCommandBuffer(VkCommandBuffer cmd) {
	VkResult U_ASSERT_ONLY res;
	VkCommandBufferBeginInfo cmd_buf_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
	cmd_buf_info.pInheritanceInfo = nullptr;
	cmd_buf_info.flags = 0;
	res = vkBeginCommandBuffer(cmd, &cmd_buf_info);
	assert(res == VK_SUCCESS);
}

void VulkanContext::InitObjects(bool depthPresent) {
	InitQueue();
	InitCommandPool();

	VkCommandBufferAllocateInfo cmd_alloc = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
	cmd_alloc.commandPool = cmd_pool_;
	cmd_alloc.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
	cmd_alloc.commandBufferCount = 4;

	VkCommandBuffer cmdBuf[4];
	VkResult res = vkAllocateCommandBuffers(device_, &cmd_alloc, cmdBuf);
	assert(res == VK_SUCCESS);

	frame_[0].cmdBuf = cmdBuf[0];
	frame_[0].cmdInit = cmdBuf[1];
	frame_[0].fence = CreateFence(true);  // So it can be instantly waited on.
	frame_[1].cmdBuf = cmdBuf[2];
	frame_[1].cmdInit = cmdBuf[3];
	frame_[1].fence = CreateFence(true);

	VkCommandBuffer cmd = GetInitCommandBuffer();
	InitSwapchain(cmd);
	InitDepthStencilBuffer(cmd);

	InitSurfaceRenderPass(depthPresent, true);
	InitFramebuffers(depthPresent);

	// The init command buffer will be executed as part of the first frame.
}

void VulkanContext::DestroyObjects() {
	VkCommandBuffer cmdBuf[4] = { frame_[0].cmdBuf, frame_[0].cmdInit, frame_[1].cmdBuf, frame_[1].cmdInit };

	vkFreeCommandBuffers(device_, cmd_pool_, sizeof(cmdBuf) / sizeof(cmdBuf[0]), cmdBuf);
	vkDestroyFence(device_, frame_[0].fence, nullptr);
	vkDestroyFence(device_, frame_[1].fence, nullptr);

	DestroyFramebuffers();
	DestroySurfaceRenderPass();
	DestroyDepthStencilBuffer();
	DestroySwapChain();
	DestroyCommandPool();

	// If there happen to be any pending deletes, now is a good time.
	Delete().PerformDeletes(device_);

	vkDestroySurfaceKHR(instance_, surface_, nullptr);
	surface_ = VK_NULL_HANDLE;
}

VkResult VulkanContext::InitLayerExtensionProperties(layer_properties &layer_props) {
	VkExtensionProperties *instance_extensions;
	uint32_t instance_extension_count;
	VkResult res;
	char *layer_name = NULL;

	layer_name = layer_props.properties.layerName;

	do {
		res = vkEnumerateInstanceExtensionProperties(layer_name, &instance_extension_count, NULL);
		if (res)
			return res;

		if (instance_extension_count == 0) {
			return VK_SUCCESS;
		}

		layer_props.extensions.resize(instance_extension_count);
		instance_extensions = layer_props.extensions.data();
		res = vkEnumerateInstanceExtensionProperties(
			layer_name,
			&instance_extension_count,
			instance_extensions);
	} while (res == VK_INCOMPLETE);

	return res;
}

VkResult VulkanContext::InitGlobalExtensionProperties() {
	uint32_t instance_extension_count;
	VkResult res;

	do {
		res = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, NULL);
		if (res)
			return res;

		if (instance_extension_count == 0) {
			return VK_SUCCESS;
		}

		instance_extension_properties.resize(instance_extension_count);
		res = vkEnumerateInstanceExtensionProperties(
			NULL,
			&instance_extension_count,
			instance_extension_properties.data());
	} while (res == VK_INCOMPLETE);

	return res;
}

VkResult VulkanContext::InitGlobalLayerProperties() {
	uint32_t instance_layer_count;
	VkLayerProperties *vk_props = NULL;
	VkResult res;

	/*
	 * It's possible, though very rare, that the number of
	 * instance layers could change. For example, installing something
	 * could include new layers that the loader would pick up
	 * between the initial query for the count and the
	 * request for VkLayerProperties. The loader indicates that
	 * by returning a VK_INCOMPLETE status and will update
	 * the count parameter.
	 * The count parameter will be updated with the number of
	 * entries loaded into the data pointer - in case the number
	 * of layers went down or is smaller than the size given.
	 */
	do {
		res = vkEnumerateInstanceLayerProperties(&instance_layer_count, NULL);
		if (res)
			return res;

		if (instance_layer_count == 0) {
			return VK_SUCCESS;
		}

		vk_props = (VkLayerProperties *)realloc(vk_props, instance_layer_count * sizeof(VkLayerProperties));
		res = vkEnumerateInstanceLayerProperties(&instance_layer_count, vk_props);
	} while (res == VK_INCOMPLETE);

	// Now gather the extension list for each instance layer.
	for (uint32_t i = 0; i < instance_layer_count; i++) {
		layer_properties layer_props;
		layer_props.properties = vk_props[i];
		res = InitLayerExtensionProperties(layer_props);
		if (res)
			return res;
		instance_layer_properties.push_back(layer_props);
	}

	free(vk_props);
	return res;
}

VkResult VulkanContext::InitDeviceExtensionProperties(layer_properties &layer_props) {
	VkExtensionProperties *device_extensions;
	uint32_t device_extension_count;
	VkResult res;
	char *layer_name = NULL;

	layer_name = layer_props.properties.layerName;

	do {
		res = vkEnumerateDeviceExtensionProperties(
			physical_devices_[0],
			layer_name, &device_extension_count, NULL);
		if (res)
			return res;

		if (device_extension_count == 0) {
			return VK_SUCCESS;
		}

		layer_props.extensions.resize(device_extension_count);
		device_extensions = layer_props.extensions.data();
		res = vkEnumerateDeviceExtensionProperties(
			physical_devices_[0],
			layer_name,
			&device_extension_count,
			device_extensions);
	} while (res == VK_INCOMPLETE);

	return res;
}

/*
 * TODO: function description here
 */
VkResult VulkanContext::InitDeviceLayerProperties() {
	uint32_t device_layer_count;
	VkLayerProperties *vk_props = NULL;
	VkResult res;

	/*
	 * It's possible, though very rare, that the number of
	 * instance layers could change. For example, installing something
	 * could include new layers that the loader would pick up
	 * between the initial query for the count and the
	 * request for VkLayerProperties. The loader indicates that
	 * by returning a VK_INCOMPLETE status and will update
	 * the count parameter.
	 * The count parameter will be updated with the number of
	 * entries loaded into the data pointer - in case the number
	 * of layers went down or is smaller than the size given.
	 */
	do {
		res = vkEnumerateDeviceLayerProperties(physical_devices_[0], &device_layer_count, NULL);
		if (res)
			return res;

		if (device_layer_count == 0) {
			return VK_SUCCESS;
		}

		vk_props = (VkLayerProperties *)realloc(vk_props, device_layer_count * sizeof(VkLayerProperties));
		res = vkEnumerateDeviceLayerProperties(physical_devices_[0], &device_layer_count, vk_props);
	} while (res == VK_INCOMPLETE);

	/*
	 * Now gather the extension list for each device layer.
	 */
	for (uint32_t i = 0; i < device_layer_count; i++) {
		layer_properties layer_props;
		layer_props.properties = vk_props[i];
		res = InitDeviceExtensionProperties(layer_props);
		if (res)
			return res;
		device_layer_properties.push_back(layer_props);
	}

	free(vk_props);
	return res;
}

/*
 * Return 1 (true) if all layer names specified in check_names
 * can be found in the given layer properties.
 */
static VkBool32 CheckLayers(const std::vector<layer_properties> &layer_props, const std::vector<const char *> &layer_names) {
	uint32_t check_count = (uint32_t)layer_names.size();
	uint32_t layer_count = (uint32_t)layer_props.size();
	for (uint32_t i = 0; i < check_count; i++) {
		VkBool32 found = 0;
		for (uint32_t j = 0; j < layer_count; j++) {
			if (!strcmp(layer_names[i], layer_props[j].properties.layerName)) {
				found = 1;
			}
		}
		if (!found) {
			std::cout << "Cannot find layer: " << layer_names[i] << std::endl;
			return 0;
		}
	}
	return 1;
}

VkResult VulkanContext::CreateDevice(int physical_device) {
	VkResult res;
	if (!init_error_.empty()) {
		ELOG("Vulkan init failed: %s", init_error_.c_str());
		return VK_ERROR_INITIALIZATION_FAILED;
	}

	vkGetPhysicalDeviceQueueFamilyProperties(physical_devices_[0], &queue_count, nullptr);
	assert(queue_count >= 1);

	queue_props.resize(queue_count);
	vkGetPhysicalDeviceQueueFamilyProperties(physical_devices_[0], &queue_count, queue_props.data());
	assert(queue_count >= 1);

	VkDeviceQueueCreateInfo queue_info = { VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO };
	float queue_priorities[1] = { 1.0f };
	queue_info.queueCount = 1;
	queue_info.pQueuePriorities = queue_priorities;
	bool found = false;
	for (int i = 0; i < (int)queue_count; i++) {
		if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
			queue_info.queueFamilyIndex = i;
			found = true;
			break;
		}
	}
	assert(found);
	assert(queue_count >= 1);

	// Detect preferred depth/stencil formats, in this order.
	static const VkFormat depthStencilFormats[] = {
		VK_FORMAT_D24_UNORM_S8_UINT,
		VK_FORMAT_D32_SFLOAT_S8_UINT,
		VK_FORMAT_D16_UNORM_S8_UINT,
	};
	deviceInfo_.preferredDepthStencilFormat = VK_FORMAT_UNDEFINED;
	for (size_t i = 0; i < ARRAY_SIZE(depthStencilFormats); i++) {
		VkFormatProperties props;
		vkGetPhysicalDeviceFormatProperties(physical_devices_[0], depthStencilFormats[i], &props);
		if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			deviceInfo_.preferredDepthStencilFormat = depthStencilFormats[i];
			break;
		}
	}

	// This is as good a place as any to do this.
	vkGetPhysicalDeviceMemoryProperties(physical_devices_[0], &memory_properties);
	vkGetPhysicalDeviceProperties(physical_devices_[0], &gpu_props);

	vkGetPhysicalDeviceFeatures(physical_devices_[0], &featuresAvailable_);
	memset(&featuresEnabled_, 0, sizeof(featuresEnabled_));

	// Enable a few safe ones if they are available.
	if (featuresAvailable_.dualSrcBlend) {
		featuresEnabled_.dualSrcBlend = true;
	}
	if (featuresAvailable_.largePoints) {
		featuresEnabled_.largePoints = true;
	}
	if (featuresAvailable_.wideLines) {
		featuresEnabled_.wideLines = true;
	}
	if (featuresAvailable_.geometryShader) {
		featuresEnabled_.geometryShader = true;
	}
	if (featuresAvailable_.logicOp) {
		featuresEnabled_.logicOp = true;
	}
	if (featuresAvailable_.depthClamp) {
		featuresEnabled_.depthClamp = true;
	}
	if (featuresAvailable_.depthBounds) {
		featuresEnabled_.depthBounds = true;
	}
	if (featuresAvailable_.samplerAnisotropy) {
		featuresEnabled_.samplerAnisotropy = true;
	}

	VkDeviceCreateInfo device_info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
	device_info.queueCreateInfoCount = 1;
	device_info.pQueueCreateInfos = &queue_info;
	device_info.enabledLayerCount = (uint32_t)device_layer_names.size();
	device_info.ppEnabledLayerNames =
		device_info.enabledLayerCount ? device_layer_names.data() : NULL;
	device_info.enabledExtensionCount = (uint32_t)device_extension_names.size();
	device_info.ppEnabledExtensionNames =
		device_info.enabledExtensionCount ? device_extension_names.data() : NULL;
	device_info.pEnabledFeatures = &featuresEnabled_;

	res = vkCreateDevice(physical_devices_[0], &device_info, NULL, &device_);
	if (res != VK_SUCCESS) {
		init_error_ = "Unable to create Vulkan device";
		ELOG("Unable to create Vulkan device");
	} else {
		VulkanLoadDeviceFunctions(device_);
	}

	return res;
}

VkResult VulkanContext::InitDebugMsgCallback(PFN_vkDebugReportCallbackEXT dbgFunc, int bits, void *userdata) {
	VkResult res;
	VkDebugReportCallbackEXT msg_callback;

	if (!(flags_ & VULKAN_FLAG_VALIDATE)) {
		WLOG("Not registering debug report callback - extension not enabled!");
		return VK_SUCCESS;
	}
	ILOG("Registering debug report callback");

	VkDebugReportCallbackCreateInfoEXT cb = {};
	cb.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
	cb.pNext = nullptr;
	cb.flags = bits;
	cb.pfnCallback = dbgFunc;
	cb.pUserData = userdata;
	res = vkCreateDebugReportCallbackEXT(instance_, &cb, nullptr, &msg_callback);
	switch (res) {
	case VK_SUCCESS:
		msg_callbacks.push_back(msg_callback);
		break;
	case VK_ERROR_OUT_OF_HOST_MEMORY:
		return VK_ERROR_INITIALIZATION_FAILED;
	default:
		return VK_ERROR_INITIALIZATION_FAILED;
	}
	return res;
}

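// A minimal debug report callback matching PFN_vkDebugReportCallbackEXT, as one
// might pass to InitDebugMsgCallback (sketch; the names are hypothetical):
//
//   VKAPI_ATTR VkBool32 VKAPI_CALL DebugCallback(
//       VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType,
//       uint64_t object, size_t location, int32_t messageCode,
//       const char *pLayerPrefix, const char *pMessage, void *pUserData) {
//     printf("[%s] %s\n", pLayerPrefix, pMessage);
//     return VK_FALSE;  // Don't abort the call that triggered the report.
//   }
//
//   vulkan->InitDebugMsgCallback(DebugCallback,
//       VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, nullptr);
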
void VulkanContext::DestroyDebugMsgCallback() {
	while (msg_callbacks.size() > 0) {
		vkDestroyDebugReportCallbackEXT(instance_, msg_callbacks.back(), nullptr);
		msg_callbacks.pop_back();
	}
}

void VulkanContext::InitDepthStencilBuffer(VkCommandBuffer cmd) {
	VkResult U_ASSERT_ONLY res;
	bool U_ASSERT_ONLY pass;

	const VkFormat depth_format = deviceInfo_.preferredDepthStencilFormat;
	int aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
	VkImageCreateInfo image_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
	image_info.imageType = VK_IMAGE_TYPE_2D;
	image_info.format = depth_format;
	image_info.extent.width = width_;
	image_info.extent.height = height_;
	image_info.extent.depth = 1;
	image_info.mipLevels = 1;
	image_info.arrayLayers = 1;
	image_info.samples = VK_SAMPLE_COUNT_1_BIT;
	image_info.queueFamilyIndexCount = 0;
	image_info.pQueueFamilyIndices = NULL;
	image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	image_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
	image_info.flags = 0;

	VkMemoryAllocateInfo mem_alloc = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
	mem_alloc.allocationSize = 0;
	mem_alloc.memoryTypeIndex = 0;

	VkMemoryRequirements mem_reqs;

	depth.format = depth_format;

	res = vkCreateImage(device_, &image_info, NULL, &depth.image);
	assert(res == VK_SUCCESS);

	vkGetImageMemoryRequirements(device_, depth.image, &mem_reqs);

	mem_alloc.allocationSize = mem_reqs.size;
	/* Use the memory properties to determine the type of memory required */
	pass = MemoryTypeFromProperties(mem_reqs.memoryTypeBits,
		0, /* No requirements */
		&mem_alloc.memoryTypeIndex);
	assert(pass);

	res = vkAllocateMemory(device_, &mem_alloc, NULL, &depth.mem);
	assert(res == VK_SUCCESS);

	res = vkBindImageMemory(device_, depth.image, depth.mem, 0);
	assert(res == VK_SUCCESS);

	TransitionImageLayout(cmd, depth.image,
		aspectMask,
		VK_IMAGE_LAYOUT_UNDEFINED,
		VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);

	VkImageViewCreateInfo depth_view_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
	depth_view_info.image = depth.image;
	depth_view_info.format = depth_format;
	depth_view_info.components.r = VK_COMPONENT_SWIZZLE_R;
	depth_view_info.components.g = VK_COMPONENT_SWIZZLE_G;
	depth_view_info.components.b = VK_COMPONENT_SWIZZLE_B;
	depth_view_info.components.a = VK_COMPONENT_SWIZZLE_A;
	depth_view_info.subresourceRange.aspectMask = aspectMask;
	depth_view_info.subresourceRange.baseMipLevel = 0;
	depth_view_info.subresourceRange.levelCount = 1;
	depth_view_info.subresourceRange.baseArrayLayer = 0;
	depth_view_info.subresourceRange.layerCount = 1;
	depth_view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
	depth_view_info.flags = 0;

	res = vkCreateImageView(device_, &depth_view_info, NULL, &depth.view);
	assert(res == VK_SUCCESS);
}

#ifdef _WIN32

void VulkanContext::InitSurfaceWin32(HINSTANCE conn, HWND wnd) {
	connection = conn;
	window = wnd;

	ReinitSurfaceWin32();
}

void VulkanContext::ReinitSurfaceWin32() {
	if (surface_ != VK_NULL_HANDLE) {
		vkDestroySurfaceKHR(instance_, surface_, nullptr);
		surface_ = VK_NULL_HANDLE;
	}

	RECT rc;
	GetClientRect(window, &rc);
	width_ = rc.right - rc.left;
	height_ = rc.bottom - rc.top;

	VkResult U_ASSERT_ONLY res;

	VkWin32SurfaceCreateInfoKHR win32 = { VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR };
	win32.flags = 0;
	win32.hwnd = window;
	win32.hinstance = connection;
	res = vkCreateWin32SurfaceKHR(instance_, &win32, nullptr, &surface_);

	assert(res == VK_SUCCESS);
}

#elif defined(ANDROID)

void VulkanContext::InitSurfaceAndroid(ANativeWindow *wnd, int width, int height) {
	native_window = wnd;

	ReinitSurfaceAndroid(width, height);
}

void VulkanContext::ReinitSurfaceAndroid(int width, int height) {
	if (surface_ != VK_NULL_HANDLE) {
		vkDestroySurfaceKHR(instance_, surface_, nullptr);
		surface_ = VK_NULL_HANDLE;
	}

	VkResult U_ASSERT_ONLY res;

	VkAndroidSurfaceCreateInfoKHR android = { VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR };
	android.window = native_window;
	res = vkCreateAndroidSurfaceKHR(instance_, &android, nullptr, &surface_);
	assert(res == VK_SUCCESS);

	width_ = width;
	height_ = height;
}

#endif

void VulkanContext::InitQueue() {
	// Iterate over each queue to learn whether it supports presenting:
	VkBool32 *supportsPresent = new VkBool32[queue_count];
	for (uint32_t i = 0; i < queue_count; i++) {
		vkGetPhysicalDeviceSurfaceSupportKHR(physical_devices_[0], i, surface_, &supportsPresent[i]);
	}

	// Search for a graphics queue and a present queue in the array of queue
	// families; try to find one that supports both.
	uint32_t graphicsQueueNodeIndex = UINT32_MAX;
	uint32_t presentQueueNodeIndex = UINT32_MAX;
	for (uint32_t i = 0; i < queue_count; i++) {
		if ((queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
			if (graphicsQueueNodeIndex == UINT32_MAX) {
				graphicsQueueNodeIndex = i;
			}
			if (supportsPresent[i] == VK_TRUE) {
				graphicsQueueNodeIndex = i;
				presentQueueNodeIndex = i;
				break;
			}
		}
	}
	if (presentQueueNodeIndex == UINT32_MAX) {
		// If we didn't find a queue that supports both graphics and present,
		// find a separate present queue.
		for (uint32_t i = 0; i < queue_count; ++i) {
			if (supportsPresent[i] == VK_TRUE) {
				presentQueueNodeIndex = i;
				break;
			}
		}
	}
	delete[] supportsPresent;

	// Generate an error if we could not find both a graphics and a present queue.
	if (graphicsQueueNodeIndex == UINT32_MAX || presentQueueNodeIndex == UINT32_MAX) {
		std::cout << "Could not find a graphics and a present queue" << std::endl;
		return;
	}

	graphics_queue_family_index_ = graphicsQueueNodeIndex;

	// Get the list of VkFormats that are supported:
	uint32_t formatCount;
	VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_devices_[0], surface_, &formatCount, NULL);
	assert(res == VK_SUCCESS);
	VkSurfaceFormatKHR *surfFormats = new VkSurfaceFormatKHR[formatCount];
	res = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_devices_[0], surface_, &formatCount, surfFormats);
	assert(res == VK_SUCCESS);
	// If the format list includes just one entry of VK_FORMAT_UNDEFINED,
	// the surface has no preferred format. Otherwise, at least one
	// supported format will be returned.
	if (formatCount == 0 || (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED)) {
		ILOG("swapchain_format: Falling back to B8G8R8A8_UNORM");
		swapchain_format = VK_FORMAT_B8G8R8A8_UNORM;
	} else {
		swapchain_format = VK_FORMAT_UNDEFINED;
		for (uint32_t i = 0; i < formatCount; ++i) {
			if (surfFormats[i].colorSpace != VK_COLORSPACE_SRGB_NONLINEAR_KHR) {
				continue;
			}
			if (surfFormats[i].format == VK_FORMAT_B8G8R8A8_UNORM || surfFormats[i].format == VK_FORMAT_R8G8B8A8_UNORM) {
				swapchain_format = surfFormats[i].format;
				break;
			}
		}
		if (swapchain_format == VK_FORMAT_UNDEFINED) {
			// Okay, take the first one then.
			swapchain_format = surfFormats[0].format;
		}
	}
	ILOG("swapchain_format: %d (/%d)", swapchain_format, formatCount);
	delete[] surfFormats;

	vkGetDeviceQueue(device_, graphics_queue_family_index_, 0, &gfx_queue_);
	ILOG("gfx_queue_: %p", gfx_queue_);

	VkSemaphoreCreateInfo acquireSemaphoreCreateInfo = { VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
	acquireSemaphoreCreateInfo.flags = 0;

	res = vkCreateSemaphore(device_,
		&acquireSemaphoreCreateInfo,
		NULL,
		&acquireSemaphore);
	assert(res == VK_SUCCESS);
}

void VulkanContext::InitSwapchain(VkCommandBuffer cmd) {
	VkResult U_ASSERT_ONLY res;
	VkSurfaceCapabilitiesKHR surfCapabilities;

	res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_devices_[0], surface_, &surfCapabilities);
	assert(res == VK_SUCCESS);

	uint32_t presentModeCount;
	res = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_devices_[0], surface_, &presentModeCount, NULL);
	assert(res == VK_SUCCESS);
	VkPresentModeKHR *presentModes = new VkPresentModeKHR[presentModeCount];
	assert(presentModes);
	res = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_devices_[0], surface_, &presentModeCount, presentModes);
	assert(res == VK_SUCCESS);

	VkExtent2D swapChainExtent;
	// width and height are either both -1, or both not -1.
	if (surfCapabilities.currentExtent.width == (uint32_t)-1) {
		// If the surface size is undefined, the size is set to
		// the size of the images requested.
		ILOG("initSwapchain: %dx%d", width_, height_);
		swapChainExtent.width = width_;
		swapChainExtent.height = height_;
	} else {
		// If the surface size is defined, the swap chain size must match.
		swapChainExtent = surfCapabilities.currentExtent;
	}

	// TODO: Find a better way to specify the prioritized present mode while being able
	// to fall back in a sensible way.
	VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_MAX_ENUM_KHR;
	for (size_t i = 0; i < presentModeCount; i++) {
		ILOG("Supported present mode: %d", presentModes[i]);
	}
	for (size_t i = 0; i < presentModeCount; i++) {
		if (swapchainPresentMode == VK_PRESENT_MODE_MAX_ENUM_KHR) {
			// Default to the first present mode from the list.
			swapchainPresentMode = presentModes[i];
		}
		if ((flags_ & VULKAN_FLAG_PRESENT_MAILBOX) && presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) {
			swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR;
			break;
		}
		if ((flags_ & VULKAN_FLAG_PRESENT_FIFO_RELAXED) && presentModes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
			swapchainPresentMode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
			break;
		}
		if ((flags_ & VULKAN_FLAG_PRESENT_IMMEDIATE) && presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR) {
			swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
			break;
		}
	}
	// For now, force FIFO (the only mode guaranteed to be supported), overriding the selection above.
	swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;

	ILOG("Chosen present mode: %d", swapchainPresentMode);
	delete[] presentModes;

	// Determine the number of VkImages to use in the swap chain (we desire to
	// own only 1 image at a time, besides the images being displayed and
	// queued for display):
	uint32_t desiredNumberOfSwapChainImages = surfCapabilities.minImageCount + 1;
	ILOG("numSwapChainImages: %d", desiredNumberOfSwapChainImages);
	if ((surfCapabilities.maxImageCount > 0) &&
		(desiredNumberOfSwapChainImages > surfCapabilities.maxImageCount))
	{
		// Application must settle for fewer images than desired:
		desiredNumberOfSwapChainImages = surfCapabilities.maxImageCount;
	}

	VkSurfaceTransformFlagBitsKHR preTransform;
	if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) {
		preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
	} else {
		preTransform = surfCapabilities.currentTransform;
	}

	VkSwapchainCreateInfoKHR swap_chain_info = { VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR };
	swap_chain_info.surface = surface_;
	swap_chain_info.minImageCount = desiredNumberOfSwapChainImages;
	swap_chain_info.imageFormat = swapchain_format;
	swap_chain_info.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
	swap_chain_info.imageExtent.width = swapChainExtent.width;
	swap_chain_info.imageExtent.height = swapChainExtent.height;
	swap_chain_info.preTransform = preTransform;
	swap_chain_info.imageArrayLayers = 1;
	swap_chain_info.presentMode = swapchainPresentMode;
	swap_chain_info.oldSwapchain = VK_NULL_HANDLE;
	swap_chain_info.clipped = true;
	swap_chain_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
	swap_chain_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
	swap_chain_info.queueFamilyIndexCount = 0;
	swap_chain_info.pQueueFamilyIndices = NULL;
	swap_chain_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

	res = vkCreateSwapchainKHR(device_, &swap_chain_info, NULL, &swap_chain_);
	assert(res == VK_SUCCESS);

	res = vkGetSwapchainImagesKHR(device_, swap_chain_,
		&swapchainImageCount, NULL);
	assert(res == VK_SUCCESS);

	VkImage *swapchainImages = (VkImage *)malloc(swapchainImageCount * sizeof(VkImage));
	assert(swapchainImages);
	res = vkGetSwapchainImagesKHR(device_, swap_chain_, &swapchainImageCount, swapchainImages);
	assert(res == VK_SUCCESS);

	for (uint32_t i = 0; i < swapchainImageCount; i++) {
		swap_chain_buffer sc_buffer;

		VkImageViewCreateInfo color_image_view = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
		color_image_view.format = swapchain_format;
		color_image_view.components.r = VK_COMPONENT_SWIZZLE_R;
		color_image_view.components.g = VK_COMPONENT_SWIZZLE_G;
		color_image_view.components.b = VK_COMPONENT_SWIZZLE_B;
		color_image_view.components.a = VK_COMPONENT_SWIZZLE_A;
		color_image_view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		color_image_view.subresourceRange.baseMipLevel = 0;
		color_image_view.subresourceRange.levelCount = 1;
		color_image_view.subresourceRange.baseArrayLayer = 0;
		color_image_view.subresourceRange.layerCount = 1;
		color_image_view.viewType = VK_IMAGE_VIEW_TYPE_2D;
		color_image_view.flags = 0;

		sc_buffer.image = swapchainImages[i];

		// TODO: Pre-set them to PRESENT_SRC_KHR, as the first thing we do after acquiring
		// an image to render to will be to transition it away from that.
		TransitionImageLayout(cmd, sc_buffer.image,
			VK_IMAGE_ASPECT_COLOR_BIT,
			VK_IMAGE_LAYOUT_UNDEFINED,
			VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);

		color_image_view.image = sc_buffer.image;

		res = vkCreateImageView(device_,
			&color_image_view, NULL, &sc_buffer.view);
		swapChainBuffers.push_back(sc_buffer);
		assert(res == VK_SUCCESS);
	}

	free(swapchainImages);
}

void VulkanContext::InitSurfaceRenderPass(bool include_depth, bool clear) {
	VkResult U_ASSERT_ONLY res;

	/* Need attachments for render target and depth buffer */
	VkAttachmentDescription attachments[2];
	attachments[0].format = swapchain_format;
	attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
	attachments[0].loadOp = clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD;
	attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
	attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
	attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
	attachments[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	attachments[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	attachments[0].flags = 0;

	if (include_depth) {
		attachments[1].format = depth.format;
		attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;
		attachments[1].loadOp = clear ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD;
		attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
		attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
		attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
		attachments[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
		attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
		attachments[1].flags = 0;
	}

	VkAttachmentReference color_reference = {};
	color_reference.attachment = 0;
	color_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

	VkAttachmentReference depth_reference = {};
	depth_reference.attachment = 1;
	depth_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

	VkSubpassDescription subpass = {};
	subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
	subpass.inputAttachmentCount = 0;
	subpass.pInputAttachments = NULL;
	subpass.colorAttachmentCount = 1;
	subpass.pColorAttachments = &color_reference;
	subpass.pResolveAttachments = NULL;
	subpass.pDepthStencilAttachment = include_depth ? &depth_reference : NULL;
	subpass.preserveAttachmentCount = 0;
	subpass.pPreserveAttachments = NULL;

	VkRenderPassCreateInfo rp_info = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO };
	rp_info.pNext = NULL;
	rp_info.attachmentCount = include_depth ? 2 : 1;
	rp_info.pAttachments = attachments;
	rp_info.subpassCount = 1;
	rp_info.pSubpasses = &subpass;
	rp_info.dependencyCount = 0;
	rp_info.pDependencies = NULL;

	res = vkCreateRenderPass(device_, &rp_info, NULL, &surface_render_pass_);
	assert(res == VK_SUCCESS);
}

void VulkanContext::InitFramebuffers(bool include_depth) {
	VkResult U_ASSERT_ONLY res;
	VkImageView attachments[2];
	attachments[1] = depth.view;

	ILOG("InitFramebuffers: %dx%d", width_, height_);
	VkFramebufferCreateInfo fb_info = { VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO };
	fb_info.renderPass = surface_render_pass_;
	fb_info.attachmentCount = include_depth ? 2 : 1;
	fb_info.pAttachments = attachments;
	fb_info.width = width_;
	fb_info.height = height_;
	fb_info.layers = 1;

	framebuffers_.resize(swapchainImageCount);

	for (uint32_t i = 0; i < swapchainImageCount; i++) {
		attachments[0] = swapChainBuffers[i].view;
		res = vkCreateFramebuffer(device_, &fb_info, nullptr, &framebuffers_[i]);
		assert(res == VK_SUCCESS);
	}
}

void VulkanContext::InitCommandPool() {
	VkResult U_ASSERT_ONLY res;

	VkCommandPoolCreateInfo cmd_pool_info = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
	cmd_pool_info.queueFamilyIndex = graphics_queue_family_index_;
	cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;

	res = vkCreateCommandPool(device_, &cmd_pool_info, NULL, &cmd_pool_);
	assert(res == VK_SUCCESS);
}

VkFence VulkanContext::CreateFence(bool presignalled) {
	VkFence fence;
	VkFenceCreateInfo fenceInfo = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
	fenceInfo.flags = presignalled ? VK_FENCE_CREATE_SIGNALED_BIT : 0;
	vkCreateFence(device_, &fenceInfo, NULL, &fence);
	return fence;
}

void VulkanContext::WaitAndResetFence(VkFence fence) {
	vkWaitForFences(device_, 1, &fence, true, UINT64_MAX);
	vkResetFences(device_, 1, &fence);
}

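// Sketch of the fence pattern used by the frame data above: a pre-signalled fence
// can be waited on immediately, so the first frame needs no special case.
//
//   VkFence fence = CreateFence(true);   // Starts signalled.
//   WaitAndResetFence(fence);            // Returns immediately the first time.
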
void VulkanContext::DestroyCommandPool() {
	vkDestroyCommandPool(device_, cmd_pool_, NULL);
	cmd_pool_ = VK_NULL_HANDLE;
}

void VulkanContext::DestroyDepthStencilBuffer() {
	vkDestroyImageView(device_, depth.view, NULL);
	vkDestroyImage(device_, depth.image, NULL);
	vkFreeMemory(device_, depth.mem, NULL);

	depth.view = VK_NULL_HANDLE;
	depth.image = VK_NULL_HANDLE;
	depth.mem = VK_NULL_HANDLE;
}

void VulkanContext::DestroySwapChain() {
	for (uint32_t i = 0; i < swapchainImageCount; i++) {
		vkDestroyImageView(device_, swapChainBuffers[i].view, NULL);
	}
	vkDestroySwapchainKHR(device_, swap_chain_, NULL);
	swap_chain_ = VK_NULL_HANDLE;
	swapChainBuffers.clear();
	vkDestroySemaphore(device_, acquireSemaphore, NULL);
}

void VulkanContext::DestroyFramebuffers() {
	for (uint32_t i = 0; i < framebuffers_.size(); i++) {
		vkDestroyFramebuffer(device_, framebuffers_[i], NULL);
	}
	framebuffers_.clear();
}

void VulkanContext::DestroySurfaceRenderPass() {
	vkDestroyRenderPass(device_, surface_render_pass_, NULL);
	surface_render_pass_ = VK_NULL_HANDLE;
}

void VulkanContext::DestroyDevice() {
	vkDestroyDevice(device_, nullptr);
	device_ = nullptr;
}

VkPipelineCache VulkanContext::CreatePipelineCache() {
	VkPipelineCache cache;
	VkPipelineCacheCreateInfo pc = { VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO };
	pc.pInitialData = nullptr;
	pc.initialDataSize = 0;
	pc.flags = 0;
	VkResult res = vkCreatePipelineCache(device_, &pc, nullptr, &cache);
	assert(VK_SUCCESS == res);
	return cache;
}

bool VulkanContext::CreateShaderModule(const std::vector<uint32_t> &spirv, VkShaderModule *shaderModule) {
	VkShaderModuleCreateInfo sm = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO };
	sm.pCode = spirv.data();
	sm.codeSize = spirv.size() * sizeof(uint32_t);
	sm.flags = 0;
	VkResult result = vkCreateShaderModule(device_, &sm, NULL, shaderModule);
	if (result != VK_SUCCESS) {
		return false;
	}
	return true;
}

void TransitionImageLayout(VkCommandBuffer cmd, VkImage image, VkImageAspectFlags aspectMask, VkImageLayout old_image_layout, VkImageLayout new_image_layout) {
	VkImageMemoryBarrier image_memory_barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
	image_memory_barrier.srcAccessMask = 0;
	image_memory_barrier.dstAccessMask = 0;
	image_memory_barrier.oldLayout = old_image_layout;
	image_memory_barrier.newLayout = new_image_layout;
	image_memory_barrier.image = image;
	image_memory_barrier.subresourceRange.aspectMask = aspectMask;
	image_memory_barrier.subresourceRange.baseMipLevel = 0;
	image_memory_barrier.subresourceRange.levelCount = 1;
	image_memory_barrier.subresourceRange.layerCount = 1;
	if (old_image_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
		image_memory_barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
	}
	if (old_image_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		image_memory_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
		if (old_image_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
			image_memory_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
		}
		image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
		/* Make sure anything that was copying from this image has completed */
		image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
		/* Make sure any Copy or CPU writes to the image are flushed */
		if (old_image_layout != VK_IMAGE_LAYOUT_UNDEFINED) {
			image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
		}
		image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
		image_memory_barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
	}

	VkPipelineStageFlags src_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
	VkPipelineStageFlags dest_stages = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

	vkCmdPipelineBarrier(cmd, src_stages, dest_stages, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}

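// Usage sketch (hypothetical): preparing a freshly uploaded texture for sampling.
//
//   TransitionImageLayout(cmd, texImage, VK_IMAGE_ASPECT_COLOR_BIT,
//       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//       VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
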
void init_resources(TBuiltInResource &Resources) {
	Resources.maxLights = 32;
	Resources.maxClipPlanes = 6;
	Resources.maxTextureUnits = 32;
	Resources.maxTextureCoords = 32;
	Resources.maxVertexAttribs = 64;
	Resources.maxVertexUniformComponents = 4096;
	Resources.maxVaryingFloats = 64;
	Resources.maxVertexTextureImageUnits = 32;
	Resources.maxCombinedTextureImageUnits = 80;
	Resources.maxTextureImageUnits = 32;
	Resources.maxFragmentUniformComponents = 4096;
	Resources.maxDrawBuffers = 32;
	Resources.maxVertexUniformVectors = 128;
	Resources.maxVaryingVectors = 8;
	Resources.maxFragmentUniformVectors = 16;
	Resources.maxVertexOutputVectors = 16;
	Resources.maxFragmentInputVectors = 15;
	Resources.minProgramTexelOffset = -8;
	Resources.maxProgramTexelOffset = 7;
	Resources.maxClipDistances = 8;
	Resources.maxComputeWorkGroupCountX = 65535;
	Resources.maxComputeWorkGroupCountY = 65535;
	Resources.maxComputeWorkGroupCountZ = 65535;
	Resources.maxComputeWorkGroupSizeX = 1024;
	Resources.maxComputeWorkGroupSizeY = 1024;
	Resources.maxComputeWorkGroupSizeZ = 64;
	Resources.maxComputeUniformComponents = 1024;
	Resources.maxComputeTextureImageUnits = 16;
	Resources.maxComputeImageUniforms = 8;
	Resources.maxComputeAtomicCounters = 8;
	Resources.maxComputeAtomicCounterBuffers = 1;
	Resources.maxVaryingComponents = 60;
	Resources.maxVertexOutputComponents = 64;
	Resources.maxGeometryInputComponents = 64;
	Resources.maxGeometryOutputComponents = 128;
	Resources.maxFragmentInputComponents = 128;
	Resources.maxImageUnits = 8;
	Resources.maxCombinedImageUnitsAndFragmentOutputs = 8;
	Resources.maxCombinedShaderOutputResources = 8;
	Resources.maxImageSamples = 0;
	Resources.maxVertexImageUniforms = 0;
	Resources.maxTessControlImageUniforms = 0;
	Resources.maxTessEvaluationImageUniforms = 0;
	Resources.maxGeometryImageUniforms = 0;
	Resources.maxFragmentImageUniforms = 8;
	Resources.maxCombinedImageUniforms = 8;
	Resources.maxGeometryTextureImageUnits = 16;
	Resources.maxGeometryOutputVertices = 256;
	Resources.maxGeometryTotalOutputComponents = 1024;
	Resources.maxGeometryUniformComponents = 1024;
	Resources.maxGeometryVaryingComponents = 64;
	Resources.maxTessControlInputComponents = 128;
	Resources.maxTessControlOutputComponents = 128;
	Resources.maxTessControlTextureImageUnits = 16;
	Resources.maxTessControlUniformComponents = 1024;
	Resources.maxTessControlTotalOutputComponents = 4096;
	Resources.maxTessEvaluationInputComponents = 128;
	Resources.maxTessEvaluationOutputComponents = 128;
	Resources.maxTessEvaluationTextureImageUnits = 16;
	Resources.maxTessEvaluationUniformComponents = 1024;
	Resources.maxTessPatchComponents = 120;
	Resources.maxPatchVertices = 32;
	Resources.maxTessGenLevel = 64;
	Resources.maxViewports = 16;
	Resources.maxVertexAtomicCounters = 0;
	Resources.maxTessControlAtomicCounters = 0;
	Resources.maxTessEvaluationAtomicCounters = 0;
	Resources.maxGeometryAtomicCounters = 0;
	Resources.maxFragmentAtomicCounters = 8;
	Resources.maxCombinedAtomicCounters = 8;
	Resources.maxAtomicCounterBindings = 1;
	Resources.maxVertexAtomicCounterBuffers = 0;
	Resources.maxTessControlAtomicCounterBuffers = 0;
	Resources.maxTessEvaluationAtomicCounterBuffers = 0;
	Resources.maxGeometryAtomicCounterBuffers = 0;
	Resources.maxFragmentAtomicCounterBuffers = 1;
	Resources.maxCombinedAtomicCounterBuffers = 1;
	Resources.maxAtomicCounterBufferSize = 16384;
	Resources.maxTransformFeedbackBuffers = 4;
	Resources.maxTransformFeedbackInterleavedComponents = 64;
	Resources.maxCullDistances = 8;
	Resources.maxCombinedClipAndCullDistances = 8;
	Resources.maxSamples = 4;
	Resources.limits.nonInductiveForLoops = 1;
	Resources.limits.whileLoops = 1;
	Resources.limits.doWhileLoops = 1;
	Resources.limits.generalUniformIndexing = 1;
	Resources.limits.generalAttributeMatrixVectorIndexing = 1;
	Resources.limits.generalVaryingIndexing = 1;
	Resources.limits.generalSamplerIndexing = 1;
	Resources.limits.generalVariableIndexing = 1;
	Resources.limits.generalConstantMatrixVectorIndexing = 1;
}

EShLanguage FindLanguage(const VkShaderStageFlagBits shader_type) {
	switch (shader_type) {
	case VK_SHADER_STAGE_VERTEX_BIT:
		return EShLangVertex;

	case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
		return EShLangTessControl;

	case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
		return EShLangTessEvaluation;

	case VK_SHADER_STAGE_GEOMETRY_BIT:
		return EShLangGeometry;

	case VK_SHADER_STAGE_FRAGMENT_BIT:
		return EShLangFragment;

	case VK_SHADER_STAGE_COMPUTE_BIT:
		return EShLangCompute;

	default:
		return EShLangVertex;
	}
}

// Compile a given string containing GLSL into SPIR-V for use by Vulkan.
// A return value of false means an error was encountered.
bool GLSLtoSPV(const VkShaderStageFlagBits shader_type,
	const char *pshader,
	std::vector<unsigned int> &spirv, std::string *errorMessage) {

	glslang::TProgram program;
	const char *shaderStrings[1];
	TBuiltInResource Resources;
	init_resources(Resources);

	// Enable SPIR-V and Vulkan rules when parsing GLSL.
	EShMessages messages = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules);

	EShLanguage stage = FindLanguage(shader_type);
	glslang::TShader shader(stage);

	shaderStrings[0] = pshader;
	shader.setStrings(shaderStrings, 1);

	if (!shader.parse(&Resources, 100, false, messages)) {
		puts(shader.getInfoLog());
		puts(shader.getInfoDebugLog());
		if (errorMessage) {
			*errorMessage = shader.getInfoLog();
			(*errorMessage) += shader.getInfoDebugLog();
		}
		return false;  // Something didn't work.
	}

	// Note that program does not take ownership of &shader, so this is fine.
	program.addShader(&shader);

	if (!program.link(messages)) {
		puts(shader.getInfoLog());
		puts(shader.getInfoDebugLog());
		if (errorMessage) {
			*errorMessage = shader.getInfoLog();
			(*errorMessage) += shader.getInfoDebugLog();
		}
		return false;
	}

	// Can't fail: parsing worked, "linking" worked.
	glslang::GlslangToSpv(*program.getIntermediate(stage), spirv);
	return true;
}

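// End-to-end sketch tying the pieces together (hypothetical shader source):
//
//   init_glslang();
//   std::vector<unsigned int> spirv;  // Same as uint32_t on the supported platforms.
//   std::string errors;
//   if (GLSLtoSPV(VK_SHADER_STAGE_FRAGMENT_BIT, fragSource, spirv, &errors)) {
//     VkShaderModule module;
//     vulkan->CreateShaderModule(spirv, &module);
//   }
//   finalize_glslang();
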
void init_glslang() {
	glslang::InitializeProcess();
}

void finalize_glslang() {
	glslang::FinalizeProcess();
}

const char *VulkanResultToString(VkResult res) {
	switch (res) {
	case VK_NOT_READY: return "VK_NOT_READY";
	case VK_TIMEOUT: return "VK_TIMEOUT";
	case VK_EVENT_SET: return "VK_EVENT_SET";
	case VK_EVENT_RESET: return "VK_EVENT_RESET";
	case VK_INCOMPLETE: return "VK_INCOMPLETE";
	case VK_ERROR_OUT_OF_HOST_MEMORY: return "VK_ERROR_OUT_OF_HOST_MEMORY";
	case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
	case VK_ERROR_INITIALIZATION_FAILED: return "VK_ERROR_INITIALIZATION_FAILED";
	case VK_ERROR_DEVICE_LOST: return "VK_ERROR_DEVICE_LOST";
	case VK_ERROR_MEMORY_MAP_FAILED: return "VK_ERROR_MEMORY_MAP_FAILED";
	case VK_ERROR_LAYER_NOT_PRESENT: return "VK_ERROR_LAYER_NOT_PRESENT";
	case VK_ERROR_EXTENSION_NOT_PRESENT: return "VK_ERROR_EXTENSION_NOT_PRESENT";
	case VK_ERROR_FEATURE_NOT_PRESENT: return "VK_ERROR_FEATURE_NOT_PRESENT";
	case VK_ERROR_INCOMPATIBLE_DRIVER: return "VK_ERROR_INCOMPATIBLE_DRIVER";
	case VK_ERROR_TOO_MANY_OBJECTS: return "VK_ERROR_TOO_MANY_OBJECTS";
	case VK_ERROR_FORMAT_NOT_SUPPORTED: return "VK_ERROR_FORMAT_NOT_SUPPORTED";
	case VK_ERROR_SURFACE_LOST_KHR: return "VK_ERROR_SURFACE_LOST_KHR";
	case VK_SUBOPTIMAL_KHR: return "VK_SUBOPTIMAL_KHR";
	case VK_ERROR_OUT_OF_DATE_KHR: return "VK_ERROR_OUT_OF_DATE_KHR";
	case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
	case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
	default:
		return "Unknown VkResult";
	}
}

void VulkanAssertImpl(VkResult check, const char *function, const char *file, int line) {
	const char *error = "(none)";