~mmach/netext73/mesa-haswell


Viewing changes to src/virtio/vulkan/vn_pipeline.c

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i

/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_pipeline.h"

#include "venus-protocol/vn_protocol_driver_pipeline.h"
#include "venus-protocol/vn_protocol_driver_pipeline_cache.h"
#include "venus-protocol/vn_protocol_driver_pipeline_layout.h"
#include "venus-protocol/vn_protocol_driver_shader_module.h"

#include "vn_device.h"
#include "vn_physical_device.h"
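
/*
 * Reader's note: the venus-protocol headers are generated from the venus
 * protocol definitions.  They provide the vn_async_* encoders used below,
 * which queue a command to the host without waiting for a reply, and the
 * vn_call_* encoders, which block until the host replies with a result.
 */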
/* shader module commands */

VkResult
vn_CreateShaderModule(VkDevice device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_shader_module *mod =
      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mod)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);

   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
   vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
                                 &mod_handle);

   *pShaderModule = mod_handle;

   return VK_SUCCESS;
}
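
/*
 * Illustrative usage (hypothetical application code, not part of this
 * file): creation is asynchronous.  The guest allocates the wrapper,
 * derives the handle from it, and vn_async_vkCreateShaderModule merely
 * queues the command for the host, which is why VK_SUCCESS is returned
 * unconditionally above.
 *
 *    VkShaderModuleCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
 *       .codeSize = code_size,   // assumed size of a SPIR-V blob in bytes
 *       .pCode = code,           // assumed pointer to the SPIR-V words
 *    };
 *    VkShaderModule mod;
 *    VkResult res = vkCreateShaderModule(device, &info, NULL, &mod);
 */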

void
vn_DestroyShaderModule(VkDevice device,
                       VkShaderModule shaderModule,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!mod)
      return;

   vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);

   vn_object_base_fini(&mod->base);
   vk_free(alloc, mod);
}

/* pipeline layout commands */

VkResult
vn_CreatePipelineLayout(VkDevice device,
                        const VkPipelineLayoutCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkPipelineLayout *pPipelineLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_layout *layout =
      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!layout)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
                       &dev->base);

   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
   vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
                                   &layout_handle);

   *pPipelineLayout = layout_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineLayout(VkDevice device,
                         VkPipelineLayout pipelineLayout,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_layout *layout =
      vn_pipeline_layout_from_handle(pipelineLayout);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!layout)
      return;

   vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
                                    NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

/* pipeline cache commands */

VkResult
vn_CreatePipelineCache(VkDevice device,
                       const VkPipelineCacheCreateInfo *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkPipelineCache *pPipelineCache)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_cache *cache =
      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cache)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
                       &dev->base);

   /* The app-provided blob is expected to begin with the
    * vk_pipeline_cache_header that vn_GetPipelineCacheData prepends for
    * the guest; strip that header before forwarding the data to the host.
    */
   VkPipelineCacheCreateInfo local_create_info;
   if (pCreateInfo->initialDataSize) {
      const struct vk_pipeline_cache_header *header =
         pCreateInfo->pInitialData;

      local_create_info = *pCreateInfo;
      local_create_info.initialDataSize -= header->header_size;
      local_create_info.pInitialData += header->header_size;
      pCreateInfo = &local_create_info;
   }

   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
   vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
                                  &cache_handle);

   *pPipelineCache = cache_handle;

   return VK_SUCCESS;
}
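
/*
 * Worked example (assumed sizes, for illustration only): if an application
 * passes back a blob whose vk_pipeline_cache_header reports
 * header_size == 32 and whose initialDataSize is 1000 bytes, the local
 * copy above becomes
 *
 *    local_create_info.initialDataSize = 1000 - 32;   // 968 bytes
 *    local_create_info.pInitialData    = blob + 32;   // past the header
 *
 * so the host receives exactly the raw cache data it produced earlier.
 */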

void
vn_DestroyPipelineCache(VkDevice device,
                        VkPipelineCache pipelineCache,
                        const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_cache *cache =
      vn_pipeline_cache_from_handle(pipelineCache);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!cache)
      return;

   vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
                                   NULL);

   vn_object_base_fini(&cache->base);
   vk_free(alloc, cache);
}

VkResult
vn_GetPipelineCacheData(VkDevice device,
                        VkPipelineCache pipelineCache,
                        size_t *pDataSize,
                        void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_physical_device *physical_dev = dev->physical_device;

   struct vk_pipeline_cache_header *header = pData;
   VkResult result;
   if (!pData) {
      /* size query: report the host size plus room for the guest header */
      result = vn_call_vkGetPipelineCacheData(dev->instance, device,
                                              pipelineCache, pDataSize, NULL);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);

      *pDataSize += sizeof(*header);
      return VK_SUCCESS;
   }

   if (*pDataSize <= sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   /* fill in a header describing the guest physical device */
   const VkPhysicalDeviceProperties *props =
      &physical_dev->properties.vulkan_1_0;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = props->vendorID;
   header->device_id = props->deviceID;
   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);

   /* fetch the host data into the space after the header */
   *pDataSize -= header->header_size;
   result =
      vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
                                     pDataSize, pData + header->header_size);
   if (result < VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pDataSize += header->header_size;

   return result;
}
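
/*
 * Usage sketch (hypothetical application code): the standard Vulkan
 * two-call idiom is unaffected; venus only grows the reported size by
 * sizeof(struct vk_pipeline_cache_header) and prepends that header.
 *
 *    size_t size = 0;
 *    vkGetPipelineCacheData(device, cache, &size, NULL);  // query size
 *    void *blob = malloc(size);
 *    vkGetPipelineCacheData(device, cache, &size, blob);  // fetch data
 */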

VkResult
vn_MergePipelineCaches(VkDevice device,
                       VkPipelineCache dstCache,
                       uint32_t srcCacheCount,
                       const VkPipelineCache *pSrcCaches)
{
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
                                  srcCacheCount, pSrcCaches);

   return VK_SUCCESS;
}

/* pipeline commands */

/* When rasterizerDiscardEnable is VK_TRUE, the spec allows pViewportState,
 * pMultisampleState, pDepthStencilState, and pColorBlendState to be
 * ignored, so applications may legally leave them dangling.  The command
 * encoder would still serialize them, so hand it copies of the create
 * infos with the ignored pointers cleared.
 */
static const VkGraphicsPipelineCreateInfo *
vn_fix_graphics_pipeline_create_info(
   struct vn_device *dev,
   uint32_t create_info_count,
   const VkGraphicsPipelineCreateInfo *create_infos,
   const VkAllocationCallbacks *alloc,
   VkGraphicsPipelineCreateInfo **out)
{
   VkGraphicsPipelineCreateInfo *infos = NULL;
   bool has_ignored_state = false;

   for (uint32_t i = 0; i < create_info_count; i++) {
      if (create_infos[i].pRasterizationState->rasterizerDiscardEnable ==
          VK_FALSE)
         continue;

      if (create_infos[i].pViewportState ||
          create_infos[i].pMultisampleState ||
          create_infos[i].pDepthStencilState ||
          create_infos[i].pColorBlendState) {
         has_ignored_state = true;
         break;
      }
   }

   if (!has_ignored_state)
      return create_infos;

   infos = vk_alloc(alloc, sizeof(*infos) * create_info_count,
                    VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!infos)
      return NULL;

   memcpy(infos, create_infos, sizeof(*infos) * create_info_count);

   for (uint32_t i = 0; i < create_info_count; i++) {
      if (infos[i].pRasterizationState->rasterizerDiscardEnable == VK_FALSE)
         continue;

      infos[i].pViewportState = NULL;
      infos[i].pMultisampleState = NULL;
      infos[i].pDepthStencilState = NULL;
      infos[i].pColorBlendState = NULL;
   }

   *out = infos;
   return infos;
}
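
/*
 * Illustration (assumed values): with rasterizer discard enabled, a
 * create info such as
 *
 *    .pRasterizationState = &raster,   // rasterizerDiscardEnable == VK_TRUE
 *    .pViewportState      = stale_ptr, // legal: ignored by the spec
 *
 * is rewritten in the local copy so that pViewportState,
 * pMultisampleState, pDepthStencilState, and pColorBlendState are all
 * NULL, and the wire encoder never dereferences the ignored pointers.
 */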

VkResult
vn_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t createInfoCount,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   VkGraphicsPipelineCreateInfo *local_infos = NULL;

   pCreateInfos = vn_fix_graphics_pipeline_create_info(
      dev, createInfoCount, pCreateInfos, alloc, &local_infos);
   if (!pCreateInfos)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));

         if (local_infos)
            vk_free(alloc, local_infos);

         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
                                      createInfoCount, pCreateInfos, NULL,
                                      pPipelines);

   if (local_infos)
      vk_free(alloc, local_infos);

   return VK_SUCCESS;
}
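
/*
 * Design note (observation): the host-side create is encoded with
 * vn_async_*, so host failures cannot be reflected in the return value
 * here; the guest optimistically returns VK_SUCCESS once its own
 * allocations succeed.  Guest allocation failure, by contrast, rolls back
 * every wrapper created so far and zeroes pPipelines, as shown above.
 */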

VkResult
vn_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t createInfoCount,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
                                     createInfoCount, pCreateInfos, NULL,
                                     pPipelines);

   return VK_SUCCESS;
}

void
vn_DestroyPipeline(VkDevice device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!pipeline)
      return;

   vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);

   vn_object_base_fini(&pipeline->base);
   vk_free(alloc, pipeline);
}