~mmach/netext73/mesa-haswell


Viewing changes to src/gallium/drivers/zink/zink_draw.cpp

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i

#include "zink_compiler.h"
#include "zink_context.h"
#include "zink_program.h"
#include "zink_query.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"
#include "zink_inlines.h"

#include "tgsi/tgsi_from_mesa.h"
#include "util/hash_table.h"
#include "util/u_debug.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"


static void
zink_emit_xfb_counter_barrier(struct zink_context *ctx)
{
   for (unsigned i = 0; i < ctx->num_so_targets; i++) {
      struct zink_so_target *t = zink_so_target(ctx->so_targets[i]);
      if (!t)
         continue;
      struct zink_resource *res = zink_resource(t->counter_buffer);
      VkAccessFlags access = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT;
      VkPipelineStageFlags stage = VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
      if (t->counter_buffer_valid) {
         /* Between the pause and resume there needs to be a memory barrier for the counter buffers
          * with a source access of VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT
          * at pipeline stage VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT
          * to a destination access of VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT
          * at pipeline stage VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT.
          *
          * - from VK_EXT_transform_feedback spec
          */
         access |= VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT;
         stage |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
      }
      zink_resource_buffer_barrier(ctx, res, access, stage);
   }
}
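
/* Bind all active stream-output targets for the current draw. Unbound slots
 * are pointed at a tiny dummy buffer so the binding array stays dense, and a
 * target whose backing resource was rebound since its last use has its
 * counter buffer invalidated, since the saved byte count no longer applies.
 */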
static void
zink_emit_stream_output_targets(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_batch *batch = &ctx->batch;
   VkBuffer buffers[PIPE_MAX_SO_OUTPUTS] = {0};
   VkDeviceSize buffer_offsets[PIPE_MAX_SO_OUTPUTS] = {0};
   VkDeviceSize buffer_sizes[PIPE_MAX_SO_OUTPUTS] = {0};

   for (unsigned i = 0; i < ctx->num_so_targets; i++) {
      struct zink_so_target *t = (struct zink_so_target *)ctx->so_targets[i];
      if (!t) {
         /* no need to reference this or anything */
         buffers[i] = zink_resource(ctx->dummy_xfb_buffer)->obj->buffer;
         buffer_offsets[i] = 0;
         buffer_sizes[i] = sizeof(uint8_t);
         continue;
      }
      struct zink_resource *res = zink_resource(t->base.buffer);
      if (!res->so_valid)
         /* resource has been rebound */
         t->counter_buffer_valid = false;
      buffers[i] = res->obj->buffer;
      zink_batch_reference_resource_rw(batch, res, true);
      buffer_offsets[i] = t->base.buffer_offset;
      buffer_sizes[i] = t->base.buffer_size;
      res->so_valid = true;
      util_range_add(t->base.buffer, &res->valid_buffer_range, t->base.buffer_offset,
                     t->base.buffer_offset + t->base.buffer_size);
   }

   VKCTX(CmdBindTransformFeedbackBuffersEXT)(batch->state->cmdbuf, 0, ctx->num_so_targets,
                                             buffers, buffer_offsets,
                                             buffer_sizes);
   ctx->dirty_so_targets = false;
}

ALWAYS_INLINE static void
check_buffer_barrier(struct zink_context *ctx, struct pipe_resource *pres, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   struct zink_resource *res = zink_resource(pres);
   zink_resource_buffer_barrier(ctx, res, flags, pipeline);
}

ALWAYS_INLINE static void
barrier_draw_buffers(struct zink_context *ctx, const struct pipe_draw_info *dinfo,
                     const struct pipe_draw_indirect_info *dindirect, struct pipe_resource *index_buffer)
{
   if (index_buffer)
      check_buffer_barrier(ctx, index_buffer, VK_ACCESS_INDEX_READ_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
   if (dindirect && dindirect->buffer) {
      check_buffer_barrier(ctx, dindirect->buffer,
                           VK_ACCESS_INDIRECT_COMMAND_READ_BIT, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
      if (dindirect->indirect_draw_count)
         check_buffer_barrier(ctx, dindirect->indirect_draw_count,
                              VK_ACCESS_INDIRECT_COMMAND_READ_BIT, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
   }
}
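
/* Upload the vertex-buffer bindings for this draw. Empty bindings are given a
 * dummy buffer with stride 0. Strides are passed through
 * vkCmdBindVertexBuffers2EXT when VK_EXT_extended_dynamic_state is in use, and
 * the full attribute layout goes through vkCmdSetVertexInputEXT when
 * VK_EXT_vertex_input_dynamic_state is in use; otherwise the strides are baked
 * into the pipeline and plain vkCmdBindVertexBuffers suffices.
 */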
template <zink_dynamic_state DYNAMIC_STATE>
static void
zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
   VkBuffer buffers[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_strides[PIPE_MAX_ATTRIBS];
   struct zink_vertex_elements_state *elems = ctx->element_state;
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   if (!elems->hw_state.num_bindings)
      return;

   for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
      struct pipe_vertex_buffer *vb = ctx->vertex_buffers + ctx->element_state->binding_map[i];
      assert(vb);
      if (vb->buffer.resource) {
         struct zink_resource *res = zink_resource(vb->buffer.resource);
         assert(res->obj->buffer);
         buffers[i] = res->obj->buffer;
         buffer_offsets[i] = vb->buffer_offset;
         buffer_strides[i] = vb->stride;
         if (DYNAMIC_STATE == ZINK_DYNAMIC_VERTEX_INPUT)
            elems->hw_state.dynbindings[i].stride = vb->stride;
      } else {
         buffers[i] = zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
         buffer_offsets[i] = 0;
         buffer_strides[i] = 0;
         if (DYNAMIC_STATE == ZINK_DYNAMIC_VERTEX_INPUT)
            elems->hw_state.dynbindings[i].stride = 0;
      }
   }

   if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE && DYNAMIC_STATE != ZINK_DYNAMIC_VERTEX_INPUT)
      VKCTX(CmdBindVertexBuffers2EXT)(batch->state->cmdbuf, 0,
                                      elems->hw_state.num_bindings,
                                      buffers, buffer_offsets, NULL, buffer_strides);
   else
      VKSCR(CmdBindVertexBuffers)(batch->state->cmdbuf, 0,
                                  elems->hw_state.num_bindings,
                                  buffers, buffer_offsets);

   if (DYNAMIC_STATE == ZINK_DYNAMIC_VERTEX_INPUT)
      VKCTX(CmdSetVertexInputEXT)(batch->state->cmdbuf,
                                  elems->hw_state.num_bindings, elems->hw_state.dynbindings,
                                  elems->hw_state.num_attribs, elems->hw_state.dynattribs);

   ctx->vertex_buffers_dirty = false;
}

static void
zink_bind_vertex_state(struct zink_batch *batch, struct zink_context *ctx,
                       struct pipe_vertex_state *vstate, uint32_t partial_velem_mask)
{
   if (!vstate->input.vbuffer.buffer.resource)
      return;

   const struct zink_vertex_elements_hw_state *hw_state = zink_vertex_state_mask(vstate, partial_velem_mask, true);
   assert(hw_state);

   struct zink_resource *res = zink_resource(vstate->input.vbuffer.buffer.resource);
   zink_batch_resource_usage_set(&ctx->batch, res, false);
   VkDeviceSize offset = vstate->input.vbuffer.buffer_offset;
   VKCTX(CmdBindVertexBuffers)(batch->state->cmdbuf, 0,
                               hw_state->num_bindings,
                               &res->obj->buffer, &offset);

   VKCTX(CmdSetVertexInputEXT)(batch->state->cmdbuf,
                               hw_state->num_bindings, hw_state->dynbindings,
                               hw_state->num_attribs, hw_state->dynattribs);
}
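
/* Look up (or create) the zink_gfx_program matching the bound shader set.
 * Programs are cached per optional-stage combination in ctx->program_cache,
 * keyed by the pre-computed ctx->gfx_hash. The running pipeline hash is kept
 * current by XORing out the previous program's variant hash and XORing in
 * the new one.
 */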
static void
update_gfx_program(struct zink_context *ctx)
{
   if (ctx->last_vertex_stage_dirty) {
      enum pipe_shader_type pstage = pipe_shader_type_from_mesa(ctx->last_vertex_stage->nir->info.stage);
      ctx->dirty_shader_stages |= BITFIELD_BIT(pstage);
      memcpy(&ctx->gfx_pipeline_state.shader_keys.key[pstage].key.vs_base,
             &ctx->gfx_pipeline_state.shader_keys.last_vertex.key.vs_base,
             sizeof(struct zink_vs_key_base));
      ctx->last_vertex_stage_dirty = false;
   }
   unsigned bits = BITFIELD_MASK(PIPE_SHADER_COMPUTE);
   if (ctx->gfx_dirty) {
      struct zink_gfx_program *prog = NULL;

      struct hash_table *ht = &ctx->program_cache[ctx->shader_stages >> 2];
      const uint32_t hash = ctx->gfx_hash;
      struct hash_entry *entry = _mesa_hash_table_search_pre_hashed(ht, hash, ctx->gfx_stages);
      if (entry) {
         prog = (struct zink_gfx_program*)entry->data;
         u_foreach_bit(stage, prog->stages_present & ~ctx->dirty_shader_stages)
            ctx->gfx_pipeline_state.modules[stage] = prog->modules[stage]->shader;
         /* ensure variants are always updated if keys have changed since last use */
         ctx->dirty_shader_stages |= prog->stages_present;
      } else {
         ctx->dirty_shader_stages |= bits;
         prog = zink_create_gfx_program(ctx, ctx->gfx_stages, ctx->gfx_pipeline_state.vertices_per_patch + 1);
         _mesa_hash_table_insert_pre_hashed(ht, hash, prog->shaders, prog);
      }
      zink_update_gfx_program(ctx, prog);
      if (prog && prog != ctx->curr_program)
         zink_batch_reference_program(&ctx->batch, &prog->base);
      if (ctx->curr_program)
         ctx->gfx_pipeline_state.final_hash ^= ctx->curr_program->last_variant_hash;
      ctx->curr_program = prog;
      ctx->gfx_pipeline_state.final_hash ^= ctx->curr_program->last_variant_hash;
      ctx->gfx_dirty = false;
   } else if (ctx->dirty_shader_stages & bits) {
      /* remove old hash */
      ctx->gfx_pipeline_state.final_hash ^= ctx->curr_program->last_variant_hash;
      zink_update_gfx_program(ctx, ctx->curr_program);
      /* apply new hash */
      ctx->gfx_pipeline_state.final_hash ^= ctx->curr_program->last_variant_hash;
   }
   ctx->dirty_shader_stages &= ~bits;
}
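
/* gl_DrawID is emulated with a push constant: the current draw index is
 * written into the vertex-stage push-constant block before each draw that
 * needs it.
 */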
ALWAYS_INLINE static void
update_drawid(struct zink_context *ctx, unsigned draw_id)
{
   VKCTX(CmdPushConstants)(ctx->batch.state->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_VERTEX_BIT,
                           offsetof(struct zink_gfx_push_constant, draw_id), sizeof(unsigned),
                           &draw_id);
}
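
/* Indexed-draw variant that always passes 0 as firstIndex: for index buffers
 * where the draw start has (presumably) already been folded into the bind
 * offset, so draws[i].start must not be applied a second time.
 */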
ALWAYS_INLINE static void
draw_indexed_need_index_buffer_unref(struct zink_context *ctx,
             const struct pipe_draw_info *dinfo,
             const struct pipe_draw_start_count_bias *draws,
             unsigned num_draws,
             unsigned draw_id,
             bool needs_drawid)
{
   VkCommandBuffer cmdbuf = ctx->batch.state->cmdbuf;
   if (dinfo->increment_draw_id && needs_drawid) {
      for (unsigned i = 0; i < num_draws; i++) {
         update_drawid(ctx, draw_id);
         VKCTX(CmdDrawIndexed)(cmdbuf,
            draws[i].count, dinfo->instance_count,
            0, draws[i].index_bias, dinfo->start_instance);
         draw_id++;
      }
   } else {
      if (needs_drawid)
         update_drawid(ctx, draw_id);
      for (unsigned i = 0; i < num_draws; i++)
         VKCTX(CmdDrawIndexed)(cmdbuf,
            draws[i].count, dinfo->instance_count,
            0, draws[i].index_bias, dinfo->start_instance);
   }
}
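
/* When VK_EXT_multi_draw is available, the draws[] array is handed to the
 * driver directly: struct pipe_draw_start_count_bias lays out its
 * start/count(/index_bias) fields to match VkMultiDrawInfoEXT and
 * VkMultiDrawIndexedInfoEXT, with the struct size passed as the stride.
 */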
template <zink_multidraw HAS_MULTIDRAW>
ALWAYS_INLINE static void
draw_indexed(struct zink_context *ctx,
             const struct pipe_draw_info *dinfo,
             const struct pipe_draw_start_count_bias *draws,
             unsigned num_draws,
             unsigned draw_id,
             bool needs_drawid)
{
   VkCommandBuffer cmdbuf = ctx->batch.state->cmdbuf;
   if (dinfo->increment_draw_id && needs_drawid) {
      for (unsigned i = 0; i < num_draws; i++) {
         update_drawid(ctx, draw_id);
         VKCTX(CmdDrawIndexed)(cmdbuf,
            draws[i].count, dinfo->instance_count,
            draws[i].start, draws[i].index_bias, dinfo->start_instance);
         draw_id++;
      }
   } else {
      if (needs_drawid)
         update_drawid(ctx, draw_id);
      if (HAS_MULTIDRAW) {
         VKCTX(CmdDrawMultiIndexedEXT)(cmdbuf, num_draws, (const VkMultiDrawIndexedInfoEXT*)draws,
                                       dinfo->instance_count,
                                       dinfo->start_instance, sizeof(struct pipe_draw_start_count_bias),
                                       dinfo->index_bias_varies ? NULL : &draws[0].index_bias);
      } else {
         for (unsigned i = 0; i < num_draws; i++)
            VKCTX(CmdDrawIndexed)(cmdbuf,
               draws[i].count, dinfo->instance_count,
               draws[i].start, draws[i].index_bias, dinfo->start_instance);
      }
   }
}

template <zink_multidraw HAS_MULTIDRAW>
ALWAYS_INLINE static void
draw(struct zink_context *ctx,
     const struct pipe_draw_info *dinfo,
     const struct pipe_draw_start_count_bias *draws,
     unsigned num_draws,
     unsigned draw_id,
     bool needs_drawid)
{
   VkCommandBuffer cmdbuf = ctx->batch.state->cmdbuf;
   if (dinfo->increment_draw_id && needs_drawid) {
      for (unsigned i = 0; i < num_draws; i++) {
         update_drawid(ctx, draw_id);
         VKCTX(CmdDraw)(cmdbuf, draws[i].count, dinfo->instance_count, draws[i].start, dinfo->start_instance);
         draw_id++;
      }
   } else {
      if (needs_drawid)
         update_drawid(ctx, draw_id);
      if (HAS_MULTIDRAW)
         VKCTX(CmdDrawMultiEXT)(cmdbuf, num_draws, (const VkMultiDrawInfoEXT*)draws,
                                dinfo->instance_count, dinfo->start_instance,
                                sizeof(struct pipe_draw_start_count_bias));
      else {
         for (unsigned i = 0; i < num_draws; i++)
            VKCTX(CmdDraw)(cmdbuf, draws[i].count, dinfo->instance_count, draws[i].start, dinfo->start_instance);
      }
   }
}

/*
   If a synchronization command includes a source stage mask, its first synchronization scope only
   includes execution of the pipeline stages specified in that mask, and its first access scope only
   includes memory accesses performed by pipeline stages specified in that mask.

   If a synchronization command includes a destination stage mask, its second synchronization scope
   only includes execution of the pipeline stages specified in that mask, and its second access scope
   only includes memory accesses performed by pipeline stages specified in that mask.

   - Chapter 7. Synchronization and Cache Control

 * thus, all stages must be added to ensure accurate synchronization
 */
ALWAYS_INLINE static VkPipelineStageFlags
find_pipeline_bits(uint32_t *mask)
{
   VkPipelineStageFlags pipeline = 0;
   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
      if (mask[i]) {
         pipeline |= zink_pipeline_flags_from_pipe_stage((enum pipe_shader_type)i);
      }
   }
   return pipeline;
}
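
/* Flush the set of resources flagged as needing a barrier before this
 * draw/dispatch. Access and stage masks are derived from how each resource is
 * bound (index/indirect/UBO/SSBO/sampler/image), and the two need_barriers
 * sets are ping-ponged so that resources which will need another barrier on
 * the next call can be re-queued while iterating.
 */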
static void
update_barriers(struct zink_context *ctx, bool is_compute,
                struct pipe_resource *index, struct pipe_resource *indirect, struct pipe_resource *indirect_draw_count)
{
   if (!ctx->need_barriers[is_compute]->entries)
      return;
   struct set *need_barriers = ctx->need_barriers[is_compute];
   ctx->barrier_set_idx[is_compute] = !ctx->barrier_set_idx[is_compute];
   ctx->need_barriers[is_compute] = &ctx->update_barriers[is_compute][ctx->barrier_set_idx[is_compute]];
   set_foreach(need_barriers, he) {
      struct zink_resource *res = (struct zink_resource *)he->key;
      bool is_buffer = res->obj->is_buffer;
      VkPipelineStageFlags pipeline = 0;
      VkAccessFlags access = 0;

      if (res == zink_resource(index)) {
         access |= VK_ACCESS_INDEX_READ_BIT;
         pipeline |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
      } else if (res == zink_resource(indirect) || res == zink_resource(indirect_draw_count)) {
         access |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
         pipeline |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
      }
      if (res->bind_count[is_compute]) {
         if (res->write_bind_count[is_compute])
            access |= VK_ACCESS_SHADER_WRITE_BIT;
         if (is_buffer) {
            unsigned bind_count = res->bind_count[is_compute];
            if (res->ubo_bind_count[is_compute])
               access |= VK_ACCESS_UNIFORM_READ_BIT;
            bind_count -= res->ubo_bind_count[is_compute];
            if (!is_compute && res->vbo_bind_mask) {
               access |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
               pipeline |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
               bind_count -= res->vbo_bind_count;
            }
            if (bind_count)
               access |= VK_ACCESS_SHADER_READ_BIT;
            if (!is_compute) {
               pipeline |= find_pipeline_bits(res->ssbo_bind_mask);

               if (res->ubo_bind_count[0] && (pipeline & GFX_SHADER_BITS) != GFX_SHADER_BITS)
                  pipeline |= find_pipeline_bits(res->ubo_bind_mask);
            }
         } else {
            if (res->bind_count[is_compute] != res->write_bind_count[is_compute])
               access |= VK_ACCESS_SHADER_READ_BIT;
            if (res->write_bind_count[is_compute])
               access |= VK_ACCESS_SHADER_WRITE_BIT;
         }
         if (is_compute)
            pipeline = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
         else {
            VkPipelineStageFlags gfx_stages = pipeline & ~(VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
            /* images always need gfx stages, and buffers need gfx stages if non-vbo binds exist */
            bool needs_stages = !is_buffer || (res->bind_count[0] - res->vbo_bind_count > 0);
            if (gfx_stages != GFX_SHADER_BITS && needs_stages) {
               gfx_stages |= find_pipeline_bits(res->sampler_binds);
               if (gfx_stages != GFX_SHADER_BITS) //must be a shader image
                  gfx_stages |= find_pipeline_bits(res->image_binds);
               pipeline |= gfx_stages;
            }
         }
         if (res->base.b.target == PIPE_BUFFER)
            zink_resource_buffer_barrier(ctx, res, access, pipeline);
         else {
            VkImageLayout layout = zink_descriptor_util_image_layout_eval(ctx, res, is_compute);
            if (layout != res->layout)
               zink_resource_image_barrier(ctx, res, layout, access, pipeline);
         }
         /* always barrier on draw if this resource has either multiple image write binds or
          * image write binds and image read binds
          */
         if (res->write_bind_count[is_compute] && res->bind_count[is_compute] > 1)
            _mesa_set_add_pre_hashed(ctx->need_barriers[is_compute], he->hash, res);
      }
      _mesa_set_remove(need_barriers, he);
      if (!need_barriers->entries)
         break;
   }
}
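
/* Fetch (or build) the VkPipeline for the current pipeline state and bind it,
 * skipping the bind when neither the pipeline handle nor the batch changed.
 */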
template <bool BATCH_CHANGED>
static bool
update_gfx_pipeline(struct zink_context *ctx, struct zink_batch_state *bs, enum pipe_prim_type mode)
{
   VkPipeline prev_pipeline = ctx->gfx_pipeline_state.pipeline;
   update_gfx_program(ctx);
   VkPipeline pipeline = zink_get_gfx_pipeline(ctx, ctx->curr_program, &ctx->gfx_pipeline_state, mode);
   bool pipeline_changed = prev_pipeline != pipeline;
   if (BATCH_CHANGED || pipeline_changed)
      VKCTX(CmdBindPipeline)(bs->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   return pipeline_changed;
}
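
/* Fallback for hosts without VK_EXT_conditional_rendering: the query result
 * is read back on the CPU and the draw is either skipped or replayed with the
 * render condition temporarily disabled (when reading the result split the
 * batch). Returns true if the caller should proceed with the draw.
 */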
static bool
hack_conditional_render(struct pipe_context *pctx,
                        const struct pipe_draw_info *dinfo,
                        unsigned drawid_offset,
                        const struct pipe_draw_indirect_info *dindirect,
                        const struct pipe_draw_start_count_bias *draws,
                        unsigned num_draws)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_batch_state *bs = ctx->batch.state;
   static bool warned;
   if (!warned) {
      fprintf(stderr, "ZINK: warning, this is cpu-based conditional rendering, say bye-bye to fps\n");
      warned = true;
   }
   if (!zink_check_conditional_render(ctx))
      return false;
   if (bs != ctx->batch.state) {
      bool prev = ctx->render_condition_active;
      ctx->render_condition_active = false;
      zink_select_draw_vbo(ctx);
      pctx->draw_vbo(pctx, dinfo, drawid_offset, dindirect, draws, num_draws);
      ctx->render_condition_active = prev;
      return false;
   }
   return true;
}
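
/* Main draw entrypoint. This is compiled into one specialization per
 * (multidraw support, dynamic-state level, batch-changed, vertex-state draw)
 * combination so that all of the feature checks below resolve at compile time.
 */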
template <zink_multidraw HAS_MULTIDRAW, zink_dynamic_state DYNAMIC_STATE, bool BATCH_CHANGED, bool DRAW_STATE>
void
zink_draw(struct pipe_context *pctx,
          const struct pipe_draw_info *dinfo,
          unsigned drawid_offset,
          const struct pipe_draw_indirect_info *dindirect,
          const struct pipe_draw_start_count_bias *draws,
          unsigned num_draws,
          struct pipe_vertex_state *vstate,
          uint32_t partial_velem_mask)
{
   if (!dindirect && (!draws[0].count || !dinfo->instance_count))
      return;

   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;
   struct zink_depth_stencil_alpha_state *dsa_state = ctx->dsa_state;
   struct zink_batch *batch = &ctx->batch;
   struct zink_so_target *so_target =
      dindirect && dindirect->count_from_stream_output ?
         zink_so_target(dindirect->count_from_stream_output) : NULL;
   VkBuffer counter_buffers[PIPE_MAX_SO_OUTPUTS];
   VkDeviceSize counter_buffer_offsets[PIPE_MAX_SO_OUTPUTS];
   bool need_index_buffer_unref = false;
   bool mode_changed = ctx->gfx_pipeline_state.gfx_prim_mode != dinfo->mode;
   bool reads_drawid = ctx->shader_reads_drawid;
   bool reads_basevertex = ctx->shader_reads_basevertex;
   unsigned work_count = ctx->batch.work_count;
   enum pipe_prim_type mode = (enum pipe_prim_type)dinfo->mode;

   if (unlikely(!screen->info.have_EXT_conditional_rendering)) {
      if (!hack_conditional_render(pctx, dinfo, drawid_offset, dindirect, draws, num_draws))
         return;
   }

   if (ctx->memory_barrier)
      zink_flush_memory_barrier(ctx, false);

   if (unlikely(ctx->buffer_rebind_counter < screen->buffer_rebind_counter)) {
      ctx->buffer_rebind_counter = screen->buffer_rebind_counter;
      zink_rebind_all_buffers(ctx);
   }

   unsigned index_offset = 0;
   unsigned index_size = dinfo->index_size;
   struct pipe_resource *index_buffer = NULL;
   if (index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &draws[0], &index_buffer, &index_offset, 4)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
         /* this will have extra refs from tc */
         if (screen->threaded)
            zink_batch_reference_resource_move(batch, zink_resource(index_buffer));
         else
            zink_batch_reference_resource(batch, zink_resource(index_buffer));
      } else {
         index_buffer = dinfo->index.resource;
         zink_batch_reference_resource_rw(batch, zink_resource(index_buffer), false);
      }
      assert(index_size <= 4 && index_size != 3);
      assert(index_size != 1 || screen->info.have_EXT_index_type_uint8);
   }

   bool have_streamout = !!ctx->num_so_targets;
   if (have_streamout) {
      zink_emit_xfb_counter_barrier(ctx);
      if (ctx->dirty_so_targets) {
         /* have to loop here and below because barriers must be emitted outside of the
          * renderpass, while the xfb buffers can't be bound before the renderpass is
          * active, to avoid breaking out of it recursively
          */
         for (unsigned i = 0; i < ctx->num_so_targets; i++) {
            struct zink_so_target *t = (struct zink_so_target *)ctx->so_targets[i];
            if (t)
               zink_resource_buffer_barrier(ctx, zink_resource(t->base.buffer),
                                            VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
         }
      }
   }

   barrier_draw_buffers(ctx, dinfo, dindirect, index_buffer);
   /* this may re-emit draw buffer barriers, but such synchronization is harmless */
   update_barriers(ctx, false, index_buffer, dindirect ? dindirect->buffer : NULL, dindirect ? dindirect->indirect_draw_count : NULL);

   /* ensure synchronization between doing streamout with counter buffer
    * and using counter buffer for indirect draw
    */
   if (so_target && so_target->counter_buffer_valid)
      zink_resource_buffer_barrier(ctx, zink_resource(so_target->counter_buffer),
                                   VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
                                   VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);

   zink_query_update_gs_states(ctx, dinfo->was_line_loop);

   zink_batch_rp(ctx);
   /* check dead swapchain */
   if (unlikely(!ctx->batch.in_rp))
      return;

   if (BATCH_CHANGED)
      zink_update_descriptor_refs(ctx, false);

   /* these must be after renderpass start to avoid issues with recursion */
   bool drawid_broken = false;
   if (reads_drawid && (!dindirect || !dindirect->buffer))
      drawid_broken = (drawid_offset != 0 ||
                      (!HAS_MULTIDRAW && num_draws > 1) ||
                      (HAS_MULTIDRAW && num_draws > 1 && !dinfo->increment_draw_id));
   if (drawid_broken != zink_get_last_vertex_key(ctx)->push_drawid)
      zink_set_last_vertex_key(ctx)->push_drawid = drawid_broken;
   if (mode_changed) {
      bool points_changed = false;
      if (mode == PIPE_PRIM_POINTS) {
         ctx->gfx_pipeline_state.has_points++;
         points_changed = true;
      } else if (ctx->gfx_pipeline_state.gfx_prim_mode == PIPE_PRIM_POINTS) {
         ctx->gfx_pipeline_state.has_points--;
         points_changed = true;
      }
      if (points_changed && ctx->rast_state->base.point_quad_rasterization)
         zink_set_fs_point_coord_key(ctx);
   }
   ctx->gfx_pipeline_state.gfx_prim_mode = mode;
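
   /* index_size can only be 1, 2, or 4 here (asserted above), so
    * index_size >> 1 maps 1 -> UINT8, 2 -> UINT16, 4 -> UINT32 in this table
    */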
   if (index_size) {
      const VkIndexType index_type[3] = {
         VK_INDEX_TYPE_UINT8_EXT,
         VK_INDEX_TYPE_UINT16,
         VK_INDEX_TYPE_UINT32,
      };
      struct zink_resource *res = zink_resource(index_buffer);
      VKCTX(CmdBindIndexBuffer)(batch->state->cmdbuf, res->obj->buffer, index_offset, index_type[index_size >> 1]);
   }
   if (DYNAMIC_STATE < ZINK_DYNAMIC_STATE2) {
      if (ctx->gfx_pipeline_state.dyn_state2.primitive_restart != dinfo->primitive_restart)
         ctx->gfx_pipeline_state.dirty = true;
      ctx->gfx_pipeline_state.dyn_state2.primitive_restart = dinfo->primitive_restart;
   }

   if (have_streamout && ctx->dirty_so_targets)
      zink_emit_stream_output_targets(pctx);

   bool pipeline_changed = false;
   if (DYNAMIC_STATE == ZINK_NO_DYNAMIC_STATE)
      pipeline_changed = update_gfx_pipeline<BATCH_CHANGED>(ctx, batch->state, mode);
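
   /* gallium viewport state is a center/half-extent transform: translate is
    * the viewport center and scale the half-size, so x/y = translate - scale
    * and width/height = 2 * scale; the depth range differs for clip_halfz
    */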
   if (BATCH_CHANGED || ctx->vp_state_changed || (DYNAMIC_STATE == ZINK_NO_DYNAMIC_STATE && pipeline_changed)) {
      VkViewport viewports[PIPE_MAX_VIEWPORTS];
      for (unsigned i = 0; i < ctx->vp_state.num_viewports; i++) {
         VkViewport viewport = {
            ctx->vp_state.viewport_states[i].translate[0] - ctx->vp_state.viewport_states[i].scale[0],
            ctx->vp_state.viewport_states[i].translate[1] - ctx->vp_state.viewport_states[i].scale[1],
            MAX2(ctx->vp_state.viewport_states[i].scale[0] * 2, 1),
            ctx->vp_state.viewport_states[i].scale[1] * 2,
            ctx->rast_state->base.clip_halfz ?
               ctx->vp_state.viewport_states[i].translate[2] :
               ctx->vp_state.viewport_states[i].translate[2] - ctx->vp_state.viewport_states[i].scale[2],
            ctx->vp_state.viewport_states[i].translate[2] + ctx->vp_state.viewport_states[i].scale[2]
         };
         viewports[i] = viewport;
      }
      if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE)
         VKCTX(CmdSetViewportWithCountEXT)(batch->state->cmdbuf, ctx->vp_state.num_viewports, viewports);
      else
         VKCTX(CmdSetViewport)(batch->state->cmdbuf, 0, ctx->vp_state.num_viewports, viewports);
   }
   if (BATCH_CHANGED || ctx->scissor_changed || ctx->vp_state_changed || (DYNAMIC_STATE == ZINK_NO_DYNAMIC_STATE && pipeline_changed)) {
      VkRect2D scissors[PIPE_MAX_VIEWPORTS];
      if (ctx->rast_state->base.scissor) {
         for (unsigned i = 0; i < ctx->vp_state.num_viewports; i++) {
            scissors[i].offset.x = ctx->vp_state.scissor_states[i].minx;
            scissors[i].offset.y = ctx->vp_state.scissor_states[i].miny;
            scissors[i].extent.width = ctx->vp_state.scissor_states[i].maxx - ctx->vp_state.scissor_states[i].minx;
            scissors[i].extent.height = ctx->vp_state.scissor_states[i].maxy - ctx->vp_state.scissor_states[i].miny;
         }
      } else {
         for (unsigned i = 0; i < ctx->vp_state.num_viewports; i++) {
            scissors[i].offset.x = 0;
            scissors[i].offset.y = 0;
            scissors[i].extent.width = ctx->fb_state.width;
            scissors[i].extent.height = ctx->fb_state.height;
         }
      }
      if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE)
         VKCTX(CmdSetScissorWithCountEXT)(batch->state->cmdbuf, ctx->vp_state.num_viewports, scissors);
      else
         VKCTX(CmdSetScissor)(batch->state->cmdbuf, 0, ctx->vp_state.num_viewports, scissors);
   }
   ctx->vp_state_changed = false;
   ctx->scissor_changed = false;

   if (BATCH_CHANGED || ctx->stencil_ref_changed) {
      VKCTX(CmdSetStencilReference)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_BIT,
                                    ctx->stencil_ref.ref_value[0]);
      VKCTX(CmdSetStencilReference)(batch->state->cmdbuf, VK_STENCIL_FACE_BACK_BIT,
                                    ctx->stencil_ref.ref_value[1]);
      ctx->stencil_ref_changed = false;
   }

   if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE && (BATCH_CHANGED || ctx->dsa_state_changed)) {
      VKCTX(CmdSetDepthBoundsTestEnableEXT)(batch->state->cmdbuf, dsa_state->hw_state.depth_bounds_test);
      if (dsa_state->hw_state.depth_bounds_test)
         VKCTX(CmdSetDepthBounds)(batch->state->cmdbuf,
                                  dsa_state->hw_state.min_depth_bounds,
                                  dsa_state->hw_state.max_depth_bounds);
      VKCTX(CmdSetDepthTestEnableEXT)(batch->state->cmdbuf, dsa_state->hw_state.depth_test);
      if (dsa_state->hw_state.depth_test)
         VKCTX(CmdSetDepthCompareOpEXT)(batch->state->cmdbuf, dsa_state->hw_state.depth_compare_op);
      VKCTX(CmdSetDepthWriteEnableEXT)(batch->state->cmdbuf, dsa_state->hw_state.depth_write);
      VKCTX(CmdSetStencilTestEnableEXT)(batch->state->cmdbuf, dsa_state->hw_state.stencil_test);
      if (dsa_state->hw_state.stencil_test) {
         VKCTX(CmdSetStencilOpEXT)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_BIT,
                                   dsa_state->hw_state.stencil_front.failOp,
                                   dsa_state->hw_state.stencil_front.passOp,
                                   dsa_state->hw_state.stencil_front.depthFailOp,
                                   dsa_state->hw_state.stencil_front.compareOp);
         VKCTX(CmdSetStencilOpEXT)(batch->state->cmdbuf, VK_STENCIL_FACE_BACK_BIT,
                                   dsa_state->hw_state.stencil_back.failOp,
                                   dsa_state->hw_state.stencil_back.passOp,
                                   dsa_state->hw_state.stencil_back.depthFailOp,
                                   dsa_state->hw_state.stencil_back.compareOp);
         if (dsa_state->base.stencil[1].enabled) {
            VKCTX(CmdSetStencilWriteMask)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, dsa_state->hw_state.stencil_front.writeMask);
            VKCTX(CmdSetStencilWriteMask)(batch->state->cmdbuf, VK_STENCIL_FACE_BACK_BIT, dsa_state->hw_state.stencil_back.writeMask);
            VKCTX(CmdSetStencilCompareMask)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, dsa_state->hw_state.stencil_front.compareMask);
            VKCTX(CmdSetStencilCompareMask)(batch->state->cmdbuf, VK_STENCIL_FACE_BACK_BIT, dsa_state->hw_state.stencil_back.compareMask);
         } else {
            VKCTX(CmdSetStencilWriteMask)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_AND_BACK, dsa_state->hw_state.stencil_front.writeMask);
            VKCTX(CmdSetStencilCompareMask)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_AND_BACK, dsa_state->hw_state.stencil_front.compareMask);
         }
      } else {
         VKCTX(CmdSetStencilWriteMask)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_AND_BACK, dsa_state->hw_state.stencil_front.writeMask);
         VKCTX(CmdSetStencilCompareMask)(batch->state->cmdbuf, VK_STENCIL_FACE_FRONT_AND_BACK, dsa_state->hw_state.stencil_front.compareMask);
      }
   }
   ctx->dsa_state_changed = false;

   bool rast_state_changed = ctx->rast_state_changed;
   if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE && (BATCH_CHANGED || rast_state_changed))
      VKCTX(CmdSetFrontFaceEXT)(batch->state->cmdbuf, ctx->gfx_pipeline_state.dyn_state1.front_face);
   if ((BATCH_CHANGED || rast_state_changed) &&
       screen->info.have_EXT_line_rasterization && rast_state->base.line_stipple_enable)
      VKCTX(CmdSetLineStippleEXT)(batch->state->cmdbuf, rast_state->base.line_stipple_factor, rast_state->base.line_stipple_pattern);

   if (BATCH_CHANGED || ctx->rast_state_changed || mode_changed) {
      enum pipe_prim_type reduced_prim = ctx->last_vertex_stage->reduced_prim;
      if (reduced_prim == PIPE_PRIM_MAX)
         reduced_prim = u_reduced_prim(mode);

      bool depth_bias = false;
      switch (reduced_prim) {
      case PIPE_PRIM_POINTS:
         depth_bias = rast_state->offset_point;
         break;

      case PIPE_PRIM_LINES:
         depth_bias = rast_state->offset_line;
         break;

      case PIPE_PRIM_TRIANGLES:
         depth_bias = rast_state->offset_tri;
         break;

      default:
         unreachable("unexpected reduced prim");
      }

      VKCTX(CmdSetLineWidth)(batch->state->cmdbuf, rast_state->line_width);
      if (depth_bias)
         VKCTX(CmdSetDepthBias)(batch->state->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
      else
         VKCTX(CmdSetDepthBias)(batch->state->cmdbuf, 0.0f, 0.0f, 0.0f);
   }
   ctx->rast_state_changed = false;

   if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE) {
      if (ctx->sample_locations_changed) {
         VkSampleLocationsInfoEXT loc;
         zink_init_vk_sample_locations(ctx, &loc);
         VKCTX(CmdSetSampleLocationsEXT)(batch->state->cmdbuf, &loc);
      }
      ctx->sample_locations_changed = false;
   }

   if ((BATCH_CHANGED || ctx->blend_state_changed) &&
       ctx->gfx_pipeline_state.blend_state->need_blend_constants) {
      VKCTX(CmdSetBlendConstants)(batch->state->cmdbuf, ctx->blend_constants);
   }
   ctx->blend_state_changed = false;

   if (DRAW_STATE)
      zink_bind_vertex_state(batch, ctx, vstate, partial_velem_mask);
   else if (BATCH_CHANGED || ctx->vertex_buffers_dirty)
      zink_bind_vertex_buffers<DYNAMIC_STATE>(batch, ctx);

   if (BATCH_CHANGED) {
      ctx->pipeline_changed[0] = false;
      zink_select_draw_vbo(ctx);
   }

   if (DYNAMIC_STATE != ZINK_NO_DYNAMIC_STATE) {
      update_gfx_pipeline<BATCH_CHANGED>(ctx, batch->state, mode);
      if (BATCH_CHANGED || mode_changed)
         VKCTX(CmdSetPrimitiveTopologyEXT)(batch->state->cmdbuf, zink_primitive_topology(mode));
   }

   if (DYNAMIC_STATE >= ZINK_DYNAMIC_STATE2 && (BATCH_CHANGED || ctx->primitive_restart != dinfo->primitive_restart)) {
      VKCTX(CmdSetPrimitiveRestartEnableEXT)(batch->state->cmdbuf, dinfo->primitive_restart);
      ctx->primitive_restart = dinfo->primitive_restart;
   }

   if (DYNAMIC_STATE >= ZINK_DYNAMIC_STATE2 && (BATCH_CHANGED || ctx->rasterizer_discard_changed)) {
      VKCTX(CmdSetRasterizerDiscardEnableEXT)(batch->state->cmdbuf, ctx->gfx_pipeline_state.dyn_state2.rasterizer_discard);
      ctx->rasterizer_discard_changed = false;
   }

   if (zink_program_has_descriptors(&ctx->curr_program->base))
      screen->descriptors_update(ctx, false);

   if (ctx->di.any_bindless_dirty && ctx->curr_program->base.dd->bindless)
      zink_descriptors_update_bindless(ctx);

   if (reads_basevertex) {
      unsigned draw_mode_is_indexed = index_size > 0;
      VKCTX(CmdPushConstants)(batch->state->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_VERTEX_BIT,
                              offsetof(struct zink_gfx_push_constant, draw_mode_is_indexed), sizeof(unsigned),
                              &draw_mode_is_indexed);
   }
   if (ctx->curr_program->shaders[PIPE_SHADER_TESS_CTRL] && ctx->curr_program->shaders[PIPE_SHADER_TESS_CTRL]->is_generated) {
      VKCTX(CmdPushConstants)(batch->state->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
                              offsetof(struct zink_gfx_push_constant, default_inner_level), sizeof(float) * 6,
                              &ctx->tess_levels[0]);
   }
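
   /* begin xfb with each target's saved byte count; a VK_NULL_HANDLE counter
    * buffer means no byte count is restored, so that target starts writing
    * from offset 0
    */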
   if (have_streamout) {
      for (unsigned i = 0; i < ctx->num_so_targets; i++) {
         struct zink_so_target *t = zink_so_target(ctx->so_targets[i]);
         counter_buffers[i] = VK_NULL_HANDLE;
         if (t) {
            struct zink_resource *res = zink_resource(t->counter_buffer);
            t->stride = ctx->last_vertex_stage->sinfo.so_info.stride[i] * sizeof(uint32_t);
            zink_batch_reference_resource_rw(batch, res, true);
            if (t->counter_buffer_valid) {
               counter_buffers[i] = res->obj->buffer;
               counter_buffer_offsets[i] = t->counter_buffer_offset;
            }
         }
      }
      VKCTX(CmdBeginTransformFeedbackEXT)(batch->state->cmdbuf, 0, ctx->num_so_targets, counter_buffers, counter_buffer_offsets);
   }

   bool needs_drawid = reads_drawid && zink_get_last_vertex_key(ctx)->push_drawid;
   work_count += num_draws;
   if (index_size > 0) {
      if (dindirect && dindirect->buffer) {
         assert(num_draws == 1);
         if (needs_drawid)
            update_drawid(ctx, drawid_offset);
         struct zink_resource *indirect = zink_resource(dindirect->buffer);
         zink_batch_reference_resource_rw(batch, indirect, false);
         if (dindirect->indirect_draw_count) {
             struct zink_resource *indirect_draw_count = zink_resource(dindirect->indirect_draw_count);
             zink_batch_reference_resource_rw(batch, indirect_draw_count, false);
             VKCTX(CmdDrawIndexedIndirectCount)(batch->state->cmdbuf, indirect->obj->buffer, dindirect->offset,
                                                indirect_draw_count->obj->buffer, dindirect->indirect_draw_count_offset,
                                                dindirect->draw_count, dindirect->stride);
         } else
            VKCTX(CmdDrawIndexedIndirect)(batch->state->cmdbuf, indirect->obj->buffer, dindirect->offset, dindirect->draw_count, dindirect->stride);
      } else {
         if (need_index_buffer_unref)
            draw_indexed_need_index_buffer_unref(ctx, dinfo, draws, num_draws, drawid_offset, needs_drawid);
         else
            draw_indexed<HAS_MULTIDRAW>(ctx, dinfo, draws, num_draws, drawid_offset, needs_drawid);
      }
   } else {
      if (so_target && screen->info.tf_props.transformFeedbackDraw) {
         /* GTF-GL46.gtf40.GL3Tests.transform_feedback2.transform_feedback2_api attempts a bogus xfb
          * draw using a streamout target that has no data;
          * to avoid hanging the gpu, reject any such draws
          */
         if (so_target->counter_buffer_valid) {
            if (needs_drawid)
               update_drawid(ctx, drawid_offset);
            zink_batch_reference_resource_rw(batch, zink_resource(so_target->base.buffer), false);
            zink_batch_reference_resource_rw(batch, zink_resource(so_target->counter_buffer), true);
            VKCTX(CmdDrawIndirectByteCountEXT)(batch->state->cmdbuf, dinfo->instance_count, dinfo->start_instance,
                                               zink_resource(so_target->counter_buffer)->obj->buffer, so_target->counter_buffer_offset, 0,
                                               MIN2(so_target->stride, screen->info.tf_props.maxTransformFeedbackBufferDataStride));
         }
      } else if (dindirect && dindirect->buffer) {
         assert(num_draws == 1);
         if (needs_drawid)
            update_drawid(ctx, drawid_offset);
         struct zink_resource *indirect = zink_resource(dindirect->buffer);
         zink_batch_reference_resource_rw(batch, indirect, false);
         if (dindirect->indirect_draw_count) {
             struct zink_resource *indirect_draw_count = zink_resource(dindirect->indirect_draw_count);
             zink_batch_reference_resource_rw(batch, indirect_draw_count, false);
             VKCTX(CmdDrawIndirectCount)(batch->state->cmdbuf, indirect->obj->buffer, dindirect->offset,
                                         indirect_draw_count->obj->buffer, dindirect->indirect_draw_count_offset,
                                         dindirect->draw_count, dindirect->stride);
         } else
            VKCTX(CmdDrawIndirect)(batch->state->cmdbuf, indirect->obj->buffer, dindirect->offset, dindirect->draw_count, dindirect->stride);
      } else {
         draw<HAS_MULTIDRAW>(ctx, dinfo, draws, num_draws, drawid_offset, needs_drawid);
      }
   }

   if (have_streamout) {
      for (unsigned i = 0; i < ctx->num_so_targets; i++) {
         struct zink_so_target *t = zink_so_target(ctx->so_targets[i]);
         if (t) {
            counter_buffers[i] = zink_resource(t->counter_buffer)->obj->buffer;
            counter_buffer_offsets[i] = t->counter_buffer_offset;
            t->counter_buffer_valid = true;
         }
      }
      VKCTX(CmdEndTransformFeedbackEXT)(batch->state->cmdbuf, 0, ctx->num_so_targets, counter_buffers, counter_buffer_offsets);
   }
   batch->has_work = true;
   batch->last_was_compute = false;
   ctx->batch.work_count = work_count;
   /* flush if there's >30k draws */
   if (unlikely(work_count >= 30000) || ctx->oom_flush)
      pctx->flush(pctx, NULL, 0);
}
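
/* plain draw_vbo entrypoint: forwards to zink_draw with no vertex state */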
template <zink_multidraw HAS_MULTIDRAW, zink_dynamic_state DYNAMIC_STATE, bool BATCH_CHANGED>
static void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   zink_draw<HAS_MULTIDRAW, DYNAMIC_STATE, BATCH_CHANGED, false>(pctx, info, drawid_offset, indirect, draws, num_draws, NULL, 0);
}
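
/* vertex-state fast path (as used by gallium display lists): draws a cached
 * pipe_vertex_state with 32-bit indices, temporarily swapping in the vertex
 * state's element layout
 */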
template <zink_multidraw HAS_MULTIDRAW, zink_dynamic_state DYNAMIC_STATE, bool BATCH_CHANGED>
static void
zink_draw_vertex_state(struct pipe_context *pctx,
                       struct pipe_vertex_state *vstate,
                       uint32_t partial_velem_mask,
                       struct pipe_draw_vertex_state_info info,
                       const struct pipe_draw_start_count_bias *draws,
                       unsigned num_draws)
{
   struct pipe_draw_info dinfo = {};

   dinfo.mode = info.mode;
   dinfo.index_size = 4;
   dinfo.instance_count = 1;
   dinfo.index.resource = vstate->input.indexbuf;
   struct zink_context *ctx = zink_context(pctx);
   struct zink_resource *res = zink_resource(vstate->input.vbuffer.buffer.resource);
   zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
   struct zink_vertex_elements_hw_state *hw_state = ctx->gfx_pipeline_state.element_state;
   ctx->gfx_pipeline_state.element_state = &((struct zink_vertex_state*)vstate)->velems.hw_state;

   zink_draw<HAS_MULTIDRAW, DYNAMIC_STATE, BATCH_CHANGED, true>(pctx, &dinfo, 0, NULL, draws, num_draws, vstate, partial_velem_mask);
   ctx->gfx_pipeline_state.element_state = hw_state;

   if (info.take_vertex_state_ownership)
      pipe_vertex_state_reference(&vstate, NULL);
}
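
/* compute dispatch entrypoint, template-specialized on whether the batch
 * changed since the last dispatch
 */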
template <bool BATCH_CHANGED>
static void
zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_batch *batch = &ctx->batch;

   if (ctx->render_condition_active)
      zink_start_conditional_render(ctx);

   if (info->indirect) {
      /*
         VK_ACCESS_INDIRECT_COMMAND_READ_BIT specifies read access to indirect command data read as
         part of an indirect build, trace, drawing or dispatching command. Such access occurs in the
         VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT pipeline stage.

         - Chapter 7. Synchronization and Cache Control
       */
      check_buffer_barrier(ctx, info->indirect, VK_ACCESS_INDIRECT_COMMAND_READ_BIT, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
   }

   update_barriers(ctx, true, NULL, info->indirect, NULL);
   if (ctx->memory_barrier)
      zink_flush_memory_barrier(ctx, true);

   if (zink_program_has_descriptors(&ctx->curr_compute->base))
      screen->descriptors_update(ctx, true);
   if (ctx->di.any_bindless_dirty && ctx->curr_compute->base.dd->bindless)
      zink_descriptors_update_bindless(ctx);

   zink_program_update_compute_pipeline_state(ctx, ctx->curr_compute, info->block);
   VkPipeline prev_pipeline = ctx->compute_pipeline_state.pipeline;

   if (BATCH_CHANGED) {
      zink_update_descriptor_refs(ctx, true);
      zink_batch_reference_program(&ctx->batch, &ctx->curr_compute->base);
   }
   if (ctx->dirty_shader_stages & BITFIELD_BIT(PIPE_SHADER_COMPUTE)) {
      /* update inlinable constants */
      zink_update_compute_program(ctx);
      ctx->dirty_shader_stages &= ~BITFIELD_BIT(PIPE_SHADER_COMPUTE);
   }

   VkPipeline pipeline = zink_get_compute_pipeline(screen, ctx->curr_compute,
                                                   &ctx->compute_pipeline_state);

   if (prev_pipeline != pipeline || BATCH_CHANGED)
      VKCTX(CmdBindPipeline)(batch->state->cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
   if (BATCH_CHANGED) {
      ctx->pipeline_changed[1] = false;
      zink_select_launch_grid(ctx);
   }

   if (BITSET_TEST(ctx->compute_stage->nir->info.system_values_read, SYSTEM_VALUE_WORK_DIM))
      VKCTX(CmdPushConstants)(batch->state->cmdbuf, ctx->curr_compute->base.layout, VK_SHADER_STAGE_COMPUTE_BIT,
                              offsetof(struct zink_cs_push_constant, work_dim), sizeof(uint32_t),
                              &info->work_dim);

   batch->work_count++;
   zink_batch_no_rp(ctx);
   if (info->indirect) {
      VKCTX(CmdDispatchIndirect)(batch->state->cmdbuf, zink_resource(info->indirect)->obj->buffer, info->indirect_offset);
      zink_batch_reference_resource_rw(batch, zink_resource(info->indirect), false);
   } else
      VKCTX(CmdDispatch)(batch->state->cmdbuf, info->grid[0], info->grid[1], info->grid[2]);
   batch->has_work = true;
   batch->last_was_compute = true;
   /* flush if there's >30k computes */
   if (unlikely(ctx->batch.work_count >= 30000) || ctx->oom_flush)
      pctx->flush(pctx, NULL, 0);
}

template <zink_multidraw HAS_MULTIDRAW, zink_dynamic_state DYNAMIC_STATE, bool BATCH_CHANGED>
static void
init_batch_changed_functions(struct zink_context *ctx, pipe_draw_vbo_func draw_vbo_array[2][4][2], pipe_draw_vertex_state_func draw_state_array[2][4][2])
{
   draw_vbo_array[HAS_MULTIDRAW][DYNAMIC_STATE][BATCH_CHANGED] = zink_draw_vbo<HAS_MULTIDRAW, DYNAMIC_STATE, BATCH_CHANGED>;
   draw_state_array[HAS_MULTIDRAW][DYNAMIC_STATE][BATCH_CHANGED] = zink_draw_vertex_state<HAS_MULTIDRAW, DYNAMIC_STATE, BATCH_CHANGED>;
}

template <zink_multidraw HAS_MULTIDRAW, zink_dynamic_state DYNAMIC_STATE>
static void
init_dynamic_state_functions(struct zink_context *ctx, pipe_draw_vbo_func draw_vbo_array[2][4][2], pipe_draw_vertex_state_func draw_state_array[2][4][2])
{
   init_batch_changed_functions<HAS_MULTIDRAW, DYNAMIC_STATE, false>(ctx, draw_vbo_array, draw_state_array);
   init_batch_changed_functions<HAS_MULTIDRAW, DYNAMIC_STATE, true>(ctx, draw_vbo_array, draw_state_array);
}

template <zink_multidraw HAS_MULTIDRAW>
static void
init_multidraw_functions(struct zink_context *ctx, pipe_draw_vbo_func draw_vbo_array[2][4][2], pipe_draw_vertex_state_func draw_state_array[2][4][2])
{
   init_dynamic_state_functions<HAS_MULTIDRAW, ZINK_NO_DYNAMIC_STATE>(ctx, draw_vbo_array, draw_state_array);
   init_dynamic_state_functions<HAS_MULTIDRAW, ZINK_DYNAMIC_STATE>(ctx, draw_vbo_array, draw_state_array);
   init_dynamic_state_functions<HAS_MULTIDRAW, ZINK_DYNAMIC_STATE2>(ctx, draw_vbo_array, draw_state_array);
   init_dynamic_state_functions<HAS_MULTIDRAW, ZINK_DYNAMIC_VERTEX_INPUT>(ctx, draw_vbo_array, draw_state_array);
}

static void
init_all_draw_functions(struct zink_context *ctx, pipe_draw_vbo_func draw_vbo_array[2][4][2], pipe_draw_vertex_state_func draw_state_array[2][4][2])
{
   init_multidraw_functions<ZINK_NO_MULTIDRAW>(ctx, draw_vbo_array, draw_state_array);
   init_multidraw_functions<ZINK_MULTIDRAW>(ctx, draw_vbo_array, draw_state_array);
}

template <bool BATCH_CHANGED>
static void
init_grid_batch_changed_functions(struct zink_context *ctx)
{
   ctx->launch_grid[BATCH_CHANGED] = zink_launch_grid<BATCH_CHANGED>;
}

static void
init_all_grid_functions(struct zink_context *ctx)
{
   init_grid_batch_changed_functions<false>(ctx);
   init_grid_batch_changed_functions<true>(ctx);
}

static void
zink_invalid_draw_vbo(struct pipe_context *pipe,
                      const struct pipe_draw_info *dinfo,
                      unsigned drawid_offset,
                      const struct pipe_draw_indirect_info *dindirect,
                      const struct pipe_draw_start_count_bias *draws,
                      unsigned num_draws)
{
   unreachable("vertex shader not bound");
}

static void
zink_invalid_draw_vertex_state(struct pipe_context *pipe,
                               struct pipe_vertex_state *vstate,
                               uint32_t partial_velem_mask,
                               struct pipe_draw_vertex_state_info info,
                               const struct pipe_draw_start_count_bias *draws,
                               unsigned num_draws)
{
   unreachable("vertex shader not bound");
}

static void
zink_invalid_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
   unreachable("compute shader not bound");
}
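
/* the program-cache index is ctx->shader_stages >> 2, i.e. a 3-bit mask of
 * the optional stages (bit 0 = geometry, bit 1 = tess ctrl, bit 2 = tess
 * eval), so each STAGE_MASK specialization only hashes/compares the stages
 * that can actually be present for that combination
 */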
template <unsigned STAGE_MASK>
static uint32_t
hash_gfx_program(const void *key)
{
   const struct zink_shader **shaders = (const struct zink_shader**)key;
   uint32_t base_hash = shaders[PIPE_SHADER_VERTEX]->hash ^ shaders[PIPE_SHADER_FRAGMENT]->hash;
   if (STAGE_MASK == 0) //VS+FS
      return base_hash;
   if (STAGE_MASK == 1) //VS+GS+FS
      return base_hash ^ shaders[PIPE_SHADER_GEOMETRY]->hash;
   /* VS+TCS+FS isn't a thing */
   /* VS+TCS+GS+FS isn't a thing */
   if (STAGE_MASK == 4) //VS+TES+FS
      return base_hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
   if (STAGE_MASK == 5) //VS+TES+GS+FS
      return base_hash ^ shaders[PIPE_SHADER_GEOMETRY]->hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
   if (STAGE_MASK == 6) //VS+TCS+TES+FS
      return base_hash ^ shaders[PIPE_SHADER_TESS_CTRL]->hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;

   /* all stages */
   return base_hash ^ shaders[PIPE_SHADER_GEOMETRY]->hash ^ shaders[PIPE_SHADER_TESS_CTRL]->hash ^ shaders[PIPE_SHADER_TESS_EVAL]->hash;
}

template <unsigned STAGE_MASK>
static bool
equals_gfx_program(const void *a, const void *b)
{
   const void **sa = (const void**)a;
   const void **sb = (const void**)b;
   if (STAGE_MASK == 0) //VS+FS
      return !memcmp(a, b, sizeof(void*) * 2);
   if (STAGE_MASK == 1) //VS+GS+FS
      return !memcmp(a, b, sizeof(void*) * 3);
   /* VS+TCS+FS isn't a thing */
   /* VS+TCS+GS+FS isn't a thing */
   if (STAGE_MASK == 4) //VS+TES+FS
      return sa[PIPE_SHADER_TESS_EVAL] == sb[PIPE_SHADER_TESS_EVAL] && !memcmp(a, b, sizeof(void*) * 2);
   if (STAGE_MASK == 5) //VS+TES+GS+FS
      return sa[PIPE_SHADER_TESS_EVAL] == sb[PIPE_SHADER_TESS_EVAL] && !memcmp(a, b, sizeof(void*) * 3);
   if (STAGE_MASK == 6) //VS+TCS+TES+FS
      return !memcmp(&sa[PIPE_SHADER_TESS_CTRL], &sb[PIPE_SHADER_TESS_CTRL], sizeof(void*) * 2) &&
             !memcmp(a, b, sizeof(void*) * 2);

   /* all stages */
   return !memcmp(a, b, sizeof(void*) * ZINK_SHADER_COUNT);
}

extern "C"
void
zink_init_draw_functions(struct zink_context *ctx, struct zink_screen *screen)
{
   pipe_draw_vbo_func draw_vbo_array[2][4]  //multidraw, zink_dynamic_state
                                    [2];    //batch changed
   pipe_draw_vertex_state_func draw_state_array[2][4]  //multidraw, zink_dynamic_state
                                               [2];    //batch changed
   zink_dynamic_state dynamic;
   if (screen->info.have_EXT_extended_dynamic_state) {
      if (screen->info.have_EXT_extended_dynamic_state2) {
         if (screen->info.have_EXT_vertex_input_dynamic_state)
            dynamic = ZINK_DYNAMIC_VERTEX_INPUT;
         else
            dynamic = ZINK_DYNAMIC_STATE2;
      } else {
         dynamic = ZINK_DYNAMIC_STATE;
      }
   } else {
      dynamic = ZINK_NO_DYNAMIC_STATE;
   }
   init_all_draw_functions(ctx, draw_vbo_array, draw_state_array);
   memcpy(ctx->draw_vbo, &draw_vbo_array[screen->info.have_EXT_multi_draw]
                                        [dynamic],
                                        sizeof(ctx->draw_vbo));
   memcpy(ctx->draw_state, &draw_state_array[screen->info.have_EXT_multi_draw]
                                            [dynamic],
                                            sizeof(ctx->draw_state));

   /* Bind a fake draw_vbo, so that draw_vbo isn't NULL, which would skip
    * initialization of callbacks in upper layers (such as u_threaded_context).
    */
   ctx->base.draw_vbo = zink_invalid_draw_vbo;
   ctx->base.draw_vertex_state = zink_invalid_draw_vertex_state;

   _mesa_hash_table_init(&ctx->program_cache[0], ctx, hash_gfx_program<0>, equals_gfx_program<0>);
   _mesa_hash_table_init(&ctx->program_cache[1], ctx, hash_gfx_program<1>, equals_gfx_program<1>);
   _mesa_hash_table_init(&ctx->program_cache[2], ctx, hash_gfx_program<2>, equals_gfx_program<2>);
   _mesa_hash_table_init(&ctx->program_cache[3], ctx, hash_gfx_program<3>, equals_gfx_program<3>);
   _mesa_hash_table_init(&ctx->program_cache[4], ctx, hash_gfx_program<4>, equals_gfx_program<4>);
   _mesa_hash_table_init(&ctx->program_cache[5], ctx, hash_gfx_program<5>, equals_gfx_program<5>);
   _mesa_hash_table_init(&ctx->program_cache[6], ctx, hash_gfx_program<6>, equals_gfx_program<6>);
   _mesa_hash_table_init(&ctx->program_cache[7], ctx, hash_gfx_program<7>, equals_gfx_program<7>);
}

void
zink_init_grid_functions(struct zink_context *ctx)
{
   init_all_grid_functions(ctx);
   /* Bind a fake launch_grid, so that launch_grid isn't NULL, which would skip
    * initialization of callbacks in upper layers (such as u_threaded_context).
    */
   ctx->base.launch_grid = zink_invalid_launch_grid;
}