/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27
#include "gl_nir_linker.h"
28
#include "linker_util.h"
29
#include "main/shader_types.h"
30
#include "main/consts_exts.h"
31
#include "main/shaderobj.h"
32
#include "ir_uniform.h" /* for gl_uniform_storage */
/**
 * This file includes general linking methods, using NIR, instead of IR as
 * the counter-part glsl/linker.cpp
 */
40
gl_nir_opts(nir_shader *nir)
47
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
49
/* Linking deals with unused inputs/outputs, but here we can remove
50
* things local to the shader in the hopes that we can cleanup other
51
* things. This pass will also remove variables with only stores, so we
52
* might be able to make progress after it.
54
NIR_PASS(progress, nir, nir_remove_dead_variables,
55
nir_var_function_temp | nir_var_shader_temp |
59
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
60
NIR_PASS(progress, nir, nir_opt_dead_write_vars);
62
if (nir->options->lower_to_scalar) {
63
NIR_PASS_V(nir, nir_lower_alu_to_scalar,
64
nir->options->lower_to_scalar_filter, NULL);
65
NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);
68
NIR_PASS_V(nir, nir_lower_alu);
69
NIR_PASS_V(nir, nir_lower_pack);
70
NIR_PASS(progress, nir, nir_copy_prop);
71
NIR_PASS(progress, nir, nir_opt_remove_phis);
72
NIR_PASS(progress, nir, nir_opt_dce);
73
if (nir_opt_trivial_continues(nir)) {
75
NIR_PASS(progress, nir, nir_copy_prop);
76
NIR_PASS(progress, nir, nir_opt_dce);
78
NIR_PASS(progress, nir, nir_opt_if, false);
79
NIR_PASS(progress, nir, nir_opt_dead_cf);
80
NIR_PASS(progress, nir, nir_opt_cse);
81
NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
83
NIR_PASS(progress, nir, nir_opt_phi_precision);
84
NIR_PASS(progress, nir, nir_opt_algebraic);
85
NIR_PASS(progress, nir, nir_opt_constant_folding);
87
if (!nir->info.flrp_lowered) {
89
(nir->options->lower_flrp16 ? 16 : 0) |
90
(nir->options->lower_flrp32 ? 32 : 0) |
91
(nir->options->lower_flrp64 ? 64 : 0);
94
bool lower_flrp_progress = false;
96
NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
98
false /* always_precise */);
99
if (lower_flrp_progress) {
100
NIR_PASS(progress, nir,
101
nir_opt_constant_folding);
106
/* Nothing should rematerialize any flrps, so we only need to do this
109
nir->info.flrp_lowered = true;
112
NIR_PASS(progress, nir, nir_opt_undef);
113
NIR_PASS(progress, nir, nir_opt_conditional_discard);
114
if (nir->options->max_unroll_iterations) {
115
NIR_PASS(progress, nir, nir_opt_loop_unroll);
121
can_remove_uniform(nir_variable *var, UNUSED void *data)
123
/* Section 2.11.6 (Uniform Variables) of the OpenGL ES 3.0.3 spec
126
* "All members of a named uniform block declared with a shared or
127
* std140 layout qualifier are considered active, even if they are not
128
* referenced in any shader in the program. The uniform block itself is
129
* also considered active, even if no member of the block is
132
* Although the spec doesn't state it std430 layouts are expect to behave
133
* the same way. If the variable is in a uniform block with one of those
134
* layouts, do not eliminate it.
136
if (nir_variable_is_in_block(var) &&
137
(glsl_get_ifc_packing(var->interface_type) !=
138
GLSL_INTERFACE_PACKING_PACKED))
141
if (glsl_get_base_type(glsl_without_array(var->type)) ==
142
GLSL_TYPE_SUBROUTINE)
145
/* Uniform initializers could get used by another stage */
146
if (var->constant_initializer)
/**
 * Built-in / reserved GL variables names start with "gl_"
 *
 * Returns false for NULL; reads at most three characters, relying on
 * short-circuit evaluation so a shorter string is never over-read.
 */
static bool
is_gl_identifier(const char *s)
{
   return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
}
162
inout_has_same_location(const nir_variable *var, unsigned stage)
164
if (!var->data.patch &&
165
((var->data.mode == nir_var_shader_out &&
166
stage == MESA_SHADER_TESS_CTRL) ||
167
(var->data.mode == nir_var_shader_in &&
168
(stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
169
stage == MESA_SHADER_GEOMETRY))))
176
* Create gl_shader_variable from nir_variable.
178
static struct gl_shader_variable *
179
create_shader_variable(struct gl_shader_program *shProg,
180
const nir_variable *in,
181
const char *name, const struct glsl_type *type,
182
const struct glsl_type *interface_type,
183
bool use_implicit_location, int location,
184
const struct glsl_type *outermost_struct_type)
186
/* Allocate zero-initialized memory to ensure that bitfield padding
189
struct gl_shader_variable *out = rzalloc(shProg,
190
struct gl_shader_variable);
194
/* Since gl_VertexID may be lowered to gl_VertexIDMESA, but applications
195
* expect to see gl_VertexID in the program resource list. Pretend.
197
if (in->data.mode == nir_var_system_value &&
198
in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
199
out->name.string = ralloc_strdup(shProg, "gl_VertexID");
200
} else if ((in->data.mode == nir_var_shader_out &&
201
in->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) ||
202
(in->data.mode == nir_var_system_value &&
203
in->data.location == SYSTEM_VALUE_TESS_LEVEL_OUTER)) {
204
out->name.string = ralloc_strdup(shProg, "gl_TessLevelOuter");
205
type = glsl_array_type(glsl_float_type(), 4, 0);
206
} else if ((in->data.mode == nir_var_shader_out &&
207
in->data.location == VARYING_SLOT_TESS_LEVEL_INNER) ||
208
(in->data.mode == nir_var_system_value &&
209
in->data.location == SYSTEM_VALUE_TESS_LEVEL_INNER)) {
210
out->name.string = ralloc_strdup(shProg, "gl_TessLevelInner");
211
type = glsl_array_type(glsl_float_type(), 2, 0);
213
out->name.string = ralloc_strdup(shProg, name);
216
resource_name_updated(&out->name);
218
if (!out->name.string)
221
/* The ARB_program_interface_query spec says:
223
* "Not all active variables are assigned valid locations; the
224
* following variables will have an effective location of -1:
226
* * uniforms declared as atomic counters;
228
* * members of a uniform block;
230
* * built-in inputs, outputs, and uniforms (starting with "gl_"); and
232
* * inputs or outputs not declared with a "location" layout
233
* qualifier, except for vertex shader inputs and fragment shader
236
if (glsl_get_base_type(in->type) == GLSL_TYPE_ATOMIC_UINT ||
237
is_gl_identifier(in->name) ||
238
!(in->data.explicit_location || use_implicit_location)) {
241
out->location = location;
245
out->outermost_struct_type = outermost_struct_type;
246
out->interface_type = interface_type;
247
out->component = in->data.location_frac;
248
out->index = in->data.index;
249
out->patch = in->data.patch;
250
out->mode = in->data.mode;
251
out->interpolation = in->data.interpolation;
252
out->precision = in->data.precision;
253
out->explicit_location = in->data.explicit_location;
259
add_shader_variable(const struct gl_constants *consts,
260
struct gl_shader_program *shProg,
261
struct set *resource_set,
263
GLenum programInterface, nir_variable *var,
264
const char *name, const struct glsl_type *type,
265
bool use_implicit_location, int location,
266
bool inouts_share_location,
267
const struct glsl_type *outermost_struct_type)
269
const struct glsl_type *interface_type = var->interface_type;
271
if (outermost_struct_type == NULL) {
272
if (var->data.from_named_ifc_block) {
273
const char *interface_name = glsl_get_type_name(interface_type);
275
if (glsl_type_is_array(interface_type)) {
276
/* Issue #16 of the ARB_program_interface_query spec says:
278
* "* If a variable is a member of an interface block without an
279
* instance name, it is enumerated using just the variable name.
281
* * If a variable is a member of an interface block with an
282
* instance name, it is enumerated as "BlockName.Member", where
283
* "BlockName" is the name of the interface block (not the
284
* instance name) and "Member" is the name of the variable."
286
* In particular, it indicates that it should be "BlockName",
287
* not "BlockName[array length]". The conformance suite and
288
* dEQP both require this behavior.
290
* Here, we unwrap the extra array level added by named interface
291
* block array lowering so we have the correct variable type. We
292
* also unwrap the interface type when constructing the name.
294
* We leave interface_type the same so that ES 3.x SSO pipeline
295
* validation can enforce the rules requiring array length to
296
* match on interface blocks.
298
type = glsl_get_array_element(type);
301
glsl_get_type_name(glsl_get_array_element(interface_type));
304
name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
308
switch (glsl_get_base_type(type)) {
309
case GLSL_TYPE_STRUCT: {
310
/* The ARB_program_interface_query spec says:
312
* "For an active variable declared as a structure, a separate entry
313
* will be generated for each active structure member. The name of
314
* each entry is formed by concatenating the name of the structure,
315
* the "." character, and the name of the structure member. If a
316
* structure member to enumerate is itself a structure or array,
317
* these enumeration rules are applied recursively."
319
if (outermost_struct_type == NULL)
320
outermost_struct_type = type;
322
unsigned field_location = location;
323
for (unsigned i = 0; i < glsl_get_length(type); i++) {
324
const struct glsl_type *field_type = glsl_get_struct_field(type, i);
325
const struct glsl_struct_field *field =
326
glsl_get_struct_field_data(type, i);
328
char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
329
if (!add_shader_variable(consts, shProg, resource_set,
330
stage_mask, programInterface,
331
var, field_name, field_type,
332
use_implicit_location, field_location,
333
false, outermost_struct_type))
336
field_location += glsl_count_attribute_slots(field_type, false);
341
case GLSL_TYPE_ARRAY: {
342
/* The ARB_program_interface_query spec says:
344
* "For an active variable declared as an array of basic types, a
345
* single entry will be generated, with its name string formed by
346
* concatenating the name of the array and the string "[0]"."
348
* "For an active variable declared as an array of an aggregate data
349
* type (structures or arrays), a separate entry will be generated
350
* for each active array element, unless noted immediately below.
351
* The name of each entry is formed by concatenating the name of
352
* the array, the "[" character, an integer identifying the element
353
* number, and the "]" character. These enumeration rules are
354
* applied recursively, treating each enumerated array element as a
355
* separate active variable."
357
const struct glsl_type *array_type = glsl_get_array_element(type);
358
if (glsl_get_base_type(array_type) == GLSL_TYPE_STRUCT ||
359
glsl_get_base_type(array_type) == GLSL_TYPE_ARRAY) {
360
unsigned elem_location = location;
361
unsigned stride = inouts_share_location ? 0 :
362
glsl_count_attribute_slots(array_type, false);
363
for (unsigned i = 0; i < glsl_get_length(type); i++) {
364
char *elem = ralloc_asprintf(shProg, "%s[%d]", name, i);
365
if (!add_shader_variable(consts, shProg, resource_set,
366
stage_mask, programInterface,
367
var, elem, array_type,
368
use_implicit_location, elem_location,
369
false, outermost_struct_type))
371
elem_location += stride;
379
/* The ARB_program_interface_query spec says:
381
* "For an active variable declared as a single instance of a basic
382
* type, a single entry will be generated, using the variable name
383
* from the shader source."
385
struct gl_shader_variable *sha_v =
386
create_shader_variable(shProg, var, name, type, interface_type,
387
use_implicit_location, location,
388
outermost_struct_type);
392
return link_util_add_program_resource(shProg, resource_set,
393
programInterface, sha_v, stage_mask);
399
add_vars_with_modes(const struct gl_constants *consts,
400
struct gl_shader_program *prog, struct set *resource_set,
401
nir_shader *nir, nir_variable_mode modes,
402
unsigned stage, GLenum programInterface)
404
nir_foreach_variable_with_modes(var, nir, modes) {
405
if (var->data.how_declared == nir_var_hidden)
409
switch(var->data.mode) {
410
case nir_var_system_value:
411
case nir_var_shader_in:
412
if (programInterface != GL_PROGRAM_INPUT)
414
loc_bias = (stage == MESA_SHADER_VERTEX) ? VERT_ATTRIB_GENERIC0
417
case nir_var_shader_out:
418
if (programInterface != GL_PROGRAM_OUTPUT)
420
loc_bias = (stage == MESA_SHADER_FRAGMENT) ? FRAG_RESULT_DATA0
428
loc_bias = VARYING_SLOT_PATCH0;
430
if (prog->data->spirv) {
431
struct gl_shader_variable *sh_var =
432
rzalloc(prog, struct gl_shader_variable);
434
/* In the ARB_gl_spirv spec, names are considered optional debug info, so
435
* the linker needs to work without them. Returning them is optional.
436
* For simplicity, we ignore names.
438
sh_var->name.string = NULL;
439
resource_name_updated(&sh_var->name);
440
sh_var->type = var->type;
441
sh_var->location = var->data.location - loc_bias;
442
sh_var->index = var->data.index;
444
if (!link_util_add_program_resource(prog, resource_set,
446
sh_var, 1 << stage)) {
450
/* Skip packed varyings, packed varyings are handled separately
451
* by add_packed_varyings in the GLSL IR
452
* build_program_resource_list() call.
453
* TODO: handle packed varyings here instead. We likely want a NIR
454
* based packing pass first.
456
if (strncmp(var->name, "packed:", 7) == 0)
459
const bool vs_input_or_fs_output =
460
(stage == MESA_SHADER_VERTEX &&
461
var->data.mode == nir_var_shader_in) ||
462
(stage == MESA_SHADER_FRAGMENT &&
463
var->data.mode == nir_var_shader_out);
465
if (!add_shader_variable(consts, prog, resource_set,
466
1 << stage, programInterface,
467
var, var->name, var->type,
468
vs_input_or_fs_output,
469
var->data.location - loc_bias,
470
inout_has_same_location(var, stage),
480
add_interface_variables(const struct gl_constants *consts,
481
struct gl_shader_program *prog,
482
struct set *resource_set,
483
unsigned stage, GLenum programInterface)
485
struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
489
nir_shader *nir = sh->Program->nir;
492
switch (programInterface) {
493
case GL_PROGRAM_INPUT: {
494
return add_vars_with_modes(consts, prog, resource_set,
495
nir, nir_var_shader_in | nir_var_system_value,
496
stage, programInterface);
498
case GL_PROGRAM_OUTPUT:
499
return add_vars_with_modes(consts, prog, resource_set,
500
nir, nir_var_shader_out,
501
stage, programInterface);
503
assert("!Should not get here");
510
/* TODO: as we keep adding features, this method is becoming more and more
511
* similar to its GLSL counterpart at linker.cpp. Eventually it would be good
512
* to check if they could be refactored, and reduce code duplication somehow
515
nir_build_program_resource_list(const struct gl_constants *consts,
516
struct gl_shader_program *prog,
517
bool rebuild_resourse_list)
519
/* Rebuild resource list. */
520
if (prog->data->ProgramResourceList && rebuild_resourse_list) {
521
ralloc_free(prog->data->ProgramResourceList);
522
prog->data->ProgramResourceList = NULL;
523
prog->data->NumProgramResourceList = 0;
526
int input_stage = MESA_SHADER_STAGES, output_stage = 0;
528
/* Determine first input and final output stage. These are used to
529
* detect which variables should be enumerated in the resource list
530
* for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
532
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
533
if (!prog->_LinkedShaders[i])
535
if (input_stage == MESA_SHADER_STAGES)
540
/* Empty shader, no resources. */
541
if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
544
struct set *resource_set = _mesa_pointer_set_create(NULL);
546
/* Add inputs and outputs to the resource list. */
547
if (!add_interface_variables(consts, prog, resource_set, input_stage,
552
if (!add_interface_variables(consts, prog, resource_set, output_stage,
553
GL_PROGRAM_OUTPUT)) {
557
/* Add transform feedback varyings and buffers. */
558
if (prog->last_vert_prog) {
559
struct gl_transform_feedback_info *linked_xfb =
560
prog->last_vert_prog->sh.LinkedTransformFeedback;
563
if (linked_xfb->NumVarying > 0) {
564
for (int i = 0; i < linked_xfb->NumVarying; i++) {
565
if (!link_util_add_program_resource(prog, resource_set,
566
GL_TRANSFORM_FEEDBACK_VARYING,
567
&linked_xfb->Varyings[i], 0))
573
for (unsigned i = 0; i < consts->MaxTransformFeedbackBuffers; i++) {
574
if ((linked_xfb->ActiveBuffers >> i) & 1) {
575
linked_xfb->Buffers[i].Binding = i;
576
if (!link_util_add_program_resource(prog, resource_set,
577
GL_TRANSFORM_FEEDBACK_BUFFER,
578
&linked_xfb->Buffers[i], 0))
586
* Here, it is expected that nir_link_uniforms() has already been
587
* called, so that UniformStorage table is already available.
589
int top_level_array_base_offset = -1;
590
int top_level_array_size_in_bytes = -1;
591
int second_element_offset = -1;
592
int block_index = -1;
593
for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
594
struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
596
if (uniform->hidden) {
597
for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
598
if (!uniform->opaque[j].active ||
599
glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
603
_mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
604
/* add shader subroutines */
605
if (!link_util_add_program_resource(prog, resource_set,
613
if (!link_util_should_add_buffer_variable(prog, uniform,
614
top_level_array_base_offset,
615
top_level_array_size_in_bytes,
616
second_element_offset, block_index))
620
if (prog->data->UniformStorage[i].offset >= second_element_offset) {
621
top_level_array_base_offset =
622
prog->data->UniformStorage[i].offset;
624
top_level_array_size_in_bytes =
625
prog->data->UniformStorage[i].top_level_array_size *
626
prog->data->UniformStorage[i].top_level_array_stride;
628
/* Set or reset the second element offset. For non arrays this
631
second_element_offset = top_level_array_size_in_bytes ?
632
top_level_array_base_offset +
633
prog->data->UniformStorage[i].top_level_array_stride : -1;
635
block_index = uniform->block_index;
638
GLenum interface = uniform->is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
639
if (!link_util_add_program_resource(prog, resource_set, interface, uniform,
640
uniform->active_shader_mask)) {
646
for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
647
if (!link_util_add_program_resource(prog, resource_set, GL_UNIFORM_BLOCK,
648
&prog->data->UniformBlocks[i],
649
prog->data->UniformBlocks[i].stageref))
653
for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
654
if (!link_util_add_program_resource(prog, resource_set, GL_SHADER_STORAGE_BLOCK,
655
&prog->data->ShaderStorageBlocks[i],
656
prog->data->ShaderStorageBlocks[i].stageref))
660
/* Add atomic counter buffers. */
661
for (unsigned i = 0; i < prog->data->NumAtomicBuffers; i++) {
662
if (!link_util_add_program_resource(prog, resource_set, GL_ATOMIC_COUNTER_BUFFER,
663
&prog->data->AtomicBuffers[i], 0))
667
unsigned mask = prog->data->linked_stages;
669
const int i = u_bit_scan(&mask);
670
struct gl_program *p = prog->_LinkedShaders[i]->Program;
672
GLuint type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
673
for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
674
if (!link_util_add_program_resource(prog, resource_set,
676
&p->sh.SubroutineFunctions[j],
682
_mesa_set_destroy(resource_set, NULL);
686
gl_nir_link_spirv(const struct gl_constants *consts,
687
struct gl_shader_program *prog,
688
const struct gl_nir_linker_options *options)
690
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
691
struct gl_linked_shader *shader = prog->_LinkedShaders[i];
693
const nir_remove_dead_variables_options opts = {
694
.can_remove_var = can_remove_uniform,
696
nir_remove_dead_variables(shader->Program->nir,
697
nir_var_uniform | nir_var_image,
702
if (!gl_nir_link_uniform_blocks(prog))
705
if (!gl_nir_link_uniforms(consts, prog, options->fill_parameters))
708
gl_nir_link_assign_atomic_counter_resources(consts, prog);
709
gl_nir_link_assign_xfb_resources(consts, prog);
715
* Validate shader image resources.
718
check_image_resources(const struct gl_constants *consts,
719
const struct gl_extensions *exts,
720
struct gl_shader_program *prog)
722
unsigned total_image_units = 0;
723
unsigned fragment_outputs = 0;
724
unsigned total_shader_storage_blocks = 0;
726
if (!exts->ARB_shader_image_load_store)
729
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
730
struct gl_linked_shader *sh = prog->_LinkedShaders[i];
734
total_image_units += sh->Program->info.num_images;
735
total_shader_storage_blocks += sh->Program->info.num_ssbos;
738
if (total_image_units > consts->MaxCombinedImageUniforms)
739
linker_error(prog, "Too many combined image uniforms\n");
741
struct gl_linked_shader *frag_sh =
742
prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
744
uint64_t frag_outputs_written = frag_sh->Program->info.outputs_written;
745
fragment_outputs = util_bitcount64(frag_outputs_written);
748
if (total_image_units + fragment_outputs + total_shader_storage_blocks >
749
consts->MaxCombinedShaderOutputResources)
750
linker_error(prog, "Too many combined image uniforms, shader storage "
751
" buffers and fragment outputs\n");
755
gl_nir_link_glsl(const struct gl_constants *consts,
756
const struct gl_extensions *exts,
757
struct gl_shader_program *prog)
759
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
760
struct gl_linked_shader *shader = prog->_LinkedShaders[i];
762
const nir_remove_dead_variables_options opts = {
763
.can_remove_var = can_remove_uniform,
765
nir_remove_dead_variables(shader->Program->nir,
766
nir_var_uniform | nir_var_image,
771
if (!gl_nir_link_uniforms(consts, prog, true))
774
link_util_calculate_subroutine_compat(prog);
775
link_util_check_uniform_resources(consts, prog);
776
link_util_check_subroutine_resources(prog);
777
check_image_resources(consts, exts, prog);
778
gl_nir_link_assign_atomic_counter_resources(consts, prog);
779
gl_nir_link_check_atomic_counter_resources(consts, prog);
781
if (prog->data->LinkStatus == LINKING_FAILURE)