/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27
#include "util/debug.h"
28
#include "util/u_math.h"
30
#include "ir3_compiler.h"
32
#include "ir3_shader.h"
35
ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
36
unsigned bit_size, unsigned num_components,
37
nir_intrinsic_instr *low,
38
nir_intrinsic_instr *high, void *data)
40
assert(bit_size >= 8);
43
unsigned byte_size = bit_size / 8;
45
int size = num_components * byte_size;
47
/* Don't care about alignment past vec4. */
48
assert(util_is_power_of_two_nonzero(align_mul));
49
align_mul = MIN2(align_mul, 16);
52
/* Our offset alignment should aways be at least 4 bytes */
56
unsigned worst_start_offset = 16 - align_mul + align_offset;
57
if (worst_start_offset + size > 16)
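
/* OPT() runs a NIR pass and evaluates to true if the pass reported progress;
 * OPT_V() runs a pass unconditionally, for passes whose progress we don't
 * need to track.
 */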
63
#define OPT(nir, pass, ...) \
65
bool this_progress = false; \
66
NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
70
#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
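
/* The standard NIR optimization loop, repeated until no pass in the loop
 * reports progress.
 */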
73
ir3_optimize_loop(struct ir3_compiler *compiler, nir_shader *s)
76
unsigned lower_flrp = (s->options->lower_flrp16 ? 16 : 0) |
77
(s->options->lower_flrp32 ? 32 : 0) |
78
(s->options->lower_flrp64 ? 64 : 0);
83
OPT_V(s, nir_lower_vars_to_ssa);
84
progress |= OPT(s, nir_opt_copy_prop_vars);
85
progress |= OPT(s, nir_opt_dead_write_vars);
86
progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
87
progress |= OPT(s, nir_lower_phis_to_scalar, false);
89
progress |= OPT(s, nir_copy_prop);
90
progress |= OPT(s, nir_opt_deref);
91
progress |= OPT(s, nir_opt_dce);
92
progress |= OPT(s, nir_opt_cse);
95
gcm = env_var_as_unsigned("GCM", 0);
97
progress |= OPT(s, nir_opt_gcm, true);
99
progress |= OPT(s, nir_opt_gcm, false);
100
progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
101
progress |= OPT(s, nir_opt_intrinsics);
102
/* NOTE: GS lowering inserts an output var with varying slot that
103
* is larger than VARYING_SLOT_MAX (ie. GS_VERTEX_FLAGS_IR3),
104
* which triggers asserts in nir_shader_gather_info(). To work
105
* around that skip lowering phi precision for GS.
107
* Calling nir_shader_gather_info() late also seems to cause
108
* problems for tess lowering, for now since we only enable
109
* fp16/int16 for frag and compute, skip phi precision lowering
112
if ((s->info.stage == MESA_SHADER_FRAGMENT) ||
113
(s->info.stage == MESA_SHADER_COMPUTE) ||
114
(s->info.stage == MESA_SHADER_KERNEL)) {
115
progress |= OPT(s, nir_opt_phi_precision);
117
progress |= OPT(s, nir_opt_algebraic);
118
progress |= OPT(s, nir_lower_alu);
119
progress |= OPT(s, nir_lower_pack);
120
progress |= OPT(s, nir_opt_constant_folding);
122
static const nir_opt_offsets_options offset_options = {
123
/* How large an offset we can encode in the instr's immediate field.
125
.uniform_max = (1 << 9) - 1,
127
.shared_max = (1 << 13) - 1,
131
progress |= OPT(s, nir_opt_offsets, &offset_options);
133
nir_load_store_vectorize_options vectorize_opts = {
134
.modes = nir_var_mem_ubo,
135
.callback = ir3_nir_should_vectorize_mem,
136
.robust_modes = compiler->robust_ubo_access ? nir_var_mem_ubo : 0,
138
progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);
140
if (lower_flrp != 0) {
141
if (OPT(s, nir_lower_flrp, lower_flrp, false /* always_precise */)) {
142
OPT(s, nir_opt_constant_folding);
146
/* Nothing should rematerialize any flrps, so we only
147
* need to do this lowering once.
152
progress |= OPT(s, nir_opt_dead_cf);
153
if (OPT(s, nir_opt_trivial_continues)) {
155
/* If nir_opt_trivial_continues makes progress, then we need to clean
156
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
159
OPT(s, nir_copy_prop);
162
progress |= OPT(s, nir_opt_if, false);
163
progress |= OPT(s, nir_opt_loop_unroll);
164
progress |= OPT(s, nir_lower_64bit_phis);
165
progress |= OPT(s, nir_opt_remove_phis);
166
progress |= OPT(s, nir_opt_undef);
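
/* Callback for nir_lower_wrmasks(): pick which store intrinsics should be
 * split into multiple stores when their write-mask is not a contiguous run
 * of components.
 */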
171
should_split_wrmask(const nir_instr *instr, const void *data)
173
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
175
switch (intr->intrinsic) {
176
case nir_intrinsic_store_ssbo:
177
case nir_intrinsic_store_shared:
178
case nir_intrinsic_store_global:
179
case nir_intrinsic_store_scratch:
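
/* Lowering for nir_intrinsic_get_ssbo_size: the backend's intrinsic returns
 * the size in units of 2^shift bytes (see the per-gen callers below for the
 * shift), so scale the result back up so that NIR consumers still see a
 * size in bytes.
 */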
187
ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
189
return instr->type == nir_instr_type_intrinsic &&
190
nir_instr_as_intrinsic(instr)->intrinsic ==
191
nir_intrinsic_get_ssbo_size;
195
ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
197
uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
198
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
199
return nir_ishl(b, &intr->dest.ssa, nir_imm_int(b, ssbo_size_to_bytes_shift));
203
ir3_nir_lower_ssbo_size(nir_shader *s, uint8_t ssbo_size_to_bytes_shift)
205
return nir_shader_lower_instructions(s, ir3_nir_lower_ssbo_size_filter,
206
ir3_nir_lower_ssbo_size_instr,
207
&ssbo_size_to_bytes_shift);

void
ir3_nir_lower_io_to_temporaries(nir_shader *s)
{
   /* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
    * by the hardware pre-loading registers at the beginning and then reading
    * them at the end, so we can't access them indirectly except through
    * normal register-indirect accesses, and therefore ir3 doesn't support
    * indirect accesses on those. Other i/o is lowered in ir3_nir_lower_tess,
    * and indirects work just fine for those. GS outputs may be consumed by
    * VPC, but have their own lowering in ir3_nir_lower_gs() which does
    * something similar to nir_lower_io_to_temporaries so we shouldn't need
    * to lower them again.
    *
    * Note: this might be a little inefficient for VS or TES outputs when the
    * next stage isn't an FS, but it probably doesn't make sense to depend on
    * the next stage before variant creation.
    *
    * TODO: for gallium, mesa/st also does some redundant lowering, including
    * running this pass for GS inputs/outputs which we don't want but not
    * including TES outputs or FS inputs which we do need. We should probably
    * stop doing that once we're sure all drivers are doing their own
    * indirect i/o lowering.
    */
   bool lower_input = s->info.stage == MESA_SHADER_VERTEX ||
                      s->info.stage == MESA_SHADER_FRAGMENT;
   bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
                       s->info.stage != MESA_SHADER_GEOMETRY;
   if (lower_input || lower_output) {
      NIR_PASS_V(s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
                 lower_output, lower_input);

      /* nir_lower_io_to_temporaries() creates global variables and copy
       * instructions which need to be cleaned up.
       */
      NIR_PASS_V(s, nir_split_var_copies);
      NIR_PASS_V(s, nir_lower_var_copies);
      NIR_PASS_V(s, nir_lower_global_vars_to_local);
   }

   /* Regardless of the above, we need to lower indirect references to
    * compact variables such as clip/cull distances because due to how
    * TCS<->TES IO works we cannot handle indirect accesses that "straddle"
    * vec4 components. nir_lower_indirect_derefs has a special case for
    * compact variables, so it will actually lower them even though we pass
    * in 0 modes.
    *
    * Using temporaries would be slightly better but
    * nir_lower_io_to_temporaries currently doesn't support TCS i/o.
    */
   NIR_PASS_V(s, nir_lower_indirect_derefs, 0, UINT32_MAX);
}
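
/* Early, variant-independent lowering that only needs to run once per
 * shader; the comment above ir3_nir_post_finalize() below refers to this
 * step as pscreen->finalize_nir().
 */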
263
ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s)
265
struct nir_lower_tex_options tex_options = {
267
.lower_tg4_offsets = true,
270
if (compiler->gen >= 4) {
271
/* a4xx seems to have *no* sam.p */
272
tex_options.lower_txp = ~0; /* lower all txp */
274
/* a3xx just needs to avoid sam.p for 3d tex */
275
tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
278
if (ir3_shader_debug & IR3_DBG_DISASM) {
279
mesa_logi("----------------------");
281
mesa_logi("----------------------");
284
if (s->info.stage == MESA_SHADER_GEOMETRY)
285
NIR_PASS_V(s, ir3_nir_lower_gs);
287
NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
289
OPT_V(s, nir_lower_regs_to_ssa);
290
OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
292
OPT_V(s, nir_lower_tex, &tex_options);
293
OPT_V(s, nir_lower_load_const_to_scalar);
295
ir3_optimize_loop(compiler, s);
297
/* do idiv lowering after first opt loop to get a chance to propagate
298
* constants for divide by immed power-of-two:
300
nir_lower_idiv_options idiv_options = {
301
.imprecise_32bit_lowering = true,
304
const bool idiv_progress = OPT(s, nir_lower_idiv, &idiv_options);
307
ir3_optimize_loop(compiler, s);
309
OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
311
if (ir3_shader_debug & IR3_DBG_DISASM) {
312
mesa_logi("----------------------");
314
mesa_logi("----------------------");
317
/* st_program.c's parameter list optimization requires that future nir
318
* variants don't reallocate the uniform storage, so we have to remove
319
* uniforms that occupy storage. But we don't want to remove samplers,
320
* because they're needed for YUV variant lowering.
322
nir_foreach_uniform_variable_safe (var, s) {
323
if (var->data.mode == nir_var_uniform &&
324
(glsl_type_get_image_count(var->type) ||
325
glsl_type_get_sampler_count(var->type)))
328
exec_node_remove(&var->node);
330
nir_validate_shader(s, "after uniform var removal");
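
/* Lower subgroup-related system values for compute: with ir3's linear
 * invocation numbering, subgroup_invocation is the low bits of
 * local_invocation_index, subgroup_id is the high bits (shifted down by
 * log2 of the subgroup size), and num_subgroups is the workgroup size
 * divided by the subgroup size, rounded up.
 */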
static bool
lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   return intr->intrinsic == nir_intrinsic_load_subgroup_invocation ||
          intr->intrinsic == nir_intrinsic_load_subgroup_id ||
          intr->intrinsic == nir_intrinsic_load_num_subgroups;
}

static nir_ssa_def *
lower_subgroup_id(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
      return nir_iand(
         b, nir_load_local_invocation_index(b),
         nir_isub(b, nir_load_subgroup_size(b), nir_imm_int(b, 1)));
   } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
      return nir_ishr(b, nir_load_local_invocation_index(b),
                      nir_load_subgroup_id_shift_ir3(b));
   } else {
      assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
      /* If the workgroup size is constant,
       * nir_lower_compute_system_values() will replace local_size with a
       * constant so this can mostly be constant folded away.
       */
      nir_ssa_def *local_size = nir_load_workgroup_size(b);
      nir_ssa_def *size =
         nir_imul24(b, nir_channel(b, local_size, 0),
                    nir_imul24(b, nir_channel(b, local_size, 1),
                               nir_channel(b, local_size, 2)));
      nir_ssa_def *one = nir_imm_int(b, 1);
      return nir_iadd(b, one,
                      nir_ishr(b, nir_isub(b, size, one),
                               nir_load_subgroup_id_shift_ir3(b)));
   }
}
381
ir3_nir_lower_subgroup_id_cs(nir_shader *shader)
383
return nir_shader_lower_instructions(shader, lower_subgroup_id_filter,
384
lower_subgroup_id, NULL);
388
* Late passes that need to be done after pscreen->finalize_nir()
391
ir3_nir_post_finalize(struct ir3_shader *shader)
393
struct nir_shader *s = shader->nir;
394
struct ir3_compiler *compiler = shader->compiler;
396
NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
397
ir3_glsl_type_size, (nir_lower_io_options)0);
399
if (s->info.stage == MESA_SHADER_FRAGMENT) {
400
/* NOTE: lower load_barycentric_at_sample first, since it
401
* produces load_barycentric_at_offset:
403
NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
404
NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
405
NIR_PASS_V(s, ir3_nir_move_varying_inputs);
406
NIR_PASS_V(s, nir_lower_fb_read);
409
if (compiler->gen >= 6 && s->info.stage == MESA_SHADER_FRAGMENT &&
410
!(ir3_shader_debug & IR3_DBG_NOFP16)) {
411
NIR_PASS_V(s, nir_lower_mediump_io, nir_var_shader_out, 0, false);
414
if ((s->info.stage == MESA_SHADER_COMPUTE) ||
415
(s->info.stage == MESA_SHADER_KERNEL) ||
416
compiler->has_getfiberid) {
417
/* If the API-facing subgroup size is forced to a particular value, lower
418
* it here. Beyond this point nir_intrinsic_load_subgroup_size will return
419
* the "real" subgroup size.
421
unsigned subgroup_size = 0, max_subgroup_size = 0;
422
switch (shader->api_wavesize) {
423
case IR3_SINGLE_ONLY:
424
subgroup_size = max_subgroup_size = compiler->threadsize_base;
426
case IR3_DOUBLE_ONLY:
427
subgroup_size = max_subgroup_size = compiler->threadsize_base * 2;
429
case IR3_SINGLE_OR_DOUBLE:
430
/* For vertex stages, we know the wavesize will never be doubled.
431
* Lower subgroup_size here, to avoid having to deal with it when
432
* translating from NIR. Otherwise use the "real" wavesize obtained as
435
if (s->info.stage != MESA_SHADER_COMPUTE &&
436
s->info.stage != MESA_SHADER_FRAGMENT) {
437
subgroup_size = max_subgroup_size = compiler->threadsize_base;
440
max_subgroup_size = compiler->threadsize_base * 2;
445
OPT(s, nir_lower_subgroups,
446
&(nir_lower_subgroups_options){
447
.subgroup_size = subgroup_size,
448
.ballot_bit_size = 32,
449
.ballot_components = max_subgroup_size / 32,
450
.lower_to_scalar = true,
451
.lower_vote_eq = true,
452
.lower_subgroup_masks = true,
453
.lower_read_invocation_to_cond = true,
454
.lower_shuffle = true,
455
.lower_relative_shuffle = true,
459
if ((s->info.stage == MESA_SHADER_COMPUTE) ||
460
(s->info.stage == MESA_SHADER_KERNEL)) {
461
bool progress = false;
462
NIR_PASS(progress, s, ir3_nir_lower_subgroup_id_cs);
464
/* ir3_nir_lower_subgroup_id_cs creates extra compute intrinsics which
465
* we need to lower again.
468
NIR_PASS_V(s, nir_lower_compute_system_values, NULL);
471
/* we cannot ensure that ir3_finalize_nir() is only called once, so
472
* we also need to do any run-once workarounds here:
474
OPT_V(s, ir3_nir_apply_trig_workarounds);
476
const nir_lower_image_options lower_image_opts = {
477
.lower_cube_size = true,
479
NIR_PASS_V(s, nir_lower_image, &lower_image_opts);
481
const nir_lower_idiv_options lower_idiv_options = {
482
.imprecise_32bit_lowering = true,
485
NIR_PASS_V(s, nir_lower_idiv, &lower_idiv_options); /* idiv generated by cube lowering */
488
/* The resinfo opcode returns the size in dwords on a4xx */
489
if (compiler->gen == 4)
490
OPT_V(s, ir3_nir_lower_ssbo_size, 2);
492
/* The resinfo opcode we have for getting the SSBO size on a6xx returns a
493
* byte length divided by IBO_0_FMT, while the NIR intrinsic coming in is a
494
* number of bytes. Switch things so the NIR intrinsic in our backend means
497
if (compiler->gen >= 6)
498
OPT_V(s, ir3_nir_lower_ssbo_size, compiler->storage_16bit ? 1 : 2);
500
ir3_optimize_loop(compiler, s);
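
/* If the shader key says gl_Layer / gl_ViewportIndex are always zero,
 * replace FS loads of those inputs with a constant zero so the varyings
 * never need to be read.
 */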
static bool
ir3_nir_lower_view_layer_id(nir_shader *nir, bool layer_zero, bool view_zero)
{
   unsigned layer_id_loc = ~0, view_id_loc = ~0;
   nir_foreach_shader_in_variable (var, nir) {
      if (var->data.location == VARYING_SLOT_LAYER)
         layer_id_loc = var->data.driver_location;
      if (var->data.location == VARYING_SLOT_VIEWPORT)
         view_id_loc = var->data.driver_location;
   }

   assert(!layer_zero || layer_id_loc != ~0);
   assert(!view_zero || view_id_loc != ~0);

   bool progress = false;
   nir_builder b;

   nir_foreach_function (func, nir) {
      nir_builder_init(&b, func->impl);

      nir_foreach_block (block, func->impl) {
         nir_foreach_instr_safe (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic != nir_intrinsic_load_input)
               continue;

            unsigned base = nir_intrinsic_base(intrin);
            if (base != layer_id_loc && base != view_id_loc)
               continue;

            b.cursor = nir_before_instr(&intrin->instr);
            nir_ssa_def *zero = nir_imm_int(&b, 0);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, zero);
            nir_instr_remove(&intrin->instr);
            progress = true;
         }
      }

      if (progress) {
         nir_metadata_preserve(
            func->impl, nir_metadata_block_index | nir_metadata_dominance);
      } else {
         nir_metadata_preserve(func->impl, nir_metadata_all);
      }
   }

   return progress;
}

void
ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
{
   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   bool progress = false;

   if (so->key.has_gs || so->key.tessellation) {
      switch (so->shader->type) {
      case MESA_SHADER_VERTEX:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                    so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_TESS_CTRL:
         NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
         progress = true;
         break;
      case MESA_SHADER_TESS_EVAL:
         NIR_PASS_V(s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
         if (so->key.has_gs)
            NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                       so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_GEOMETRY:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
         progress = true;
         break;
      default:
         break;
      }
   }

   if (s->info.stage == MESA_SHADER_VERTEX) {
      if (so->key.ucp_enables)
         progress |=
            OPT(s, nir_lower_clip_vs, so->key.ucp_enables, false, true, NULL);
   } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
      bool layer_zero =
         so->key.layer_zero && (s->info.inputs_read & VARYING_BIT_LAYER);
      bool view_zero =
         so->key.view_zero && (s->info.inputs_read & VARYING_BIT_VIEWPORT);

      if (so->key.ucp_enables && !so->shader->compiler->has_clip_cull)
         progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, true);
      if (layer_zero || view_zero)
         progress |= OPT(s, ir3_nir_lower_view_layer_id, layer_zero, view_zero);
   }

   /* Move large constant variables to the constants attached to the NIR
    * shader, which we will upload in the immediates range. This generates
    * amuls, so we need to clean those up after.
    *
    * Passing no size_align, we would get packed values, which if we end up
    * having to load with LDC would result in extra reads to unpack from
    * straddling loads. Align everything to vec4 to avoid that, though we
    * could theoretically do better.
    */
   OPT_V(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
         32 /* bytes */);
   OPT_V(s, ir3_nir_lower_load_constant, so);

   /* Lower large temporaries to scratch, which in Qualcomm terms is private
    * memory, to avoid excess register pressure. This should happen after
    * nir_opt_large_constants, because loading from a UBO is much, much less
    * expensive.
    */
   if (so->shader->compiler->has_pvtmem) {
      progress |= OPT(s, nir_lower_vars_to_scratch, nir_var_function_temp,
                      16 * 16 /* bytes */, glsl_get_natural_size_align_bytes);
   }

   /* Lower scratch writemasks */
   progress |= OPT(s, nir_lower_wrmasks, should_split_wrmask, s);

   progress |= OPT(s, ir3_nir_lower_wide_load_store);
   progress |= OPT(s, ir3_nir_lower_64b_global);
   progress |= OPT(s, ir3_nir_lower_64b_intrinsics);
   progress |= OPT(s, ir3_nir_lower_64b_undef);
   progress |= OPT(s, nir_lower_int64);

   /* Cleanup code leftover from lowering passes before opt_preamble */
   progress |= OPT(s, nir_opt_constant_folding);

   /* Do the preamble before analysing UBO ranges, because it's usually
    * higher-value and because it can result in eliminating some indirect UBO
    * accesses where otherwise we'd have to push the whole range. However we
    * have to lower the preamble after UBO lowering so that UBO lowering can
    * insert instructions in the preamble to push UBOs.
    */
   if (so->shader->compiler->has_preamble &&
       !(ir3_shader_debug & IR3_DBG_NOPREAMBLE))
      progress |= OPT(s, ir3_nir_opt_preamble, so);

   if (!so->binning_pass)
      OPT_V(s, ir3_nir_analyze_ubo_ranges, so);

   progress |= OPT(s, ir3_nir_lower_ubo_loads, so);

   progress |= OPT(s, ir3_nir_lower_preamble, so);

   OPT_V(s, nir_lower_amul, ir3_glsl_type_size);

   /* UBO offset lowering has to come after we've decided what will
    * be left as load_ubo
    */
   if (so->shader->compiler->gen >= 6)
      progress |= OPT(s, nir_lower_ubo_vec4);

   OPT_V(s, ir3_nir_lower_io_offsets);

   if (progress)
      ir3_optimize_loop(so->shader->compiler, s);

   /* Fixup indirect load_uniform's which end up with a const base offset
    * which is too large to encode. Do this late(ish) so we actually
    * can differentiate indirect vs non-indirect.
    */
   if (OPT(s, ir3_nir_fixup_load_uniform))
      ir3_optimize_loop(so->shader->compiler, s);

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic. Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = OPT(s, nir_opt_algebraic_late);
      OPT_V(s, nir_opt_constant_folding);
      OPT_V(s, nir_copy_prop);
      OPT_V(s, nir_opt_dce);
      OPT_V(s, nir_opt_cse);
   }

   OPT_V(s, nir_opt_sink, nir_move_const_undef);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   /* Binning pass variants re-use the const_state of the corresponding
    * draw pass shader, so that same const emit can be re-used for both
    * draw and binning pass.
    */
   if (!so->binning_pass)
      ir3_setup_const_state(s, so, ir3_const_state(so));
}
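
/* Walk the shader looking for intrinsics that need driver-param or
 * image-dimension constants, and grow the const layout accordingly.
 */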
static void
ir3_nir_scan_driver_consts(struct ir3_compiler *compiler, nir_shader *shader, struct ir3_const_state *layout)
{
   nir_foreach_function (function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block (block, function->impl) {
         nir_foreach_instr (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            unsigned idx;

            switch (intr->intrinsic) {
            case nir_intrinsic_image_atomic_add:
            case nir_intrinsic_image_atomic_imin:
            case nir_intrinsic_image_atomic_umin:
            case nir_intrinsic_image_atomic_imax:
            case nir_intrinsic_image_atomic_umax:
            case nir_intrinsic_image_atomic_and:
            case nir_intrinsic_image_atomic_or:
            case nir_intrinsic_image_atomic_xor:
            case nir_intrinsic_image_atomic_exchange:
            case nir_intrinsic_image_atomic_comp_swap:
            case nir_intrinsic_image_load:
            case nir_intrinsic_image_store:
            case nir_intrinsic_image_size:
               /* a4xx gets these supplied by the hw directly (maybe CP?) */
               if (compiler->gen == 5 &&
                   !(intr->intrinsic == nir_intrinsic_image_load &&
                     !(nir_intrinsic_access(intr) & ACCESS_COHERENT))) {
                  idx = nir_src_as_uint(intr->src[0]);
                  if (layout->image_dims.mask & (1 << idx))
                     break;
                  layout->image_dims.mask |= (1 << idx);
                  layout->image_dims.off[idx] = layout->image_dims.count;
                  layout->image_dims.count += 3; /* three const per */
               }
               break;
            case nir_intrinsic_load_base_vertex:
            case nir_intrinsic_load_first_vertex:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_VTXID_BASE + 1);
               break;
            case nir_intrinsic_load_base_instance:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_INSTID_BASE + 1);
               break;
            case nir_intrinsic_load_user_clip_plane:
               idx = nir_intrinsic_ucp_id(intr);
               layout->num_driver_params = MAX2(layout->num_driver_params,
                                                IR3_DP_UCP0_X + (idx + 1) * 4);
               break;
            case nir_intrinsic_load_num_workgroups:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_NUM_WORK_GROUPS_Z + 1);
               break;
            case nir_intrinsic_load_workgroup_id:
               if (!compiler->has_shared_regfile) {
                  layout->num_driver_params =
                     MAX2(layout->num_driver_params, IR3_DP_WORKGROUP_ID_Z + 1);
               }
               break;
            case nir_intrinsic_load_workgroup_size:
               layout->num_driver_params = MAX2(layout->num_driver_params,
                                                IR3_DP_LOCAL_GROUP_SIZE_Z + 1);
               break;
            case nir_intrinsic_load_base_workgroup_id:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_BASE_GROUP_Z + 1);
               break;
            case nir_intrinsic_load_subgroup_size: {
               assert(shader->info.stage == MESA_SHADER_COMPUTE ||
                      shader->info.stage == MESA_SHADER_FRAGMENT);
               enum ir3_driver_param size = shader->info.stage == MESA_SHADER_COMPUTE ?
                  IR3_DP_CS_SUBGROUP_SIZE : IR3_DP_FS_SUBGROUP_SIZE;
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, size + 1);
               break;
            }
            case nir_intrinsic_load_subgroup_id_shift_ir3:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_SUBGROUP_ID_SHIFT + 1);
               break;
            default:
               break;
            }
         }
      }
   }

   /* TODO: Provide a spot somewhere to safely upload unwanted values, and a way
    * to determine if they're wanted or not. For now we always make the whole
    * driver param range available, since the driver will always instruct the
    * hardware to upload these.
    */
   if (!compiler->has_shared_regfile &&
       shader->info.stage == MESA_SHADER_COMPUTE) {
      layout->num_driver_params =
         MAX2(layout->num_driver_params, IR3_DP_WORKGROUP_ID_Z + 1);
   }
}

/* Sets up the variant-dependent constant state for the ir3_shader. Note
 * that it is also used from ir3_nir_analyze_ubo_ranges() to figure out the
 * maximum number of driver params that would eventually be used, to leave
 * space for this function to allocate the driver params.
 */
static void
ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
                      struct ir3_const_state *const_state)
{
   struct ir3_compiler *compiler = v->shader->compiler;

   memset(&const_state->offsets, ~0, sizeof(const_state->offsets));

   ir3_nir_scan_driver_consts(compiler, nir, const_state);

   if ((compiler->gen < 5) && (v->shader->stream_output.num_outputs > 0)) {
      const_state->num_driver_params =
         MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
   }

   const_state->num_ubos = nir->info.num_ubos;

   debug_assert((const_state->ubo_state.size % 16) == 0);
   unsigned constoff = v->shader->num_reserved_user_consts +
                       const_state->ubo_state.size / 16 +
                       const_state->preamble_size;
   unsigned ptrsz = ir3_pointer_size(compiler);

   if (const_state->num_ubos > 0) {
      const_state->offsets.ubo = constoff;
      constoff += align(const_state->num_ubos * ptrsz, 4) / 4;
   }

   if (const_state->image_dims.count > 0) {
      unsigned cnt = const_state->image_dims.count;
      const_state->offsets.image_dims = constoff;
      constoff += align(cnt, 4) / 4;
   }

   if (v->type == MESA_SHADER_KERNEL) {
      const_state->offsets.kernel_params = constoff;
      constoff += align(v->shader->cs.req_input_mem, 4) / 4;
   }

   if (const_state->num_driver_params > 0) {
      /* num_driver_params in dwords. we only need to align to vec4s for the
       * common case of immediate constant uploads, but for indirect dispatch
       * the constants may also be indirect and so we have to align the area in
       * const space to that requirement.
       */
      const_state->num_driver_params = align(const_state->num_driver_params, 4);
      unsigned upload_unit = 1;
      if (v->type == MESA_SHADER_COMPUTE ||
          (const_state->num_driver_params >= IR3_DP_VTXID_BASE)) {
         upload_unit = compiler->const_upload_unit;
      }

      /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
      if (v->type == MESA_SHADER_VERTEX && compiler->gen >= 6)
         constoff = MAX2(constoff, 1);
      constoff = align(constoff, upload_unit);
      const_state->offsets.driver_param = constoff;

      constoff += align(const_state->num_driver_params / 4, upload_unit);
   }

   if ((v->type == MESA_SHADER_VERTEX) && (compiler->gen < 5) &&
       v->shader->stream_output.num_outputs > 0) {
      const_state->offsets.tfbo = constoff;
      constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
   }

   switch (v->type) {
   case MESA_SHADER_VERTEX:
      const_state->offsets.primitive_param = constoff;
      constoff += 1;
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
      constoff = align(constoff - 1, 4) + 3;
      const_state->offsets.primitive_param = constoff;
      const_state->offsets.primitive_map = constoff + 5;
      constoff += 5 + DIV_ROUND_UP(v->input_size, 4);
      break;
   case MESA_SHADER_GEOMETRY:
      const_state->offsets.primitive_param = constoff;
      const_state->offsets.primitive_map = constoff + 1;
      constoff += 1 + DIV_ROUND_UP(v->input_size, 4);
      break;
   default:
      break;
   }

   const_state->offsets.immediate = constoff;

   assert(constoff <= ir3_max_const(v));
}