/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"
#include "util/u_prim.h"
#include "compiler/nir/nir_schedule.h"
#include "compiler/nir/nir_builder.h"
vir_get_nsrc(struct qinst *inst)
switch (inst->qpu.type) {
case V3D_QPU_INSTR_TYPE_BRANCH:
case V3D_QPU_INSTR_TYPE_ALU:
if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
* Returns whether the instruction has any side effects that must be
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
switch (inst->qpu.type) {
case V3D_QPU_INSTR_TYPE_BRANCH:
case V3D_QPU_INSTR_TYPE_ALU:
switch (inst->qpu.alu.add.op) {
case V3D_QPU_A_SETREVF:
case V3D_QPU_A_SETMSF:
case V3D_QPU_A_VPMSETUP:
case V3D_QPU_A_STVPMV:
case V3D_QPU_A_STVPMD:
case V3D_QPU_A_STVPMP:
switch (inst->qpu.alu.mul.op) {
case V3D_QPU_M_MULTOP:
if (inst->qpu.sig.ldtmu ||
inst->qpu.sig.ldvary ||
inst->qpu.sig.ldtlbu ||
inst->qpu.sig.ldtlb ||
inst->qpu.sig.wrtmuc ||
inst->qpu.sig.thrsw) {
/* ldunifa works like ldunif: it reads an element and advances the
* pointer, so each read has a side effect (we don't care for ldunif
* because we reconstruct the uniform stream buffer after compiling
* with the surviving uniforms), so allowing DCE to remove
* one would break follow-up loads. We could fix this by emitting a
* unifa for each ldunifa, but each unifa requires 3 delay slots
* before a ldunifa, so that would be quite expensive.
if (inst->qpu.sig.ldunifa || inst->qpu.sig.ldunifarf)
vir_is_raw_mov(struct qinst *inst)
if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
(inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
inst->qpu.flags.mc != V3D_QPU_COND_NONE)
vir_is_add(struct qinst *inst)
return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
inst->qpu.alu.add.op != V3D_QPU_A_NOP);
vir_is_mul(struct qinst *inst)
return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
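
/* Returns whether the instruction is a TMU (texture/memory unit) operation:
 * either a write to one of the TMU magic registers or a TMUWT.
 */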
vir_is_tex(const struct v3d_device_info *devinfo, struct qinst *inst)
if (inst->dst.file == QFILE_MAGIC)
return v3d_qpu_magic_waddr_is_tmu(devinfo, inst->dst.index);
if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
for (int i = 0; i < vir_get_nsrc(inst); i++) {
switch (inst->src[i].file) {
if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
inst->qpu.sig.ldtlb ||
inst->qpu.sig.ldtlbu ||
inst->qpu.sig.ldvpm)) {
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
switch (inst->dst.file) {
switch (inst->dst.index) {
case V3D_QPU_WADDR_RECIP:
case V3D_QPU_WADDR_RSQRT:
case V3D_QPU_WADDR_EXP:
case V3D_QPU_WADDR_LOG:
case V3D_QPU_WADDR_SIN:
if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
vir_set_unpack(struct qinst *inst, int src,
enum v3d_qpu_input_unpack unpack)
assert(src == 0 || src == 1);
if (vir_is_add(inst)) {
inst->qpu.alu.add.a_unpack = unpack;
inst->qpu.alu.add.b_unpack = unpack;
assert(vir_is_mul(inst));
inst->qpu.alu.mul.a_unpack = unpack;
inst->qpu.alu.mul.b_unpack = unpack;
vir_set_pack(struct qinst *inst, enum v3d_qpu_output_pack pack)
if (vir_is_add(inst)) {
inst->qpu.alu.add.output_pack = pack;
assert(vir_is_mul(inst));
inst->qpu.alu.mul.output_pack = pack;
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
if (vir_is_add(inst)) {
inst->qpu.flags.ac = cond;
assert(vir_is_mul(inst));
inst->qpu.flags.mc = cond;
vir_get_cond(struct qinst *inst)
assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
if (vir_is_add(inst))
return inst->qpu.flags.ac;
else if (vir_is_mul(inst))
return inst->qpu.flags.mc;
return V3D_QPU_COND_NONE;
vir_set_pf(struct v3d_compile *c, struct qinst *inst, enum v3d_qpu_pf pf)
if (vir_is_add(inst)) {
inst->qpu.flags.apf = pf;
assert(vir_is_mul(inst));
inst->qpu.flags.mpf = pf;
vir_set_uf(struct v3d_compile *c, struct qinst *inst, enum v3d_qpu_uf uf)
if (vir_is_add(inst)) {
inst->qpu.flags.auf = uf;
assert(vir_is_mul(inst));
inst->qpu.flags.muf = uf;
vir_channels_written(struct qinst *inst)
if (vir_is_mul(inst)) {
switch (inst->dst.pack) {
case QPU_PACK_MUL_NOP:
case QPU_PACK_MUL_8888:
case QPU_PACK_MUL_8A:
case QPU_PACK_MUL_8B:
case QPU_PACK_MUL_8C:
case QPU_PACK_MUL_8D:
switch (inst->dst.pack) {
case QPU_PACK_A_8888:
case QPU_PACK_A_8888_SAT:
case QPU_PACK_A_32_SAT:
case QPU_PACK_A_8A_SAT:
case QPU_PACK_A_8B_SAT:
case QPU_PACK_A_8C_SAT:
case QPU_PACK_A_8D_SAT:
case QPU_PACK_A_16A_SAT:
case QPU_PACK_A_16B_SAT:
unreachable("Bad pack field");
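
/* Allocates a fresh temporary register, growing the defs[] array and the
 * spillable bitset as needed (new temps start out marked spillable).
 */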
vir_get_temp(struct v3d_compile *c)
reg.file = QFILE_TEMP;
reg.index = c->num_temps++;
if (c->num_temps > c->defs_array_size) {
uint32_t old_size = c->defs_array_size;
c->defs_array_size = MAX2(old_size * 2, 16);
c->defs = reralloc(c, c->defs, struct qinst *,
memset(&c->defs[old_size], 0,
sizeof(c->defs[0]) * (c->defs_array_size - old_size));
c->spillable = reralloc(c, c->spillable,
BITSET_WORDS(c->defs_array_size));
for (int i = old_size; i < c->defs_array_size; i++)
BITSET_SET(c->spillable, i);
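
/* Instruction constructors: vir_add_inst()/vir_mul_inst() build an ALU
 * instruction on top of a QPU NOP, and vir_branch_inst() builds a relative
 * branch with a constant-zero uniform attached.
 */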
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
struct qinst *inst = calloc(1, sizeof(*inst));
inst->qpu = v3d_qpu_nop();
inst->qpu.alu.add.op = op;
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
struct qinst *inst = calloc(1, sizeof(*inst));
inst->qpu = v3d_qpu_nop();
inst->qpu.alu.mul.op = op;
vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
struct qinst *inst = calloc(1, sizeof(*inst));
inst->qpu = v3d_qpu_nop();
inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
inst->qpu.branch.cond = cond;
inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
inst->qpu.branch.ub = true;
inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;
inst->dst = vir_nop_reg();
inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);
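
/* Inserts the instruction at the current cursor position (before or after
 * the cursor's link, depending on the cursor mode) and invalidates live
 * intervals.
 */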
vir_emit(struct v3d_compile *c, struct qinst *inst)
switch (c->cursor.mode) {
list_add(&inst->link, c->cursor.link);
case vir_cursor_addtail:
list_addtail(&inst->link, c->cursor.link);
c->cursor = vir_after_inst(inst);
c->live_intervals_valid = false;
/* Updates inst to write to a new temporary, emits it, and notes the def. */
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
assert(inst->dst.file == QFILE_NULL);
/* If we're emitting an instruction that's a def, it had better be
* writing a register.
if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
inst->dst = vir_get_temp(c);
if (inst->dst.file == QFILE_TEMP)
c->defs[inst->dst.index] = inst;
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
if (inst->dst.file == QFILE_TEMP)
c->defs[inst->dst.index] = NULL;
vir_new_block(struct v3d_compile *c)
struct qblock *block = rzalloc(c, struct qblock);
list_inithead(&block->instructions);
block->predecessors = _mesa_set_create(block,
_mesa_key_pointer_equal);
block->index = c->next_block_index++;
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
c->cur_block = block;
c->cursor = vir_after_block(block);
list_addtail(&block->link, &c->blocks);
vir_entry_block(struct v3d_compile *c)
return list_first_entry(&c->blocks, struct qblock, link);
vir_exit_block(struct v3d_compile *c)
return list_last_entry(&c->blocks, struct qblock, link);
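
/* Records a CFG edge: adds the predecessor to the successor's predecessor
 * set and fills the first free successor slot (blocks have at most two
 * successors).
 */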
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
_mesa_set_add(successor->predecessors, predecessor);
if (predecessor->successors[0]) {
assert(!predecessor->successors[1]);
predecessor->successors[1] = successor;
predecessor->successors[0] = successor;
const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo,
uint32_t max_inline_uniform_buffers)
struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
compiler->devinfo = devinfo;
compiler->max_inline_uniform_buffers = max_inline_uniform_buffers;
if (!vir_init_reg_sets(compiler)) {
ralloc_free(compiler);
v3d_compiler_free(const struct v3d_compiler *compiler)
ralloc_free((void *)compiler);
static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
void (*debug_output)(const char *msg,
void *debug_output_data),
void *debug_output_data,
int program_id, int variant_id,
uint32_t max_threads,
uint32_t min_threads_for_reg_alloc,
uint32_t max_tmu_spills,
bool disable_general_tmu_sched,
bool disable_loop_unrolling,
bool disable_constant_ubo_load_sorting,
bool disable_tmu_pipelining,
bool fallback_scheduler)
struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);
c->compiler = compiler;
c->devinfo = compiler->devinfo;
c->program_id = program_id;
c->variant_id = variant_id;
c->threads = max_threads;
c->debug_output = debug_output;
c->debug_output_data = debug_output_data;
c->compilation_result = V3D_COMPILATION_SUCCEEDED;
c->min_threads_for_reg_alloc = min_threads_for_reg_alloc;
c->max_tmu_spills = max_tmu_spills;
c->fallback_scheduler = fallback_scheduler;
c->disable_general_tmu_sched = disable_general_tmu_sched;
c->disable_tmu_pipelining = disable_tmu_pipelining;
c->disable_constant_ubo_load_sorting = disable_constant_ubo_load_sorting;
c->disable_loop_unrolling = V3D_DEBUG & V3D_DEBUG_NO_LOOP_UNROLL
? true : disable_loop_unrolling;
s = nir_shader_clone(c, s);
list_inithead(&c->blocks);
vir_set_emit_block(c, vir_new_block(c));
c->output_position_index = -1;
c->output_sample_mask_index = -1;
c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
_mesa_key_pointer_equal);
c->tmu.outstanding_regs = _mesa_pointer_set_create(c);
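
/* glsl_type size callback: counts the number of vec4 attribute slots a type
 * occupies (presumably used for NIR I/O lowering, based on its signature).
 */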
type_size_vec4(const struct glsl_type *type, bool bindless)
return glsl_count_attribute_slots(type, false);
v3d_lower_nir(struct v3d_compile *c)
struct nir_lower_tex_options tex_options = {
.lower_tg4_broadcom_swizzle = true,
.lower_rect = false, /* XXX: Use this on V3D 3.x */
/* Apply swizzles to all samplers. */
.swizzle_result = ~0,
/* Lower the format swizzle and (for 32-bit returns)
* ARB_texture_swizzle-style swizzle.
assert(c->key->num_tex_used <= ARRAY_SIZE(c->key->tex));
for (int i = 0; i < c->key->num_tex_used; i++) {
for (int j = 0; j < 4; j++)
tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];
assert(c->key->num_samplers_used <= ARRAY_SIZE(c->key->sampler));
for (int i = 0; i < c->key->num_samplers_used; i++) {
if (c->key->sampler[i].return_size == 16) {
tex_options.lower_tex_packing[i] =
nir_lower_tex_packing_16;
/* CS textures may not have return_size reflecting the shadow state. */
nir_foreach_uniform_variable(var, c->s) {
const struct glsl_type *type = glsl_without_array(var->type);
unsigned array_len = MAX2(glsl_get_length(var->type), 1);
if (!glsl_type_is_sampler(type) ||
!glsl_sampler_type_is_shadow(type))
for (int i = 0; i < array_len; i++) {
tex_options.lower_tex_packing[var->data.binding + i] =
nir_lower_tex_packing_16;
NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
NIR_PASS_V(c->s, nir_lower_system_values);
NIR_PASS_V(c->s, nir_lower_compute_system_values, NULL);
NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
nir_var_function_temp,
glsl_get_natural_size_align_bytes);
NIR_PASS_V(c->s, v3d_nir_lower_scratch);
v3d_set_prog_data_uniforms(struct v3d_compile *c,
struct v3d_prog_data *prog_data)
int count = c->num_uniforms;
struct v3d_uniform_list *ulist = &prog_data->uniforms;
ulist->count = count;
ulist->data = ralloc_array(prog_data, uint32_t, count);
memcpy(ulist->data, c->uniform_data,
count * sizeof(*ulist->data));
ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
memcpy(ulist->contents, c->uniform_contents,
count * sizeof(*ulist->contents));
v3d_vs_set_prog_data(struct v3d_compile *c,
struct v3d_vs_prog_data *prog_data)
/* The vertex data gets format converted by the VPM so that
* each attribute channel takes up a VPM column. Precompute
* the sizes for the shader record.
for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
prog_data->vattr_sizes[i] = c->vattr_sizes[i];
prog_data->vpm_input_size += c->vattr_sizes[i];
memset(prog_data->driver_location_map, -1,
sizeof(prog_data->driver_location_map));
nir_foreach_shader_in_variable(var, c->s) {
prog_data->driver_location_map[var->data.location] =
var->data.driver_location;
prog_data->uses_vid = BITSET_TEST(c->s->info.system_values_read,
SYSTEM_VALUE_VERTEX_ID) ||
BITSET_TEST(c->s->info.system_values_read,
SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
prog_data->uses_biid = BITSET_TEST(c->s->info.system_values_read,
SYSTEM_VALUE_BASE_INSTANCE);
prog_data->uses_iid = BITSET_TEST(c->s->info.system_values_read,
SYSTEM_VALUE_INSTANCE_ID) ||
BITSET_TEST(c->s->info.system_values_read,
SYSTEM_VALUE_INSTANCE_INDEX);
if (prog_data->uses_vid)
prog_data->vpm_input_size++;
if (prog_data->uses_biid)
prog_data->vpm_input_size++;
if (prog_data->uses_iid)
prog_data->vpm_input_size++;
/* Input/output segment size are in sectors (8 rows of 32 bits per
prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
/* Set us up for shared input/output segments. This is apparently
* necessary for our VCM setup to avoid varying corruption.
prog_data->separate_segments = false;
prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
prog_data->vpm_input_size);
prog_data->vpm_input_size = 0;
/* Compute VCM cache size. We set up our program to take up less than
* half of the VPM, so that any set of bin and render programs won't
* run out of space. We need space for at least one input segment,
* and then allocate the rest to output segments (one for the current
* program, the rest to VCM). The valid range of the VCM cache size
* field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
assert(c->devinfo->vpm_size);
int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
int half_vpm = vpm_size_in_sectors / 2;
int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
assert(vpm_output_batches >= 2);
prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
v3d_gs_set_prog_data(struct v3d_compile *c,
struct v3d_gs_prog_data *prog_data)
prog_data->num_inputs = c->num_inputs;
memcpy(prog_data->input_slots, c->input_slots,
c->num_inputs * sizeof(*c->input_slots));
/* gl_PrimitiveIdIn is written by the GBG into the first word of the
* VPM output header automatically and the shader will overwrite
* it after reading it if necessary, so it doesn't add to the VPM
prog_data->uses_pid = BITSET_TEST(c->s->info.system_values_read,
SYSTEM_VALUE_PRIMITIVE_ID);
/* Output segment size is in sectors (8 rows of 32 bits per channel) */
prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;
/* Compute SIMD dispatch width and update VPM output size accordingly
* to ensure we can fit our program in memory. Available widths are
* Notice that at draw time we will have to consider VPM memory
* requirements from other stages and choose a smaller dispatch
* width if needed to fit the program in VPM memory.
prog_data->simd_width = 16;
while ((prog_data->simd_width > 1 && prog_data->vpm_output_size > 16) ||
prog_data->simd_width == 2) {
prog_data->simd_width >>= 1;
prog_data->vpm_output_size =
align(prog_data->vpm_output_size, 2) / 2;
assert(prog_data->vpm_output_size <= 16);
assert(prog_data->simd_width != 2);
prog_data->out_prim_type = c->s->info.gs.output_primitive;
prog_data->num_invocations = c->s->info.gs.invocations;
prog_data->writes_psiz =
c->s->info.outputs_written & (1 << VARYING_SLOT_PSIZ);
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
struct v3d_fs_prog_data *prog_data)
prog_data->num_inputs = c->num_inputs;
memcpy(prog_data->input_slots, c->input_slots,
c->num_inputs * sizeof(*c->input_slots));
STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
(V3D_MAX_FS_INPUTS - 1) / 24);
for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
if (BITSET_TEST(c->flat_shade_flags, i))
prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);
if (BITSET_TEST(c->noperspective_flags, i))
prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);
if (BITSET_TEST(c->centroid_flags, i))
prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
v3d_fs_set_prog_data(struct v3d_compile *c,
struct v3d_fs_prog_data *prog_data)
v3d_set_fs_prog_data_inputs(c, prog_data);
prog_data->writes_z = c->writes_z;
prog_data->writes_z_from_fep = c->writes_z_from_fep;
prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
prog_data->uses_center_w = c->uses_center_w;
prog_data->uses_implicit_point_line_varyings =
c->uses_implicit_point_line_varyings;
prog_data->lock_scoreboard_on_first_thrsw =
c->lock_scoreboard_on_first_thrsw;
prog_data->force_per_sample_msaa = c->force_per_sample_msaa;
prog_data->uses_pid = c->fs_uses_primitive_id;
v3d_cs_set_prog_data(struct v3d_compile *c,
struct v3d_compute_prog_data *prog_data)
prog_data->shared_size = c->s->info.shared_size;
prog_data->local_size[0] = c->s->info.workgroup_size[0];
prog_data->local_size[1] = c->s->info.workgroup_size[1];
prog_data->local_size[2] = c->s->info.workgroup_size[2];
prog_data->has_subgroups = c->has_subgroups;
v3d_set_prog_data(struct v3d_compile *c,
struct v3d_prog_data *prog_data)
prog_data->threads = c->threads;
prog_data->single_seg = !c->last_thrsw;
prog_data->spill_size = c->spill_size;
prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;
prog_data->has_control_barrier = c->s->info.uses_control_barrier;
v3d_set_prog_data_uniforms(c, prog_data);
switch (c->s->info.stage) {
case MESA_SHADER_VERTEX:
v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
case MESA_SHADER_GEOMETRY:
v3d_gs_set_prog_data(c, (struct v3d_gs_prog_data *)prog_data);
case MESA_SHADER_FRAGMENT:
v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
case MESA_SHADER_COMPUTE:
v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
unreachable("unsupported shader stage");
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
*final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);
uint64_t *qpu_insts = malloc(*final_assembly_size);
memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);
vir_compile_destroy(c);
v3d_nir_lower_vs_early(struct v3d_compile *c)
/* Split our I/O vars and dead code eliminate the unused
NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
nir_var_shader_in | nir_var_shader_out);
uint64_t used_outputs[4] = {0};
for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);
int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);
used_outputs[comp] |= 1ull << slot;
NIR_PASS_V(c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
v3d_optimize_nir(c, c->s);
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
/* This must go before nir_lower_io */
if (c->vs_key->per_vertex_point_size)
NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
(nir_lower_io_options)0);
/* clean up nir_lower_io's deref_var remains and do a constant folding pass
* on the code it generated.
NIR_PASS_V(c->s, nir_opt_dce);
NIR_PASS_V(c->s, nir_opt_constant_folding);
v3d_nir_lower_gs_early(struct v3d_compile *c)
/* Split our I/O vars and dead code eliminate the unused
NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
nir_var_shader_in | nir_var_shader_out);
uint64_t used_outputs[4] = {0};
for (int i = 0; i < c->gs_key->num_used_outputs; i++) {
int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);
int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);
used_outputs[comp] |= 1ull << slot;
NIR_PASS_V(c->s, nir_remove_unused_io_vars,
nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
v3d_optimize_nir(c, c->s);
NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);
/* This must go before nir_lower_io */
if (c->gs_key->per_vertex_point_size)
NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);
NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
(nir_lower_io_options)0);
/* clean up nir_lower_io's deref_var remains and do a constant folding pass
* on the code it generated.
NIR_PASS_V(c->s, nir_opt_dce);
NIR_PASS_V(c->s, nir_opt_constant_folding);
v3d_fixup_fs_output_types(struct v3d_compile *c)
nir_foreach_shader_out_variable(var, c->s) {
switch (var->data.location) {
case FRAG_RESULT_COLOR:
case FRAG_RESULT_DATA0:
case FRAG_RESULT_DATA1:
case FRAG_RESULT_DATA2:
case FRAG_RESULT_DATA3:
mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
if (c->fs_key->int_color_rb & mask) {
glsl_vector_type(GLSL_TYPE_INT,
glsl_get_components(var->type));
} else if (c->fs_key->uint_color_rb & mask) {
glsl_vector_type(GLSL_TYPE_UINT,
glsl_get_components(var->type));
v3d_nir_lower_fs_early(struct v3d_compile *c)
if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
v3d_fixup_fs_output_types(c);
NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);
if (c->fs_key->line_smoothing) {
v3d_nir_lower_line_smooth(c->s);
NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
/* The lowering pass can introduce new sysval reads */
nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));
v3d_nir_lower_gs_late(struct v3d_compile *c)
if (c->key->ucp_enables) {
NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,
/* Note: GS output scalarizing must happen after nir_lower_clip_gs. */
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
v3d_nir_lower_vs_late(struct v3d_compile *c)
if (c->key->ucp_enables) {
NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
false, false, NULL);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
/* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
v3d_nir_lower_fs_late(struct v3d_compile *c)
/* In OpenGL the fragment shader can't read gl_ClipDistance[], but
* Vulkan allows it, in which case the SPIR-V compiler will declare
* VARYING_SLOT_CLIP_DIST0 as compact array variable. Pass true as
* the last parameter to always operate with a compact array in both
* OpenGL and Vulkan so we don't have to care about the API we
if (c->key->ucp_enables)
NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);
NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
vir_get_max_temps(struct v3d_compile *c)
vir_for_each_inst_inorder(inst, c)
uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);
for (int t = 0; t < c->num_temps; t++) {
for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
uint32_t max_temps = 0;
for (int i = 0; i < max_ip; i++)
max_temps = MAX2(max_temps, pressure[i]);
ralloc_free(pressure);
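
/* Dependency class used with nir_schedule so that GS stores to VPM output
 * slot 0 stay ordered with respect to gl_PrimitiveID reads (see
 * v3d_intrinsic_dependency_cb below).
 */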
enum v3d_dependency_class {
V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0
v3d_intrinsic_dependency_cb(nir_intrinsic_instr *intr,
nir_schedule_dependency *dep,
struct v3d_compile *c = user_data;
switch (intr->intrinsic) {
case nir_intrinsic_store_output:
/* Writing to location 0 overwrites the value passed in for
* gl_PrimitiveID on geometry shaders
if (c->s->info.stage != MESA_SHADER_GEOMETRY ||
nir_intrinsic_base(intr) != 0)
nir_const_value *const_value =
nir_src_as_const_value(intr->src[1]);
if (const_value == NULL)
nir_const_value_as_uint(*const_value,
nir_src_bit_size(intr->src[1]));
dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
case nir_intrinsic_load_primitive_id:
if (c->s->info.stage != MESA_SHADER_GEOMETRY)
dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
dep->type = NIR_SCHEDULE_READ_DEPENDENCY;
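
/* nir_schedule delay callback: estimates instruction latency so the
 * scheduler can hide long-latency operations. TMU-backed loads (SSBO,
 * scratch, shared, image and divergent UBO loads) and texture instructions
 * are the expensive cases handled below; the specific delay values are a
 * scheduling heuristic.
 */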
v3d_instr_delay_cb(nir_instr *instr, void *data)
struct v3d_compile *c = (struct v3d_compile *) data;
switch (instr->type) {
case nir_instr_type_ssa_undef:
case nir_instr_type_load_const:
case nir_instr_type_alu:
case nir_instr_type_deref:
case nir_instr_type_jump:
case nir_instr_type_parallel_copy:
case nir_instr_type_call:
case nir_instr_type_phi:
case nir_instr_type_intrinsic: {
if (!c->disable_general_tmu_sched) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_ssbo:
case nir_intrinsic_load_scratch:
case nir_intrinsic_load_shared:
case nir_intrinsic_image_load:
case nir_intrinsic_load_ubo:
if (nir_src_is_divergent(intr->src[1]))
case nir_instr_type_tex:
should_split_wrmask(const nir_instr *instr, const void *data)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_store_ssbo:
case nir_intrinsic_store_shared:
case nir_intrinsic_store_global:
case nir_intrinsic_store_scratch:
static nir_intrinsic_instr *
nir_instr_as_constant_ubo_load(nir_instr *inst)
if (inst->type != nir_instr_type_intrinsic)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(inst);
if (intr->intrinsic != nir_intrinsic_load_ubo)
assert(nir_src_is_const(intr->src[0]));
if (!nir_src_is_const(intr->src[1]))
v3d_nir_sort_constant_ubo_load(nir_block *block, nir_intrinsic_instr *ref)
bool progress = false;
nir_instr *ref_inst = &ref->instr;
uint32_t ref_offset = nir_src_as_uint(ref->src[1]);
uint32_t ref_index = nir_src_as_uint(ref->src[0]);
/* Go through all instructions after ref searching for constant UBO
* loads for the same UBO index.
bool seq_break = false;
nir_instr *inst = &ref->instr;
nir_instr *next_inst = NULL;
inst = next_inst ? next_inst : nir_instr_next(inst);
if (inst->type != nir_instr_type_intrinsic)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(inst);
if (intr->intrinsic != nir_intrinsic_load_ubo)
/* We only produce unifa sequences for non-divergent loads */
if (nir_src_is_divergent(intr->src[1]))
/* If there are any UBO loads that are not constant or that
* use a different UBO index in between the reference load and
* any other constant load for the same index, they would break
* the unifa sequence. We will flag that so we can then move
* all constant UBO loads for the reference index before these
* and not just the ones that are not ordered to avoid breaking
* the sequence and reduce unifa writes.
if (!nir_src_is_const(intr->src[1])) {
uint32_t offset = nir_src_as_uint(intr->src[1]);
assert(nir_src_is_const(intr->src[0]));
uint32_t index = nir_src_as_uint(intr->src[0]);
if (index != ref_index) {
/* Only move loads with an offset that is close enough to the
* reference offset, since otherwise we would not be able to
* skip the unifa write for them. See ntq_emit_load_ubo_unifa.
if (abs((int)(ref_offset - offset)) > MAX_UNIFA_SKIP_DISTANCE)
/* We will move this load if its offset is smaller than ref's
* (in which case we will move it before ref) or if the offset
* is larger than ref's but there are sequence breakers
* in between (in which case we will move it after ref and
* before the sequence breakers).
if (!seq_break && offset >= ref_offset)
/* Find where exactly we want to move this load:
* If we are moving it before ref, we want to check any other
* UBO loads we placed before ref and make sure we insert this
* one properly ordered with them. Likewise, if we are moving
nir_instr *pos = ref_inst;
nir_instr *tmp = pos;
if (offset < ref_offset)
tmp = nir_instr_prev(tmp);
tmp = nir_instr_next(tmp);
if (!tmp || tmp == inst)
/* Ignore non-unifa UBO loads */
if (tmp->type != nir_instr_type_intrinsic)
nir_intrinsic_instr *tmp_intr =
nir_instr_as_intrinsic(tmp);
if (tmp_intr->intrinsic != nir_intrinsic_load_ubo)
if (nir_src_is_divergent(tmp_intr->src[1]))
/* Stop if we find a unifa UBO load that breaks the
if (!nir_src_is_const(tmp_intr->src[1]))
if (nir_src_as_uint(tmp_intr->src[0]) != index)
uint32_t tmp_offset = nir_src_as_uint(tmp_intr->src[1]);
if (offset < ref_offset) {
if (tmp_offset < offset ||
tmp_offset >= ref_offset) {
if (tmp_offset > offset ||
tmp_offset <= ref_offset) {
/* We can't move the UBO load before the instruction that
* defines its constant offset. If that instruction is placed
* in between the new location (pos) and the current location
* of this load, we will have to move that instruction too.
* We don't care about the UBO index definition because that
* is optimized to be reused by all UBO loads for the same
* index and therefore is certain to be defined before the
* first UBO load that uses it.
nir_instr *offset_inst = NULL;
while ((tmp = nir_instr_prev(tmp)) != NULL) {
/* We reached the target location without
* finding the instruction that defines the
* offset, so that instruction must be before
* the new position and we don't have to fix it.
if (intr->src[1].ssa->parent_instr == tmp) {
exec_node_remove(&offset_inst->node);
exec_node_insert_node_before(&pos->node,
&offset_inst->node);
/* Since we are moving the instruction before its current
* location, grab its successor before the move so that
* we can continue the next iteration of the main loop from
next_inst = nir_instr_next(inst);
/* Move this load to the selected location */
exec_node_remove(&inst->node);
if (offset < ref_offset)
exec_node_insert_node_before(&pos->node, &inst->node);
exec_node_insert_after(&pos->node, &inst->node);
v3d_nir_sort_constant_ubo_loads_block(struct v3d_compile *c,
bool progress = false;
bool local_progress;
local_progress = false;
nir_foreach_instr_safe(inst, block) {
nir_intrinsic_instr *intr =
nir_instr_as_constant_ubo_load(inst);
v3d_nir_sort_constant_ubo_load(block, intr);
progress |= local_progress;
} while (local_progress);
* Sorts constant UBO loads in each block by offset to maximize chances of
* skipping unifa writes when converting to VIR. This can increase register
v3d_nir_sort_constant_ubo_loads(nir_shader *s, struct v3d_compile *c)
nir_foreach_function(function, s) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
c->sorted_any_ubo_loads |=
v3d_nir_sort_constant_ubo_loads_block(c, block);
nir_metadata_preserve(function->impl,
nir_metadata_block_index |
nir_metadata_dominance);
return c->sorted_any_ubo_loads;
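
/* Replaces load_num_subgroups with a constant computed from the compute
 * shader's workgroup size and the V3D SIMD width (V3D_CHANNELS).
 */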
lower_load_num_subgroups(struct v3d_compile *c,
nir_intrinsic_instr *intr)
assert(c->s->info.stage == MESA_SHADER_COMPUTE);
assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
b->cursor = nir_after_instr(&intr->instr);
uint32_t num_subgroups =
DIV_ROUND_UP(c->s->info.workgroup_size[0] *
c->s->info.workgroup_size[1] *
c->s->info.workgroup_size[2], V3D_CHANNELS);
nir_ssa_def *result = nir_imm_int(b, num_subgroups);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
nir_instr_remove(&intr->instr);
lower_subgroup_intrinsics(struct v3d_compile *c,
nir_block *block, nir_builder *b)
bool progress = false;
nir_foreach_instr_safe(inst, block) {
if (inst->type != nir_instr_type_intrinsic)
nir_intrinsic_instr *intr =
nir_instr_as_intrinsic(inst);
switch (intr->intrinsic) {
case nir_intrinsic_load_num_subgroups:
lower_load_num_subgroups(c, b, intr);
case nir_intrinsic_load_subgroup_id:
case nir_intrinsic_load_subgroup_size:
case nir_intrinsic_load_subgroup_invocation:
case nir_intrinsic_elect:
c->has_subgroups = true;
v3d_nir_lower_subgroup_intrinsics(nir_shader *s, struct v3d_compile *c)
bool progress = false;
nir_foreach_function(function, s) {
if (function->impl) {
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl)
progress |= lower_subgroup_intrinsics(c, block, &b);
nir_metadata_preserve(function->impl,
nir_metadata_block_index |
nir_metadata_dominance);
v3d_attempt_compile(struct v3d_compile *c)
switch (c->s->info.stage) {
case MESA_SHADER_VERTEX:
c->vs_key = (struct v3d_vs_key *) c->key;
case MESA_SHADER_GEOMETRY:
c->gs_key = (struct v3d_gs_key *) c->key;
case MESA_SHADER_FRAGMENT:
c->fs_key = (struct v3d_fs_key *) c->key;
case MESA_SHADER_COMPUTE:
unreachable("unsupported shader stage");
switch (c->s->info.stage) {
case MESA_SHADER_VERTEX:
v3d_nir_lower_vs_early(c);
case MESA_SHADER_GEOMETRY:
v3d_nir_lower_gs_early(c);
case MESA_SHADER_FRAGMENT:
v3d_nir_lower_fs_early(c);
switch (c->s->info.stage) {
case MESA_SHADER_VERTEX:
v3d_nir_lower_vs_late(c);
case MESA_SHADER_GEOMETRY:
v3d_nir_lower_gs_late(c);
case MESA_SHADER_FRAGMENT:
v3d_nir_lower_fs_late(c);
NIR_PASS_V(c->s, v3d_nir_lower_io, c);
NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
nir_lower_idiv_options idiv_options = {
.imprecise_32bit_lowering = true,
NIR_PASS_V(c->s, nir_lower_idiv, &idiv_options);
if (c->key->robust_buffer_access) {
/* v3d_nir_lower_robust_buffer_access assumes constant buffer
* indices on ubo/ssbo intrinsics so run copy propagation and
* constant folding passes before we run the lowering to warrant
* this. We also want to run the lowering before v3d_optimize to
* clean-up redundant get_buffer_size calls produced in the pass.
NIR_PASS_V(c->s, nir_copy_prop);
NIR_PASS_V(c->s, nir_opt_constant_folding);
NIR_PASS_V(c->s, v3d_nir_lower_robust_buffer_access, c);
NIR_PASS_V(c->s, nir_lower_wrmasks, should_split_wrmask, c->s);
NIR_PASS_V(c->s, v3d_nir_lower_load_store_bitsize, c);
NIR_PASS_V(c->s, v3d_nir_lower_subgroup_intrinsics, c);
v3d_optimize_nir(c, c->s);
/* Do late algebraic optimization to turn add(a, neg(b)) back into
* subs, then the mandatory cleanup after algebraic. Note that it may
* produce fnegs, and if so then we need to keep running to squash
bool more_late_algebraic = true;
while (more_late_algebraic) {
more_late_algebraic = false;
NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
NIR_PASS_V(c->s, nir_opt_constant_folding);
NIR_PASS_V(c->s, nir_copy_prop);
NIR_PASS_V(c->s, nir_opt_dce);
NIR_PASS_V(c->s, nir_opt_cse);
NIR_PASS_V(c->s, nir_lower_bool_to_int32);
nir_convert_to_lcssa(c->s, true, true);
NIR_PASS_V(c->s, nir_divergence_analysis);
NIR_PASS_V(c->s, nir_convert_from_ssa, true);
struct nir_schedule_options schedule_options = {
/* Schedule for about half our register space, to enable more
* shaders to hit 4 threads.
.threshold = c->threads == 4 ? 24 : 48,
/* Vertex shaders share the same memory for inputs and outputs,
* fragment and geometry shaders do not.
.stages_with_shared_io_memory =
(((1 << MESA_ALL_SHADER_STAGES) - 1) &
~((1 << MESA_SHADER_FRAGMENT) |
(1 << MESA_SHADER_GEOMETRY))),
.fallback = c->fallback_scheduler,
.intrinsic_cb = v3d_intrinsic_dependency_cb,
.intrinsic_cb_data = c,
.instr_delay_cb = v3d_instr_delay_cb,
.instr_delay_cb_data = c,
NIR_PASS_V(c->s, nir_schedule, &schedule_options);
if (!c->disable_constant_ubo_load_sorting)
NIR_PASS_V(c->s, v3d_nir_sort_constant_ubo_loads, c);
NIR_PASS_V(c->s, nir_opt_move, nir_move_load_uniform |
nir_move_const_undef);
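
/* Returns the size of the stage-specific prog_data struct to allocate. */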
v3d_prog_data_size(gl_shader_stage stage)
static const int prog_data_size[] = {
[MESA_SHADER_VERTEX] = sizeof(struct v3d_vs_prog_data),
[MESA_SHADER_GEOMETRY] = sizeof(struct v3d_gs_prog_data),
[MESA_SHADER_FRAGMENT] = sizeof(struct v3d_fs_prog_data),
[MESA_SHADER_COMPUTE] = sizeof(struct v3d_compute_prog_data),
assert(stage >= 0 &&
stage < ARRAY_SIZE(prog_data_size) &&
prog_data_size[stage]);
return prog_data_size[stage];
int v3d_shaderdb_dump(struct v3d_compile *c,
char **shaderdb_str)
if (c == NULL || c->compilation_result != V3D_COMPILATION_SUCCEEDED)
return asprintf(shaderdb_str,
"%s shader: %d inst, %d threads, %d loops, "
"%d uniforms, %d max-temps, %d:%d spills:fills, "
"%d sfu-stalls, %d inst-and-stalls, %d nops",
vir_get_stage_name(c),
vir_get_max_temps(c),
c->qpu_inst_stalled_count,
c->qpu_inst_count + c->qpu_inst_stalled_count,
/* This is a list of incremental changes to the compilation strategy
* that will be used to try to compile the shader successfully. The
* default strategy is to enable all optimizations which will have
* the highest register pressure but is expected to produce most
* optimal code. Following strategies incrementally disable specific
* optimizations that are known to contribute to register pressure
* in order to be able to compile the shader successfully while meeting
* thread count requirements.
* V3D 4.1+ has a min thread count of 2, but we can use 1 here to also
* cover previous hardware as well (meaning that we are not limiting
* register allocation to any particular thread count). This is fine
* because v3d_nir_to_vir will cap this to the actual minimum.
struct v3d_compiler_strategy {
uint32_t max_threads;
uint32_t min_threads;
bool disable_general_tmu_sched;
bool disable_loop_unrolling;
bool disable_ubo_load_sorting;
bool disable_tmu_pipelining;
uint32_t max_tmu_spills;
} static const strategies[] = {
/*0*/ { "default", 4, 4, false, false, false, false, 0 },
/*1*/ { "disable general TMU sched", 4, 4, true, false, false, false, 0 },
/*2*/ { "disable loop unrolling", 4, 4, true, true, false, false, 0 },
/*3*/ { "disable UBO load sorting", 4, 4, true, true, true, false, 0 },
/*4*/ { "disable TMU pipelining", 4, 4, true, true, true, true, 0 },
/*5*/ { "lower thread count", 2, 1, false, false, false, false, -1 },
/*6*/ { "disable general TMU sched (2t)", 2, 1, true, false, false, false, -1 },
/*7*/ { "disable loop unrolling (2t)", 2, 1, true, true, false, false, -1 },
/*8*/ { "disable UBO load sorting (2t)", 2, 1, true, true, true, false, -1 },
/*9*/ { "disable TMU pipelining (2t)", 2, 1, true, true, true, true, -1 },
/*10*/ { "fallback scheduler", 2, 1, true, true, true, true, -1 }
* If a particular optimization didn't make any progress during a compile
* attempt disabling it alone won't allow us to compile the shader successfully,
* since we'll end up with the same code. Detect these scenarios so we can
* avoid wasting time with useless compiles. We should also consider if the
* strategy changes other aspects of the compilation process though, like
* spilling, and not skip it in that case.
skip_compile_strategy(struct v3d_compile *c, uint32_t idx)
/* We decide if we can skip a strategy based on the optimizations that
* were active in the previous strategy, so we should only be calling this
* for strategies after the first.
/* Don't skip a strategy that changes spilling behavior */
if (strategies[idx].max_tmu_spills !=
strategies[idx - 1].max_tmu_spills) {
/* General TMU sched.: skip if we didn't emit any TMU loads */
return !c->has_general_tmu_load;
/* Loop unrolling: skip if we didn't unroll any loops */
return !c->unrolled_any_loops;
/* UBO load sorting: skip if we didn't sort any loads */
return !c->sorted_any_ubo_loads;
/* TMU pipelining: skip if we didn't pipeline any TMU ops */
return !c->pipelined_any_tmu;
/* Lower thread count: skip if we already tried less than 4 threads */
return c->threads < 4;
uint64_t *v3d_compile(const struct v3d_compiler *compiler,
struct v3d_key *key,
struct v3d_prog_data **out_prog_data,
void (*debug_output)(const char *msg,
void *debug_output_data),
void *debug_output_data,
int program_id, int variant_id,
uint32_t *final_assembly_size)
struct v3d_compile *c = NULL;
uint32_t best_spill_fill_count = UINT32_MAX;
struct v3d_compile *best_c = NULL;
for (int32_t strat = 0; strat < ARRAY_SIZE(strategies); strat++) {
/* Fallback strategy */
if (skip_compile_strategy(c, strat))
int ret = asprintf(&debug_msg,
"Falling back to strategy '%s' "
"for %s prog %d/%d",
strategies[strat].name,
vir_get_stage_name(c),
c->program_id, c->variant_id);
if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))
fprintf(stderr, "%s\n", debug_msg);
c->debug_output(debug_msg, c->debug_output_data);
vir_compile_destroy(c);
c = vir_compile_init(compiler, key, s,
debug_output, debug_output_data,
program_id, variant_id,
strategies[strat].max_threads,
strategies[strat].min_threads,
strategies[strat].max_tmu_spills,
strategies[strat].disable_general_tmu_sched,
strategies[strat].disable_loop_unrolling,
strategies[strat].disable_ubo_load_sorting,
strategies[strat].disable_tmu_pipelining,
strat == ARRAY_SIZE(strategies) - 1);
v3d_attempt_compile(c);
/* Broken shader or driver bug */
if (c->compilation_result == V3D_COMPILATION_FAILED)
/* If we compiled without spills, choose this.
* Otherwise if this is a 4-thread compile, choose this (these
* have a very low cap on the allowed TMU spills so we assume
* it will be better than a 2-thread compile without spills).
* Otherwise, keep going while tracking the strategy with the
* lowest spill count.
if (c->compilation_result == V3D_COMPILATION_SUCCEEDED) {
if (c->spills == 0 ||
strategies[strat].min_threads == 4) {
} else if (c->spills + c->fills <
best_spill_fill_count) {
best_spill_fill_count = c->spills + c->fills;
if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF)) {
int ret = asprintf(&debug_msg,
"Compiled %s prog %d/%d with %d "
"spills and %d fills. Will try "
vir_get_stage_name(c),
c->program_id, c->variant_id,
c->spills, c->fills);
fprintf(stderr, "%s\n", debug_msg);
c->debug_output(debug_msg, c->debug_output_data);
/* Only try next strategy if we failed to register allocate
* or we had to spill.
assert(c->compilation_result ==
V3D_COMPILATION_FAILED_REGISTER_ALLOCATION ||
/* If the best strategy was not the last, choose that */
if (best_c && c != best_c) {
vir_compile_destroy(c);
if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF) &&
c->compilation_result !=
V3D_COMPILATION_FAILED_REGISTER_ALLOCATION &&
int ret = asprintf(&debug_msg,
"Compiled %s prog %d/%d with %d "
"spills and %d fills",
vir_get_stage_name(c),
c->program_id, c->variant_id,
c->spills, c->fills);
fprintf(stderr, "%s\n", debug_msg);
c->debug_output(debug_msg, c->debug_output_data);
if (c->compilation_result != V3D_COMPILATION_SUCCEEDED) {
fprintf(stderr, "Failed to compile %s prog %d/%d "
"with any strategy.\n",
vir_get_stage_name(c), c->program_id, c->variant_id);
struct v3d_prog_data *prog_data;
prog_data = rzalloc_size(NULL, v3d_prog_data_size(c->s->info.stage));
v3d_set_prog_data(c, prog_data);
*out_prog_data = prog_data;
int ret = v3d_shaderdb_dump(c, &shaderdb);
if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
fprintf(stderr, "SHADER-DB-%s - %s\n", s->info.name, shaderdb);
c->debug_output(shaderdb, c->debug_output_data);
return v3d_return_qpu_insts(c, final_assembly_size);
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
if (qinst->dst.file == QFILE_TEMP)
c->defs[qinst->dst.index] = NULL;
assert(&qinst->link != c->cursor.link);
list_del(&qinst->link);
c->live_intervals_valid = false;
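
/* Follows a chain of raw MOV definitions back to the original register,
 * keeping the pack flags of the register we started from.
 */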
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
int pack = reg.pack;
while (reg.file == QFILE_TEMP &&
c->defs[reg.index] &&
(c->defs[reg.index]->op == QOP_MOV ||
c->defs[reg.index]->op == QOP_FMOV) &&
!c->defs[reg.index]->dst.pack &&
!c->defs[reg.index]->src[0].pack) {
reg = c->defs[reg.index]->src[0];
vir_compile_destroy(struct v3d_compile *c)
/* Defuse the assert that we aren't removing the cursor's instruction.
c->cursor.link = NULL;
vir_for_each_block(block, c) {
while (!list_is_empty(&block->instructions)) {
struct qinst *qinst =
list_first_entry(&block->instructions,
struct qinst, link);
vir_remove_instruction(c, qinst);
vir_get_uniform_index(struct v3d_compile *c,
enum quniform_contents contents,
for (int i = 0; i < c->num_uniforms; i++) {
if (c->uniform_contents[i] == contents &&
c->uniform_data[i] == data) {
uint32_t uniform = c->num_uniforms++;
if (uniform >= c->uniform_array_size) {
c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
c->uniform_array_size * 2);
c->uniform_data = reralloc(c, c->uniform_data,
c->uniform_array_size);
c->uniform_contents = reralloc(c, c->uniform_contents,
enum quniform_contents,
c->uniform_array_size);
c->uniform_contents[uniform] = contents;
c->uniform_data[uniform] = data;
/* Looks back into the current block to find the ldunif that wrote the uniform
* at the requested index. If it finds it, it returns true and writes the
* destination register of the ldunif instruction to 'unif'.
* This can impact register pressure and end up leading to worse code, so we
* limit the number of instructions we are willing to look back through to
* strike a good balance.
try_opt_ldunif(struct v3d_compile *c, uint32_t index, struct qreg *unif)
uint32_t count = 20;
struct qinst *prev_inst = NULL;
assert(c->cur_block);
/* We can only reuse a uniform if it was emitted in the same block,
* so callers must make sure the current instruction is being emitted
* in the current block.
vir_for_each_inst(inst, c->cur_block) {
if (&inst->link == c->cursor.link) {
assert(found || &c->cur_block->instructions == c->cursor.link);
list_for_each_entry_from_rev(struct qinst, inst, c->cursor.link->prev,
&c->cur_block->instructions, link) {
if ((inst->qpu.sig.ldunif || inst->qpu.sig.ldunifrf) &&
inst->uniform == index) {
list_for_each_entry_from(struct qinst, inst, prev_inst->link.next,
&c->cur_block->instructions, link) {
if (inst->dst.file == prev_inst->dst.file &&
inst->dst.index == prev_inst->dst.index) {
*unif = prev_inst->dst;
vir_uniform(struct v3d_compile *c,
enum quniform_contents contents,
const int num_uniforms = c->num_uniforms;
const int index = vir_get_uniform_index(c, contents, data);
/* If this is not the first time we see this uniform try to reuse the
* result of the last ldunif that loaded it.
const bool is_new_uniform = num_uniforms != c->num_uniforms;
if (!is_new_uniform && !c->disable_ldunif_opt) {
struct qreg ldunif_dst;
if (try_opt_ldunif(c, index, &ldunif_dst))
struct qinst *inst = vir_NOP(c);
inst->qpu.sig.ldunif = true;
inst->uniform = index;
inst->dst = vir_get_temp(c);
c->defs[inst->dst.index] = inst;
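
/* OPTPASS: helper for vir_optimize() below. Runs one optimization pass,
 * records whether it made progress, and optionally prints per-pass debug
 * output.
 */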
#define OPTPASS(func) \
bool stage_progress = func(c); \
if (stage_progress) { \
if (print_opt_debug) { \
"VIR opt pass %2d: %s progress\n", \
/*XXX vir_validate(c);*/ \
vir_optimize(struct v3d_compile *c)
bool print_opt_debug = false;
bool progress = false;
OPTPASS(vir_opt_copy_propagate);
OPTPASS(vir_opt_redundant_flags);
OPTPASS(vir_opt_dead_code);
OPTPASS(vir_opt_small_immediates);
OPTPASS(vir_opt_constant_alu);
vir_get_stage_name(struct v3d_compile *c)
if (c->vs_key && c->vs_key->is_coord)
return "MESA_SHADER_VERTEX_BIN";
else if (c->gs_key && c->gs_key->is_coord)
return "MESA_SHADER_GEOMETRY_BIN";
return gl_shader_stage_name(c->s->info.stage);
static inline uint32_t
compute_vpm_size_in_sectors(const struct v3d_device_info *devinfo)
assert(devinfo->vpm_size > 0);
const uint32_t sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
return devinfo->vpm_size / sector_size;
/* Computes various parameters affecting VPM memory configuration for programs
* involving geometry shaders to ensure the program fits in memory and honors
* requirements described in section "VPM usage" of the programming manual.
compute_vpm_config_gs(struct v3d_device_info *devinfo,
struct v3d_vs_prog_data *vs,
struct v3d_gs_prog_data *gs,
struct vpm_config *vpm_cfg_out)
const uint32_t A = vs->separate_segments ? 1 : 0;
const uint32_t Ad = vs->vpm_input_size;
const uint32_t Vd = vs->vpm_output_size;
const uint32_t vpm_size = compute_vpm_size_in_sectors(devinfo);
/* Try to fit program into our VPM memory budget by adjusting
* configurable parameters iteratively. We do this in two phases:
* the first phase tries to fit the program into the total available
* VPM memory. If we succeed at that, then the second phase attempts
* to fit the program into half of that budget so we can run bin and
* render programs in parallel.
struct vpm_config vpm_cfg[2];
struct vpm_config *final_vpm_cfg = NULL;
vpm_cfg[phase].As = 1;
vpm_cfg[phase].Gs = 1;
vpm_cfg[phase].Gd = gs->vpm_output_size;
vpm_cfg[phase].gs_width = gs->simd_width;
/* While there is a requirement that Vc >= [Vn / 16], this is
* always the case when tessellation is not present because in that
* case Vn can only be 6 at most (when input primitive is triangles
* We always choose Vc=2. We can't go lower than this due to GFXH-1744,
* and Broadcom has not found it worth it to increase it beyond this
* in general. Increasing Vc also increases VPM memory pressure which
* can end up being detrimental for performance in some scenarios.
vpm_cfg[phase].Vc = 2;
/* Gv is a constraint on the hardware to not exceed the
* specified number of vertex segments per GS batch. If adding a
* new primitive to a GS batch would result in a range of more
* than Gv vertex segments being referenced by the batch, then
* the hardware will flush the batch and start a new one. This
* means that we can choose any value we want, we just need to
* be aware that larger values improve GS batch utilization
* at the expense of more VPM memory pressure (which can affect
* other performance aspects, such as GS dispatch width).
* We start with the largest value, and will reduce it if we
* find that total memory pressure is too high.
vpm_cfg[phase].Gv = 3;
/* When GS is present in absence of TES, then we need to satisfy
* that Ve >= Gv. We go with the smallest value of Ve to avoid
* increasing memory pressure.
vpm_cfg[phase].Ve = vpm_cfg[phase].Gv;
uint32_t vpm_sectors =
A * vpm_cfg[phase].As * Ad +
(vpm_cfg[phase].Vc + vpm_cfg[phase].Ve) * Vd +
vpm_cfg[phase].Gs * vpm_cfg[phase].Gd;
/* Ideally we want to use no more than half of the available
* memory so we can execute a bin and render program in parallel
* without stalls. If we achieved that then we are done.
if (vpm_sectors <= vpm_size / 2) {
final_vpm_cfg = &vpm_cfg[phase];
/* At the very least, we should not allocate more than the
* total available VPM memory. If we have a configuration that
* succeeds at this we save it and continue to see if we can
* meet the half-memory-use criteria too.
if (phase == 0 && vpm_sectors <= vpm_size) {
vpm_cfg[1] = vpm_cfg[0];
/* Try lowering Gv */
if (vpm_cfg[phase].Gv > 0) {
vpm_cfg[phase].Gv--;
/* Try lowering GS dispatch width */
if (vpm_cfg[phase].gs_width > 1) {
vpm_cfg[phase].gs_width >>= 1;
vpm_cfg[phase].Gd = align(vpm_cfg[phase].Gd, 2) / 2;
} while (vpm_cfg[phase].gs_width == 2);
/* Reset Gv to max after dropping dispatch width */
vpm_cfg[phase].Gv = 3;
/* We ran out of options to reduce memory pressure. If we
* are at phase 1 we have at least a valid configuration, so we
final_vpm_cfg = &vpm_cfg[0];
assert(final_vpm_cfg);
assert(final_vpm_cfg->Gd <= 16);
assert(final_vpm_cfg->Gv < 4);
assert(final_vpm_cfg->Ve < 4);
assert(final_vpm_cfg->Vc >= 2 && final_vpm_cfg->Vc <= 4);
assert(final_vpm_cfg->gs_width == 1 ||
final_vpm_cfg->gs_width == 4 ||
final_vpm_cfg->gs_width == 8 ||
final_vpm_cfg->gs_width == 16);
*vpm_cfg_out = *final_vpm_cfg;
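
/* Computes the VPM configuration for both the bin and render variants of the
 * pipeline. The VS-only defaults are filled in directly; when a GS is
 * present, compute_vpm_config_gs() is used and can fail if the program does
 * not fit in VPM memory (hence the checked calls below).
 */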
v3d_compute_vpm_config(struct v3d_device_info *devinfo,
struct v3d_vs_prog_data *vs_bin,
struct v3d_vs_prog_data *vs,
struct v3d_gs_prog_data *gs_bin,
struct v3d_gs_prog_data *gs,
struct vpm_config *vpm_cfg_bin,
struct vpm_config *vpm_cfg)
assert(vs && vs_bin);
assert((gs != NULL) == (gs_bin != NULL));
vpm_cfg_bin->As = 1;
vpm_cfg_bin->Ve = 0;
vpm_cfg_bin->Vc = vs_bin->vcm_cache_size;
vpm_cfg->Vc = vs->vcm_cache_size;
if (!compute_vpm_config_gs(devinfo, vs_bin, gs_bin, vpm_cfg_bin))
if (!compute_vpm_config_gs(devinfo, vs, gs, vpm_cfg))