/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "midgard_quirks.h"

struct phys_reg {
        /* Physical register: 0-31 */
        unsigned reg;

        /* Byte offset into the physical register: 0-15 */
        unsigned offset;

        /* log2(bytes per component) for fast mul/div */
        unsigned shift;
};

/* Shift up by reg_offset and horizontally by dst_offset. */

static void
offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcshift, unsigned dstshift, unsigned dst_offset)
{
        unsigned out[MIR_VEC_COMPONENTS];

        signed reg_comp = reg_offset >> srcshift;
        signed dst_comp = dst_offset >> dstshift;

        unsigned max_component = (16 >> srcshift) - 1;

        assert(reg_comp << srcshift == reg_offset);
        assert(dst_comp << dstshift == dst_offset);

        for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                signed comp = MAX2(c - dst_comp, 0);
                out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
        }

        memcpy(swizzle, out, sizeof(out));
}
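
/* A worked example, assuming 32-bit components (srcshift = dstshift = 2):
 * reg_offset = 8 gives reg_comp = 2 and dst_offset = 0 gives dst_comp = 0, so
 * an identity swizzle xyzw (0, 1, 2, 3) becomes zwww (2, 3, 3, 3) after
 * clamping to max_component = 3. */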

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg, unsigned shift)
{
        struct phys_reg r = {
                .reg = reg,
                .offset = 0,
                .shift = shift
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned shift)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED, shift);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg), shift);
        else if (!l)
                return default_phys_reg(REGISTER_UNUSED, shift);

        struct phys_reg r = {
                .reg = l->solutions[reg] / 16,
                .offset = l->solutions[reg] & 0xF,
                .shift = shift
        };

        /* Report that we actually use this register, and return it */

        if (r.reg < 16)
                ctx->info->work_reg_count = MAX2(ctx->info->work_reg_count, r.reg + 1);

        return r;
}
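
/* The LCRA solution is a byte address into the register file: a hypothetical
 * solution of 36 decodes as 36 / 16 = r2 with byte offset 36 & 0xF = 4,
 * i.e. r2.y for 32-bit components. */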

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
                assert(classes[node] == REG_CLASS_WORK);
                classes[node] = class;
        }
}

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool ASSERTED
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag);
        default:
                unreachable("Invalid class");
        }
}

static bool ASSERTED
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *brar = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);

                        if (ins->compact_branch && ins->writeout)
                                mark_node_class(brar, ins->src[0]);

                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        mark_node_class(ldst, ins->src[3]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;

                default:
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */
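
        /* For instance (hypothetical MIR), if t0 is read by both an ALU op and
         * a store:
         *
         *      fadd t1, t0, t2
         *      st_int4 t0, [...]
         *
         * the store's read is rewritten through a copy:
         *
         *      fadd t1, t0, t2
         *      mov t3, t0
         *      st_int4 t3, [...]
         *
         * so t0 can stay in the work class while t3 takes the ld/st class. */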

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_brar = BITSET_TEST(brar, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr)) ||
                        (is_brar && is_texw);

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;

                                        midgard_instruction m = v_mov(idx, i);
                                        m.dest_type = pre_use->dest_type;
                                        m.src_types[1] = m.dest_type;
                                        m.mask = pre_use->mask;

                                        midgard_instruction *use = mir_next_op(pre_use);
                                        assert(use);
                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;

                                        idx = spill_idx++;

                                        midgard_instruction m = v_mov(i, idx);
                                        m.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                mir_bytemask_of_read_components(pre_use, i), 32), 32);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(brar);
        free(ldst);
        free(texr);
        free(texw);
}

static void
mir_compute_interference(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* First, we need liveness information to be computed per block */
        mir_compute_liveness(ctx);

        /* We need to force r1.w live throughout a blend shader */

        if (ctx->inputs->is_blend) {
                unsigned r1w = ~0;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_rev(block, ins) {
                                if (ins->writeout)
                                        r1w = ins->dest;
                        }

                        if (r1w != ~0)
                                break;
                }

                mir_foreach_instr_global(ctx, ins) {
                        if (ins->dest < ctx->temp_count)
                                lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
                }
        }

        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;

                /* The scalar and vector units run in parallel. We need to make
                 * sure they don't write to same portion of the register file
                 * otherwise the result is undefined. Add interferences to
                 * avoid this situation.
                 */
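                /* The two groups below follow the bundle's execution stages:
                 * VMUL/SADD issue together, and the remaining units
                 * (VADD/SMUL/LUT) issue together -- this is our reading of the
                 * scheduler, not something encoded here. */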

                util_dynarray_foreach(&blk->bundles, midgard_bundle, bundle) {
                        midgard_instruction *instrs[2][4];
                        unsigned instr_count[2] = { 0, 0 };

                        for (unsigned i = 0; i < bundle->instruction_count; i++) {
                                if (bundle->instructions[i]->unit == UNIT_VMUL ||
                                    bundle->instructions[i]->unit == UNIT_SADD)
                                        instrs[0][instr_count[0]++] = bundle->instructions[i];
                                else
                                        instrs[1][instr_count[1]++] = bundle->instructions[i];
                        }

                        for (unsigned i = 0; i < ARRAY_SIZE(instr_count); i++) {
                                for (unsigned j = 0; j < instr_count[i]; j++) {
                                        midgard_instruction *ins_a = instrs[i][j];

                                        if (ins_a->dest >= ctx->temp_count) continue;

                                        for (unsigned k = j + 1; k < instr_count[i]; k++) {
                                                midgard_instruction *ins_b = instrs[i][k];

                                                if (ins_b->dest >= ctx->temp_count) continue;

                                                lcra_add_node_interference(l, ins_b->dest,
                                                                           mir_bytemask(ins_b),
                                                                           ins_a->dest,
                                                                           mir_bytemask(ins_a));
                                        }
                                }
                        }
                }

                uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i) {
                                        if (live[i]) {
                                                unsigned mask = mir_bytemask(ins);
                                                lcra_add_node_interference(l, dest, mask, i, live[i]);
                                        }
                                }
                        }

                        /* Add blend shader interference: blend shaders might
                         * clobber r0-r3 */

                        if (ins->compact_branch && ins->writeout) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i) {
                                        if (!live[i])
                                                continue;

                                        for (unsigned j = 0; j < 4; j++) {
                                                lcra_add_node_interference(l, ctx->temp_count + j,
                                                                           0xFFFF,
                                                                           i, live[i]);
                                        }
                                }
                        }

                        /* Update live_in */
                        mir_liveness_ins_update(live, ins, ctx->temp_count);
                }

                free(live);
        }
}

static bool
mir_is_64(midgard_instruction *ins)
{
        if (nir_alu_type_get_type_size(ins->dest_type) == 64)
                return true;

        mir_foreach_src(ins, v) {
                if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)
                        return true;
        }

        return false;
}

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on the number of
         * register-mapped uniforms and the shader stage. By ABI we limit blend
         * shaders to 8 registers, should be lower XXX */
        int rmu = ctx->info->push.count / 4;
        int work_count = ctx->inputs->is_blend ? 8 : 16 - MAX2(rmu - 8, 0);
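
        /* For example, 40 push words means rmu = 10 register-mapped uniform
         * vec4s, leaving a non-blend shader 16 - MAX2(10 - 8, 0) = 14 work
         * registers. */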

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Initialize LCRA. Allocate extra node at the end for r1-r3 for
         * interference */

        struct lcra_state *l = lcra_alloc_equations(ctx->temp_count + 4, 5);
        unsigned node_r1 = ctx->temp_count + 1;

        /* Starts of classes, in bytes */
        l->class_start[REG_CLASS_WORK] = 16 * 0;
        l->class_start[REG_CLASS_LDST] = 16 * 26;
        l->class_start[REG_CLASS_TEXR] = 16 * 28;
        l->class_start[REG_CLASS_TEXW] = 16 * 28;

        l->class_size[REG_CLASS_WORK] = 16 * work_count;
        l->class_size[REG_CLASS_LDST] = 16 * 2;
        l->class_size[REG_CLASS_TEXR] = 16 * 2;
        l->class_size[REG_CLASS_TEXW] = 16 * 2;
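
        /* In register terms, the byte addresses above put LDST at r26-r27 and
         * TEXR/TEXW at r28-r29, with 16 bytes per register. */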

        lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);

        /* To save space on T*20, we don't have real texture registers.
         * Instead, tex inputs reuse the load/store pipeline registers, and
         * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
         * noting that this handles interferences and sizes correctly. */

        if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
                l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
                l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
        }

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                /* Swizzles of 32-bit sources on 64-bit instructions need to be
                 * aligned to either bottom (xy) or top (zw). More general
                 * swizzle lowering should happen prior to scheduling (TODO),
                 * but once we get RA we shouldn't disrupt this further. Align
                 * sources of 64-bit instructions. */

                if (ins->type == TAG_ALU_4 && mir_is_64(ins)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];

                                if (s < ctx->temp_count)
                                        min_alignment[s] = MAX2(3, min_alignment[s]);
                        }
                }

                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->op)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];
                                unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);

                                if (s < ctx->temp_count)
                                        min_alignment[s] = MAX2((size == 64) ? 3 : 2, min_alignment[s]);
                        }
                }

                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                unsigned size = nir_alu_type_get_type_size(ins->dest_type);

                if (ins->is_pack)
                        size = 32;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int comps1 = util_logbase2(ins->mask);

                int bytes = (comps1 + 1) * (size / 8);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], bytes);

                min_alignment[dest] =
                        MAX2(min_alignment[dest],
                             (size == 16) ? 1 : /* (1 << 1) = 2-byte */
                             (size == 32) ? 2 : /* (1 << 2) = 4-byte */
                             (size == 64) ? 3 : /* (1 << 3) = 8-byte */
                             3); /* 8-bit todo */
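
                /* For example, a vec3 32-bit write has mask 0x7, so comps1 = 2
                 * and bytes = 12; found_class then bounds the node to 12 bytes
                 * via lcra_restrict_range below. */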

                /* We can't cross xy/zw boundaries. TODO: vec8 can */
                if (size == 16 && min_alignment[dest] != 4)
                        min_bound[dest] = 8;

                mir_foreach_src(ins, s) {
                        unsigned src_size = nir_alu_type_get_type_size(ins->src_types[s]);
                        if (src_size == 16 && ins->src[s] < SSA_FIXED_MINIMUM)
                                min_bound[ins->src[s]] = MAX2(min_bound[ins->src[s]], 8);
                }

                /* We don't have a swizzle for the conditional and we don't
                 * want to muck with the conditional itself, so just force
                 * alignment for now */

                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
                        min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */

                        /* LCRA assumes bound >= alignment */
                        min_bound[dest] = 16;
                }

                /* Since ld/st swizzles and masks are 32-bit only, we need them
                 * aligned to enable final packing */
                if (ins->type == TAG_LOAD_STORE_4)
                        min_alignment[dest] = MAX2(min_alignment[dest], 2);
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
                                   min_bound[i] ? min_bound[i] : 16);
                lcra_restrict_range(l, i, found_class[i]);
        }
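
        /* Note the defaults when nothing constrained a node: alignment of
         * 1 << 2 = 4 bytes and a 16-byte bound, i.e. an ordinary 32-bit vec4. */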

        free(found_class);
        free(min_alignment);
        free(min_bound);

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(l->class, ins->src[0], REG_CLASS_LDST);
                        set_class(l->class, ins->src[1], REG_CLASS_LDST);
                        set_class(l->class, ins->src[2], REG_CLASS_LDST);
                        set_class(l->class, ins->src[3], REG_CLASS_LDST);

                        if (OP_IS_VEC4_ONLY(ins->op)) {
                                lcra_restrict_range(l, ins->dest, 16);
                                lcra_restrict_range(l, ins->src[0], 16);
                                lcra_restrict_range(l, ins->src[1], 16);
                                lcra_restrict_range(l, ins->src[2], 16);
                                lcra_restrict_range(l, ins->src[3], 16);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(l->class, ins->dest, REG_CLASS_TEXW);
                        set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[2], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(l->class, ins->type, ins->dest));
                assert(check_read_class(l->class, ins->type, ins->src[0]));
                assert(check_read_class(l->class, ins->type, ins->src[1]));
                assert(check_read_class(l->class, ins->type, ins->src[2]));
                assert(check_read_class(l->class, ins->type, ins->src[3]));
        }

        /* Mark writeout to r0, depth to r1.x, stencil to r1.y,
         * render target to r1.z, unknown to r1.w */
        mir_foreach_instr_global(ctx, ins) {
                if (!(ins->compact_branch && ins->writeout)) continue;

                if (ins->src[0] < ctx->temp_count)
                        l->solutions[ins->src[0]] = 0;

                if (ins->src[2] < ctx->temp_count)
                        l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_X * 4;

                if (ins->src[3] < ctx->temp_count)
                        l->solutions[ins->src[3]] = (16 * 1) + COMPONENT_Y * 4;

                if (ins->src[1] < ctx->temp_count)
                        l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;

                if (ins->dest < ctx->temp_count)
                        l->solutions[ins->dest] = (16 * 1) + COMPONENT_W * 4;
        }
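
        /* Reading the solutions above: (16 * 1) addresses r1 and
         * COMPONENT_* * 4 selects the 32-bit lane, so depth lands at byte
         * address 16 (r1.x) and stencil at 20 (r1.y). */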

        /* Destinations of instructions in a writeout block cannot be assigned
         * to r1 unless they are actually used as r1 from the writeout itself,
         * since the writes to r1 are special. A code sequence like:
         *
         *      sadd.fmov r1.x, [...]
         *      vadd.fadd r0, r1, r2
         *
         * will misbehave since the r1.x write will be interpreted as a
         * gl_FragDepth write so it won't show up correctly when r1 is read in
         * the following segment. We model this as interference.
         */

        for (unsigned i = 0; i < 4; ++i)
                l->solutions[ctx->temp_count + i] = (16 * i);

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;

                mir_foreach_bundle_in_block(blk, v) {
                        /* We need at least a writeout and nonwriteout instruction */
                        if (v->instruction_count < 2)
                                continue;

                        /* Branches always come at the end */
                        midgard_instruction *br = v->instructions[v->instruction_count - 1];

                        if (!br->writeout)
                                continue;

                        for (signed i = v->instruction_count - 2; i >= 0; --i) {
                                midgard_instruction *ins = v->instructions[i];

                                if (ins->dest >= ctx->temp_count)
                                        continue;

                                bool used_as_r1 = (br->dest == ins->dest);

                                mir_foreach_src(br, s)
                                        used_as_r1 |= (s > 0) && (br->src[s] == ins->dest);

                                if (!used_as_r1)
                                        lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), node_r1, 0xFFFF);
                        }
                }
        }

        /* Precolour blend input to r0. Note writeout is necessarily at the end
         * and blend shaders are single-RT only so there is only a single
         * writeout block, so this cannot conflict with the writeout r0 (there
         * is no need to have an intermediate move) */

        if (ctx->blend_input != ~0) {
                assert(ctx->blend_input < ctx->temp_count);
                l->solutions[ctx->blend_input] = 0;
        }

        /* Same for the dual-source blend input/output, except here we use r2,
         * which is also set in the fragment shader. */

        if (ctx->blend_src1 != ~0) {
                assert(ctx->blend_src1 < ctx->temp_count);
                l->solutions[ctx->blend_src1] = (16 * 2);
                ctx->info->work_reg_count = MAX2(ctx->info->work_reg_count, 3);
        }

        mir_compute_interference(ctx, l);

        *spilled = !lcra_solve(l);

        return l;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct lcra_state *l,
        midgard_instruction *ins)
{
        unsigned src_shift[MIR_SRC_COUNT];

        for (unsigned i = 0; i < MIR_SRC_COUNT; ++i) {
                src_shift[i] =
                        util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);
        }

        unsigned dest_shift =
                util_logbase2(nir_alu_type_get_type_size(ins->dest_type) / 8);

        switch (ins->type) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16: {
                if (ins->compact_branch)
                        return;

                struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);

                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                unsigned dest_offset =
                        GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
                        dest.offset;

                offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
                if (!ins->has_inline_constant)
                        offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);
                if (ins->src[0] != ~0)
                        ins->src[0] = SSA_FIXED_REGISTER(src1.reg);
                if (ins->src[1] != ~0)
                        ins->src[1] = SSA_FIXED_REGISTER(src2.reg);
                if (ins->dest != ~0)
                        ins->dest = SSA_FIXED_REGISTER(dest.reg);
                break;
        }

        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */

                bool encodes_src = OP_IS_STORE(ins->op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->src[0] = SSA_FIXED_REGISTER(src.reg);
                        offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
                } else {
                        struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);

                        ins->dest = SSA_FIXED_REGISTER(dst.reg);
                        offset_swizzle(ins->swizzle[0], 0, 2, dest_shift, dst.offset);
                        mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                }

                /* We also follow up by actual arguments */

                for (int i = 1; i <= 3; i++) {
                        unsigned src_index = ins->src[i];
                        if (src_index != ~0) {
                                struct phys_reg src = index_to_reg(ctx, l, src_index, src_shift[i]);
                                unsigned component = src.offset >> src.shift;
                                assert(component << src.shift == src.offset);
                                ins->src[i] = SSA_FIXED_REGISTER(src.reg);
                                ins->swizzle[i][0] += component;
                        }
                }

                break;
        }

        case TAG_TEXTURE_4: {
                if (ins->op == midgard_tex_op_barrier)
                        break;

                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
                struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_shift[2]);
                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);

                /* First, install the texture coordinate */
                if (ins->src[1] != ~0)
                        ins->src[1] = SSA_FIXED_REGISTER(coord.reg);
                offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);

                /* Next, install the destination */

                ins->dest = SSA_FIXED_REGISTER(dest.reg);
                offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
                               dest_shift == 1 ? dest.offset % 8 :
                               dest.offset);
                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                /* If there is a register LOD/bias, use it */
                if (ins->src[2] != ~0) {
                        assert(!(lod.offset & 3));
                        ins->src[2] = SSA_FIXED_REGISTER(lod.reg);
                        ins->swizzle[2][0] = lod.offset / 4;
                }

                /* If there is an offset register, install it */
                if (ins->src[3] != ~0) {
                        ins->src[3] = SSA_FIXED_REGISTER(offset.reg);
                        ins->swizzle[3][0] = offset.offset / 4;
                }

                break;
        }

        default:
                break;
        }
}

static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, l, ins);
}

/* If register allocation fails, find the best spill node */

static signed
mir_choose_spill_node(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* We can't spill a previously spilled value or an unspill */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->no_spill & (1 << l->spill_class)) {
                        lcra_set_node_spill_cost(l, ins->dest, -1);

                        if (l->spill_class != REG_CLASS_WORK) {
                                mir_foreach_src(ins, s)
                                        lcra_set_node_spill_cost(l, ins->src[s], -1);
                        }
                }
        }

        return lcra_get_best_spill_node(l);
}
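
/* Note on the exclusion above: for special spill classes, the sources of
 * no-spill moves are ruled out as well, since "spilling" a special value only
 * copies it through a work register; re-selecting the value feeding such a
 * move would make no progress on the next iteration. */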

/* Once we've chosen a spill node, spill it */

static void
mir_spill_register(
                compiler_context *ctx,
                unsigned spill_node,
                unsigned spill_class,
                unsigned *spill_count)
{
        if (spill_class == REG_CLASS_WORK && ctx->inputs->is_blend)
                unreachable("Blend shader spilling is currently unimplemented");

        unsigned spill_index = ctx->temp_count;

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers */

        bool is_special = spill_class != REG_CLASS_WORK;
        bool is_special_w = spill_class == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        /* If multiple instructions write to this destination, we'll have to
         * fill from TLS before writing */
        unsigned write_count = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
                if (ins->dest == spill_node)
                        ++write_count;
        }

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                unsigned last_id = ~0;
                unsigned last_fill = ~0;
                unsigned last_spill_index = ~0;
                midgard_instruction *last_spill = NULL;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                /* Note: it's important to match the mask of the spill
                                 * with the mask of the instruction whose destination
                                 * we're spilling, or otherwise we'll read invalid
                                 * components and can fail RA in a subsequent iteration
                                 */

                                if (is_special_w) {
                                        midgard_instruction st = v_mov(spill_node, spill_slot);
                                        st.no_spill |= (1 << spill_class);
                                        st.mask = ins->mask;
                                        st.dest_type = st.src_types[1] = ins->dest_type;

                                        /* Hint: don't rewrite this node */
                                        st.hint = true;

                                        mir_insert_instruction_after_scheduled(ctx, block, ins, st);
                                } else {
                                        unsigned bundle = ins->bundle_id;
                                        unsigned dest = (bundle == last_id) ? last_spill_index : spill_index++;

                                        unsigned bytemask = mir_bytemask(ins);
                                        unsigned write_mask = mir_from_bytemask(mir_round_bytemask_up(
                                                                bytemask, 32), 32);

                                        if (write_count > 1 && bytemask != 0xFFFF && bundle != last_fill) {
                                                midgard_instruction read =
                                                        v_load_store_scratch(dest, spill_slot, false, 0xF);
                                                mir_insert_instruction_before_scheduled(ctx, block, ins, read);
                                                write_mask = 0xF;
                                                last_fill = bundle;
                                        }

                                        ins->dest = dest;
                                        ins->no_spill |= (1 << spill_class);

                                        bool move = false;

                                        /* In the same bundle, reads of the destination
                                         * of the spilt instruction need to be direct */
                                        midgard_instruction *it = ins;
                                        while ((it = list_first_entry(&it->link, midgard_instruction, link))
                                               && (it->bundle_id == bundle)) {

                                                if (!mir_has_arg(it, spill_node)) continue;

                                                mir_rewrite_index_src_single(it, spill_node, dest);

                                                /* The spilt instruction will write to
                                                 * a work register for `it` to read but
                                                 * the spill needs an LD/ST register */
                                                move = true;
                                        }

                                        if (move)
                                                dest = spill_index++;

                                        if (last_id == bundle) {
                                                last_spill->mask |= write_mask;
                                                u_foreach_bit(c, write_mask)
                                                        last_spill->swizzle[0][c] = c;
                                        } else {
                                                midgard_instruction st =
                                                        v_load_store_scratch(dest, spill_slot, true, write_mask);
                                                last_spill = mir_insert_instruction_after_scheduled(ctx, block, ins, st);
                                        }

                                        if (move) {
                                                midgard_instruction mv = v_mov(ins->dest, dest);
                                                mv.no_spill |= (1 << spill_class);

                                                mir_insert_instruction_after_scheduled(ctx, block, ins, mv);
                                        }

                                        last_id = bundle;
                                        last_spill_index = ins->dest;
                                }
                        }
                }
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        /* If we don't use the spilled value, nothing to do */
                        if (!mir_has_arg(ins, spill_node)) continue;

                        unsigned index = 0;

                        if (!is_special_w) {
                                index = ++spill_index;

                                midgard_instruction *before = ins;
                                midgard_instruction st;

                                if (is_special) {
                                        /* Move for a special class read */
                                        st = v_mov(spill_node, index);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        /* TLS load */
                                        st = v_load_store_scratch(index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                read_bytemask, 32), 32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                        } else {
                                /* Special writes already have their move spilled in */
                                index = spill_slot;
                        }

                        /* Rewrite to use */
                        mir_rewrite_index_src_single(ins, spill_node, index);
                }
        }

        /* Reset hints */

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}

/* Demote register-mapped uniforms past the new cutoff to UBO loads, freeing
 * up work registers */

static void
mir_demote_uniforms(compiler_context *ctx, unsigned new_cutoff)
{
        unsigned uniforms = ctx->info->push.count / 4;
        unsigned old_work_count = 16 - MAX2(uniforms - 8, 0);
        unsigned work_count = 16 - MAX2((new_cutoff - 8), 0);

        unsigned min_demote = SSA_FIXED_REGISTER(old_work_count);
        unsigned max_demote = SSA_FIXED_REGISTER(work_count);

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                mir_foreach_instr_in_block(block, ins) {
                        mir_foreach_src(ins, i) {
                                if (ins->src[i] < min_demote || ins->src[i] >= max_demote)
                                        continue;

                                midgard_instruction *before = ins;

                                unsigned temp = make_compiler_temp(ctx);
                                unsigned idx = (23 - SSA_REG_FROM_FIXED(ins->src[i])) * 4;
                                assert(idx < ctx->info->push.count);
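
                                /* Register-mapped uniforms count down from
                                 * r23, so e.g. a source of r22 maps to push
                                 * words 4-7, the second uniform vec4. */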

                                ctx->ubo_mask |= BITSET_BIT(ctx->info->push.words[idx].ubo);

                                midgard_instruction ld = {
                                        .type = TAG_LOAD_STORE_4,
                                        .mask = 0xF,
                                        .dest = temp,
                                        .dest_type = ins->src_types[i],
                                        .src = { ~0, ~0, ~0, ~0 },
                                        .swizzle = SWIZZLE_IDENTITY_4,
                                        .op = midgard_op_ld_ubo_128,
                                        .load_store = {
                                                .index_reg = REGISTER_LDST_ZERO,
                                        },
                                        .constants.u32[0] = ctx->info->push.words[idx].offset
                                };

                                midgard_pack_ubo_index_imm(&ld.load_store,
                                                           ctx->info->push.words[idx].ubo);

                                mir_insert_instruction_before_scheduled(ctx, block, before, ld);

                                mir_rewrite_index_src_single(ins, ins->src[i], temp);
                        }
                }
        }

        ctx->info->push.count = MIN2(ctx->info->push.count, new_cutoff * 4);
}

/* Run register allocation in a loop, spilling until we succeed */

void
mir_ra(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = DIV_ROUND_UP(ctx->info->tls_size, 16);
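
        /* e.g. if an earlier compile left tls_size at 64 bytes, we resume with
         * spill_count = DIV_ROUND_UP(64, 16) = 4 slots already in use. */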

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled) {
                        signed spill_node = mir_choose_spill_node(ctx, l);
                        unsigned uniforms = ctx->info->push.count / 4;

                        /* It's a lot cheaper to demote uniforms to get more
                         * work registers than to spill to TLS. */
                        if (l->spill_class == REG_CLASS_WORK && uniforms > 8) {
                                mir_demote_uniforms(ctx, MAX2(uniforms - 4, 8));
                        } else if (spill_node == -1) {
                                fprintf(stderr, "ERROR: Failed to choose spill node\n");
                                lcra_free(l);
                                return;
                        } else {
                                mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
                        }
                }

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                if (l)
                        lcra_free(l);

                l = allocate_registers(ctx, &spilled);
        } while (spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->info->tls_size = spill_count * 16;

        if (l) {
                install_registers(ctx, l);
                lcra_free(l);
        }
}