 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * Rob Clark <robclark@freedesktop.org>

#include "util/ralloc.h"
#include "util/u_math.h"
#include "ir3_shader.h"
 * The legalize pass handles ensuring sufficient nop's and sync flags for
 * 1) Iteratively determine where sync ((sy)/(ss)) flags are needed,
 *    based on state flowing out of predecessor blocks until there is
 *    no further change.  In some cases this requires inserting nops.
 * 2) Mark (ei) on last varying input, and (ul) on last use of a0.x
 * 3) Final nop scheduling for instruction latency
 * 4) Resolve jumps and schedule blocks, marking potential convergence
struct ir3_legalize_ctx {
   struct ir3_compiler *compiler;
   struct ir3_shader_variant *so;
   bool early_input_release;

struct ir3_legalize_state {
   regmask_t needs_ss_war; /* write after read */

struct ir3_legalize_block_data {
   struct ir3_legalize_state state;

/* We want to evaluate each block from the position of any other
 * predecessor block, in order that the flags set are the union of
 * all possible program paths.
 *
 * To do this, we need to know the output state (needs_ss/ss_war/sy)
 * of all predecessor blocks.  The tricky thing is loops, which mean
 * that we can't simply recursively process each predecessor block
 * before legalizing the current block.
 *
 * How we handle that is by looping over all the blocks until the
 * results converge.  If the output state of a given block changes
 * in a given pass, this means that all successor blocks are not
 * yet fully legalized.
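 *
 * A minimal sketch of that "loop over all the blocks until the results
 * converge" idea, assuming legalize_block() returns true whenever the
 * block's output state changed (the actual driver loop lives in
 * ir3_legalize() further below and is partly elided from this excerpt):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       foreach_block (block, &ir->block_list)
 *          progress |= legalize_block(ctx, block);
 *    } while (progress);
 */
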
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
   struct ir3_legalize_block_data *bd = block->data;
   struct ir3_instruction *last_rel = NULL;
   struct ir3_instruction *last_n = NULL;
   struct list_head instr_list;
   struct ir3_legalize_state prev_state = bd->state;
   struct ir3_legalize_state *state = &bd->state;
   bool last_input_needs_ss = false;
   bool has_tex_prefetch = false;
   bool mergedregs = ctx->so->mergedregs;

   /* our input state is the OR of all predecessor blocks' state: */
   for (unsigned i = 0; i < block->predecessors_count; i++) {
      struct ir3_block *predecessor = block->predecessors[i];
      struct ir3_legalize_block_data *pbd = predecessor->data;
      struct ir3_legalize_state *pstate = &pbd->state;

      /* Our input (ss)/(sy) state is based on OR'ing the output
       * state of all our predecessor blocks
      regmask_or(&state->needs_ss, &state->needs_ss, &pstate->needs_ss);
      regmask_or(&state->needs_ss_war, &state->needs_ss_war,
                 &pstate->needs_ss_war);
      regmask_or(&state->needs_sy, &state->needs_sy, &pstate->needs_sy);
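
   /* Conceptually each regmask_or() above is just a per-register set union:
    * a register needs a sync flag after the merge if it needs one on *any*
    * incoming path.  A toy model of that merge (illustrative only; the real
    * regmask_t is a wider bitset that also understands half regs and merged
    * register files) might look like:
    *
    *    typedef uint64_t toy_regmask;
    *
    *    static inline void
    *    toy_regmask_or(toy_regmask *dst, toy_regmask a, toy_regmask b)
    *    {
    *       *dst = a | b;
    *    }
    */
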
   /* We need to take physical-only edges into account when tracking shared
   for (unsigned i = 0; i < block->physical_predecessors_count; i++) {
      struct ir3_block *predecessor = block->physical_predecessors[i];
      struct ir3_legalize_block_data *pbd = predecessor->data;
      struct ir3_legalize_state *pstate = &pbd->state;

      regmask_or_shared(&state->needs_ss, &state->needs_ss, &pstate->needs_ss);

   unsigned input_count = 0;
   foreach_instr (n, &block->instr_list) {
   unsigned inputs_remaining = input_count;

   /* Either inputs are in the first block or we expect inputs to be released
    * with the end of the program.
   assert(input_count == 0 || !ctx->early_input_release ||
          block == ir3_after_preamble(block->shader));

   /* remove all the instructions from the list, we'll be adding
    * them back in as we go
   list_replace(&block->instr_list, &instr_list);
   list_inithead(&block->instr_list);

   foreach_instr_safe (n, &instr_list) {
      n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);

      /* _meta::tex_prefetch instructions removed later in
       * collect_tex_prefetches()
      if (is_meta(n) && (n->opc != OPC_META_TEX_PREFETCH))
         struct ir3_register *inloc = n->srcs[0];
         assert(inloc->flags & IR3_REG_IMMED);
         ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);

      if ((last_n && is_barrier(last_n)) || n->opc == OPC_SHPE) {
         n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
         last_input_needs_ss = false;
         regmask_init(&state->needs_ss_war, mergedregs);
         regmask_init(&state->needs_ss, mergedregs);
         regmask_init(&state->needs_sy, mergedregs);

      if (last_n && (last_n->opc == OPC_PREDT)) {
         n->flags |= IR3_INSTR_SS;
         regmask_init(&state->needs_ss_war, mergedregs);
         regmask_init(&state->needs_ss, mergedregs);

      /* NOTE: consider dst register too.. it could happen that
       * texture sample instruction (for example) writes some
       * components which are unused.  A subsequent instruction
       * that writes the same register can race w/ the sam instr
       * resulting in undefined results:
      for (i = 0; i < n->dsts_count + n->srcs_count; i++) {
         struct ir3_register *reg;
         if (i < n->dsts_count)
            reg = n->srcs[i - n->dsts_count];

         /* TODO: we probably only need (ss) for alu
          * instr consuming sfu result.. need to make
          * some tests for both this and (sy)..
         if (regmask_get(&state->needs_ss, reg)) {
            n->flags |= IR3_INSTR_SS;
            last_input_needs_ss = false;
            regmask_init(&state->needs_ss_war, mergedregs);
            regmask_init(&state->needs_ss, mergedregs);

         if (regmask_get(&state->needs_sy, reg)) {
            n->flags |= IR3_INSTR_SY;
            regmask_init(&state->needs_sy, mergedregs);

         /* TODO: is it valid to have address reg loaded from a
          * relative src (ie. mova a0, c<a0.x+4>)?  If so, the
          * last_rel check below should be moved ahead of this:
         if (reg->flags & IR3_REG_RELATIV)

      foreach_dst (reg, n) {
         if (regmask_get(&state->needs_ss_war, reg)) {
            n->flags |= IR3_INSTR_SS;
            last_input_needs_ss = false;
            regmask_init(&state->needs_ss_war, mergedregs);
            regmask_init(&state->needs_ss, mergedregs);

         if (last_rel && (reg->num == regid(REG_A0, 0))) {
            last_rel->flags |= IR3_INSTR_UL;
      /* cat5+ does not have an (ss) bit, if needed we need to
       * insert a nop to carry the sync flag.  Would be kinda
       * clever if we were aware of this during scheduling, but
       * this should be a pretty rare case:
      if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
         struct ir3_instruction *nop;
         nop = ir3_NOP(block);
         nop->flags |= IR3_INSTR_SS;
         n->flags &= ~IR3_INSTR_SS;

      /* need to be able to set (ss) on first instruction: */
      if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))

      if (ctx->compiler->samgq_workaround &&
          ctx->type != MESA_SHADER_FRAGMENT &&
          ctx->type != MESA_SHADER_COMPUTE && n->opc == OPC_SAMGQ) {
         struct ir3_instruction *samgp;

         list_delinit(&n->node);

         for (i = 0; i < 4; i++) {
            samgp = ir3_instr_clone(n);
            samgp->opc = OPC_SAMGP0 + i;
               samgp->flags |= IR3_INSTR_SY;

         list_delinit(&n->node);
         list_addtail(&n->node, &block->instr_list);
         regmask_set(&state->needs_ss, n->dsts[0]);

      foreach_dst (dst, n) {
         if (dst->flags & IR3_REG_SHARED)
            regmask_set(&state->needs_ss, dst);

      if (is_tex_or_prefetch(n)) {
         regmask_set(&state->needs_sy, n->dsts[0]);
         if (n->opc == OPC_META_TEX_PREFETCH)
            has_tex_prefetch = true;
      } else if (n->opc == OPC_RESINFO) {
         regmask_set(&state->needs_ss, n->dsts[0]);
         ir3_NOP(block)->flags |= IR3_INSTR_SS;
         last_input_needs_ss = false;
      } else if (is_load(n)) {
         if (is_local_mem_load(n))
            regmask_set(&state->needs_ss, n->dsts[0]);
            regmask_set(&state->needs_sy, n->dsts[0]);
      } else if (is_atomic(n->opc)) {
         if (is_bindless_atomic(n->opc)) {
            regmask_set(&state->needs_sy, n->srcs[2]);
         } else if (is_global_a3xx_atomic(n->opc) ||
                    is_global_a6xx_atomic(n->opc)) {
            regmask_set(&state->needs_sy, n->dsts[0]);
            regmask_set(&state->needs_ss, n->dsts[0]);

      if (is_ssbo(n->opc) || is_global_a3xx_atomic(n->opc) ||
          is_bindless_atomic(n->opc))
         ctx->so->has_ssbo = true;

      /* both tex/sfu appear to not always immediately consume
       * their src register(s):
      if (is_tex(n) || is_sfu(n) || is_mem(n)) {
         foreach_src (reg, n) {
            regmask_set(&state->needs_ss_war, reg);
      if (ctx->early_input_release && is_input(n)) {
         last_input_needs_ss |= (n->opc == OPC_LDLV);

         assert(inputs_remaining > 0);

         if (inputs_remaining == 0) {
            /* This is the last input. We add the (ei) flag to release
             * varying memory after this executes. If it's an ldlv,
             * however, we need to insert a dummy bary.f on which we can
             * set the (ei) flag. We may also need to insert an (ss) to
             * guarantee that all ldlv's have finished fetching their
             * results before releasing the varying memory.
            struct ir3_instruction *last_input = n;
            if (n->opc == OPC_LDLV) {
               struct ir3_instruction *baryf;

               /* (ss)bary.f (ei)r63.x, 0, r0.x */
               baryf = ir3_instr_create(block, OPC_BARY_F, 1, 2);
               ir3_dst_create(baryf, regid(63, 0), 0);
               ir3_src_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
               ir3_src_create(baryf, regid(0, 0), 0);

            last_input->dsts[0]->flags |= IR3_REG_EI;
            if (last_input_needs_ss) {
               last_input->flags |= IR3_INSTR_SS;
               regmask_init(&state->needs_ss_war, mergedregs);
               regmask_init(&state->needs_ss, mergedregs);

   assert(inputs_remaining == 0 || !ctx->early_input_release);
   if (has_tex_prefetch && !ctx->has_inputs) {
      /* texture prefetch, but *no* inputs.. we need to insert a
       * dummy bary.f at the top of the shader to unblock varying
      struct ir3_instruction *baryf;

      /* (ss)bary.f (ei)r63.x, 0, r0.x */
      baryf = ir3_instr_create(block, OPC_BARY_F, 1, 2);
      ir3_dst_create(baryf, regid(63, 0), 0)->flags |= IR3_REG_EI;
      ir3_src_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
      ir3_src_create(baryf, regid(0, 0), 0);

      /* insert the dummy bary.f at head: */
      list_delinit(&baryf->node);
      list_add(&baryf->node, &block->instr_list);

      last_rel->flags |= IR3_INSTR_UL;

   if (memcmp(&prev_state, state, sizeof(*state))) {
      /* our output state changed, this invalidates all of our
      for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
         if (!block->successors[i])

         struct ir3_legalize_block_data *pbd = block->successors[i]->data;
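
      /* The rest of this loop body is elided here; conceptually, once a
       * block's output (ss)/(sy) state changes, each successor's cached
       * legalize state can no longer be trusted, so it is marked for
       * re-processing (e.g. a per-block flag such as pbd->valid = false,
       * named here purely for illustration) and legalize_block() reports
       * progress so the outer iterate-until-converged loop runs again.
       */
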
/* Expands dsxpp and dsypp macros to:
 * We apply this after flags syncing, as we don't want to sync in between the
 * two (which might happen if dst == src).  We do it before nop scheduling
 * because that needs to count actual instructions.
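 *
 * (The expansion listing is elided above; as the code below shows, each
 * macro is lowered to a dsxpp.1/dsypp.1 instruction followed by a clone of
 * it with the IR3_INSTR_P flag set.)
 */
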
apply_fine_deriv_macro(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
   struct list_head instr_list;

   /* remove all the instructions from the list, we'll be adding
    * them back in as we go
   list_replace(&block->instr_list, &instr_list);
   list_inithead(&block->instr_list);

   foreach_instr_safe (n, &instr_list) {
      list_addtail(&n->node, &block->instr_list);

      if (n->opc == OPC_DSXPP_MACRO || n->opc == OPC_DSYPP_MACRO) {
         n->opc = (n->opc == OPC_DSXPP_MACRO) ? OPC_DSXPP_1 : OPC_DSYPP_1;

         struct ir3_instruction *op_p = ir3_instr_clone(n);
         op_p->flags = IR3_INSTR_P;

         ctx->so->need_fine_derivatives = true;
/* NOTE: branch instructions are always the last instruction(s)
 * in the block.  We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 *   0029:021: mov.s32s32 r62.x, r1.y
 *   0082:022: br !p0.x, target=block5
 *   0083:023: br p0.x, target=block4
 *   // succs: if _[0029:021: mov.s32s32] block4; else block5;
 *   0084:024: jump, target=block6
 *   0085:025: jump, target=block7
 * ie. only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking
 * that the first instruction in the target block is itself
 * a jump, and setup the br directly to the jump's target
 * (and strip back out the now unreached jump)
 * TODO sometimes we end up with things like:
 *    add.u r0.y, r0.y, 1
 * If we swapped the order of the branches, we could drop one.
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
   /* special case for last block: */
   if (!block->successors[0])

   /* NOTE that we may or may not have inserted the jump
    * in the target block yet, so conditions to resolve
    * the dest to the dest block's successor are:
    *  (1) successor[1] == NULL &&
    *  (2) (block-is-empty || only-instr-is-jump)
   if (block->successors[1] == NULL) {
      if (list_is_empty(&block->instr_list)) {
         return block->successors[0];
      } else if (list_length(&block->instr_list) == 1) {
         struct ir3_instruction *instr =
            list_first_entry(&block->instr_list, struct ir3_instruction, node);
         if (instr->opc == OPC_JUMP) {
            /* If this jump is backwards, then we will probably convert
             * the jump being resolved to a backwards jump, which will
             * change a loop-with-continue or loop-with-if into a
             * doubly-nested loop and change the convergence behavior.
             * Disallow this here.
            if (block->successors[0]->index <= block->index)

            return block->successors[0];
remove_unused_block(struct ir3_block *old_target)
   list_delinit(&old_target->node);

   /* If there are any physical predecessors due to fallthroughs, then they may
    * fall through to any of the physical successors of this block. But we can
    * only fit two, so just pick the "earliest" one, i.e. the fallthrough if
    * TODO: we really ought to have unlimited numbers of physical successors,
    * both because of this and because we currently don't model some scenarios
    * with nested break/continue correctly.
   struct ir3_block *new_target;
   if (old_target->physical_successors[1] &&
       old_target->physical_successors[1]->start_ip <
          old_target->physical_successors[0]->start_ip) {
      new_target = old_target->physical_successors[1];
      new_target = old_target->physical_successors[0];

   for (unsigned i = 0; i < old_target->physical_predecessors_count; i++) {
      struct ir3_block *pred = old_target->physical_predecessors[i];
      if (pred->physical_successors[0] == old_target) {
         /* If we remove a physical successor, make sure the only physical
          * successor is the first one.
            pred->physical_successors[0] = pred->physical_successors[1];
            pred->physical_successors[1] = NULL;
            pred->physical_successors[0] = new_target;

         assert(pred->physical_successors[1] == old_target);
         pred->physical_successors[1] = new_target;

      ir3_block_add_physical_predecessor(new_target, pred);

   /* cleanup dangling predecessors: */
   for (unsigned i = 0; i < ARRAY_SIZE(old_target->successors); i++) {
      if (old_target->successors[i]) {
         struct ir3_block *succ = old_target->successors[i];
         ir3_block_remove_predecessor(succ, old_target);

   for (unsigned i = 0; i < ARRAY_SIZE(old_target->physical_successors); i++) {
      if (old_target->physical_successors[i]) {
         struct ir3_block *succ = old_target->physical_successors[i];
         ir3_block_remove_physical_predecessor(succ, old_target);
retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
   struct ir3_block *old_target = instr->cat0.target;
   struct ir3_block *cur_block = instr->block;

   /* update current block's successors to reflect the retargeting: */
   if (cur_block->successors[0] == old_target) {
      cur_block->successors[0] = new_target;
      debug_assert(cur_block->successors[1] == old_target);
      cur_block->successors[1] = new_target;

   /* also update physical_successors: */
   if (cur_block->physical_successors[0] == old_target) {
      cur_block->physical_successors[0] = new_target;
      debug_assert(cur_block->physical_successors[1] == old_target);
      cur_block->physical_successors[1] = new_target;

   /* update new target's predecessors: */
   ir3_block_add_predecessor(new_target, cur_block);
   ir3_block_add_physical_predecessor(new_target, cur_block);

   /* and remove old_target's predecessor: */
   ir3_block_remove_predecessor(old_target, cur_block);
   ir3_block_remove_physical_predecessor(old_target, cur_block);

   instr->cat0.target = new_target;

   if (old_target->predecessors_count == 0) {
      remove_unused_block(old_target);
opt_jump(struct ir3 *ir)
   bool progress = false;

   foreach_block (block, &ir->block_list)
      block->index = index++;

   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (!is_flow(instr) || !instr->cat0.target)

         struct ir3_block *tblock = resolve_dest_block(instr->cat0.target);
         if (tblock != instr->cat0.target) {
            /* Exit early if we deleted a block to avoid iterator
             * weirdness/assert fails
            if (retarget_jump(instr, tblock))

      /* Detect the case where the block ends either with:
       * - A single unconditional jump to the next block.
       * - Two jump instructions with opposite conditions, and one of
       *   them jumps to the next block.
       * We can remove the one that jumps to the next block in either case.
      if (list_is_empty(&block->instr_list))

      struct ir3_instruction *jumps[2] = {NULL, NULL};
         list_last_entry(&block->instr_list, struct ir3_instruction, node);
      if (!list_is_singular(&block->instr_list))
            list_last_entry(&jumps[0]->node, struct ir3_instruction, node);

      if (jumps[0]->opc == OPC_JUMP)
      else if (jumps[0]->opc != OPC_B || !jumps[1] || jumps[1]->opc != OPC_B)

      for (unsigned i = 0; i < 2; i++) {
         struct ir3_block *tblock = jumps[i]->cat0.target;
         if (&tblock->node == block->node.next) {
            list_delinit(&jumps[i]->node);

resolve_jumps(struct ir3 *ir)
   foreach_block (block, &ir->block_list)
      foreach_instr (instr, &block->instr_list)
         if (is_flow(instr) && instr->cat0.target) {
            struct ir3_instruction *target = list_first_entry(
               &instr->cat0.target->instr_list, struct ir3_instruction, node);

            instr->cat0.immed = (int)target->ip - (int)instr->ip;
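
            /* Worked example of the offset computed above: a branch sitting
             * at ip 10 whose target block starts with the instruction at
             * ip 25 gets cat0.immed = 25 - 10 = 15, while a backwards branch
             * to a block starting at ip 4 would get immed = 4 - 10 = -6.
             */
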
mark_jp(struct ir3_block *block)
   /* We only call this on the end block (in kill_sched) or after retargeting
    * all jumps to empty blocks (in mark_xvergence_points) so there's no need to
    * worry about empty blocks.
   assert(!list_is_empty(&block->instr_list));

   struct ir3_instruction *target =
      list_first_entry(&block->instr_list, struct ir3_instruction, node);
   target->flags |= IR3_INSTR_JP;

/* Mark points where control flow converges or diverges.
 * Divergence points could actually be re-convergence points where
 * "parked" threads are reconverged with threads that took the opposite
 * path last time around. Possibly it is easier to think of (jp) as
 * "the execution mask might have changed".
mark_xvergence_points(struct ir3 *ir)
   foreach_block (block, &ir->block_list) {
      /* We need to insert (jp) if an entry in the "branch stack" is created for
       * our block. This happens if there is a predecessor to our block that may
       * fallthrough to an earlier block in the physical CFG, either because it
       * ends in a non-uniform conditional branch or because there's a
       * fallthrough for a block in-between that also starts with (jp) and was
       * pushed on the branch stack already.
      for (unsigned i = 0; i < block->predecessors_count; i++) {
         struct ir3_block *pred = block->predecessors[i];

         for (unsigned j = 0; j < ARRAY_SIZE(pred->physical_successors); j++) {
            if (pred->physical_successors[j] != NULL &&
                pred->physical_successors[j]->start_ip < block->start_ip)

            /* If the predecessor just falls through to this block, we still
             * need to check if it "falls through" by jumping to the block. This
             * can happen if opt_jump fails and the block ends in two branches,
             * or if there's an empty if-statement (which currently can happen
             * with binning shaders after dead-code elimination) and the block
             * before ends with a conditional branch directly to this block.
            if (pred->physical_successors[j] == block) {
               foreach_instr_rev (instr, &pred->instr_list) {
                  if (instr->cat0.target == block) {
/* Insert the branch/jump instructions for flow control between blocks.
 * Initially this is done naively, without considering if the successor
 * block immediately follows the current block (ie. so no jump required),
 * but that is cleaned up in opt_jump().
 * TODO what ensures that the last write to p0.x in a block is the
 * branch condition?  Have we been getting lucky all this time?
block_sched(struct ir3 *ir)
   foreach_block (block, &ir->block_list) {
      if (block->successors[1]) {
         /* if/else, conditional branches to "then" or "else": */
         struct ir3_instruction *br1, *br2;

         if (block->brtype == IR3_BRANCH_GETONE ||
             block->brtype == IR3_BRANCH_SHPS) {
            /* getone/shps can't be inverted, and it wouldn't even make sense
             * to follow it with an inverted branch, so follow it by an
             * unconditional branch.
            debug_assert(!block->condition);
            if (block->brtype == IR3_BRANCH_GETONE)
               br1 = ir3_GETONE(block);
               br1 = ir3_SHPS(block);
            br1->cat0.target = block->successors[1];

            br2 = ir3_JUMP(block);
            br2->cat0.target = block->successors[0];

            debug_assert(block->condition);

            /* create "else" branch first (since "then" block should
             * frequently/always end up being a fall-thru):
            br1 = ir3_instr_create(block, OPC_B, 0, 1);
            ir3_src_create(br1, regid(REG_P0, 0), 0)->def =
               block->condition->dsts[0];
            br1->cat0.inv1 = true;
            br1->cat0.target = block->successors[1];

            br2 = ir3_instr_create(block, OPC_B, 0, 1);
            ir3_src_create(br2, regid(REG_P0, 0), 0)->def =
               block->condition->dsts[0];
            br2->cat0.target = block->successors[0];

            switch (block->brtype) {
            case IR3_BRANCH_COND:
               br1->cat0.brtype = br2->cat0.brtype = BRANCH_PLAIN;
               br1->cat0.brtype = BRANCH_ANY;
               br2->cat0.brtype = BRANCH_ALL;
               br1->cat0.brtype = BRANCH_ALL;
               br2->cat0.brtype = BRANCH_ANY;
            case IR3_BRANCH_GETONE:
            case IR3_BRANCH_SHPS:
               unreachable("can't get here");
      } else if (block->successors[0]) {
         /* otherwise unconditional jump to next block: */
         struct ir3_instruction *jmp;

         jmp = ir3_JUMP(block);
         jmp->cat0.target = block->successors[0];
/* Here we work around the fact that kill doesn't actually kill the thread as
 * GL expects. The last instruction always needs to be an end instruction,
 * which means that if we're stuck in a loop where kill is the only way out,
 * then we may have to jump out to the end. kill may also have the d3d
 * semantics of converting the thread to a helper thread, rather than setting
 * the exec mask to 0, in which case the helper thread could get stuck in an
 * We do this late, both to give the scheduler the opportunity to reschedule
 * kill instructions earlier and to avoid having to create a separate basic
 * TODO: Assuming that the wavefront doesn't stop as soon as all threads are
 * killed, we might benefit by doing this more aggressively when the remaining
 * part of the program after the kill is large, since that would let us
 * skip over the instructions when there are no non-killed threads left.
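 *
 * Roughly, for every kill the code below appends a conditional branch with
 * the same predicate straight to the end block (illustrative only, not the
 * exact emitted sequence):
 *
 *    kill p0.x
 *    br p0.x, target=<end block>
 *
 * so threads that have been killed jump ahead to the end of the program
 * instead of continuing to execute a loop body.
 */
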
kill_sched(struct ir3 *ir, struct ir3_shader_variant *so)
   /* True if we know that this block will always eventually lead to the end
   bool always_ends = true;
   struct ir3_block *last_block =
      list_last_entry(&ir->block_list, struct ir3_block, node);

   foreach_block_rev (block, &ir->block_list) {
      for (unsigned i = 0; i < 2 && block->successors[i]; i++) {
         if (block->successors[i]->start_ip <= block->end_ip)

      foreach_instr_safe (instr, &block->instr_list) {
         if (instr->opc != OPC_KILL)

         struct ir3_instruction *br = ir3_instr_create(block, OPC_B, 0, 1);
         ir3_src_create(br, instr->srcs[0]->num, instr->srcs[0]->flags)->wrmask =
            list_last_entry(&ir->block_list, struct ir3_block, node);
         list_add(&br->node, &instr->node);

         /* I'm not entirely sure how the branchstack works, but we probably
          * need to add at least one entry for the divergence which is resolved

   /* We don't update predecessors/successors, so we have to do this
/* Insert nop's required to make this a legal/valid shader program: */
nop_sched(struct ir3 *ir, struct ir3_shader_variant *so)
   foreach_block (block, &ir->block_list) {
      struct ir3_instruction *last = NULL;
      struct list_head instr_list;

      /* remove all the instructions from the list, we'll be adding
       * them back in as we go
      list_replace(&block->instr_list, &instr_list);
      list_inithead(&block->instr_list);

      foreach_instr_safe (instr, &instr_list) {
         unsigned delay = ir3_delay_calc(block, instr, so->mergedregs);

         /* NOTE: I think the nopN encoding works for a5xx and
          * probably a4xx, but not a3xx.  So far only tested on
         if ((delay > 0) && (ir->compiler->gen >= 6) && last &&
             ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3)) &&
             (last->repeat == 0)) {
            /* the previous cat2/cat3 instruction can encode at most 3 nop's: */
            unsigned transfer = MIN2(delay, 3 - last->nop);
            last->nop += transfer;

         if ((delay > 0) && last && (last->opc == OPC_NOP)) {
            /* the previous nop can encode at most 5 repeats: */
            unsigned transfer = MIN2(delay, 5 - last->repeat);
            last->repeat += transfer;

         debug_assert(delay <= 6);
         ir3_NOP(block)->repeat = delay - 1;

         list_addtail(&instr->node, &block->instr_list);
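
         /* Worked example of the two folding steps above: with delay == 5 and
          * the previous instruction a cat2 alu op with no (rptN),
          * MIN2(5, 3 - 0) == 3 cycles can be folded into its nopN field, and
          * the remaining 2 cycles are covered by an explicit nop with
          * repeat == 1 (a nop executes 1 + repeat times).
          */
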
ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
   struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
   bool mergedregs = so->mergedregs;

   ctx->compiler = ir->compiler;
   ctx->type = ir->type;

   /* allocate per-block data: */
   foreach_block (block, &ir->block_list) {
      struct ir3_legalize_block_data *bd =
         rzalloc(ctx, struct ir3_legalize_block_data);

      regmask_init(&bd->state.needs_ss_war, mergedregs);
      regmask_init(&bd->state.needs_ss, mergedregs);
      regmask_init(&bd->state.needs_sy, mergedregs);

   /* We may have failed to pull all input loads into the first block.
    * In that case we currently aren't able to find a better place
    * for (ei) than the end of the program.
    * a5xx and a6xx automatically release varying storage at the end.
   ctx->early_input_release = true;

   struct ir3_block *start_block = ir3_after_preamble(ir);
   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (is_input(instr)) {
            ctx->has_inputs = true;
            if (block != start_block) {
               ctx->early_input_release = false;

   assert(ctx->early_input_release || ctx->compiler->gen >= 5);

   /* process each block: */
   foreach_block (block, &ir->block_list) {
      progress |= legalize_block(ctx, block);

   *max_bary = ctx->max_bary;

   if (so->type == MESA_SHADER_FRAGMENT)

   foreach_block (block, &ir->block_list) {
      progress |= apply_fine_deriv_macro(ctx, block);

   ir3_count_instructions(ir);
   mark_xvergence_points(ir);