/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/** @file brw_fs_copy_propagation.cpp
 *
 * Support for global copy propagation in two passes: a local pass that does
 * intra-block copy (and constant) propagation, and a global pass that uses
 * dataflow analysis on the copies available at the end of each block to re-do
 * local copy propagation with more copies available.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section 12.5.
 */
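
/* As a rough illustration (not output of any particular shader), the local
 * pass turns a sequence such as
 *
 *    MOV tmp, a
 *    ADD dst, tmp, b
 *
 * into
 *
 *    MOV tmp, a
 *    ADD dst, a, b
 *
 * after which the MOV is frequently dead and removable by a later
 * dead-code-elimination pass.  The global pass lets the same rewrite happen
 * when the MOV and the ADD live in different basic blocks.
 */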
#define ACP_HASH_SIZE 64

#include "util/bitset.h"
#include "util/u_math.h"
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"

namespace { /* avoid conflict with opt_copy_propagation_elements */

struct acp_entry : public exec_node {
   fs_reg dst;
   fs_reg src;
   unsigned global_idx;
   unsigned size_written;
   unsigned size_read;
   enum opcode opcode;
   bool saturate;
   bool is_partial_write;
};
struct block_data {
   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the
    * start of this block.  This is the useful output of the analysis, since
    * it lets us plug those into the local copy propagation on the second
    * pass.
    */
   BITSET_WORD *livein;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the end
    * of this block.  This is done in initial setup from the per-block acps
    * returned by the first local copy prop pass.
    */
   BITSET_WORD *liveout;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are generated by
    * instructions in this block which reach the end of the block without
    * being overwritten.
    */
   BITSET_WORD *copy;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are killed over the
    * course of this block.
    */
   BITSET_WORD *kill;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are guaranteed to
    * have a fully uninitialized destination at the end of this block.
    */
   BITSET_WORD *undef;
};
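
/* For example (a sketch, not tied to any particular shader): if block B
 * contains "MOV v, w" and neither v nor w is overwritten again inside B,
 * the corresponding ACP entry's bit is set in B's COPY set.  Any instruction
 * in another block that overwrites v (the entry's dst) or w (the entry's
 * src) sets the entry's bit in that block's KILL set.
 */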
class fs_copy_prop_dataflow
{
public:
   fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                         const fs_live_variables &live,
                         exec_list *out_acp[ACP_HASH_SIZE]);

   void setup_initial_values();
   void run();

   void dump_block_data() const UNUSED;

   void *mem_ctx;
   cfg_t *cfg;
   const fs_live_variables &live;

   acp_entry **acp;
   int num_acp;
   int bitset_words;

   struct block_data *bd;
};

} /* anonymous namespace */
fs_copy_prop_dataflow::fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                                             const fs_live_variables &live,
                                             exec_list *out_acp[ACP_HASH_SIZE])
   : mem_ctx(mem_ctx), cfg(cfg), live(live)
{
   bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   num_acp = 0;
   foreach_block (block, cfg) {
      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         num_acp += out_acp[block->num][i].length();
      }
   }

   acp = rzalloc_array(mem_ctx, struct acp_entry *, num_acp);

   bitset_words = BITSET_WORDS(num_acp);

   int next_acp = 0;
   foreach_block (block, cfg) {
      bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].undef = rzalloc_array(bd, BITSET_WORD, bitset_words);

      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
            acp[next_acp] = entry;

            entry->global_idx = next_acp;

            /* opt_copy_propagation_local populates out_acp with copies created
             * in a block which are still live at the end of the block.  This
             * is exactly what we want in the COPY set.
             */
            BITSET_SET(bd[block->num].copy, next_acp);

            next_acp++;
         }
      }
   }

   assert(next_acp == num_acp);

   setup_initial_values();
   run();
}
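
/* Note on the bitset layout: each ACP entry occupies a single bit indexed by
 * its global_idx, so entry i lives in word i / BITSET_WORDBITS at bit
 * i % BITSET_WORDBITS, with BITSET_WORDS() rounding num_acp up to whole
 * words.  A hypothetical program with 100 surviving copies and 32-bit words
 * would therefore need four words per set, per block.
 */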
/**
 * Set up initial values for each of the data flow sets, prior to running
 * the fixed-point algorithm.
 */
void
fs_copy_prop_dataflow::setup_initial_values()
{
   /* Initialize the COPY and KILL sets. */
   {
      /* Create a temporary table of ACP entries which we'll use for efficient
       * look-up.  Unfortunately, we have to do this in two steps because we
       * have to match both sources and destinations and an ACP entry can only
       * be in one list at a time.
       *
       * We choose to make the table size between num_acp/2 and num_acp/4 to
       * try to trade off between the time it takes to initialize the table
       * via exec_list constructors or make_empty() and the cost of
       * collisions.  In practice, it doesn't appear to matter too much what
       * size we make the table as long as it's roughly the same order of
       * magnitude as num_acp.  We get most of the benefit of the table
       * approach even if we use a table of size ACP_HASH_SIZE, though a
       * full-sized table is 1-2% faster in practice.
       */
      unsigned acp_table_size = util_next_power_of_two(num_acp) / 4;
      acp_table_size = MAX2(acp_table_size, ACP_HASH_SIZE);
      exec_list *acp_table = new exec_list[acp_table_size];
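
      /* Worked example of the sizing above: with num_acp == 100,
       * util_next_power_of_two(100) == 128, so acp_table_size starts at 32
       * and the MAX2() raises it to ACP_HASH_SIZE (64).  Only once num_acp
       * exceeds 256 does the table grow beyond the default hash size.
       */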
      /* First, get all the KILLs for instructions which overwrite ACP
       * destinations.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->dst) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->dst, entry->size_written))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      /* Clear the table for the second pass */
      for (unsigned i = 0; i < acp_table_size; i++)
         acp_table[i].make_empty();

      /* Next, get all the KILLs for instructions which overwrite ACP
       * sources.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->src) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF &&
                inst->dst.file != FIXED_GRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->src, entry->size_read))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      delete [] acp_table;
   }
   /* Populate the initial values for the livein and liveout sets.  For the
    * block at the start of the program, livein = 0 and liveout = copy.
    * For the others, set liveout and livein to ~0 (the universal set).
    */
   foreach_block (block, cfg) {
      if (block->parents.is_empty()) {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].livein[i] = 0u;
            bd[block->num].liveout[i] = bd[block->num].copy[i];
         }
      } else {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].liveout[i] = ~0u;
            bd[block->num].livein[i] = ~0u;
         }
      }
   }
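
   /* Seeding non-entry blocks with ~0 is the usual optimistic initialization
    * for an intersection-based dataflow problem: the fixed-point iteration
    * in run() below can then only clear bits, so it converges monotonically
    * to the greatest fixed point.
    */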
   /* Initialize the undef set. */
   foreach_block (block, cfg) {
      for (int i = 0; i < num_acp; i++) {
         BITSET_SET(bd[block->num].undef, i);
         for (unsigned off = 0; off < acp[i]->size_written; off += REG_SIZE) {
            if (BITSET_TEST(live.block_data[block->num].defout,
                            live.var_from_reg(byte_offset(acp[i]->dst, off))))
               BITSET_CLEAR(bd[block->num].undef, i);
         }
      }
   }
}
/**
 * Run the fixed-point dataflow algorithm, propagating copy availability
 * between blocks until the livein/liveout sets stop changing.
 */
void
fs_copy_prop_dataflow::run()
{
   bool progress;

   do {
      progress = false;

      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_liveout = bd[block->num].liveout[i];
            BITSET_WORD livein_from_any_block = 0;

            /* Update livein for this block.  If a copy is live out of all
             * parent blocks, it's live coming in to this block.
             */
            bd[block->num].livein[i] = ~0u;
            foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
               bblock_t *parent = parent_link->block;
               /* Consider ACP entries with a known-undefined destination to
                * be available from the parent.  This is valid because we're
                * free to set the undefined variable equal to the source of
                * the ACP entry without breaking the application's
                * expectations, since the variable is undefined.
                */
               bd[block->num].livein[i] &= (bd[parent->num].liveout[i] |
                                            bd[parent->num].undef[i]);
               livein_from_any_block |= bd[parent->num].liveout[i];
            }

            /* Limit to the set of ACP entries that can possibly be available
             * at the start of the block, since propagating from a variable
             * which is guaranteed to be undefined (rather than potentially
             * undefined for some dynamic control-flow paths) doesn't seem
             * particularly useful.
             */
            bd[block->num].livein[i] &= livein_from_any_block;

            /* Update liveout for this block. */
            bd[block->num].liveout[i] =
               bd[block->num].copy[i] | (bd[block->num].livein[i] &
                                         ~bd[block->num].kill[i]);

            if (old_liveout != bd[block->num].liveout[i])
               progress = true;
         }
      }
   } while (progress);
}
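
/* In dataflow terms, the loop above computes, for every block B with
 * parents P:
 *
 *    livein(B)  = INTERSECTION over P of (liveout(P) | undef(P)),
 *                 masked by the union of all liveout(P)
 *    liveout(B) = copy(B) | (livein(B) & ~kill(B))
 *
 * iterated until no block's liveout changes.
 */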
void
fs_copy_prop_dataflow::dump_block_data() const
{
   foreach_block (block, cfg) {
      fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
              block->start_ip, block->end_ip);
      foreach_list_typed(bblock_link, link, link, &block->parents) {
         bblock_t *parent = link->block;
         fprintf(stderr, "%d ", parent->num);
      }
      fprintf(stderr, "):\n");
      fprintf(stderr, " livein = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].livein[i]);
      fprintf(stderr, ", liveout = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].liveout[i]);
      fprintf(stderr, ",\n copy = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].copy[i]);
      fprintf(stderr, ", kill = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].kill[i]);
      fprintf(stderr, "\n");
   }
}
static bool
is_logic_op(enum opcode opcode)
{
   return (opcode == BRW_OPCODE_AND ||
           opcode == BRW_OPCODE_OR ||
           opcode == BRW_OPCODE_XOR ||
           opcode == BRW_OPCODE_NOT);
}
static bool
can_take_stride(fs_inst *inst, brw_reg_type dst_type,
                unsigned arg, unsigned stride,
                const intel_device_info *devinfo)
{
   if (stride > 4)
      return false;

   /* Bail if the channels of the source need to be aligned to the byte offset
    * of the corresponding channel of the destination, and the provided stride
    * would break this restriction.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       !(type_sz(inst->src[arg].type) * stride ==
         type_sz(dst_type) * inst->dst.stride ||
         stride == 0))
      return false;

   /* 3-source instructions can only be Align16, which restricts what strides
    * they can take.  They can only take a stride of 1 (the usual case), or 0
    * with a special "repctrl" bit.  But the repctrl bit doesn't work for
    * 64-bit datatypes, so if the source type is 64-bit then only a stride of
    * 1 is allowed.  From the Broadwell PRM, Volume 7 "3D Media GPGPU":
    *
    *    "This is applicable to 32b datatypes and 16b datatype. 64b datatypes
    *    cannot use the replicate control."
    */
   if (inst->is_3src(devinfo)) {
      if (type_sz(inst->src[arg].type) > 4)
         return stride == 1;
      else
         return stride == 1 || stride == 0;
   }

   /* From the Broadwell PRM, Volume 2a "Command Reference - Instructions",
    * page 391 ("Extended Math Function"):
    *
    *    "The following restrictions apply for align1 mode: Scalar source is
    *    supported. Source and destination horizontal stride must be the
    *    same."
    *
    * From the Haswell PRM Volume 2b "Command Reference - Instructions", page
    * 134 ("Extended Math Function"):
    *
    *    "Scalar source is supported. Source and destination horizontal stride
    *    must be the same."
    *
    * and similar language exists for IVB and SNB.  Pre-SNB, math instructions
    * are sends, so the sources are moved to MRFs and there are no regioning
    * restrictions.
    */
   if (inst->is_math()) {
      if (devinfo->ver == 6 || devinfo->ver == 7) {
         assert(inst->dst.stride == 1);
         return stride == 1 || stride == 0;
      } else if (devinfo->ver >= 8) {
         return stride == inst->dst.stride || stride == 0;
      }
   }

   return true;
}
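
/* For instance (purely illustrative), propagating a copy whose source has
 * stride 2 into an instruction that already reads its argument with stride
 * 2 asks can_take_stride() about the composed stride 4 -- which passes the
 * initial limit check but is still rejected for 3-source and math
 * instructions by the cases above.
 */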
static bool
instruction_requires_packed_data(fs_inst *inst)
{
   switch (inst->opcode) {
   case FS_OPCODE_DDX_FINE:
   case FS_OPCODE_DDX_COARSE:
   case FS_OPCODE_DDY_FINE:
   case FS_OPCODE_DDY_COARSE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
      return true;
   default:
      return false;
   }
}
bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
   if (inst->src[arg].file != VGRF)
      return false;

   if (entry->src.file == IMM)
      return false;
   assert(entry->src.file == VGRF || entry->src.file == UNIFORM ||
          entry->src.file == ATTR || entry->src.file == FIXED_GRF);

   /* Avoid propagating a LOAD_PAYLOAD instruction into another if there is a
    * good chance that we'll be able to eliminate the latter through register
    * coalescing.  If only part of the sources of the second LOAD_PAYLOAD can
    * be simplified through copy propagation we would be making register
    * coalescing impossible, ending up with unnecessary copies in the program.
    * This is also the case for is_multi_copy_payload() copies that can only
    * be coalesced when the instruction is lowered into a sequence of MOVs.
    *
    * Worse -- in cases where the ACP entry was the result of CSE combining
    * multiple LOAD_PAYLOAD subexpressions, propagating the first LOAD_PAYLOAD
    * into the second would undo the work of CSE, leading to an infinite
    * optimization loop.  Avoid this by detecting LOAD_PAYLOAD copies from CSE
    * temporaries, which should match is_coalescing_payload().
    */
   if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
       (is_coalescing_payload(alloc, inst) || is_multi_copy_payload(inst)))
      return false;

   assert(entry->dst.file == VGRF);
   if (inst->src[arg].nr != entry->dst.nr)
      return false;

   /* Bail if inst is reading a range that isn't contained in the range
    * that entry is writing.
    */
   if (!region_contained_in(inst->src[arg], inst->size_read(arg),
                            entry->dst, entry->size_written))
      return false;

   /* Avoid propagating a FIXED_GRF register into an EOT instruction in order
    * for any register allocation restrictions to be applied.
    */
   if (entry->src.file == FIXED_GRF && inst->eot)
      return false;

   /* Avoid propagating odd-numbered FIXED_GRF registers into the first source
    * of a LINTERP instruction on platforms where the PLN instruction has
    * register alignment restrictions.
    */
   if (devinfo->has_pln && devinfo->ver <= 6 &&
       entry->src.file == FIXED_GRF && (entry->src.nr & 1) &&
       inst->opcode == FS_OPCODE_LINTERP && arg == 0)
      return false;
   /* We can't generally copy-propagate UD negations because we can end up
    * accessing the resulting values as signed integers instead.  See also
    * resolve_ud_negate() and the comment in fs_generator::generate_code.
    */
   if (entry->src.type == BRW_REGISTER_TYPE_UD &&
       entry->src.negate)
      return false;

   bool has_source_modifiers = entry->src.abs || entry->src.negate;

   if (has_source_modifiers && !inst->can_do_source_mods(devinfo))
      return false;

   /* Reject cases that would violate register regioning restrictions. */
   if ((entry->src.file == UNIFORM || !entry->src.is_contiguous()) &&
       ((devinfo->ver == 6 && inst->is_math()) ||
        inst->is_send_from_grf() ||
        inst->uses_indirect_addressing())) {
      return false;
   }

   if (has_source_modifiers &&
       inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_WRITE)
      return false;

   /* Some instructions implemented in the generator backend, such as
    * derivatives, assume that their operands are packed so we can't
    * generally propagate strided regions to them.
    */
   const unsigned entry_stride = (entry->src.file == FIXED_GRF ? 1 :
                                  entry->src.stride);
   if (instruction_requires_packed_data(inst) && entry_stride != 1)
      return false;

   const brw_reg_type dst_type = (has_source_modifiers &&
                                  entry->dst.type != inst->src[arg].type) ?
      entry->dst.type : inst->dst.type;

   /* Bail if the result of composing both strides would exceed the
    * hardware limit.
    */
   if (!can_take_stride(inst, dst_type, arg,
                        entry_stride * inst->src[arg].stride,
                        devinfo))
      return false;
   /* From the Cherry Trail/Braswell PRMs, Volume 7: 3D Media GPGPU:
    *
    *    "Register Region Restrictions
    *
    *     Special Requirements for Handling Double Precision Data Types :
    *
    *     When source or destination datatype is 64b or operation is integer
    *     DWord multiply, regioning in Align1 must follow these rules:
    *
    *     1. Source and Destination horizontal stride must be aligned to the
    *        same qword.
    *     2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
    *     3. Source and Destination offset must be the same, except the case
    *        of scalar source."
    *
    * Most of this is already checked in can_take_stride(); we're only left
    * with checking the offset.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       (reg_offset(inst->dst) % REG_SIZE) != (reg_offset(entry->src) % REG_SIZE))
      return false;

   /* Bail if the source FIXED_GRF region of the copy cannot be trivially
    * composed with the source region of the instruction, e.g. because the
    * copy uses some extended stride greater than 4 not supported natively by
    * the hardware as a horizontal stride, or because instruction compression
    * could require us to use a vertical stride shorter than a GRF.
    */
   if (entry->src.file == FIXED_GRF &&
       (inst->src[arg].stride > 4 ||
        inst->dst.component_size(inst->exec_size) >
        inst->src[arg].component_size(inst->exec_size)))
      return false;

   /* Bail if the instruction type is larger than the execution type of the
    * copy, which implies that each channel is reading multiple channels of
    * the destination of the copy, and simply replacing the sources would
    * give a program with different semantics.
    */
   if ((type_sz(entry->dst.type) < type_sz(inst->src[arg].type) ||
        entry->is_partial_write) &&
       inst->opcode != BRW_OPCODE_MOV) {
      return false;
   }
   /* Bail if the result of composing both strides cannot be expressed
    * as another stride.  This avoids, for example, trying to transform
    * this:
    *
    *     MOV (8)    rX<1>UD    rY<0;1,0>UD
    *     FOO (8)    ...        rX<8;8,1>UW
    *
    * into this:
    *
    *     FOO (8)    ...        rY<0;1,0>UW
    *
    * which would have different semantics.
    */
   if (entry_stride != 1 &&
       (inst->src[arg].stride *
        type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
      return false;

   /* Since the semantics of source modifiers are type-dependent, we need to
    * ensure that the meaning of the instruction remains the same if we
    * change the type.  If the sizes of the types are different, the new
    * instruction will read a different amount of data than the original
    * and the semantics will always be different.
    */
   if (has_source_modifiers &&
       entry->dst.type != inst->src[arg].type &&
       (!inst->can_change_types() ||
        type_sz(entry->dst.type) != type_sz(inst->src[arg].type)))
      return false;

   if (devinfo->ver >= 8 && (entry->src.negate || entry->src.abs) &&
       is_logic_op(inst->opcode)) {
      return false;
   }

   if (entry->saturate) {
      switch (inst->opcode) {
      case BRW_OPCODE_SEL:
         if ((inst->conditional_mod != BRW_CONDITIONAL_GE &&
              inst->conditional_mod != BRW_CONDITIONAL_L) ||
             inst->src[1].file != IMM ||
             inst->src[1].f < 0.0 ||
             inst->src[1].f > 1.0) {
            return false;
         }
         break;
      default:
         return false;
      }
   }
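
   /* Illustration of the SEL exception above (hypothetical IR): a
    * saturating copy followed by a clamp-like select,
    *
    *    MOV.sat tmp, x
    *    SEL.ge  dst, tmp, 0.25F
    *
    * can still take the propagation because the composed instruction
    * SEL.sat.ge dst, x, 0.25F computes the same value: saturation and a
    * GE/L select against an immediate in [0, 1] commute.
    */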
   /* Save the offset of inst->src[arg] relative to entry->dst for it to be
    * applied later.
    */
   const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset;

   /* Fold the copy into the instruction consuming it. */
   inst->src[arg].file = entry->src.file;
   inst->src[arg].nr = entry->src.nr;
   inst->src[arg].subnr = entry->src.subnr;
   inst->src[arg].offset = entry->src.offset;

   /* Compose the strides of both regions. */
   if (entry->src.file == FIXED_GRF) {
      if (inst->src[arg].stride) {
         const unsigned orig_width = 1 << entry->src.width;
         const unsigned reg_width = REG_SIZE / (type_sz(inst->src[arg].type) *
                                                inst->src[arg].stride);
         inst->src[arg].width = cvt(MIN2(orig_width, reg_width)) - 1;
         inst->src[arg].hstride = cvt(inst->src[arg].stride);
         inst->src[arg].vstride = inst->src[arg].hstride + inst->src[arg].width;
      } else {
         inst->src[arg].vstride = inst->src[arg].hstride =
            inst->src[arg].width = 0;
      }

      inst->src[arg].stride = 1;

      /* Hopefully no Align16 around here... */
      assert(entry->src.swizzle == BRW_SWIZZLE_XYZW);
      inst->src[arg].swizzle = entry->src.swizzle;
   } else {
      inst->src[arg].stride *= entry->src.stride;
   }
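
   /* Example of the FIXED_GRF branch above (hypothetical registers): a copy
    * from g4<8;8,1>F consumed with stride 2 and float type gives
    * reg_width = 32 / (4 * 2) = 4, so the composed region becomes
    * g4<8;4,2>F -- width 4, hstride 2, and a vstride satisfying
    * vstride == width * hstride (assuming REG_SIZE == 32).
    */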
   /* Compose any saturate modifiers. */
   inst->saturate = inst->saturate || entry->saturate;

   /* Compute the first component of the copy that the instruction is
    * reading, and the base byte offset within that component.
    */
   assert((entry->dst.offset % REG_SIZE == 0 || inst->opcode == BRW_OPCODE_MOV) &&
          entry->dst.stride == 1);
   const unsigned component = rel_offset / type_sz(entry->dst.type);
   const unsigned suboffset = rel_offset % type_sz(entry->dst.type);

   /* Calculate the byte offset at the origin of the copy of the given
    * component and suboffset.
    */
   inst->src[arg] = byte_offset(inst->src[arg],
      component * entry_stride * type_sz(entry->src.type) + suboffset);

   if (has_source_modifiers) {
      if (entry->dst.type != inst->src[arg].type) {
         /* We are propagating source modifiers from a MOV with a different
          * type.  If we got here, then we can just change the source and
          * destination types of the instruction and keep going.
          */
         assert(inst->can_change_types());
         for (int i = 0; i < inst->sources; i++) {
            inst->src[i].type = entry->dst.type;
         }
         inst->dst.type = entry->dst.type;
      }

      if (!inst->src[arg].abs) {
         inst->src[arg].abs = entry->src.abs;
         inst->src[arg].negate ^= entry->src.negate;
      }
   }

   return true;
}
bool
fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
   bool progress = false;

   if (entry->src.file != IMM)
      return false;

   if (type_sz(entry->src.type) > 4)
      return false;

   for (int i = inst->sources - 1; i >= 0; i--) {
      if (inst->src[i].file != VGRF)
         continue;

      assert(entry->dst.file == VGRF);
      if (inst->src[i].nr != entry->dst.nr)
         continue;

      /* Bail if inst is reading a range that isn't contained in the range
       * that entry is writing.
       */
      if (!region_contained_in(inst->src[i], inst->size_read(i),
                               entry->dst, entry->size_written))
         continue;

      /* If the type sizes don't match, each channel of the instruction is
       * either extracting a portion of the constant (which could be handled
       * with some effort but the code below doesn't) or reading multiple
       * channels of the source at once.
       */
      if (type_sz(inst->src[i].type) != type_sz(entry->dst.type))
         continue;

      fs_reg val = entry->src;
      val.type = inst->src[i].type;

      if (inst->src[i].abs) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_abs_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      if (inst->src[i].negate) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_negate_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
      case SHADER_OPCODE_LOAD_PAYLOAD:
         inst->src[i] = val;
         progress = true;
         break;

      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         /* FINISHME: Promote non-float constants and remove this. */
         if (devinfo->ver < 8)
            break;
         /* fallthrough */
      case SHADER_OPCODE_POW:
         /* Allow constant propagation into src1 (except on Gen 6 which
          * doesn't support scalar source math), and let constant combining
          * promote the constant on Gen < 8.
          */
         if (devinfo->ver == 6)
            break;
         /* fallthrough */
      case BRW_OPCODE_BFI1:
      case BRW_OPCODE_SUBB:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_MACH:
      case SHADER_OPCODE_MULH:
      case BRW_OPCODE_ADDC:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            /* Fit this constant in by commuting the operands.
             * Exception: we can't do this for 32-bit integer MUL/MACH
             * because it's asymmetric.
             *
             * The BSpec says for Broadwell that
             *
             *    "When multiplying DW x DW, the dst cannot be accumulator."
             *
             * Integer MUL with a non-accumulator destination will be lowered
             * by lower_integer_multiplication(), so don't restrict it.
             */
            if (((inst->opcode == BRW_OPCODE_MUL &&
                  inst->dst.is_accumulator()) ||
                 inst->opcode == BRW_OPCODE_MACH) &&
                (inst->src[1].type == BRW_REGISTER_TYPE_D ||
                 inst->src[1].type == BRW_REGISTER_TYPE_UD))
               break;
            inst->src[0] = inst->src[1];
            inst->src[1] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
      case BRW_OPCODE_IF:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            enum brw_conditional_mod new_cmod;

            new_cmod = brw_swap_cmod(inst->conditional_mod);
            if (new_cmod != BRW_CONDITIONAL_NONE) {
               /* Fit this constant in by swapping the operands and
                * flipping the test.
                */
               inst->src[0] = inst->src[1];
               inst->src[1] = val;
               inst->conditional_mod = new_cmod;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_SEL:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM &&
                    (inst->conditional_mod == BRW_CONDITIONAL_NONE ||
                     /* Only GE and L are commutative. */
                     inst->conditional_mod == BRW_CONDITIONAL_GE ||
                     inst->conditional_mod == BRW_CONDITIONAL_L)) {
            inst->src[0] = inst->src[1];
            inst->src[1] = val;

            /* If this was predicated, flipping operands means
             * we also need to flip the predicate.
             */
            if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
               inst->predicate_inverse =
                  !inst->predicate_inverse;
            }
            progress = true;
         }
         break;
      case FS_OPCODE_FB_WRITE_LOGICAL:
         /* The stencil and omask sources of FS_OPCODE_FB_WRITE_LOGICAL are
          * bit-cast using a strided region so they cannot be immediates.
          */
         if (i != FB_WRITE_LOGICAL_SRC_SRC_STENCIL &&
             i != FB_WRITE_LOGICAL_SRC_OMASK) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case SHADER_OPCODE_TEX_LOGICAL:
      case SHADER_OPCODE_TXD_LOGICAL:
      case SHADER_OPCODE_TXF_LOGICAL:
      case SHADER_OPCODE_TXL_LOGICAL:
      case SHADER_OPCODE_TXS_LOGICAL:
      case FS_OPCODE_TXB_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL:
      case SHADER_OPCODE_TXF_UMS_LOGICAL:
      case SHADER_OPCODE_TXF_MCS_LOGICAL:
      case SHADER_OPCODE_LOD_LOGICAL:
      case SHADER_OPCODE_TG4_LOGICAL:
      case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      case SHADER_OPCODE_BROADCAST:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_PACK_HALF_2x16_SPLIT:
         inst->src[i] = val;
         progress = true;
         break;

      default:
         break;
      }
   }

   return progress;
}
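
/* A quick illustration of the commuting trick above (hypothetical IR):
 * hardware immediates may only sit in src1, so
 *
 *    ADDC dst, 7UD, y     becomes    ADDC dst, y, 7UD
 *    CMP.l null, 4F, x    becomes    CMP.g null, x, 4F
 *
 * with brw_swap_cmod() supplying the flipped test in the comparison case.
 */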
static bool
can_propagate_from(fs_inst *inst)
{
   return ((inst->opcode == BRW_OPCODE_MOV &&
            inst->dst.file == VGRF &&
            ((inst->src[0].file == VGRF &&
              !regions_overlap(inst->dst, inst->size_written,
                               inst->src[0], inst->size_read(0))) ||
             inst->src[0].file == ATTR ||
             inst->src[0].file == UNIFORM ||
             inst->src[0].file == IMM ||
             (inst->src[0].file == FIXED_GRF &&
              inst->src[0].is_contiguous())) &&
            inst->src[0].type == inst->dst.type &&
            /* Subset of !is_partial_write() conditions. */
            !((inst->predicate && inst->opcode != BRW_OPCODE_SEL) ||
              !inst->dst.is_contiguous())) ||
           is_identity_payload(FIXED_GRF, inst));
}
/* Walks a basic block and does copy propagation on it using the acp
 * list.
 */
bool
fs_visitor::opt_copy_propagation_local(void *copy_prop_ctx, bblock_t *block,
                                       exec_list *acp)
{
   bool progress = false;

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Try propagating into this instruction. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != VGRF)
            continue;

         foreach_in_list(acp_entry, entry, &acp[inst->src[i].nr % ACP_HASH_SIZE]) {
            if (try_constant_propagate(inst, entry))
               progress = true;
            else if (try_copy_propagate(inst, i, entry))
               progress = true;
         }
      }

      /* Kill the destination from the ACP. */
      if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
         foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.nr % ACP_HASH_SIZE]) {
            if (regions_overlap(entry->dst, entry->size_written,
                                inst->dst, inst->size_written))
               entry->remove();
         }

         /* Oops, we only have the chaining hash based on the destination, not
          * the source, so walk across the entire table.
          */
         for (int i = 0; i < ACP_HASH_SIZE; i++) {
            foreach_in_list_safe(acp_entry, entry, &acp[i]) {
               /* Make sure we kill the entry if this instruction overwrites
                * _any_ of the registers that it reads.
                */
               if (regions_overlap(entry->src, entry->size_read,
                                   inst->dst, inst->size_written))
                  entry->remove();
            }
         }
      }
      /* If this instruction's source could potentially be folded into the
       * operand of another instruction, add it to the ACP.
       */
      if (can_propagate_from(inst)) {
         acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
         entry->dst = inst->dst;
         entry->src = inst->src[0];
         entry->size_written = inst->size_written;
         for (unsigned i = 0; i < inst->sources; i++)
            entry->size_read += inst->size_read(i);
         entry->opcode = inst->opcode;
         entry->saturate = inst->saturate;
         entry->is_partial_write = inst->is_partial_write();
         acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
      } else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
                 inst->dst.file == VGRF) {
         int offset = 0;
         for (int i = 0; i < inst->sources; i++) {
            int effective_width = i < inst->header_size ? 8 : inst->exec_size;
            const unsigned size_written = effective_width *
                                          type_sz(inst->src[i].type);
            if (inst->src[i].file == VGRF ||
                (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].is_contiguous())) {
               acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
               entry->dst = byte_offset(inst->dst, offset);
               entry->src = inst->src[i];
               entry->size_written = size_written;
               entry->size_read = inst->size_read(i);
               entry->opcode = inst->opcode;
               if (!entry->dst.equals(inst->src[i])) {
                  acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
               }
            }

            offset += size_written;
         }
      }
   }

   return progress;
}
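
/* Sketch of the LOAD_PAYLOAD splitting above (hypothetical sizes): a SIMD8
 * LOAD_PAYLOAD with one header source and two float payload sources yields
 * one ACP entry per copyable source, each covering
 * 8 * type_sz(float) = 32 bytes of the destination at successively larger
 * offsets, so later reads of the individual payload registers can still be
 * propagated.
 */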
bool
fs_visitor::opt_copy_propagation()
{
   bool progress = false;
   void *copy_prop_ctx = ralloc_context(NULL);
   exec_list *out_acp[cfg->num_blocks];

   for (int i = 0; i < cfg->num_blocks; i++)
      out_acp[i] = new exec_list [ACP_HASH_SIZE];

   const fs_live_variables &live = live_analysis.require();

   /* First, walk through each block doing local copy propagation and getting
    * the set of copies available at the end of the block.
    */
   foreach_block (block, cfg) {
      progress = opt_copy_propagation_local(copy_prop_ctx, block,
                                            out_acp[block->num]) || progress;

      /* If the destination of an ACP entry exists only within this block,
       * then there's no need to keep it for dataflow analysis.  We can delete
       * it from the out_acp table and avoid growing the bitsets any bigger
       * than we absolutely have to.
       *
       * Because nothing in opt_copy_propagation_local touches the block
       * start/end IPs and opt_copy_propagation_local is incapable of
       * extending the live range of an ACP destination beyond the block,
       * it's safe to use the liveness information in this way.
       */
      for (unsigned a = 0; a < ACP_HASH_SIZE; a++) {
         foreach_in_list_safe(acp_entry, entry, &out_acp[block->num][a]) {
            assert(entry->dst.file == VGRF);
            if (block->start_ip <= live.vgrf_start[entry->dst.nr] &&
                live.vgrf_end[entry->dst.nr] <= block->end_ip)
               entry->remove();
         }
      }
   }
   /* Do dataflow analysis for those available copies. */
   fs_copy_prop_dataflow dataflow(copy_prop_ctx, cfg, live, out_acp);

   /* Next, re-run local copy propagation, this time with the set of copies
    * provided by the dataflow analysis available at the start of a block.
    */
   foreach_block (block, cfg) {
      exec_list in_acp[ACP_HASH_SIZE];

      for (int i = 0; i < dataflow.num_acp; i++) {
         if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
            struct acp_entry *entry = dataflow.acp[i];
            in_acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
         }
      }

      progress = opt_copy_propagation_local(copy_prop_ctx, block, in_acp) ||
                 progress;
   }

   for (int i = 0; i < cfg->num_blocks; i++)
      delete [] out_acp[i];

   ralloc_free(copy_prop_ctx);

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}