/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
29
#include "nir_search.h"
30
#include "nir_builder.h"
31
#include "nir_worklist.h"
32
#include "util/half_float.h"
34
/* This should be the same as nir_search_max_comm_ops in nir_algebraic.py. */
35
#define NIR_SEARCH_MAX_COMM_OPS 8
40
uint8_t comm_op_direction;
41
unsigned variables_seen;
43
/* Used for running the automaton on newly-constructed instructions. */
44
struct util_dynarray *states;
45
const struct per_op_table *pass_op_table;
46
const nir_algebraic_table *table;
48
nir_alu_src variables[NIR_SEARCH_MAX_VARIABLES];
49
struct hash_table *range_ht;
53
match_expression(const nir_algebraic_table *table, const nir_search_expression *expr, nir_alu_instr *instr,
54
unsigned num_components, const uint8_t *swizzle,
55
struct match_state *state);
57
nir_algebraic_automaton(nir_instr *instr, struct util_dynarray *states,
58
const struct per_op_table *pass_op_table);
60
static const uint8_t identity_swizzle[NIR_MAX_VEC_COMPONENTS] =
69
* Check if a source produces a value of the given type.
71
* Used for satisfying 'a@type' constraints.
74
src_is_type(nir_src src, nir_alu_type type)
76
assert(type != nir_type_invalid);
81
if (src.ssa->parent_instr->type == nir_instr_type_alu) {
82
nir_alu_instr *src_alu = nir_instr_as_alu(src.ssa->parent_instr);
83
nir_alu_type output_type = nir_op_infos[src_alu->op].output_type;
85
if (type == nir_type_bool) {
86
switch (src_alu->op) {
90
return src_is_type(src_alu->src[0].src, nir_type_bool) &&
91
src_is_type(src_alu->src[1].src, nir_type_bool);
93
return src_is_type(src_alu->src[0].src, nir_type_bool);
99
return nir_alu_type_get_base_type(output_type) == type;
100
} else if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
101
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);
103
if (type == nir_type_bool) {
104
return intr->intrinsic == nir_intrinsic_load_front_face ||
105
intr->intrinsic == nir_intrinsic_load_helper_invocation;
114
nir_op_matches_search_op(nir_op nop, uint16_t sop)
116
if (sop <= nir_last_opcode)
119
#define MATCH_FCONV_CASE(op) \
120
case nir_search_op_##op: \
121
return nop == nir_op_##op##16 || \
122
nop == nir_op_##op##32 || \
123
nop == nir_op_##op##64;
125
#define MATCH_ICONV_CASE(op) \
126
case nir_search_op_##op: \
127
return nop == nir_op_##op##8 || \
128
nop == nir_op_##op##16 || \
129
nop == nir_op_##op##32 || \
130
nop == nir_op_##op##64;
132
#define MATCH_BCONV_CASE(op) \
133
case nir_search_op_##op: \
134
return nop == nir_op_##op##1 || \
135
nop == nir_op_##op##32;
138
MATCH_FCONV_CASE(i2f)
139
MATCH_FCONV_CASE(u2f)
140
MATCH_FCONV_CASE(f2f)
141
MATCH_ICONV_CASE(f2u)
142
MATCH_ICONV_CASE(f2i)
143
MATCH_ICONV_CASE(u2u)
144
MATCH_ICONV_CASE(i2i)
145
MATCH_FCONV_CASE(b2f)
146
MATCH_ICONV_CASE(b2i)
147
MATCH_BCONV_CASE(i2b)
148
MATCH_BCONV_CASE(f2b)
150
unreachable("Invalid nir_search_op");
153
#undef MATCH_FCONV_CASE
154
#undef MATCH_ICONV_CASE
155
#undef MATCH_BCONV_CASE
159
nir_search_op_for_nir_op(nir_op nop)
161
#define MATCH_FCONV_CASE(op) \
162
case nir_op_##op##16: \
163
case nir_op_##op##32: \
164
case nir_op_##op##64: \
165
return nir_search_op_##op;
167
#define MATCH_ICONV_CASE(op) \
168
case nir_op_##op##8: \
169
case nir_op_##op##16: \
170
case nir_op_##op##32: \
171
case nir_op_##op##64: \
172
return nir_search_op_##op;
174
#define MATCH_BCONV_CASE(op) \
175
case nir_op_##op##1: \
176
case nir_op_##op##32: \
177
return nir_search_op_##op;
181
MATCH_FCONV_CASE(i2f)
182
MATCH_FCONV_CASE(u2f)
183
MATCH_FCONV_CASE(f2f)
184
MATCH_ICONV_CASE(f2u)
185
MATCH_ICONV_CASE(f2i)
186
MATCH_ICONV_CASE(u2u)
187
MATCH_ICONV_CASE(i2i)
188
MATCH_FCONV_CASE(b2f)
189
MATCH_ICONV_CASE(b2i)
190
MATCH_BCONV_CASE(i2b)
191
MATCH_BCONV_CASE(f2b)
196
#undef MATCH_FCONV_CASE
197
#undef MATCH_ICONV_CASE
198
#undef MATCH_BCONV_CASE
202
nir_op_for_search_op(uint16_t sop, unsigned bit_size)
204
if (sop <= nir_last_opcode)
207
#define RET_FCONV_CASE(op) \
208
case nir_search_op_##op: \
209
switch (bit_size) { \
210
case 16: return nir_op_##op##16; \
211
case 32: return nir_op_##op##32; \
212
case 64: return nir_op_##op##64; \
213
default: unreachable("Invalid bit size"); \
216
#define RET_ICONV_CASE(op) \
217
case nir_search_op_##op: \
218
switch (bit_size) { \
219
case 8: return nir_op_##op##8; \
220
case 16: return nir_op_##op##16; \
221
case 32: return nir_op_##op##32; \
222
case 64: return nir_op_##op##64; \
223
default: unreachable("Invalid bit size"); \
226
#define RET_BCONV_CASE(op) \
227
case nir_search_op_##op: \
228
switch (bit_size) { \
229
case 1: return nir_op_##op##1; \
230
case 32: return nir_op_##op##32; \
231
default: unreachable("Invalid bit size"); \
247
unreachable("Invalid nir_search_op");
250
#undef RET_FCONV_CASE
251
#undef RET_ICONV_CASE
252
#undef RET_BCONV_CASE
256
match_value(const nir_algebraic_table *table,
257
const nir_search_value *value, nir_alu_instr *instr, unsigned src,
258
unsigned num_components, const uint8_t *swizzle,
259
struct match_state *state)
261
uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];
263
/* Searching only works on SSA values because, if it's not SSA, we can't
264
* know if the value changed between one instance of that value in the
265
* expression and another. Also, the replace operation will place reads of
266
* that value right before the last instruction in the expression we're
267
* replacing so those reads will happen after the original reads and may
268
* not be valid if they're register reads.
270
assert(instr->src[src].src.is_ssa);
272
/* If the source is an explicitly sized source, then we need to reset
273
* both the number of components and the swizzle.
275
if (nir_op_infos[instr->op].input_sizes[src] != 0) {
276
num_components = nir_op_infos[instr->op].input_sizes[src];
277
swizzle = identity_swizzle;
280
for (unsigned i = 0; i < num_components; ++i)
281
new_swizzle[i] = instr->src[src].swizzle[swizzle[i]];
283
/* If the value has a specific bit size and it doesn't match, bail */
284
if (value->bit_size > 0 &&
285
nir_src_bit_size(instr->src[src].src) != value->bit_size)
288
switch (value->type) {
289
case nir_search_value_expression:
290
if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
293
return match_expression(table, nir_search_value_as_expression(value),
294
nir_instr_as_alu(instr->src[src].src.ssa->parent_instr),
295
num_components, new_swizzle, state);
297
case nir_search_value_variable: {
298
nir_search_variable *var = nir_search_value_as_variable(value);
299
assert(var->variable < NIR_SEARCH_MAX_VARIABLES);
301
if (state->variables_seen & (1 << var->variable)) {
302
if (state->variables[var->variable].src.ssa != instr->src[src].src.ssa)
305
assert(!instr->src[src].abs && !instr->src[src].negate);
307
for (unsigned i = 0; i < num_components; ++i) {
308
if (state->variables[var->variable].swizzle[i] != new_swizzle[i])
314
if (var->is_constant &&
315
instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
318
if (var->cond_index != -1 && !table->variable_cond[var->cond_index](state->range_ht, instr,
319
src, num_components, new_swizzle))
322
if (var->type != nir_type_invalid &&
323
!src_is_type(instr->src[src].src, var->type))
326
state->variables_seen |= (1 << var->variable);
327
state->variables[var->variable].src = instr->src[src].src;
328
state->variables[var->variable].abs = false;
329
state->variables[var->variable].negate = false;
331
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i) {
332
if (i < num_components)
333
state->variables[var->variable].swizzle[i] = new_swizzle[i];
335
state->variables[var->variable].swizzle[i] = 0;
342
case nir_search_value_constant: {
343
nir_search_constant *const_val = nir_search_value_as_constant(value);
345
if (!nir_src_is_const(instr->src[src].src))
348
switch (const_val->type) {
349
case nir_type_float: {
350
nir_load_const_instr *const load =
351
nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);
353
/* There are 8-bit and 1-bit integer types, but there are no 8-bit or
354
* 1-bit float types. This prevents potential assertion failures in
355
* nir_src_comp_as_float.
357
if (load->def.bit_size < 16)
360
for (unsigned i = 0; i < num_components; ++i) {
361
double val = nir_src_comp_as_float(instr->src[src].src,
363
if (val != const_val->data.d)
371
case nir_type_bool: {
372
unsigned bit_size = nir_src_bit_size(instr->src[src].src);
373
uint64_t mask = u_uintN_max(bit_size);
374
for (unsigned i = 0; i < num_components; ++i) {
375
uint64_t val = nir_src_comp_as_uint(instr->src[src].src,
377
if ((val & mask) != (const_val->data.u & mask))
384
unreachable("Invalid alu source type");
389
unreachable("Invalid search value type");
394
match_expression(const nir_algebraic_table *table, const nir_search_expression *expr, nir_alu_instr *instr,
395
unsigned num_components, const uint8_t *swizzle,
396
struct match_state *state)
398
if (expr->cond_index != -1 && !table->expression_cond[expr->cond_index](instr))
401
if (!nir_op_matches_search_op(instr->op, expr->opcode))
404
assert(instr->dest.dest.is_ssa);
406
if (expr->value.bit_size > 0 &&
407
instr->dest.dest.ssa.bit_size != expr->value.bit_size)
410
state->inexact_match = expr->inexact || state->inexact_match;
411
state->has_exact_alu = (instr->exact && !expr->ignore_exact) || state->has_exact_alu;
412
if (state->inexact_match && state->has_exact_alu)
415
assert(!instr->dest.saturate);
416
assert(nir_op_infos[instr->op].num_inputs > 0);
418
/* If we have an explicitly sized destination, we can only handle the
419
* identity swizzle. While dot(vec3(a, b, c).zxy) is a valid
420
* expression, we don't have the information right now to propagate that
421
* swizzle through. We can only properly propagate swizzles if the
422
* instruction is vectorized.
424
if (nir_op_infos[instr->op].output_size != 0) {
425
for (unsigned i = 0; i < num_components; i++) {
431
/* If this is a commutative expression and it's one of the first few, look
432
* up its direction for the current search operation. We'll use that value
433
* to possibly flip the sources for the match.
435
unsigned comm_op_flip =
436
(expr->comm_expr_idx >= 0 &&
437
expr->comm_expr_idx < NIR_SEARCH_MAX_COMM_OPS) ?
438
((state->comm_op_direction >> expr->comm_expr_idx) & 1) : 0;
441
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
442
/* 2src_commutative instructions that have 3 sources are only commutative
443
* in the first two sources. Source 2 is always source 2.
445
if (!match_value(table, &state->table->values[expr->srcs[i]].value, instr,
446
i < 2 ? i ^ comm_op_flip : i,
447
num_components, swizzle, state)) {
457
replace_bitsize(const nir_search_value *value, unsigned search_bitsize,
458
struct match_state *state)
460
if (value->bit_size > 0)
461
return value->bit_size;
462
if (value->bit_size < 0)
463
return nir_src_bit_size(state->variables[-value->bit_size - 1].src);
464
return search_bitsize;
468
construct_value(nir_builder *build,
469
const nir_search_value *value,
470
unsigned num_components, unsigned search_bitsize,
471
struct match_state *state,
474
switch (value->type) {
475
case nir_search_value_expression: {
476
const nir_search_expression *expr = nir_search_value_as_expression(value);
477
unsigned dst_bit_size = replace_bitsize(value, search_bitsize, state);
478
nir_op op = nir_op_for_search_op(expr->opcode, dst_bit_size);
480
if (nir_op_infos[op].output_size != 0)
481
num_components = nir_op_infos[op].output_size;
483
nir_alu_instr *alu = nir_alu_instr_create(build->shader, op);
484
nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
486
alu->dest.write_mask = (1 << num_components) - 1;
487
alu->dest.saturate = false;
489
/* We have no way of knowing what values in a given search expression
490
* map to a particular replacement value. Therefore, if the
491
* expression we are replacing has any exact values, the entire
492
* replacement should be exact.
494
alu->exact = state->has_exact_alu || expr->exact;
496
for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
497
/* If the source is an explicitly sized source, then we need to reset
498
* the number of components to match.
500
if (nir_op_infos[alu->op].input_sizes[i] != 0)
501
num_components = nir_op_infos[alu->op].input_sizes[i];
503
alu->src[i] = construct_value(build, &state->table->values[expr->srcs[i]].value,
504
num_components, search_bitsize,
508
nir_builder_instr_insert(build, &alu->instr);
510
assert(alu->dest.dest.ssa.index ==
511
util_dynarray_num_elements(state->states, uint16_t));
512
util_dynarray_append(state->states, uint16_t, 0);
513
nir_algebraic_automaton(&alu->instr, state->states, state->pass_op_table);
516
val.src = nir_src_for_ssa(&alu->dest.dest.ssa);
519
memcpy(val.swizzle, identity_swizzle, sizeof val.swizzle);
524
case nir_search_value_variable: {
525
const nir_search_variable *var = nir_search_value_as_variable(value);
526
assert(state->variables_seen & (1 << var->variable));
528
nir_alu_src val = { NIR_SRC_INIT };
529
nir_alu_src_copy(&val, &state->variables[var->variable]);
530
assert(!var->is_constant);
532
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
533
val.swizzle[i] = state->variables[var->variable].swizzle[var->swizzle[i]];
538
case nir_search_value_constant: {
539
const nir_search_constant *c = nir_search_value_as_constant(value);
540
unsigned bit_size = replace_bitsize(value, search_bitsize, state);
545
cval = nir_imm_floatN_t(build, c->data.d, bit_size);
550
cval = nir_imm_intN_t(build, c->data.i, bit_size);
554
cval = nir_imm_boolN_t(build, c->data.u, bit_size);
558
unreachable("Invalid alu source type");
561
assert(cval->index ==
562
util_dynarray_num_elements(state->states, uint16_t));
563
util_dynarray_append(state->states, uint16_t, 0);
564
nir_algebraic_automaton(cval->parent_instr, state->states,
565
state->pass_op_table);
568
val.src = nir_src_for_ssa(cval);
571
memset(val.swizzle, 0, sizeof val.swizzle);
577
unreachable("Invalid search value type");
581
UNUSED static void dump_value(const nir_algebraic_table *table, const nir_search_value *val)
584
case nir_search_value_constant: {
585
const nir_search_constant *sconst = nir_search_value_as_constant(val);
586
switch (sconst->type) {
588
fprintf(stderr, "%f", sconst->data.d);
591
fprintf(stderr, "%"PRId64, sconst->data.i);
594
fprintf(stderr, "0x%"PRIx64, sconst->data.u);
597
fprintf(stderr, "%s", sconst->data.u != 0 ? "True" : "False");
600
unreachable("bad const type");
605
case nir_search_value_variable: {
606
const nir_search_variable *var = nir_search_value_as_variable(val);
607
if (var->is_constant)
608
fprintf(stderr, "#");
609
fprintf(stderr, "%c", var->variable + 'a');
613
case nir_search_value_expression: {
614
const nir_search_expression *expr = nir_search_value_as_expression(val);
615
fprintf(stderr, "(");
617
fprintf(stderr, "~");
618
switch (expr->opcode) {
620
case nir_search_op_##n: fprintf(stderr, #n); break;
630
fprintf(stderr, "%s", nir_op_infos[expr->opcode].name);
633
unsigned num_srcs = 1;
634
if (expr->opcode <= nir_last_opcode)
635
num_srcs = nir_op_infos[expr->opcode].num_inputs;
637
for (unsigned i = 0; i < num_srcs; i++) {
638
fprintf(stderr, " ");
639
dump_value(table, &table->values[expr->srcs[i]].value);
642
fprintf(stderr, ")");
647
if (val->bit_size > 0)
648
fprintf(stderr, "@%d", val->bit_size);
652
add_uses_to_worklist(nir_instr *instr,
653
nir_instr_worklist *worklist,
654
struct util_dynarray *states,
655
const struct per_op_table *pass_op_table)
657
nir_ssa_def *def = nir_instr_ssa_def(instr);
659
nir_foreach_use_safe(use_src, def) {
660
if (nir_algebraic_automaton(use_src->parent_instr, states, pass_op_table))
661
nir_instr_worklist_push_tail(worklist, use_src->parent_instr);
666
nir_algebraic_update_automaton(nir_instr *new_instr,
667
nir_instr_worklist *algebraic_worklist,
668
struct util_dynarray *states,
669
const struct per_op_table *pass_op_table)
672
nir_instr_worklist *automaton_worklist = nir_instr_worklist_create();
674
/* Walk through the tree of uses of our new instruction's SSA value,
675
* recursively updating the automaton state until it stabilizes.
677
add_uses_to_worklist(new_instr, automaton_worklist, states, pass_op_table);
680
while ((instr = nir_instr_worklist_pop_head(automaton_worklist))) {
681
nir_instr_worklist_push_tail(algebraic_worklist, instr);
682
add_uses_to_worklist(instr, automaton_worklist, states, pass_op_table);
685
nir_instr_worklist_destroy(automaton_worklist);
689
nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
690
struct hash_table *range_ht,
691
struct util_dynarray *states,
692
const nir_algebraic_table *table,
693
const nir_search_expression *search,
694
const nir_search_value *replace,
695
nir_instr_worklist *algebraic_worklist)
697
uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
699
for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
702
assert(instr->dest.dest.is_ssa);
704
struct match_state state;
705
state.inexact_match = false;
706
state.has_exact_alu = false;
707
state.range_ht = range_ht;
708
state.pass_op_table = table->pass_op_table;
711
STATIC_ASSERT(sizeof(state.comm_op_direction) * 8 >= NIR_SEARCH_MAX_COMM_OPS);
713
unsigned comm_expr_combinations =
714
1 << MIN2(search->comm_exprs, NIR_SEARCH_MAX_COMM_OPS);
717
for (unsigned comb = 0; comb < comm_expr_combinations; comb++) {
718
/* The bitfield of directions is just the current iteration. Hooray for
721
state.comm_op_direction = comb;
722
state.variables_seen = 0;
724
if (match_expression(table, search, instr,
725
instr->dest.dest.ssa.num_components,
735
fprintf(stderr, "matched: ");
736
dump_value(&search->value);
737
fprintf(stderr, " -> ");
739
fprintf(stderr, " ssa_%d\n", instr->dest.dest.ssa.index);
742
/* If the instruction at the root of the expression tree being replaced is
743
* a unary operation, insert the replacement instructions at the location
744
* of the source of the unary operation. Otherwise, insert the replacement
745
* instructions at the location of the expression tree root.
747
* For the unary operation case, this is done to prevent some spurious code
748
* motion that can dramatically extend live ranges. Imagine an expression
749
* like -(A+B) where the addtion and the negation are separated by flow
750
* control and thousands of instructions. If this expression is replaced
751
* with -A+-B, inserting the new instructions at the site of the negation
752
* could extend the live range of A and B dramtically. This could increase
753
* register pressure and cause spilling.
755
* It may well be that moving instructions around is a good thing, but
756
* keeping algebraic optimizations and code motion optimizations separate
759
nir_alu_instr *const src_instr = nir_src_as_alu_instr(instr->src[0].src);
760
if (src_instr != NULL &&
761
(instr->op == nir_op_fneg || instr->op == nir_op_fabs ||
762
instr->op == nir_op_ineg || instr->op == nir_op_iabs ||
763
instr->op == nir_op_inot)) {
764
/* Insert new instructions *after*. Otherwise a hypothetical
765
* replacement fneg(X) -> fabs(X) would insert the fabs() instruction
766
* before X! This can also occur for things like fneg(X.wzyx) -> X.wzyx
767
* in vector mode. A move instruction to handle the swizzle will get
770
* This manifested in a single OpenGL ES 2.0 CTS vertex shader test on
771
* older Intel GPU that use vector-mode vertex processing.
773
build->cursor = nir_after_instr(&src_instr->instr);
775
build->cursor = nir_before_instr(&instr->instr);
778
state.states = states;
780
nir_alu_src val = construct_value(build, replace,
781
instr->dest.dest.ssa.num_components,
782
instr->dest.dest.ssa.bit_size,
783
&state, &instr->instr);
785
/* Note that NIR builder will elide the MOV if it's a no-op, which may
786
* allow more work to be done in a single pass through algebraic.
788
nir_ssa_def *ssa_val =
789
nir_mov_alu(build, val, instr->dest.dest.ssa.num_components);
790
if (ssa_val->index == util_dynarray_num_elements(states, uint16_t)) {
791
util_dynarray_append(states, uint16_t, 0);
792
nir_algebraic_automaton(ssa_val->parent_instr, states, table->pass_op_table);
795
/* Rewrite the uses of the old SSA value to the new one, and recurse
796
* through the uses updating the automaton's state.
798
nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, ssa_val);
799
nir_algebraic_update_automaton(ssa_val->parent_instr, algebraic_worklist,
800
states, table->pass_op_table);
802
/* Nothing uses the instr any more, so drop it out of the program. Note
803
* that the instr may be in the worklist still, so we can't free it
806
nir_instr_remove(&instr->instr);
812
nir_algebraic_automaton(nir_instr *instr, struct util_dynarray *states,
813
const struct per_op_table *pass_op_table)
815
switch (instr->type) {
816
case nir_instr_type_alu: {
817
nir_alu_instr *alu = nir_instr_as_alu(instr);
819
uint16_t search_op = nir_search_op_for_nir_op(op);
820
const struct per_op_table *tbl = &pass_op_table[search_op];
821
if (tbl->num_filtered_states == 0)
824
/* Calculate the index into the transition table. Note the index
825
* calculated must match the iteration order of Python's
826
* itertools.product(), which was used to emit the transition
830
for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
831
index *= tbl->num_filtered_states;
833
index += tbl->filter[*util_dynarray_element(states, uint16_t,
834
alu->src[i].src.ssa->index)];
837
uint16_t *state = util_dynarray_element(states, uint16_t,
838
alu->dest.dest.ssa.index);
839
if (*state != tbl->table[index]) {
840
*state = tbl->table[index];
846
case nir_instr_type_load_const: {
847
nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
848
uint16_t *state = util_dynarray_element(states, uint16_t,
849
load_const->def.index);
850
if (*state != CONST_STATE) {
851
*state = CONST_STATE;
863
nir_algebraic_instr(nir_builder *build, nir_instr *instr,
864
struct hash_table *range_ht,
865
const bool *condition_flags,
866
const nir_algebraic_table *table,
867
struct util_dynarray *states,
868
nir_instr_worklist *worklist)
871
if (instr->type != nir_instr_type_alu)
874
nir_alu_instr *alu = nir_instr_as_alu(instr);
875
if (!alu->dest.dest.is_ssa)
878
unsigned bit_size = alu->dest.dest.ssa.bit_size;
879
const unsigned execution_mode =
880
build->shader->info.float_controls_execution_mode;
881
const bool ignore_inexact =
882
nir_is_float_control_signed_zero_inf_nan_preserve(execution_mode, bit_size) ||
883
nir_is_denorm_flush_to_zero(execution_mode, bit_size);
885
int xform_idx = *util_dynarray_element(states, uint16_t,
886
alu->dest.dest.ssa.index);
887
for (const struct transform *xform = &table->transforms[table->transform_offsets[xform_idx]];
888
xform->condition_offset != ~0;
890
if (condition_flags[xform->condition_offset] &&
891
!(table->values[xform->search].expression.inexact && ignore_inexact) &&
892
nir_replace_instr(build, alu, range_ht, states, table,
893
&table->values[xform->search].expression,
894
&table->values[xform->replace].value, worklist)) {
895
_mesa_hash_table_clear(range_ht, NULL);
904
nir_algebraic_impl(nir_function_impl *impl,
905
const bool *condition_flags,
906
const nir_algebraic_table *table)
908
bool progress = false;
911
nir_builder_init(&build, impl);
913
/* Note: it's important here that we're allocating a zeroed array, since
914
* state 0 is the default state, which means we don't have to visit
915
* anything other than constants and ALU instructions.
917
struct util_dynarray states = {0};
918
if (!util_dynarray_resize(&states, uint16_t, impl->ssa_alloc)) {
919
nir_metadata_preserve(impl, nir_metadata_all);
922
memset(states.data, 0, states.size);
924
struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
926
nir_instr_worklist *worklist = nir_instr_worklist_create();
928
/* Walk top-to-bottom setting up the automaton state. */
929
nir_foreach_block(block, impl) {
930
nir_foreach_instr(instr, block) {
931
nir_algebraic_automaton(instr, &states, table->pass_op_table);
935
/* Put our instrs in the worklist such that we're popping the last instr
936
* first. This will encourage us to match the biggest source patterns when
939
nir_foreach_block_reverse(block, impl) {
940
nir_foreach_instr_reverse(instr, block) {
941
if (instr->type == nir_instr_type_alu)
942
nir_instr_worklist_push_tail(worklist, instr);
947
while ((instr = nir_instr_worklist_pop_head(worklist))) {
948
/* The worklist can have an instr pushed to it multiple times if it was
949
* the src of multiple instrs that also got optimized, so make sure that
950
* we don't try to re-optimize an instr we already handled.
952
if (exec_node_is_tail_sentinel(&instr->node))
955
progress |= nir_algebraic_instr(&build, instr,
956
range_ht, condition_flags,
957
table, &states, worklist);
960
nir_instr_worklist_destroy(worklist);
961
ralloc_free(range_ht);
962
util_dynarray_fini(&states);
965
nir_metadata_preserve(impl, nir_metadata_block_index |
966
nir_metadata_dominance);
968
nir_metadata_preserve(impl, nir_metadata_all);