/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "nir.h"
#include "nir_instr_set.h"

/*
 * Implements Global Code Motion. A description of GCM can be found in
 * "Global Code Motion; Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways. The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and
 * verify correctness.
 */
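
/* At a high level the pass proceeds in phases (see opt_gcm_impl below):
 * pin the instructions that must not move, optionally value-number the
 * un-pinned ones, schedule everything early, schedule everything late,
 * and finally place the instructions back into their chosen blocks.
 */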

/* This is used to stop GCM from moving instructions out of a loop if the loop
 * contains too many instructions and moving them would create excess spilling.
 *
 * TODO: Figure out a better way to decide if we should remove instructions from
 * a loop.
 */
#define MAX_LOOP_INSTRUCTIONS 100

struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   unsigned loop_instr_count;

   /* The loop the block is nested inside or NULL */
   nir_loop *loop;

   /* The last instruction inserted into this block. This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};

struct gcm_instr_info {
   nir_block *early_block;
};

/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED =                (1 << 0),
   GCM_INSTR_SCHEDULE_EARLIER_ONLY = (1 << 1),
   GCM_INSTR_SCHEDULED_EARLY =       (1 << 2),
   GCM_INSTR_SCHEDULED_LATE =        (1 << 3),
   GCM_INSTR_PLACED =                (1 << 4),
};
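
/* Rough lifecycle of pass_flags as used by the functions below:
 * gcm_pin_instructions() leaves each instruction as PINNED, PLACED or 0,
 * the early/late scheduling walks OR in SCHEDULED_EARLY/SCHEDULED_LATE as
 * they visit instructions, and gcm_place_instr() finally sets PLACED once
 * an instruction has been re-inserted into its chosen block.
 */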

struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   bool progress;

   /* The list of non-pinned instructions. As we do the late scheduling,
    * we pull non-pinned instructions out of their blocks and place them in
    * this list. This saves us from having linked-list problems when we go
    * to put instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;

   unsigned num_instrs;
   struct gcm_instr_info *instr_infos;
};

static unsigned
get_loop_instr_count(struct exec_list *cf_list)
{
   unsigned loop_instr_count = 0;
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         nir_foreach_instr(instr, block) {
            loop_instr_count++;
         }
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         loop_instr_count += get_loop_instr_count(&if_stmt->then_list);
         loop_instr_count += get_loop_instr_count(&if_stmt->else_list);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         loop_instr_count += get_loop_instr_count(&loop->body);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }

   return loop_instr_count;
}

/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     nir_loop *loop, unsigned loop_depth,
                     unsigned loop_instr_count)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         state->blocks[block->index].loop_instr_count = loop_instr_count;
         state->blocks[block->index].loop = loop;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop, loop_depth, ~0u);
         gcm_build_block_info(&if_stmt->else_list, state, loop, loop_depth, ~0u);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop, loop_depth + 1,
                              get_loop_instr_count(&loop->body));
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}

static bool
is_src_scalarizable(nir_src *src)
{
   nir_instr *src_instr = src->ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized. We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_ssa_undef:
      return true;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         /* Don't scalarize if we see a load of a local variable because it
          * might turn into one of the things we can't scalarize.
          */
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return !nir_deref_mode_may_be(deref, (nir_var_function_temp |
                                               nir_var_shader_temp));
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
         return true;

      default:
         break;
      }

      return false;
   }

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

static bool
is_binding_uniform(nir_src src)
{
   nir_binding binding = nir_chase_binding(src);
   if (!binding.success)
      return false;

   for (unsigned i = 0; i < binding.num_indices; i++) {
      if (!nir_src_is_always_uniform(binding.indices[i]))
         return false;
   }

   return true;
}

static void
pin_intrinsic(nir_intrinsic_instr *intrin)
{
   nir_instr *instr = &intrin->instr;

   if (!nir_intrinsic_can_reorder(intrin)) {
      instr->pass_flags = GCM_INSTR_PINNED;
      return;
   }

   instr->pass_flags = 0;

   /* If the intrinsic requires a uniform source, we can't safely move it across non-uniform
    * control flow if it's not uniform at the point it's defined.
    * Stores and atomics can never be re-ordered, so we don't have to consider them here.
    */
   bool non_uniform = nir_intrinsic_has_access(intrin) &&
                      (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM);
   if (!non_uniform &&
       (intrin->intrinsic == nir_intrinsic_load_ubo ||
        intrin->intrinsic == nir_intrinsic_load_ssbo ||
        intrin->intrinsic == nir_intrinsic_get_ubo_size ||
        intrin->intrinsic == nir_intrinsic_get_ssbo_size ||
        nir_intrinsic_has_image_dim(intrin) ||
        ((intrin->intrinsic == nir_intrinsic_load_deref ||
          intrin->intrinsic == nir_intrinsic_deref_buffer_array_length) &&
         nir_deref_mode_may_be(nir_src_as_deref(intrin->src[0]),
                               nir_var_mem_ubo | nir_var_mem_ssbo)))) {
      if (!is_binding_uniform(intrin->src[0]))
         instr->pass_flags = GCM_INSTR_PINNED;
   } else if (intrin->intrinsic == nir_intrinsic_load_push_constant) {
      if (!nir_src_is_always_uniform(intrin->src[0]))
         instr->pass_flags = GCM_INSTR_PINNED;
   } else if (intrin->intrinsic == nir_intrinsic_load_deref &&
              nir_deref_mode_is(nir_src_as_deref(intrin->src[0]),
                                nir_var_mem_push_const)) {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      while (deref->deref_type != nir_deref_type_var) {
         if ((deref->deref_type == nir_deref_type_array ||
              deref->deref_type == nir_deref_type_ptr_as_array) &&
             !nir_src_is_always_uniform(deref->arr.index)) {
            instr->pass_flags = GCM_INSTR_PINNED;
            return;
         }
         deref = nir_deref_instr_parent(deref);
         if (!deref) {
            instr->pass_flags = GCM_INSTR_PINNED;
            return;
         }
      }
   }
}
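
/* For illustration: a load_ubo whose descriptor/binding index is not provably
 * uniform falls into the first case above and stays pinned, so GCM will not
 * move it across possibly non-uniform control flow.
 */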

/* Walks the instruction list and marks immovable instructions as pinned or
 * placed.
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED, GCM_INSTR_PLACED or 0.
 */
static void
gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
{
   state->num_instrs = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* Index the instructions for use in gcm_state::instrs */
         instr->index = state->num_instrs++;

         switch (instr->type) {
         case nir_instr_type_alu:
            switch (nir_instr_as_alu(instr)->op) {
            case nir_op_fddx:
            case nir_op_fddy:
            case nir_op_fddx_fine:
            case nir_op_fddy_fine:
            case nir_op_fddx_coarse:
            case nir_op_fddy_coarse:
               /* These can only go in uniform control flow */
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
               break;

            case nir_op_mov:
               if (!is_src_scalarizable(&(nir_instr_as_alu(instr)->src[0].src))) {
                  instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               }
               FALLTHROUGH;

            default:
               instr->pass_flags = 0;
               break;
            }
            break;

         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            if (nir_tex_instr_has_implicit_derivative(tex))
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;

            for (unsigned i = 0; i < tex->num_srcs; i++) {
               nir_tex_src *src = &tex->src[i];
               switch (src->src_type) {
               case nir_tex_src_texture_deref:
                  if (!tex->texture_non_uniform && !is_binding_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               case nir_tex_src_sampler_deref:
                  if (!tex->sampler_non_uniform && !is_binding_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               case nir_tex_src_texture_offset:
               case nir_tex_src_texture_handle:
                  if (!tex->texture_non_uniform && !nir_src_is_always_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               case nir_tex_src_sampler_offset:
               case nir_tex_src_sampler_handle:
                  if (!tex->sampler_non_uniform && !nir_src_is_always_uniform(src->src))
                     instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               default:
                  break;
               }
            }
            break;
         }

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
            instr->pass_flags = 0;
            break;

         case nir_instr_type_intrinsic:
            pin_intrinsic(nir_instr_as_intrinsic(instr));
            break;

         case nir_instr_type_jump:
         case nir_instr_type_ssa_undef:
         case nir_instr_type_phi:
            instr->pass_flags = GCM_INSTR_PLACED;
            break;

         default:
            unreachable("Invalid instruction type in GCM");
         }

         if (!(instr->pass_flags & GCM_INSTR_PLACED)) {
            /* If this is an unplaced instruction, go ahead and pull it out of
             * the program and put it on the instrs list. This has a couple
             * of benefits. First, it makes the scheduling algorithm more
             * efficient because we can avoid walking over basic blocks.
             * Second, it keeps us from causing linked list confusion when
             * we're trying to put everything in its proper place at the end
             * of the pass.
             *
             * Note that we don't use nir_instr_remove here because that also
             * cleans up uses and defs and we want to keep that information.
             */
            exec_node_remove(&instr->node);
            exec_list_push_tail(&state->instrs, &instr->node);
         }
      }
   }
}

static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);

/** Update an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction. It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index. Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can just go ahead and compare indices.
    */
   struct gcm_instr_info *src_info =
      &state->instr_infos[src->ssa->parent_instr->index];
   struct gcm_instr_info *info = &state->instr_infos[instr->index];
   if (info->early_block->index < src_info->early_block->index)
      info->early_block = src_info->early_block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above. We may
    * still be iterating through sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}
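
/* For illustration: if this instruction's current early block has index 2
 * and a source's early block has index 5, the source's block is deeper in
 * the dominance tree, so the early block is lowered to index 5. The final
 * early block is therefore the deepest of all the sources' early blocks.
 */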

/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating the early_block field of
 * the corresponding gcm_instr_info entry.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned/placed instructions always get scheduled in their original block so
    * we don't need to do anything. Also, bailing here keeps us from ever
    * following the sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED ||
       instr->pass_flags & GCM_INSTR_PLACED) {
      state->instr_infos[instr->index].early_block = instr->block;
      return;
   }

   /* Start with the instruction at the top. As we iterate over the
    * sources, it will get moved down as needed.
    */
   state->instr_infos[instr->index].early_block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}

static bool
set_block_for_loop_instr(struct gcm_state *state, nir_instr *instr,
                         nir_block *block)
{
   /* If the instruction wasn't in a loop to begin with we don't want to push
    * it down into one.
    */
   nir_loop *loop = state->blocks[instr->block->index].loop;
   if (loop == NULL)
      return true;

   if (nir_block_dominates(instr->block, block))
      return true;

   /* If the loop only executes a single time, i.e. it's wrapped in a:
    *    do { ... break; } while (true)
    * don't move the instruction as it will not help anything.
    */
   if (loop->info->limiting_terminator == NULL && !loop->info->complex_loop &&
       nir_block_ends_in_break(nir_loop_last_block(loop)))
      return false;

   /* Being too aggressive with how we pull instructions out of loops can
    * result in extra register pressure and spilling. For example it's fairly
    * common for loops in compute shaders to calculate SSBO offsets using
    * the workgroup id, subgroup id and subgroup invocation; pulling all
    * these calculations outside the loop causes register pressure.
    *
    * To work around these issues for now we only allow constant and texture
    * instructions to be moved outside their original loops, or instructions
    * where the total loop instruction count is less than
    * MAX_LOOP_INSTRUCTIONS.
    *
    * TODO: figure out some more heuristics to allow more to be moved out of
    * loops.
    */
   if (state->blocks[instr->block->index].loop_instr_count < MAX_LOOP_INSTRUCTIONS)
      return true;

   if (instr->type == nir_instr_type_load_const ||
       instr->type == nir_instr_type_tex)
      return true;

   return false;
}

static nir_block *
gcm_choose_block_for_instr(nir_instr *instr, nir_block *early_block,
                           nir_block *late_block, struct gcm_state *state)
{
   assert(nir_block_dominates(early_block, late_block));

   nir_block *best = late_block;
   for (nir_block *block = late_block; block != NULL; block = block->imm_dom) {
      if (state->blocks[block->index].loop_depth <
          state->blocks[best->index].loop_depth &&
          set_block_for_loop_instr(state, instr, block))
         best = block;
      else if (block == instr->block)
         best = block;

      if (block == early_block)
         break;
   }

   return best;
}
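
/* For illustration: starting at the late block and walking up immediate
 * dominators toward the early block, "best" only moves to a strictly
 * shallower loop depth (or back to the instruction's original block), so an
 * instruction computed inside a loop but only used after it tends to land in
 * the first block outside that loop, subject to set_block_for_loop_instr.
 */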

static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);

/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses. It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special. SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source. We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = use_src->parent_if;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   nir_block *early_block =
      state->instr_infos[def->parent_instr->index].early_block;

   /* Some instructions may never be used. Flag them and the instruction
    * placement code will get rid of them for us.
    */
   if (lca == NULL) {
      def->parent_instr->block = NULL;
      return true;
   }

   if (def->parent_instr->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
       lca != def->parent_instr->block &&
       nir_block_dominates(def->parent_instr->block, lca)) {
      lca = def->parent_instr->block;
   }

   /* We now have the LCA of all of the uses. If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best_block =
      gcm_choose_block_for_instr(def->parent_instr, early_block, lca, state);

   if (def->parent_instr->block != best_block)
      state->progress = true;

   def->parent_instr->block = best_block;

   return true;
}
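
/* For illustration: if a value is used in both arms of an if, the LCA of
 * those two uses is the block before the if, so that is the latest point
 * the definition can be placed while still dominating both uses.
 */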

/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree. The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies. Instead, it
 * first finds the latest possible place it can schedule the instruction and
 * then possibly schedules it earlier than that. The actual location is as
 * far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned/placed instructions are already scheduled so we don't need to do
    * anything. Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PLACED ||
       instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}

static bool
gcm_replace_def_with_undef(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   if (nir_ssa_def_is_unused(def))
      return true;

   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(state->impl->function->shader,
                                 def->num_components, def->bit_size);
   nir_instr_insert(nir_before_cf_list(&state->impl->body), &undef->instr);
   nir_ssa_def_rewrite_uses(def, &undef->def);

   return true;
}

/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone. This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we simply insert instructions in the reverse order they were
 * extracted. This places instructions that were scheduled earlier at the
 * end of their new block and instructions that were scheduled later at the
 * start of their new block.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   if (instr->block == NULL) {
      nir_foreach_ssa_def(instr, gcm_replace_def_with_undef, state);
      nir_instr_remove(instr);
      return;
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   exec_node_remove(&instr->node);

   if (block_info->last_instr) {
      exec_node_insert_node_before(&block_info->last_instr->node,
                                   &instr->node);
   } else {
      /* Schedule it at the end of the block */
      nir_instr *jump_instr = nir_block_last_instr(instr->block);
      if (jump_instr && jump_instr->type == nir_instr_type_jump) {
         exec_node_insert_node_before(&jump_instr->node, &instr->node);
      } else {
         exec_list_push_tail(&instr->block->instr_list, &instr->node);
      }
   }

   block_info->last_instr = instr;
}
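
/* For illustration: instructions are handed to gcm_place_instr() from the
 * tail of state->instrs (see opt_gcm_impl below), and each one is inserted
 * before the last instruction already placed in its block, so within a block
 * the original extraction order is restored.
 */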

static bool
opt_gcm_impl(nir_shader *shader, nir_function_impl *impl, bool value_number)
{
   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);
   nir_metadata_require(impl, nir_metadata_loop_analysis,
                        shader->options->force_indirect_unrolling);

   /* A previous pass may have left pass_flags dirty, so clear it all out. */
   nir_foreach_block(block, impl)
      nir_foreach_instr(instr, block)
         instr->pass_flags = 0;

   struct gcm_state state;

   state.impl = impl;
   state.progress = false;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   gcm_build_block_info(&impl->body, &state, NULL, 0, ~0u);

   gcm_pin_instructions(impl, &state);

   state.instr_infos =
      rzalloc_array(NULL, struct gcm_instr_info, state.num_instrs);

   if (value_number) {
      struct set *gvn_set = nir_instr_set_create(NULL);
      foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
         if (instr->pass_flags & GCM_INSTR_PINNED)
            continue;

         if (nir_instr_set_add_or_rewrite(gvn_set, instr, NULL))
            state.progress = true;
      }
      nir_instr_set_destroy(gvn_set);
   }

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);
   ralloc_free(state.instr_infos);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance |
                               nir_metadata_loop_analysis);

   return state.progress;
}

bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= opt_gcm_impl(shader, function->impl, value_number);
   }

   return progress;
}
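
/* Typical usage (illustrative): a driver's optimization loop would call
 *
 *    progress |= nir_opt_gcm(shader, true);
 *
 * where passing value_number == true also lets the pass value-number the
 * un-pinned instructions before scheduling them.
 */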