/*
 * Copyright © 2019 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "ir3_nir.h"
#include "compiler/nir/nir_builder.h"
/**
 * This pass moves varying fetches (and the instructions they depend on)
 * into the start block.
 *
 * We need to set the (ei) "end input" flag on the last varying fetch.
 * And we want to ensure that all threads execute the instruction that
 * sets (ei). The easiest way to ensure this is to move all varying
 * fetches into the start block. Which is something we used to get for
 * free by using lower_all_io_to_temps=true.
 *
 * This may come at the cost of additional register usage. OTOH setting
 * the (ei) flag earlier probably frees up more VS to run.
 *
 * Not all varying fetches could be pulled into the start block.
 * If there are fetches we couldn't pull, like load_interpolated_input
 * with offset which depends on a non-reorderable ssbo load or on a
 * phi node, this pass is skipped since it would be hard to find a place
 * to set the (ei) flag (beside at the very end).
 * a5xx and a6xx do automatically release varying storage at the end.
 */
49
nir_block *start_block;
50
bool precondition_failed;
55
nir_block *start_block;
58
static void check_precondition_instr(precond_state *state, nir_instr *instr);
59
static void move_instruction_to_start_block(state *state, nir_instr *instr);
62
check_precondition_src(nir_src *src, void *state)
64
check_precondition_instr(state, src->ssa->parent_instr);
68
/* Recursively check if there is even a single dependency which
72
check_precondition_instr(precond_state *state, nir_instr *instr)
74
if (instr->block == state->start_block)
77
switch (instr->type) {
78
case nir_instr_type_alu:
79
case nir_instr_type_deref:
80
case nir_instr_type_load_const:
81
case nir_instr_type_ssa_undef:
82
/* These could be safely moved around */
84
case nir_instr_type_intrinsic: {
85
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
86
if (!nir_intrinsic_can_reorder(intr)) {
87
state->precondition_failed = true;
93
state->precondition_failed = true;
97
nir_foreach_src(instr, check_precondition_src, state);
101
check_precondition_block(precond_state *state, nir_block *block)
103
nir_foreach_instr_safe (instr, block) {
104
if (instr->type != nir_instr_type_intrinsic)
107
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
109
switch (intr->intrinsic) {
110
case nir_intrinsic_load_interpolated_input:
111
case nir_intrinsic_load_input:
117
check_precondition_instr(state, instr);
119
if (state->precondition_failed)
125
move_src(nir_src *src, void *state)
127
/* At this point we shouldn't have any non-ssa src: */
128
debug_assert(src->is_ssa);
129
move_instruction_to_start_block(state, src->ssa->parent_instr);
134
move_instruction_to_start_block(state *state, nir_instr *instr)
136
/* nothing to do if the instruction is already in the start block */
137
if (instr->block == state->start_block)
140
/* first move (recursively) all src's to ensure they appear before
141
* load*_input that we are trying to move:
143
nir_foreach_src(instr, move_src, state);
145
/* and then move the instruction itself:
147
exec_node_remove(&instr->node);
148
exec_list_push_tail(&state->start_block->instr_list, &instr->node);
149
instr->block = state->start_block;
153
move_varying_inputs_block(state *state, nir_block *block)
155
bool progress = false;
157
nir_foreach_instr_safe (instr, block) {
158
if (instr->type != nir_instr_type_intrinsic)
161
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
163
switch (intr->intrinsic) {
164
case nir_intrinsic_load_interpolated_input:
165
case nir_intrinsic_load_input:
166
/* TODO any others to handle? */
172
debug_assert(intr->dest.is_ssa);
174
move_instruction_to_start_block(state, instr);
183
ir3_nir_move_varying_inputs(nir_shader *shader)
185
bool progress = false;
187
debug_assert(shader->info.stage == MESA_SHADER_FRAGMENT);
189
nir_foreach_function (function, shader) {
195
state.precondition_failed = false;
196
state.start_block = nir_start_block(function->impl);
198
nir_foreach_block (block, function->impl) {
199
if (block == state.start_block)
202
check_precondition_block(&state, block);
204
if (state.precondition_failed)
209
nir_foreach_function (function, shader) {
215
state.shader = shader;
216
state.start_block = nir_start_block(function->impl);
218
bool progress = false;
219
nir_foreach_block (block, function->impl) {
220
/* don't need to move anything that is already in the first block */
221
if (block == state.start_block)
223
progress |= move_varying_inputs_block(&state, block);
227
nir_metadata_preserve(
228
function->impl, nir_metadata_block_index | nir_metadata_dominance);