/*
 * Copyright © 2016 Intel Corporation
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25
#include "nir_control_flow.h"
26
#include "nir_builder.h"
29
/**
 * This file implements an optimization for multiview. Some GPU's have a
 * special mode which allows the vertex shader (or last stage in the geometry
 * pipeline) to create multiple primitives in different layers of the
 * framebuffer at once by writing multiple copies of gl_Position. The
 * assumption is that in most uses of multiview, the only use of gl_ViewIndex
 * is to change the position to implement the parallax effect, and other
 * varyings will be the same between the different views. We put the body of
 * the original vertex shader in a loop, writing to a different copy of
 * gl_Position each loop iteration, and then let other optimizations clean up
 * the result.
 */
42
shader_writes_to_memory(nir_shader *shader)
44
/* With multiview, we would need to ensure that memory writes happen either
45
* once or once per view. Since combination of multiview and memory writes
46
* is not expected, we'll just skip this optimization in this case.
49
nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);
51
nir_foreach_block(block, entrypoint) {
52
nir_foreach_instr(instr, block) {
53
if (instr->type != nir_instr_type_intrinsic)
55
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
57
switch (intrin->intrinsic) {
58
case nir_intrinsic_deref_atomic_add:
59
case nir_intrinsic_deref_atomic_imin:
60
case nir_intrinsic_deref_atomic_umin:
61
case nir_intrinsic_deref_atomic_imax:
62
case nir_intrinsic_deref_atomic_umax:
63
case nir_intrinsic_deref_atomic_and:
64
case nir_intrinsic_deref_atomic_or:
65
case nir_intrinsic_deref_atomic_xor:
66
case nir_intrinsic_deref_atomic_exchange:
67
case nir_intrinsic_deref_atomic_comp_swap:
68
case nir_intrinsic_store_ssbo:
69
case nir_intrinsic_ssbo_atomic_add:
70
case nir_intrinsic_ssbo_atomic_imin:
71
case nir_intrinsic_ssbo_atomic_umin:
72
case nir_intrinsic_ssbo_atomic_imax:
73
case nir_intrinsic_ssbo_atomic_umax:
74
case nir_intrinsic_ssbo_atomic_and:
75
case nir_intrinsic_ssbo_atomic_or:
76
case nir_intrinsic_ssbo_atomic_xor:
77
case nir_intrinsic_ssbo_atomic_exchange:
78
case nir_intrinsic_ssbo_atomic_comp_swap:
79
case nir_intrinsic_store_shared:
80
case nir_intrinsic_store_shared2_amd:
81
case nir_intrinsic_shared_atomic_add:
82
case nir_intrinsic_shared_atomic_imin:
83
case nir_intrinsic_shared_atomic_umin:
84
case nir_intrinsic_shared_atomic_imax:
85
case nir_intrinsic_shared_atomic_umax:
86
case nir_intrinsic_shared_atomic_and:
87
case nir_intrinsic_shared_atomic_or:
88
case nir_intrinsic_shared_atomic_xor:
89
case nir_intrinsic_shared_atomic_exchange:
90
case nir_intrinsic_shared_atomic_comp_swap:
91
case nir_intrinsic_image_deref_store:
92
case nir_intrinsic_image_deref_atomic_add:
93
case nir_intrinsic_image_deref_atomic_fadd:
94
case nir_intrinsic_image_deref_atomic_umin:
95
case nir_intrinsic_image_deref_atomic_umax:
96
case nir_intrinsic_image_deref_atomic_imin:
97
case nir_intrinsic_image_deref_atomic_imax:
98
case nir_intrinsic_image_deref_atomic_fmin:
99
case nir_intrinsic_image_deref_atomic_fmax:
100
case nir_intrinsic_image_deref_atomic_and:
101
case nir_intrinsic_image_deref_atomic_or:
102
case nir_intrinsic_image_deref_atomic_xor:
103
case nir_intrinsic_image_deref_atomic_exchange:
104
case nir_intrinsic_image_deref_atomic_comp_swap:
118
nir_shader_uses_view_index(nir_shader *shader)
120
nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);
122
nir_foreach_block(block, entrypoint) {
123
nir_foreach_instr(instr, block) {
124
if (instr->type != nir_instr_type_intrinsic)
127
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
128
if (intrin->intrinsic == nir_intrinsic_load_view_index)
137
shader_only_position_uses_view_index(nir_shader *shader)
139
nir_shader *shader_no_position = nir_shader_clone(NULL, shader);
140
nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader_no_position);
142
/* Remove the store position from a cloned shader. */
143
nir_foreach_block(block, entrypoint) {
144
nir_foreach_instr_safe(instr, block) {
145
if (instr->type != nir_instr_type_intrinsic)
148
nir_intrinsic_instr *store = nir_instr_as_intrinsic(instr);
149
if (store->intrinsic != nir_intrinsic_store_deref)
152
nir_variable *var = nir_intrinsic_get_var(store, 0);
153
if (var->data.location != VARYING_SLOT_POS)
156
nir_instr_remove(&store->instr);
160
/* Clean up shader so unused load_view_index intrinsics are removed. */
164
progress |= nir_opt_dead_cf(shader_no_position);
166
/* Peephole select will drop if-blocks that have then and else empty,
167
* which will remove the usage of an SSA in the condition.
169
progress |= nir_opt_peephole_select(shader_no_position, 0, false, false);
171
progress |= nir_opt_dce(shader_no_position);
174
bool uses_view_index = nir_shader_uses_view_index(shader_no_position);
176
ralloc_free(shader_no_position);
177
return !uses_view_index;
180
/* Return true if it's safe to call nir_lower_multiview() on this vertex
181
* shader. Note that this only handles driver-agnostic checks, i.e. things
182
* which would make nir_lower_multiview() incorrect. Any driver-specific
183
* checks, e.g. for sufficient varying space or performance considerations,
184
* should be handled in the driver.
186
* Note that we don't handle the more complex checks needed for lowering
187
* pipelines with geometry or tessellation shaders.
191
nir_can_lower_multiview(nir_shader *shader)
193
bool writes_position = false;
194
nir_foreach_shader_out_variable(var, shader) {
195
if (var->data.location == VARYING_SLOT_POS) {
196
writes_position = true;
201
/* Don't bother handling this edge case. */
202
if (!writes_position)
205
return !shader_writes_to_memory(shader) &&
206
shader_only_position_uses_view_index(shader);
210
* The lowering. Call with the last active geometry stage.
214
nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
216
assert(shader->info.stage != MESA_SHADER_FRAGMENT);
217
int view_count = util_bitcount(view_mask);
219
nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);
221
/* Update position to refer to an array. */
222
nir_variable *pos_var = NULL;
223
nir_foreach_shader_out_variable(var, shader) {
224
if (var->data.location == VARYING_SLOT_POS) {
225
assert(var->type == glsl_vec4_type());
226
var->type = glsl_array_type(glsl_vec4_type(), view_count, 0);
227
var->data.per_view = true;
236
nir_cf_list_extract(&body, &entrypoint->body);
239
nir_builder_init(&b, entrypoint);
240
b.cursor = nir_after_cf_list(&entrypoint->body);
242
/* Loop Index will go from 0 to view_count. */
243
nir_variable *loop_index_var =
244
nir_local_variable_create(entrypoint, glsl_uint_type(), "loop_index");
245
nir_deref_instr *loop_index_deref = nir_build_deref_var(&b, loop_index_var);
246
nir_store_deref(&b, loop_index_deref, nir_imm_int(&b, 0), 1);
248
/* Array of view index values that are active in the loop. Note that the
249
* loop index only matches the view index if there are no gaps in the
252
nir_variable *view_index_var = nir_local_variable_create(
253
entrypoint, glsl_array_type(glsl_uint_type(), view_count, 0), "view_index");
254
nir_deref_instr *view_index_deref = nir_build_deref_var(&b, view_index_var);
256
int array_position = 0;
257
uint32_t view_mask_temp = view_mask;
258
while (view_mask_temp) {
259
uint32_t view_index = u_bit_scan(&view_mask_temp);
260
nir_store_deref(&b, nir_build_deref_array_imm(&b, view_index_deref, array_position),
261
nir_imm_int(&b, view_index), 1);
266
/* Create the equivalent of
269
* if (loop_index >= view_count):
272
* view_index = active_indices[loop_index]
273
* pos_deref = &pos[loop_index]
275
* # Placeholder for the body to be reinserted.
279
* Later both `view_index` and `pos_deref` will be used to rewrite the
280
* original shader body.
283
nir_loop* loop = nir_push_loop(&b);
285
nir_ssa_def *loop_index = nir_load_deref(&b, loop_index_deref);
286
nir_ssa_def *cmp = nir_ige(&b, loop_index, nir_imm_int(&b, view_count));
287
nir_if *loop_check = nir_push_if(&b, cmp);
288
nir_jump(&b, nir_jump_break);
289
nir_pop_if(&b, loop_check);
291
nir_ssa_def *view_index =
292
nir_load_deref(&b, nir_build_deref_array(&b, view_index_deref, loop_index));
293
nir_deref_instr *pos_deref =
294
nir_build_deref_array(&b, nir_build_deref_var(&b, pos_var), loop_index);
296
nir_store_deref(&b, loop_index_deref, nir_iadd_imm(&b, loop_index, 1), 1);
297
nir_pop_loop(&b, loop);
299
/* Reinsert the body. */
300
b.cursor = nir_after_instr(&pos_deref->instr);
301
nir_cf_reinsert(&body, b.cursor);
303
nir_foreach_block(block, entrypoint) {
304
nir_foreach_instr_safe(instr, block) {
305
if (instr->type != nir_instr_type_intrinsic)
308
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
310
switch (intrin->intrinsic) {
311
case nir_intrinsic_load_view_index: {
312
assert(intrin->dest.is_ssa);
313
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, view_index);
317
case nir_intrinsic_store_deref: {
318
nir_variable *var = nir_intrinsic_get_var(intrin, 0);
319
if (var == pos_var) {
320
nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
322
nir_instr_rewrite_src(instr, &intrin->src[0],
323
nir_src_for_ssa(&pos_deref->dest.ssa));
325
/* Remove old deref since it has the wrong type. */
326
nir_deref_instr_remove_if_unused(old_deref);
331
case nir_intrinsic_load_deref:
332
if (nir_intrinsic_get_var(intrin, 0) == pos_var) {
333
unreachable("Should have lowered I/O to temporaries "
334
"so no load_deref on position output is expected.");
338
case nir_intrinsic_copy_deref:
339
unreachable("Should have lowered copy_derefs at this point");
349
nir_metadata_preserve(entrypoint, nir_metadata_none);