/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_dynarray.h"

/**
 * Elimination of dead writes based on derefs.
 *
 * Dead writes are stores and copies that write to a deref which then gets
 * another write before it is used (read or sourced for a copy). Those
 * writes can be removed, since they don't affect anything.
 *
 * For derefs that refer to a memory area that can still be read after the
 * program finishes, the last write is considered used. The presence of
 * certain instructions may also cause writes to be considered used, e.g. a
 * memory barrier (in that case the value must be written, as another thread
 * might use it).
 *
 * The write mask of store instructions is taken into account, so a store may
 * also be removed because a combination of later stores overwrites its whole
 * value.
 */
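
/* As a sketch (illustrative pseudo-NIR, not exact syntax), in a block like
 *
 *    store_deref var, a    <- dead: fully overwritten before any read
 *    store_deref var, b
 *    x = load_deref var
 *
 * the first store can be removed without changing the result.
 */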

/* Entry for unused_writes arrays. */
struct write_entry {
   /* If NULL, the entry is free to be reused. */
   nir_intrinsic_instr *intrin;
   nir_component_mask_t mask;
   nir_deref_instr *dst;
};

static void
clear_unused_for_modes(struct util_dynarray *unused_writes, nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      if (nir_deref_mode_may_be(entry->dst, modes))
         *entry = util_dynarray_pop(unused_writes, struct write_entry);
   }
}

static void
clear_unused_for_read(struct util_dynarray *unused_writes, nir_deref_instr *src)
{
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      if (nir_compare_derefs(src, entry->dst) & nir_derefs_may_alias_bit)
         *entry = util_dynarray_pop(unused_writes, struct write_entry);
   }
}

static bool
update_unused_writes(struct util_dynarray *unused_writes,
                     nir_intrinsic_instr *intrin,
                     nir_deref_instr *dst, nir_component_mask_t mask)
{
   bool progress = false;

   /* This pass assumes that the destination of copies and stores is a deref
    * that ends in a vector or scalar (it is OK to have wildcards or
    * indirects for arrays).
    */
   assert(glsl_type_is_vector_or_scalar(dst->type));

   /* Find writes that are unused and can be removed. */
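   /* For example (hypothetical vec2 case): a store with write mask 0x3 that
    * is later covered by two stores with masks 0x1 and 0x2 ends up with an
    * empty mask and can be deleted.
    */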
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      nir_deref_compare_result comp = nir_compare_derefs(dst, entry->dst);
      if (comp & nir_derefs_a_contains_b_bit) {
         entry->mask &= ~mask;
         if (entry->mask == 0) {
            nir_instr_remove(&entry->intrin->instr);
            *entry = util_dynarray_pop(unused_writes, struct write_entry);
            progress = true;
         }
      }
   }

   /* Add the new write to the unused array. */
   struct write_entry new_entry = {
      .intrin = intrin,
      .mask = mask,
      .dst = dst,
   };

   util_dynarray_append(unused_writes, struct write_entry, new_entry);

   return progress;
}

static bool
remove_dead_write_vars_local(void *mem_ctx, nir_shader *shader, nir_block *block)
{
   bool progress = false;

   struct util_dynarray unused_writes;
   util_dynarray_init(&unused_writes, mem_ctx);

   nir_foreach_instr_safe(instr, block) {
      /* A call may read or write anything the callee can reach, so flush
       * every mode a callee could access.
       */
      if (instr->type == nir_instr_type_call) {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out |
                                                nir_var_shader_temp |
                                                nir_var_function_temp |
                                                nir_var_mem_ssbo |
                                                nir_var_mem_shared |
                                                nir_var_mem_global);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_control_barrier:
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier: {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out |
                                                nir_var_mem_ssbo |
                                                nir_var_mem_shared |
                                                nir_var_mem_global);
         break;
      }

      case nir_intrinsic_memory_barrier_buffer:
         clear_unused_for_modes(&unused_writes, nir_var_mem_ssbo |
                                                nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_shared:
         clear_unused_for_modes(&unused_writes, nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_tcs_patch:
         clear_unused_for_modes(&unused_writes, nir_var_shader_out);
         break;

      case nir_intrinsic_scoped_barrier: {
         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
            clear_unused_for_modes(&unused_writes,
                                   nir_intrinsic_memory_modes(intrin));
         }
         break;
      }

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter: {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out);
         break;
      }

      case nir_intrinsic_execute_callable:
      case nir_intrinsic_rt_execute_callable: {
         /* Mark the payload as read, since it can be used by the callee. */
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         clear_unused_for_read(&unused_writes, src);
         break;
      }

      case nir_intrinsic_trace_ray:
      case nir_intrinsic_rt_trace_ray: {
         /* Mark the payload as read, since it can be used by the callees. */
         nir_deref_instr *src = nir_src_as_deref(intrin->src[10]);
         clear_unused_for_read(&unused_writes, src);
         break;
      }

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
         /* Loads from read-only memory can't consume any tracked write. */
         if (nir_deref_mode_must_be(src, nir_var_read_only_modes))
            break;
         clear_unused_for_read(&unused_writes, src);
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
            /* Consider a volatile write to also be a sort of read. This
             * prevents us from deleting a non-volatile write just before a
             * volatile write thanks to a non-volatile write afterwards. It's
             * quite the corner case, but this should be safer and more
             * predictable for the programmer than allowing two non-volatile
             * writes to be combined with a volatile write between them.
             */
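            /* Illustrative sketch (pseudo-NIR, not exact syntax):
             *
             *    store_deref var, a            <- kept because of the
             *    store_deref var, b (volatile)    volatile write below
             *    store_deref var, c
             *
             * Without treating the volatile write as a read, the store of
             * `a` would look dead because of the store of `c`.
             */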
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         nir_component_mask_t mask = nir_intrinsic_write_mask(intrin);
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE) {
            clear_unused_for_read(&unused_writes, src);
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         /* Self-copies are removed. */
         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            nir_instr_remove(instr);
            progress = true;
            break;
         }

         clear_unused_for_read(&unused_writes, src);
         nir_component_mask_t mask = (1 << glsl_get_vector_elements(dst->type)) - 1;
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
      }

      default:
         break;
      }
   }

   /* All unused writes at the end of the block are kept, since we can't be
    * sure they'll be overwritten or not with local analysis only.
    */

   return progress;
}

static bool
remove_dead_write_vars_impl(void *mem_ctx, nir_shader *shader, nir_function_impl *impl)
{
   bool progress = false;

   nir_metadata_require(impl, nir_metadata_block_index);

   nir_foreach_block(block, impl)
      progress |= remove_dead_write_vars_local(mem_ctx, shader, block);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_opt_dead_write_vars(nir_shader *shader)
{
   void *mem_ctx = ralloc_context(NULL);
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      progress |= remove_dead_write_vars_impl(mem_ctx, shader, function->impl);
   }

   ralloc_free(mem_ctx);
   return progress;
}
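
/* Usage sketch (hypothetical caller; NIR_PASS is the standard helper macro
 * from nir.h):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, shader, nir_opt_dead_write_vars);
 *
 * Since the analysis is local to each block, callers typically run this in a
 * loop together with other copy/load-store passes until nothing changes.
 */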