/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"
#include "util/list.h"
#include "util/u_math.h"

/*
 * Combine stores of vectors to the same deref into a single store.
 *
 * This per-block pass keeps track of stores of vectors to the same
 * destination and combines them into the last store of the sequence. Dead
 * stores (or parts of the store) found during the process are removed.
 *
 * A pending combination becomes an actual combination in various situations:
 * at the end of the block, when another instruction uses the memory, or due
 * to volatile accesses.
 *
 * Besides vectors, the pass also looks at array derefs of vectors. For
 * direct array derefs, it works like a write mask access to the given
 * component. For indirect access there's no way to know beforehand what
 * component it will overlap with, so the combination is finished -- the
 * indirect remains unmodified.
 */

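/* For example (illustrative NIR-like pseudocode, not exact nir_print
 * output), two partial stores to the same vec4 variable
 *
 *    store_deref(deref_var &v, ssa_1, wrmask=x)
 *    store_deref(deref_var &v, ssa_2, wrmask=yz)
 *
 * are combined into the last store of the sequence,
 *
 *    store_deref(deref_var &v, vec4(ssa_1.x, ssa_2.y, ssa_2.z, undef),
 *                wrmask=xyz)
 *
 * after which the first store is removed because none of its components
 * remain live.
 */
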
/* Keep track of a group of stores that can be combined. All stores share the
 * same destination.
 */
struct combined_store {
   struct list_head link;

   nir_component_mask_t write_mask;
   nir_deref_instr *dst;

   /* Latest store added. It is reused when combining. */
   nir_intrinsic_instr *latest;

   /* Original store for each component. The number of times a store appears
    * in this array is kept in the store's pass_flags.
    */
   nir_intrinsic_instr *stores[NIR_MAX_VEC_COMPONENTS];
};

struct combine_stores_state {
   nir_variable_mode modes;

   /* Pending store combinations. */
   struct list_head pending;

   /* Per function impl state. */
   nir_builder b;
   bool progress;

   /* Allocator and freelist to reuse structs between functions. */
   void *lin_ctx;
   struct list_head freelist;
};

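/* Allocate a combined_store, reusing a struct from the freelist when one is
 * available. Reused structs are zeroed, so callers always see a fresh one.
 */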
static struct combined_store *
alloc_combined_store(struct combine_stores_state *state)
{
   struct combined_store *result;
   if (list_is_empty(&state->freelist)) {
      result = linear_zalloc_child(state->lin_ctx, sizeof(*result));
   } else {
      result = list_first_entry(&state->freelist,
                                struct combined_store,
                                link);
      list_del(&result->link);
      memset(result, 0, sizeof(*result));
   }
   return result;
}

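/* Retire a pending combination, putting the struct back on the freelist so
 * it can be reused by later combinations.
 */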
static void
free_combined_store(struct combine_stores_state *state,
                    struct combined_store *combo)
{
   list_del(&combo->link);
   combo->write_mask = 0;
   list_add(&combo->link, &state->freelist);
}

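/* Perform a pending combination: build a new source vector that gathers the
 * value of each component from the store that last wrote it, then rewrite
 * the latest store to write all combined components at once. Stores whose
 * components were all superseded are removed along the way.
 */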
static void
combine_stores(struct combine_stores_state *state,
               struct combined_store *combo)
{
   assert(combo->latest);
   assert(combo->latest->intrinsic == nir_intrinsic_store_deref);

   /* If the combined writemask is the same as the latest store, we know there
    * is only one store in the combination, so nothing to combine.
    */
   if ((combo->write_mask & nir_intrinsic_write_mask(combo->latest)) ==
       combo->write_mask)
      return;

   state->b.cursor = nir_before_instr(&combo->latest->instr);

   /* Build a new vec, to be used as source for the combined store. As it
    * gets built, remove previous stores that are not needed anymore.
    */
   nir_ssa_scalar comps[NIR_MAX_VEC_COMPONENTS] = {0};
   unsigned num_components = glsl_get_vector_elements(combo->dst->type);
   unsigned bit_size = combo->latest->src[1].ssa->bit_size;
   for (unsigned i = 0; i < num_components; i++) {
      nir_intrinsic_instr *store = combo->stores[i];
      if (combo->write_mask & (1 << i)) {
         assert(store);
         assert(store->src[1].is_ssa);

         /* If store->num_components == 1 then we are in the deref-of-vec case
          * and store->src[1] is a scalar. Otherwise, we're a regular vector
          * store and we have to pick off a component.
          */
         comps[i] = nir_get_ssa_scalar(store->src[1].ssa,
                                       store->num_components == 1 ? 0 : i);

         assert(store->instr.pass_flags > 0);
         if (--store->instr.pass_flags == 0 && store != combo->latest)
            nir_instr_remove(&store->instr);
      } else {
         comps[i] = nir_get_ssa_scalar(nir_ssa_undef(&state->b, 1, bit_size), 0);
      }
   }
   assert(combo->latest->instr.pass_flags == 0);
   nir_ssa_def *vec = nir_vec_scalars(&state->b, comps, num_components);

   /* Fix the latest store with the combined information. */
   nir_intrinsic_instr *store = combo->latest;

   /* In this case, our store is an array deref of a vector so we need to
    * rewrite it to use a deref to the whole vector.
    */
   if (store->num_components == 1) {
      store->num_components = num_components;
      nir_instr_rewrite_src(&store->instr, &store->src[0],
                            nir_src_for_ssa(&combo->dst->dest.ssa));
   }

   assert(store->num_components == num_components);
   nir_intrinsic_set_write_mask(store, combo->write_mask);
   nir_instr_rewrite_src(&store->instr, &store->src[1],
                         nir_src_for_ssa(vec));

   state->progress = true;
}

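/* Flush (i.e. perform and retire) every pending combination that may alias
 * the given deref. Called when another instruction may read or write the
 * memory covered by a combination.
 */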
static void
combine_stores_with_deref(struct combine_stores_state *state,
                          nir_deref_instr *deref)
{
   if (!nir_deref_mode_may_be(deref, state->modes))
      return;

   list_for_each_entry_safe(struct combined_store, combo, &state->pending, link) {
      if (nir_compare_derefs(combo->dst, deref) & nir_derefs_may_alias_bit) {
         combine_stores(state, combo);
         free_combined_store(state, combo);
      }
   }
}

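/* Flush every pending combination whose destination may be in one of the
 * given modes. Used for barriers and other instructions that make memory
 * of a whole variable mode visible.
 */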
static void
combine_stores_with_modes(struct combine_stores_state *state,
                          nir_variable_mode modes)
{
   if ((state->modes & modes) == 0)
      return;

   list_for_each_entry_safe(struct combined_store, combo, &state->pending, link) {
      if (nir_deref_mode_may_be(combo->dst, modes)) {
         combine_stores(state, combo);
         free_combined_store(state, combo);
      }
   }
}

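/* Find the pending combination whose destination is provably equal to the
 * given deref, if any.
 */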
static struct combined_store *
find_matching_combined_store(struct combine_stores_state *state,
                             nir_deref_instr *deref)
{
   list_for_each_entry(struct combined_store, combo, &state->pending, link) {
      if (nir_compare_derefs(combo->dst, deref) & nir_derefs_equal_bit)
         return combo;
   }
   return NULL;
}

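/* Account for a new non-volatile store: add it to the pending combination
 * for its destination (creating one if needed) and drop older stores, or
 * components of older stores, that it overwrites.
 */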
static void
update_combined_store(struct combine_stores_state *state,
                      nir_intrinsic_instr *intrin)
{
   nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
   if (!nir_deref_mode_may_be(dst, state->modes))
      return;

   unsigned vec_mask;
   nir_deref_instr *vec_dst;

   if (glsl_type_is_vector(dst->type)) {
      vec_mask = nir_intrinsic_write_mask(intrin);
      vec_dst = dst;
   } else {
      /* Besides vectors, only direct array derefs of vectors are handled. */
      if (dst->deref_type != nir_deref_type_array ||
          !nir_src_is_const(dst->arr.index) ||
          !glsl_type_is_vector(nir_deref_instr_parent(dst)->type)) {
         combine_stores_with_deref(state, dst);
         return;
      }

      uint64_t index = nir_src_as_uint(dst->arr.index);
      vec_dst = nir_deref_instr_parent(dst);

      if (index >= glsl_get_vector_elements(vec_dst->type)) {
         /* Storing to an invalid index is a no-op. */
         nir_instr_remove(&intrin->instr);
         state->progress = true;
         return;
      }

      vec_mask = 1 << index;
   }

   struct combined_store *combo = find_matching_combined_store(state, vec_dst);
   if (!combo) {
      combo = alloc_combined_store(state);
      combo->dst = vec_dst;
      list_add(&combo->link, &state->pending);
   }

   /* Use pass_flags to reference count the store based on how many
    * components are still used by the combination.
    */
   intrin->instr.pass_flags = util_bitcount(vec_mask);
   combo->latest = intrin;

   /* Update the combined_store, clearing up older overlapping references. */
   combo->write_mask |= vec_mask;
   while (vec_mask) {
      unsigned i = u_bit_scan(&vec_mask);
      nir_intrinsic_instr *prev_store = combo->stores[i];

      if (prev_store) {
         if (--prev_store->instr.pass_flags == 0) {
            nir_instr_remove(&prev_store->instr);
         } else {
            assert(glsl_type_is_vector(
                      nir_src_as_deref(prev_store->src[0])->type));
            nir_component_mask_t prev_mask = nir_intrinsic_write_mask(prev_store);
            nir_intrinsic_set_write_mask(prev_store, prev_mask & ~(1 << i));
         }
         state->progress = true;
      }
      combo->stores[i] = combo->latest;
   }
}

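/* Process a single block, growing pending combinations at each eligible
 * store and flushing them whenever an instruction may observe or clobber
 * the memory they cover.
 */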
static void
combine_stores_block(struct combine_stores_state *state, nir_block *block)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_call) {
         combine_stores_with_modes(state, nir_var_shader_out |
                                          nir_var_shader_temp |
                                          nir_var_function_temp |
                                          nir_var_mem_ssbo |
                                          nir_var_mem_shared |
                                          nir_var_mem_global);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_store_deref:
         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
            nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
            /* When we see a volatile store, we go ahead and combine all
             * previous non-volatile stores which touch that address and
             * specifically don't add the volatile store to the list. This
             * way we guarantee that the volatile store isn't combined with
             * anything and no non-volatile stores are combined across a
             * volatile store.
             */
            combine_stores_with_deref(state, dst);
         } else {
            update_combined_store(state, intrin);
         }
         break;

      case nir_intrinsic_control_barrier:
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier:
         combine_stores_with_modes(state, nir_var_shader_out |
                                          nir_var_mem_ssbo |
                                          nir_var_mem_shared |
                                          nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_buffer:
         combine_stores_with_modes(state, nir_var_mem_ssbo |
                                          nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_shared:
         combine_stores_with_modes(state, nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_tcs_patch:
         combine_stores_with_modes(state, nir_var_shader_out);
         break;

      case nir_intrinsic_scoped_barrier:
         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
            combine_stores_with_modes(state,
                                      nir_intrinsic_memory_modes(intrin));
         }
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         combine_stores_with_modes(state, nir_var_shader_out);
         break;

      case nir_intrinsic_report_ray_intersection:
         combine_stores_with_modes(state, nir_var_mem_ssbo |
                                          nir_var_mem_global |
                                          nir_var_shader_call_data |
                                          nir_var_ray_hit_attrib);
         break;

      case nir_intrinsic_ignore_ray_intersection:
      case nir_intrinsic_terminate_ray:
         combine_stores_with_modes(state, nir_var_mem_ssbo |
                                          nir_var_mem_global |
                                          nir_var_shader_call_data);
         break;

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
         combine_stores_with_deref(state, src);
         break;
      }

      case nir_intrinsic_load_deref_block_intel:
      case nir_intrinsic_store_deref_block_intel: {
         /* Combine all the stores that may alias with the whole variable (or
          * cast).
          */
         nir_deref_instr *operand = nir_src_as_deref(intrin->src[0]);
         while (nir_deref_instr_parent(operand))
            operand = nir_deref_instr_parent(operand);
         assert(operand->deref_type == nir_deref_type_var ||
                operand->deref_type == nir_deref_type_cast);

         combine_stores_with_deref(state, operand);
         break;
      }

      case nir_intrinsic_copy_deref:
      case nir_intrinsic_memcpy_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         combine_stores_with_deref(state, dst);
         combine_stores_with_deref(state, src);
         break;
      }

      case nir_intrinsic_trace_ray:
      case nir_intrinsic_execute_callable:
      case nir_intrinsic_rt_trace_ray:
      case nir_intrinsic_rt_execute_callable: {
         nir_deref_instr *payload =
            nir_src_as_deref(*nir_get_shader_call_payload_src(intrin));
         combine_stores_with_deref(state, payload);
         break;
      }

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         combine_stores_with_deref(state, dst);
         break;
      }

      default:
         break;
      }
   }

   /* At the end of the block, try all the remaining combinations. */
   combine_stores_with_modes(state, state->modes);
}

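/* Run the pass on a single function implementation, preserving whatever
 * metadata is still valid.
 */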
static bool
combine_stores_impl(struct combine_stores_state *state, nir_function_impl *impl)
{
   state->progress = false;
   nir_builder_init(&state->b, impl);

   nir_foreach_block(block, impl)
      combine_stores_block(state, block);

   if (state->progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return state->progress;
}

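/* Entry point. A typical use is a driver's optimization loop, e.g.
 * (illustrative; the mode mask is a per-driver choice):
 *
 *    NIR_PASS(progress, nir, nir_opt_combine_stores, nir_var_mem_ssbo);
 */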
bool
nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes)
{
   void *mem_ctx = ralloc_context(NULL);
   struct combine_stores_state state = {
      .modes = modes,
      .lin_ctx = linear_zalloc_parent(mem_ctx, 0),
   };

   list_inithead(&state.pending);
   list_inithead(&state.freelist);

   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= combine_stores_impl(&state, function->impl);
   }

   ralloc_free(mem_ctx);
   return progress;
}