/*
 * Copyright © 2020 Raspberry Pi Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "compiler/v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
rewrite_offset(nir_builder *b,
29
nir_intrinsic_instr *instr,
32
nir_intrinsic_op buffer_size_op)
34
b->cursor = nir_before_instr(&instr->instr);
36
/* Get size of the buffer */
37
nir_intrinsic_instr *size =
38
nir_intrinsic_instr_create(b->shader, buffer_size_op);
39
size->src[0] = nir_src_for_ssa(nir_imm_int(b, buffer_idx));
40
nir_ssa_dest_init(&size->instr, &size->dest, 1, 32, NULL);
41
nir_builder_instr_insert(b, &size->instr);
43
/* All out TMU accesses are 32-bit aligned */
44
nir_ssa_def *aligned_buffer_size =
45
nir_iand(b, &size->dest.ssa, nir_imm_int(b, 0xfffffffc));
49
nir_umin(b, instr->src[offset_src].ssa, aligned_buffer_size);
50
nir_instr_rewrite_src(&instr->instr, &instr->src[offset_src],
51
nir_src_for_ssa(offset));
55
lower_load(struct v3d_compile *c,
57
nir_intrinsic_instr *instr)
59
uint32_t index = nir_src_comp_as_uint(instr->src[0], 0);
62
if (instr->intrinsic == nir_intrinsic_load_ubo) {
63
op = nir_intrinsic_get_ubo_size;
64
if (c->key->environment == V3D_ENVIRONMENT_VULKAN)
67
op = nir_intrinsic_get_ssbo_size;
70
rewrite_offset(b, instr, index, 1, op);
74
lower_store(struct v3d_compile *c,
76
nir_intrinsic_instr *instr)
78
uint32_t index = nir_src_comp_as_uint(instr->src[1], 0);
79
rewrite_offset(b, instr, index, 2, nir_intrinsic_get_ssbo_size);
83
lower_atomic(struct v3d_compile *c,
85
nir_intrinsic_instr *instr)
87
uint32_t index = nir_src_comp_as_uint(instr->src[0], 0);
88
rewrite_offset(b, instr, index, 1, nir_intrinsic_get_ssbo_size);
92
lower_shared(struct v3d_compile *c,
94
nir_intrinsic_instr *instr)
96
b->cursor = nir_before_instr(&instr->instr);
97
nir_ssa_def *aligned_size =
98
nir_imm_int(b, c->s->info.shared_size & 0xfffffffc);
99
nir_ssa_def *offset = nir_umin(b, instr->src[0].ssa, aligned_size);
100
nir_instr_rewrite_src(&instr->instr, &instr->src[0],
101
nir_src_for_ssa(offset));
105
/* Dispatches a single instruction to the appropriate robust-access lowering
 * helper, based on which buffer intrinsic (if any) it is.
 */
static void
lower_instr(struct v3d_compile *c, nir_builder *b, struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_ssbo:
                lower_load(c, b, intr);
                break;
        case nir_intrinsic_store_ssbo:
                lower_store(c, b, intr);
                break;
        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_ssbo_atomic_comp_swap:
                lower_atomic(c, b, intr);
                break;
        case nir_intrinsic_load_shared:
        case nir_intrinsic_shared_atomic_add:
        case nir_intrinsic_shared_atomic_imin:
        case nir_intrinsic_shared_atomic_umin:
        case nir_intrinsic_shared_atomic_imax:
        case nir_intrinsic_shared_atomic_umax:
        case nir_intrinsic_shared_atomic_and:
        case nir_intrinsic_shared_atomic_or:
        case nir_intrinsic_shared_atomic_xor:
        case nir_intrinsic_shared_atomic_exchange:
        case nir_intrinsic_shared_atomic_comp_swap:
                lower_shared(c, b, intr);
                break;
        default:
                break;
        }
}
/* Lowers UBO/SSBO/shared-memory accesses for robust buffer access by
 * clamping every offset against the corresponding buffer/shared size.
 * Walks every instruction of every function implementation in the shader.
 */
void
v3d_nir_lower_robust_buffer_access(nir_shader *s, struct v3d_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl) {
                        nir_builder b;
                        nir_builder_init(&b, function->impl);

                        /* _safe: lowering inserts instructions around the
                         * one being visited.
                         */
                        nir_foreach_block(block, function->impl) {
                                nir_foreach_instr_safe(instr, block)
                                        lower_instr(c, &b, instr);
                        }

                        /* Only instructions are added/replaced; CFG shape
                         * is untouched, so block index and dominance hold.
                         */
                        nir_metadata_preserve(function->impl,
                                              nir_metadata_block_index |
                                              nir_metadata_dominance);
                }
        }
}