~mmach/netext73/mesa-haswell

Viewing changes to src/broadcom/compiler/v3d_nir_lower_robust_buffer_access.c

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i

/*
 * Copyright © 2020 Raspberry Pi Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/v3d_compiler.h"
#include "compiler/nir/nir_builder.h"

static void
rewrite_offset(nir_builder *b,
               nir_intrinsic_instr *instr,
               uint32_t buffer_idx,
               uint32_t offset_src,
               nir_intrinsic_op buffer_size_op)
{
        b->cursor = nir_before_instr(&instr->instr);

        /* Get size of the buffer */
        nir_intrinsic_instr *size =
                nir_intrinsic_instr_create(b->shader, buffer_size_op);
        size->src[0] = nir_src_for_ssa(nir_imm_int(b, buffer_idx));
        nir_ssa_dest_init(&size->instr, &size->dest, 1, 32, NULL);
        nir_builder_instr_insert(b, &size->instr);

        /* All our TMU accesses are 32-bit aligned */
        nir_ssa_def *aligned_buffer_size =
                nir_iand(b, &size->dest.ssa, nir_imm_int(b, 0xfffffffc));

        /* Rewrite offset */
        nir_ssa_def *offset =
                nir_umin(b, instr->src[offset_src].ssa, aligned_buffer_size);
        nir_instr_rewrite_src(&instr->instr, &instr->src[offset_src],
                              nir_src_for_ssa(offset));
}
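
/*
 * Illustration only (not part of the lowering itself): the NIR that
 * rewrite_offset() builds is equivalent to the scalar clamp below, where
 * buffer_size is the value returned at run time by the get_ubo_size or
 * get_ssbo_size intrinsic.  The helper name is hypothetical; it only
 * demonstrates the intended semantics: any offset is clamped into the
 * 32-bit-aligned extent of the buffer, so it can never index past it.
 */
static inline uint32_t
clamped_robust_offset(uint32_t offset, uint32_t buffer_size)
{
        uint32_t aligned_size = buffer_size & 0xfffffffc;
        return offset < aligned_size ? offset : aligned_size; /* umin */
}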

static void
lower_load(struct v3d_compile *c,
           nir_builder *b,
           nir_intrinsic_instr *instr)
{
        uint32_t index = nir_src_comp_as_uint(instr->src[0], 0);

        nir_intrinsic_op op;
        if (instr->intrinsic == nir_intrinsic_load_ubo) {
                op = nir_intrinsic_get_ubo_size;
                if (c->key->environment == V3D_ENVIRONMENT_VULKAN)
                        index--;
        } else {
                op = nir_intrinsic_get_ssbo_size;
        }

        rewrite_offset(b, instr, index, 1, op);
}

static void
lower_store(struct v3d_compile *c,
            nir_builder *b,
            nir_intrinsic_instr *instr)
{
        uint32_t index = nir_src_comp_as_uint(instr->src[1], 0);
        rewrite_offset(b, instr, index, 2, nir_intrinsic_get_ssbo_size);
}

static void
lower_atomic(struct v3d_compile *c,
             nir_builder *b,
             nir_intrinsic_instr *instr)
{
        uint32_t index = nir_src_comp_as_uint(instr->src[0], 0);
        rewrite_offset(b, instr, index, 1, nir_intrinsic_get_ssbo_size);
}

static void
lower_shared(struct v3d_compile *c,
             nir_builder *b,
             nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);
        nir_ssa_def *aligned_size =
                nir_imm_int(b, c->s->info.shared_size & 0xfffffffc);
        nir_ssa_def *offset = nir_umin(b, instr->src[0].ssa, aligned_size);
        nir_instr_rewrite_src(&instr->instr, &instr->src[0],
                              nir_src_for_ssa(offset));
}

static void
lower_instr(struct v3d_compile *c, nir_builder *b, struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_ssbo:
                lower_load(c, b, intr);
                break;
        case nir_intrinsic_store_ssbo:
                lower_store(c, b, intr);
                break;
        case nir_intrinsic_ssbo_atomic_add:
        case nir_intrinsic_ssbo_atomic_imin:
        case nir_intrinsic_ssbo_atomic_umin:
        case nir_intrinsic_ssbo_atomic_imax:
        case nir_intrinsic_ssbo_atomic_umax:
        case nir_intrinsic_ssbo_atomic_and:
        case nir_intrinsic_ssbo_atomic_or:
        case nir_intrinsic_ssbo_atomic_xor:
        case nir_intrinsic_ssbo_atomic_exchange:
        case nir_intrinsic_ssbo_atomic_comp_swap:
                lower_atomic(c, b, intr);
                break;
        case nir_intrinsic_load_shared:
        case nir_intrinsic_shared_atomic_add:
        case nir_intrinsic_shared_atomic_imin:
        case nir_intrinsic_shared_atomic_umin:
        case nir_intrinsic_shared_atomic_imax:
        case nir_intrinsic_shared_atomic_umax:
        case nir_intrinsic_shared_atomic_and:
        case nir_intrinsic_shared_atomic_or:
        case nir_intrinsic_shared_atomic_xor:
        case nir_intrinsic_shared_atomic_exchange:
        case nir_intrinsic_shared_atomic_comp_swap:
                lower_shared(c, b, intr);
                break;
        default:
                break;
        }
}

void
v3d_nir_lower_robust_buffer_access(nir_shader *s, struct v3d_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl) {
                        nir_builder b;
                        nir_builder_init(&b, function->impl);

                        nir_foreach_block(block, function->impl) {
                                nir_foreach_instr_safe(instr, block)
                                        lower_instr(c, &b, instr);
                        }

                        nir_metadata_preserve(function->impl,
                                              nir_metadata_block_index |
                                              nir_metadata_dominance);
                }
        }
}
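
/*
 * For context (not part of this change): a pass with this signature would
 * typically be run from the driver's NIR lowering sequence when robust
 * buffer access is requested, e.g. via the standard NIR_PASS_V helper.
 * The call site and the robust_buffer_access key flag below are
 * assumptions, shown only as a usage sketch:
 *
 *         if (c->key->robust_buffer_access)
 *                 NIR_PASS_V(c->s, v3d_nir_lower_robust_buffer_access, c);
 */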