/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val)
{
        /* XXX perf: We should figure out how to merge ALU operations
         * producing the val with this MOV, when possible.
         */
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);
}

static void
vir_TMU_WRITE_or_count(struct v3d_compile *c,
                       enum v3d_qpu_waddr waddr,
                       struct qreg val,
                       uint32_t *tmu_writes)
{
        if (tmu_writes)
                (*tmu_writes)++;
        else
                vir_TMU_WRITE(c, waddr, val);
}
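
/* Emits a NOP carrying the WRTMUC signal so the uniform set up here is fed
 * into the TMU configuration FIFO as the next parameter.
 */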
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
}

static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

/**
 * If 'tmu_writes' is not NULL, then it just counts required register writes,
 * otherwise it emits the actual register writes.
 *
 * It is important to notice that emitting register writes for the current
 * TMU operation may trigger a TMU flush, since it is possible that any
 * of the inputs required for the register writes is the result of a pending
 * TMU operation. If that happens we need to make sure that it doesn't happen
 * in the middle of the TMU register writes for the current TMU operation,
 * which is why we always call ntq_get_src() even if we are only interested in
 * register write counts.
 */
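/* (get_required_tex_tmu_writes() below relies on this: it passes a counter,
 * so sources are still resolved with ntq_get_src() but the TMU register
 * writes are only counted, not emitted.)
 */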
static void
handle_tex_src(struct v3d_compile *c,
               nir_tex_instr *instr,
               unsigned src_idx,
               unsigned non_array_components,
               struct V3D41_TMU_CONFIG_PARAMETER_2 *p2_unpacked,
               struct qreg *s_out,
               unsigned *tmu_writes)
{
        /* Either we are calling this just to count required TMU writes, or we
         * are calling this to emit the actual TMU writes.
         */
        assert(tmu_writes || (s_out && p2_unpacked));

        struct qreg s;
        switch (instr->src[src_idx].src_type) {
        case nir_tex_src_coord:
                /* S triggers the lookup, so save it for the end. */
                s = ntq_get_src(c, instr->src[src_idx].src, 0);

                if (tmu_writes)
                        (*tmu_writes)++;
                else
                        *s_out = s;

                if (non_array_components > 1) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src, 1);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src,
                                               tmu_writes);
                }

                if (non_array_components > 2) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src, 2);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUR, src,
                                               tmu_writes);
                }

                if (instr->is_array) {
                        struct qreg src =
                                ntq_get_src(c, instr->src[src_idx].src,
                                            instr->coord_components - 1);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUI, src,
                                               tmu_writes);
                }
                break;

        case nir_tex_src_bias: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUB, src, tmu_writes);
                break;
        }

        case nir_tex_src_lod: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUB, src, tmu_writes);

                /* With texel fetch automatic LOD is already disabled,
                 * and disable_autolod must not be enabled. For
                 * non-cubes we can use the register TMUSLOD, that
                 * implicitly sets disable_autolod.
                 */
                if (!tmu_writes) {
                        if (instr->op != nir_texop_txf &&
                            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                p2_unpacked->disable_autolod = true;
                        }
                }
                break;
        }

        case nir_tex_src_comparator: {
                struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUDREF, src, tmu_writes);
                break;
        }

        case nir_tex_src_offset: {
                bool is_const_offset = nir_src_is_const(instr->src[src_idx].src);
                if (is_const_offset) {
                        if (!tmu_writes) {
                                p2_unpacked->offset_s =
                                        nir_src_comp_as_int(instr->src[src_idx].src, 0);
                                if (non_array_components >= 2)
                                        p2_unpacked->offset_t =
                                                nir_src_comp_as_int(instr->src[src_idx].src, 1);
                                if (non_array_components >= 3)
                                        p2_unpacked->offset_r =
                                                nir_src_comp_as_int(instr->src[src_idx].src, 2);
                        }
                } else {
                        struct qreg src_0 =
                                ntq_get_src(c, instr->src[src_idx].src, 0);
                        struct qreg src_1 =
                                ntq_get_src(c, instr->src[src_idx].src, 1);
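
                        /* Non-constant offsets are packed into a single
                         * TMUOFF write: 4 bits per dimension, with the T
                         * offset placed above the S offset.
                         */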
                        if (!tmu_writes) {
                                struct qreg mask = vir_uniform_ui(c, 0xf);
                                struct qreg x, y, offset;

                                x = vir_AND(c, src_0, mask);
                                y = vir_AND(c, src_1, mask);
                                offset = vir_OR(c, x,
                                                vir_SHL(c, y, vir_uniform_ui(c, 4)));

                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUOFF, offset);
                        } else {
                                (*tmu_writes)++;
                        }
                }
                break;
        }

        default:
                unreachable("unknown texture source");
        }
}

static void
vir_tex_handle_srcs(struct v3d_compile *c,
                    nir_tex_instr *instr,
                    struct V3D41_TMU_CONFIG_PARAMETER_2 *p2_unpacked,
                    struct qreg *s,
                    unsigned *tmu_writes)
{
        unsigned non_array_components = instr->op != nir_texop_lod ?
                instr->coord_components - instr->is_array :
                instr->coord_components;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                handle_tex_src(c, instr, i, non_array_components,
                               p2_unpacked, s, tmu_writes);
        }
}

static unsigned
get_required_tex_tmu_writes(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned tmu_writes = 0;
        vir_tex_handle_srcs(c, instr, NULL, NULL, &tmu_writes);
        return tmu_writes;
}

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);

        unsigned texture_idx = instr->texture_index;
        unsigned sampler_idx = instr->sampler_index;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        p0_unpacked.return_words_of_texture_data =
                instr->dest.is_ssa ?
                nir_ssa_def_components_read(&instr->dest.ssa) :
                (1 << instr->dest.reg.reg->num_components) - 1;
        assert(p0_unpacked.return_words_of_texture_data != 0);

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,
                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,
                .coefficient_mode = instr->op == nir_texop_txd,
                .disable_autolod = instr->op == nir_texop_tg4
        };

        const unsigned tmu_writes = get_required_tex_tmu_writes(c, instr);

        /* The input FIFO has 16 slots across all threads, so if we require
         * more than that we need to lower the thread count.
         */
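        /* For example, with this 16-slot budget a 4-thread shader can
         * pipeline at most 16 / 4 = 4 register writes per TMU operation, so
         * an operation needing 5 writes drops it to 2 threads (budget 8).
         */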
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        /* If pipelining this TMU operation would overflow TMU fifos, we need
         * to flush any outstanding TMU operations.
         */
        const unsigned dest_components =
                util_bitcount(p0_unpacked.return_words_of_texture_data);
        if (ntq_tmu_fifo_overflow(c, dest_components))
                ntq_flush_tmu(c);

        /* Process tex sources, emitting the corresponding TMU writes. */
        struct qreg s;
        vir_tex_handle_srcs(c, instr, &p2_unpacked, &s, NULL);

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* We manually set the LOD Query bit (see
         * V3D42_TMU_CONFIG_PARAMETER_2) as right now it is the only V42
         * specific feature over V41 that we are using.
         */
        if (instr->op == nir_texop_lod)
                p2_packed |= 1UL << 24;

        /* Load texture_idx number into the high bits of the texture address
         * field, which will be used by the driver to decide which texture to
         * put in the actual address field.
         */
        p0_packed |= texture_idx << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);

        /* Even if the texture operation doesn't need a sampler by
         * itself, we still need to add the sampler configuration
         * parameter if the output is 32 bit.
         */
        bool output_type_32_bit =
                c->key->sampler[sampler_idx].return_size == 32 &&
                !instr->is_shadow;

        /* p1 is optional, but we can skip it only if p2 can be skipped too. */
        bool needs_p2_config =
                (instr->op == nir_texop_lod ||
                 memcmp(&p2_unpacked, &p2_unpacked_default,
                        sizeof(p2_unpacked)) != 0);

        /* To handle the cases where we can't just use p1_unpacked_default. */
        bool non_default_p1_config = nir_tex_instr_need_sampler(instr) ||
                output_type_32_bit;

        if (non_default_p1_config) {
                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                        .output_type_32_bit = output_type_32_bit,

                        .unnormalized_coordinates = (instr->sampler_dim ==
                                                     GLSL_SAMPLER_DIM_RECT),
                };

                /* Word enables can't ask for more channels than the
                 * output type could provide (2 for f16, 4 for
                 * 32-bit).
                 */
                assert(!p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 4));
                assert(p1_unpacked.output_type_32_bit ||
                       p0_unpacked.return_words_of_texture_data < (1 << 2));

                uint32_t p1_packed;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed,
                                                  &p1_unpacked);

                if (nir_tex_instr_need_sampler(instr)) {
                        /* Load sampler_idx number into the high bits of the
                         * sampler address field, which will be used by the
                         * driver to decide which sampler to put in the actual
                         * address field.
                         */
                        p1_packed |= sampler_idx << 24;

                        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
                } else {
                        /* In this case, we don't need to merge in any
                         * sampler state from the API and can just use
                         * our packed bits.
                         */
                        vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
                }
        } else if (needs_p2_config) {
                /* Configuration parameters need to be set up in
                 * order, and if P2 is needed, you need to set up P1
                 * too even if sampler info is not needed by the
                 * texture operation. But we can set up default info,
                 * and avoid asking the driver for the sampler state
                 * address.
                 */
                uint32_t p1_packed_default;
                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                                  (uint8_t *)&p1_packed_default,
                                                  &p1_unpacked_default);
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed_default);
        }

        if (needs_p2_config)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit retiring TMU write */
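        /* Which S register we write selects the operation variant: TMUSF for
         * texel fetch, TMUSCM for cube maps, TMUSLOD for explicit-LOD
         * sampling, and plain TMUS otherwise.
         */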
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s);
        } else if (instr->op == nir_texop_txl) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSLOD, s);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s);
        }

        ntq_add_pending_tmu_flush(c, &instr->dest,
                                  p0_unpacked.return_words_of_texture_data);
}

static uint32_t
v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_load:
        case nir_intrinsic_image_store:
                return V3D_TMU_OP_REGULAR;
        case nir_intrinsic_image_atomic_add:
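                /* v3d_get_op_for_atomic_add() maps constant +1/-1 addends to
                 * the inc/dec TMU ops (see atomic_add_replaced below);
                 * otherwise a regular atomic add op is used.
                 */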
                return v3d_get_op_for_atomic_add(instr, 3);
        case nir_intrinsic_image_atomic_imin:
                return V3D_TMU_OP_WRITE_SMIN;
        case nir_intrinsic_image_atomic_umin:
                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
        case nir_intrinsic_image_atomic_imax:
                return V3D_TMU_OP_WRITE_SMAX;
        case nir_intrinsic_image_atomic_umax:
                return V3D_TMU_OP_WRITE_UMAX;
        case nir_intrinsic_image_atomic_and:
                return V3D_TMU_OP_WRITE_AND_READ_INC;
        case nir_intrinsic_image_atomic_or:
                return V3D_TMU_OP_WRITE_OR_READ_DEC;
        case nir_intrinsic_image_atomic_xor:
                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
        case nir_intrinsic_image_atomic_exchange:
                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
        case nir_intrinsic_image_atomic_comp_swap:
                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
        default:
                unreachable("unknown image intrinsic");
        }
}

/**
 * If 'tmu_writes' is not NULL, then it just counts required register writes,
 * otherwise it emits the actual register writes.
 *
 * It is important to notice that emitting register writes for the current
 * TMU operation may trigger a TMU flush, since it is possible that any
 * of the inputs required for the register writes is the result of a pending
 * TMU operation. If that happens we need to make sure that it doesn't happen
 * in the middle of the TMU register writes for the current TMU operation,
 * which is why we always call ntq_get_src() even if we are only interested in
 * register write counts.
 */
static void
vir_image_emit_register_writes(struct v3d_compile *c,
                               nir_intrinsic_instr *instr,
                               bool atomic_add_replaced,
                               uint32_t *tmu_writes)
{
        if (tmu_writes)
                *tmu_writes = 0;

        bool is_1d = false;
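        /* src[1] holds the image coordinates. The T/R components are written
         * here; the S component is saved for the retiring TMUSF write at the
         * end.
         */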
        switch (nir_intrinsic_image_dim(instr)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
        case GLSL_SAMPLER_DIM_CUBE: {
                struct qreg src = ntq_get_src(c, instr->src[1], 1);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src, tmu_writes);
                break;
        }
        case GLSL_SAMPLER_DIM_3D: {
                struct qreg src_1_1 = ntq_get_src(c, instr->src[1], 1);
                struct qreg src_1_2 = ntq_get_src(c, instr->src[1], 2);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUT, src_1_1, tmu_writes);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUR, src_1_2, tmu_writes);
                break;
        }
        default:
                unreachable("bad image sampler dim");
        }

        /* In order to fetch on a cube map, we need to interpret it as a
         * 2D array, where the third coordinate is the face index.
         */
        if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
            nir_intrinsic_image_array(instr)) {
                struct qreg src = ntq_get_src(c, instr->src[1], is_1d ? 1 : 2);
                vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUI, src, tmu_writes);
        }

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_load &&
            !atomic_add_replaced) {
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        struct qreg src_3_i = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUD, src_3_i,
                                               tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap) {
                        struct qreg src_4_0 = ntq_get_src(c, instr->src[4], 0);
                        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUD, src_4_0,
                                               tmu_writes);
                }
        }
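
        /* For stores and atomics inside non-uniform control flow, predicate
         * the retiring TMUSF write on the execute mask: PUSHZ sets the A flag
         * for active channels and the write below is made conditional on it,
         * so inactive channels don't perform the side effect.
         */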
        struct qreg src_1_0 = ntq_get_src(c, instr->src[1], 0);
        if (!tmu_writes && vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                vir_set_pf(c, vir_MOV_dest(c, vir_nop_reg(), c->execute),
                           V3D_QPU_PF_PUSHZ);
        }

        vir_TMU_WRITE_or_count(c, V3D_QPU_WADDR_TMUSF, src_1_0, tmu_writes);

        if (!tmu_writes && vir_in_nonuniform_control_flow(c) &&
            instr->intrinsic != nir_intrinsic_image_load) {
                struct qinst *last_inst =
                        (struct qinst *)c->cur_block->instructions.prev;
                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
        }
}

static unsigned
get_required_image_tmu_writes(struct v3d_compile *c,
                              nir_intrinsic_instr *instr,
                              bool atomic_add_replaced)
{
        unsigned tmu_writes;
        vir_image_emit_register_writes(c, instr, atomic_add_replaced,
                                       &tmu_writes);
        return tmu_writes;
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        unsigned format = nir_intrinsic_format(instr);
        unsigned unit = nir_src_as_uint(instr->src[0]);

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
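        /* With a 16-bit output type each return word holds two channels, so
         * only half as many return words are needed.
         */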
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);

        /* If we were able to replace atomic_add with an inc/dec op, then we
         * can do things slightly differently, like not loading the amount to
         * add/sub, as that is implicit.
         */
        bool atomic_add_replaced =
                (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        if (instr->intrinsic != nir_intrinsic_image_load)
                c->tmu_dirty_rcl = true;

        const uint32_t tmu_writes =
                get_required_image_tmu_writes(c, instr, atomic_add_replaced);

        /* The input FIFO has 16 slots across all threads, so if we require
         * more than that we need to lower the thread count.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        /* If pipelining this TMU operation would overflow TMU fifos, we need
         * to flush any outstanding TMU operations.
         */
        if (ntq_tmu_fifo_overflow(c, instr_return_channels))
                ntq_flush_tmu(c);
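
        /* Emit the configuration WRTMUCs. P1 and P2 can be skipped when they
         * match the defaults; P0 is always required.
         */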
        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)))
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)))
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL);

        ntq_add_pending_tmu_flush(c, &instr->dest,
                                  p0_unpacked.return_words_of_texture_data);
}