/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "r600_context.h"
#include "r600_opcodes.h"
#include "util/u_memory.h"
static inline unsigned int r600_bc_get_num_operands(struct r600_bc_alu *alu)
37
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP:
39
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD:
40
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE:
41
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT:
42
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE:
43
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLNE:
44
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL:
45
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX:
46
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MIN:
47
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETE:
48
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETNE:
49
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT:
50
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE:
51
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE:
52
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGT:
53
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETGE:
54
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE:
55
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4:
56
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_DOT4_IEEE:
57
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE:
60
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV:
61
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR:
62
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT:
63
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR:
64
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_TRUNC:
65
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE:
66
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED:
67
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE:
68
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE:
69
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_IEEE:
70
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT:
71
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN:
72
case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS:
76
"Need instruction operand number for 0x%x.\n", alu->inst);
82
int r700_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id);
84
static struct r600_bc_cf *r600_bc_cf(void)
86
struct r600_bc_cf *cf = CALLOC_STRUCT(r600_bc_cf);
90
LIST_INITHEAD(&cf->list);
91
LIST_INITHEAD(&cf->alu);
92
LIST_INITHEAD(&cf->vtx);
93
LIST_INITHEAD(&cf->tex);
97
static struct r600_bc_alu *r600_bc_alu(void)
99
struct r600_bc_alu *alu = CALLOC_STRUCT(r600_bc_alu);
103
LIST_INITHEAD(&alu->list);
104
LIST_INITHEAD(&alu->bs_list);
108
static struct r600_bc_vtx *r600_bc_vtx(void)
110
struct r600_bc_vtx *vtx = CALLOC_STRUCT(r600_bc_vtx);
114
LIST_INITHEAD(&vtx->list);
118
static struct r600_bc_tex *r600_bc_tex(void)
120
struct r600_bc_tex *tex = CALLOC_STRUCT(r600_bc_tex);
124
LIST_INITHEAD(&tex->list);
128
int r600_bc_init(struct r600_bc *bc, enum radeon_family family)
130
LIST_INITHEAD(&bc->cf);
132
switch (bc->family) {
150
R600_ERR("unknown family %d\n", bc->family);
156
static int r600_bc_add_cf(struct r600_bc *bc)
158
struct r600_bc_cf *cf = r600_bc_cf();
162
LIST_ADDTAIL(&cf->list, &bc->cf);
164
cf->id = bc->cf_last->id + 2;
168
bc->force_add_cf = 0;
172
int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output)
176
r = r600_bc_add_cf(bc);
179
bc->cf_last->inst = output->inst;
180
memcpy(&bc->cf_last->output, output, sizeof(struct r600_bc_output));
184
const unsigned bank_swizzle_vec[8] = {SQ_ALU_VEC_210, //000
185
SQ_ALU_VEC_120, //001
186
SQ_ALU_VEC_102, //010
188
SQ_ALU_VEC_201, //011
189
SQ_ALU_VEC_012, //100
190
SQ_ALU_VEC_021, //101
192
SQ_ALU_VEC_012, //110
193
SQ_ALU_VEC_012}; //111
195
const unsigned bank_swizzle_scl[8] = {SQ_ALU_SCL_210, //000
196
SQ_ALU_SCL_122, //001
197
SQ_ALU_SCL_122, //010
199
SQ_ALU_SCL_221, //011
200
SQ_ALU_SCL_212, //100
201
SQ_ALU_SCL_122, //101
203
SQ_ALU_SCL_122, //110
204
SQ_ALU_SCL_122}; //111
206
static int init_gpr(struct r600_bc_alu *alu)
208
int cycle, component;
210
for (cycle = 0; cycle < NUM_OF_CYCLES; cycle++)
211
for (component = 0; component < NUM_OF_COMPONENTS; component++)
212
alu->hw_gpr[cycle][component] = -1;
216
static int reserve_gpr(struct r600_bc_alu *alu, unsigned sel, unsigned chan, unsigned cycle)
218
if (alu->hw_gpr[cycle][chan] < 0)
219
alu->hw_gpr[cycle][chan] = sel;
220
else if (alu->hw_gpr[cycle][chan] != (int)sel) {
221
R600_ERR("Another scalar operation has already used GPR read port for channel\n");
227
static int cycle_for_scalar_bank_swizzle(const int swiz, const int sel, unsigned *p_cycle)
233
table[0] = 2; table[1] = 1; table[2] = 0;
234
*p_cycle = table[sel];
237
table[0] = 1; table[1] = 2; table[2] = 2;
238
*p_cycle = table[sel];
241
table[0] = 2; table[1] = 1; table[2] = 2;
242
*p_cycle = table[sel];
245
table[0] = 2; table[1] = 2; table[2] = 1;
246
*p_cycle = table[sel];
250
R600_ERR("bad scalar bank swizzle value\n");
257
static int cycle_for_vector_bank_swizzle(const int swiz, const int sel, unsigned *p_cycle)
264
table[0] = 0; table[1] = 1; table[2] = 2;
265
*p_cycle = table[sel];
268
table[0] = 0; table[1] = 2; table[2] = 1;
269
*p_cycle = table[sel];
272
table[0] = 1; table[1] = 2; table[2] = 0;
273
*p_cycle = table[sel];
276
table[0] = 1; table[1] = 0; table[2] = 2;
277
*p_cycle = table[sel];
280
table[0] = 2; table[1] = 0; table[2] = 1;
281
*p_cycle = table[sel];
284
table[0] = 2; table[1] = 1; table[2] = 0;
285
*p_cycle = table[sel];
288
R600_ERR("bad vector bank swizzle value\n");
295
static int is_const(int sel)
297
if (sel > 255 && sel < 512)
299
if (sel >= V_SQ_ALU_SRC_0 && sel <= V_SQ_ALU_SRC_LITERAL)
304
static void update_chan_counter(struct r600_bc_alu *alu, int *chan_counter)
310
num_src = r600_bc_get_num_operands(alu);
312
for (i = 0; i < num_src; i++) {
313
channel_swizzle = alu->src[i].chan;
314
if ((alu->src[i].sel > 0 && alu->src[i].sel < 128) && channel_swizzle <= 3)
315
chan_counter[channel_swizzle]++;
320
/* we need something like this I think - but this is bogus */
321
int check_read_slots(struct r600_bc *bc, struct r600_bc_alu *alu_first)
323
struct r600_bc_alu *alu;
324
int chan_counter[4] = { 0 };
326
update_chan_counter(alu_first, chan_counter);
328
LIST_FOR_EACH_ENTRY(alu, &alu_first->bs_list, bs_list) {
329
update_chan_counter(alu, chan_counter);
332
if (chan_counter[0] > 3 ||
333
chan_counter[1] > 3 ||
334
chan_counter[2] > 3 ||
335
chan_counter[3] > 3) {
336
R600_ERR("needed to split instruction for input ran out of banks %x %d %d %d %d\n",
337
alu_first->inst, chan_counter[0], chan_counter[1], chan_counter[2], chan_counter[3]);
344
static int check_scalar(struct r600_bc *bc, struct r600_bc_alu *alu)
346
unsigned swizzle_key;
348
swizzle_key = (is_const(alu->src[0].sel) ? 4 : 0 ) +
349
(is_const(alu->src[1].sel) ? 2 : 0 ) +
350
(is_const(alu->src[2].sel) ? 1 : 0 );
352
alu->bank_swizzle = bank_swizzle_scl[swizzle_key];
356
static int check_vector(struct r600_bc *bc, struct r600_bc_alu *alu)
358
unsigned swizzle_key;
360
swizzle_key = (is_const(alu->src[0].sel) ? 4 : 0 ) +
361
(is_const(alu->src[1].sel) ? 2 : 0 ) +
362
(is_const(alu->src[2].sel) ? 1 : 0 );
364
alu->bank_swizzle = bank_swizzle_vec[swizzle_key];
368
static int check_and_set_bank_swizzle(struct r600_bc *bc, struct r600_bc_alu *alu_first)
370
struct r600_bc_alu *alu;
375
LIST_FOR_EACH_ENTRY(alu, &alu_first->bs_list, bs_list) {
379
if (num_instr == 1) {
380
check_scalar(bc, alu_first);
383
/* check_read_slots(bc, bc->cf_last->curr_bs_head);*/
384
check_vector(bc, alu_first);
385
LIST_FOR_EACH_ENTRY(alu, &alu_first->bs_list, bs_list) {
386
check_vector(bc, alu);
392
int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type)
394
struct r600_bc_alu *nalu = r600_bc_alu();
395
struct r600_bc_alu *lalu;
396
struct r600_bc_alu *curr_bs_head;
401
memcpy(nalu, alu, sizeof(struct r600_bc_alu));
404
/* cf can contains only alu or only vtx or only tex */
405
if (bc->cf_last == NULL || bc->cf_last->inst != (type << 3) ||
407
/* at most 128 slots, one add alu can add 4 slots + 4 constant worst case */
408
r = r600_bc_add_cf(bc);
413
bc->cf_last->inst = (type << 3);
415
if (!bc->cf_last->curr_bs_head) {
416
bc->cf_last->curr_bs_head = nalu;
417
LIST_INITHEAD(&nalu->bs_list);
419
LIST_ADDTAIL(&nalu->bs_list, &bc->cf_last->curr_bs_head->bs_list);
421
if (alu->last && (bc->cf_last->ndw >> 1) >= 124) {
422
bc->force_add_cf = 1;
424
/* number of gpr == the last gpr used in any alu */
425
for (i = 0; i < 3; i++) {
426
if (alu->src[i].sel >= bc->ngpr && alu->src[i].sel < 128) {
427
bc->ngpr = alu->src[i].sel + 1;
429
/* compute how many literal are needed
430
* either 2 or 4 literals
432
if (alu->src[i].sel == 253) {
433
if (((alu->src[i].chan + 2) & 0x6) > nalu->nliteral) {
434
nalu->nliteral = (alu->src[i].chan + 2) & 0x6;
438
if (!LIST_IS_EMPTY(&bc->cf_last->alu)) {
439
lalu = LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list);
440
if (!lalu->last && lalu->nliteral > nalu->nliteral) {
441
nalu->nliteral = lalu->nliteral;
444
if (alu->dst.sel >= bc->ngpr) {
445
bc->ngpr = alu->dst.sel + 1;
447
LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
448
/* each alu use 2 dwords */
449
bc->cf_last->ndw += 2;
452
if (bc->use_mem_constant)
453
bc->cf_last->kcache0_mode = 2;
455
/* process cur ALU instructions for bank swizzle */
457
check_and_set_bank_swizzle(bc, bc->cf_last->curr_bs_head);
458
bc->cf_last->curr_bs_head = NULL;
463
int r600_bc_add_alu(struct r600_bc *bc, const struct r600_bc_alu *alu)
465
return r600_bc_add_alu_type(bc, alu, V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU);
468
int r600_bc_add_literal(struct r600_bc *bc, const u32 *value)
470
struct r600_bc_alu *alu;
472
if (bc->cf_last == NULL) {
475
if (bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_TEX) {
478
if (bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_JUMP ||
479
bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_ELSE ||
480
bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL ||
481
bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK ||
482
bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE ||
483
bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END ||
484
bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_POP) {
487
if (((bc->cf_last->inst != (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3)) &&
488
(bc->cf_last->inst != (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3))) ||
489
LIST_IS_EMPTY(&bc->cf_last->alu)) {
490
R600_ERR("last CF is not ALU (%p)\n", bc->cf_last);
493
alu = LIST_ENTRY(struct r600_bc_alu, bc->cf_last->alu.prev, list);
494
if (!alu->last || !alu->nliteral || alu->literal_added) {
497
memcpy(alu->value, value, 4 * 4);
498
bc->cf_last->ndw += alu->nliteral;
499
bc->ndw += alu->nliteral;
500
alu->literal_added = 1;
504
int r600_bc_add_vtx(struct r600_bc *bc, const struct r600_bc_vtx *vtx)
506
struct r600_bc_vtx *nvtx = r600_bc_vtx();
511
memcpy(nvtx, vtx, sizeof(struct r600_bc_vtx));
513
/* cf can contains only alu or only vtx or only tex */
514
if (bc->cf_last == NULL ||
515
(bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX &&
516
bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC)) {
517
r = r600_bc_add_cf(bc);
522
bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_VTX;
524
LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
525
/* each fetch use 4 dwords */
526
bc->cf_last->ndw += 4;
531
int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex)
533
struct r600_bc_tex *ntex = r600_bc_tex();
538
memcpy(ntex, tex, sizeof(struct r600_bc_tex));
540
/* cf can contains only alu or only vtx or only tex */
541
if (bc->cf_last == NULL ||
542
bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX) {
543
r = r600_bc_add_cf(bc);
548
bc->cf_last->inst = V_SQ_CF_WORD1_SQ_CF_INST_TEX;
550
LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
551
/* each texture fetch use 4 dwords */
552
bc->cf_last->ndw += 4;
557
int r600_bc_add_cfinst(struct r600_bc *bc, int inst)
560
r = r600_bc_add_cf(bc);
564
bc->cf_last->cond = V_SQ_CF_COND_ACTIVE;
565
bc->cf_last->inst = inst;
569
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
571
bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id) |
572
S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
573
S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
574
S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
575
bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
576
S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
577
S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
578
S_SQ_VTX_WORD1_DST_SEL_W(vtx->dst_sel_w) |
579
S_SQ_VTX_WORD1_USE_CONST_FIELDS(1) |
580
S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
581
bc->bytecode[id++] = S_SQ_VTX_WORD2_MEGA_FETCH(1);
582
bc->bytecode[id++] = 0;
586
static int r600_bc_tex_build(struct r600_bc *bc, struct r600_bc_tex *tex, unsigned id)
588
bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(tex->inst) |
589
S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
590
S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
591
S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
592
bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
593
S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
594
S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
595
S_SQ_TEX_WORD1_DST_SEL_Y(tex->dst_sel_y) |
596
S_SQ_TEX_WORD1_DST_SEL_Z(tex->dst_sel_z) |
597
S_SQ_TEX_WORD1_DST_SEL_W(tex->dst_sel_w) |
598
S_SQ_TEX_WORD1_LOD_BIAS(tex->lod_bias) |
599
S_SQ_TEX_WORD1_COORD_TYPE_X(tex->coord_type_x) |
600
S_SQ_TEX_WORD1_COORD_TYPE_Y(tex->coord_type_y) |
601
S_SQ_TEX_WORD1_COORD_TYPE_Z(tex->coord_type_z) |
602
S_SQ_TEX_WORD1_COORD_TYPE_W(tex->coord_type_w);
603
bc->bytecode[id++] = S_SQ_TEX_WORD2_OFFSET_X(tex->offset_x) |
604
S_SQ_TEX_WORD2_OFFSET_Y(tex->offset_y) |
605
S_SQ_TEX_WORD2_OFFSET_Z(tex->offset_z) |
606
S_SQ_TEX_WORD2_SAMPLER_ID(tex->sampler_id) |
607
S_SQ_TEX_WORD2_SRC_SEL_X(tex->src_sel_x) |
608
S_SQ_TEX_WORD2_SRC_SEL_Y(tex->src_sel_y) |
609
S_SQ_TEX_WORD2_SRC_SEL_Z(tex->src_sel_z) |
610
S_SQ_TEX_WORD2_SRC_SEL_W(tex->src_sel_w);
611
bc->bytecode[id++] = 0;
615
static int r600_bc_alu_build(struct r600_bc *bc, struct r600_bc_alu *alu, unsigned id)
619
/* don't replace gpr by pv or ps for destination register */
620
bc->bytecode[id++] = S_SQ_ALU_WORD0_SRC0_SEL(alu->src[0].sel) |
621
S_SQ_ALU_WORD0_SRC0_REL(alu->src[0].rel) |
622
S_SQ_ALU_WORD0_SRC0_CHAN(alu->src[0].chan) |
623
S_SQ_ALU_WORD0_SRC0_NEG(alu->src[0].neg) |
624
S_SQ_ALU_WORD0_SRC1_SEL(alu->src[1].sel) |
625
S_SQ_ALU_WORD0_SRC1_REL(alu->src[1].rel) |
626
S_SQ_ALU_WORD0_SRC1_CHAN(alu->src[1].chan) |
627
S_SQ_ALU_WORD0_SRC1_NEG(alu->src[1].neg) |
628
S_SQ_ALU_WORD0_LAST(alu->last);
631
bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
632
S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
633
S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
634
S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
635
S_SQ_ALU_WORD1_OP3_SRC2_SEL(alu->src[2].sel) |
636
S_SQ_ALU_WORD1_OP3_SRC2_REL(alu->src[2].rel) |
637
S_SQ_ALU_WORD1_OP3_SRC2_CHAN(alu->src[2].chan) |
638
S_SQ_ALU_WORD1_OP3_SRC2_NEG(alu->src[2].neg) |
639
S_SQ_ALU_WORD1_OP3_ALU_INST(alu->inst) |
640
S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle);
642
bc->bytecode[id++] = S_SQ_ALU_WORD1_DST_GPR(alu->dst.sel) |
643
S_SQ_ALU_WORD1_DST_CHAN(alu->dst.chan) |
644
S_SQ_ALU_WORD1_DST_REL(alu->dst.rel) |
645
S_SQ_ALU_WORD1_CLAMP(alu->dst.clamp) |
646
S_SQ_ALU_WORD1_OP2_SRC0_ABS(alu->src[0].abs) |
647
S_SQ_ALU_WORD1_OP2_SRC1_ABS(alu->src[1].abs) |
648
S_SQ_ALU_WORD1_OP2_WRITE_MASK(alu->dst.write) |
649
S_SQ_ALU_WORD1_OP2_ALU_INST(alu->inst) |
650
S_SQ_ALU_WORD1_BANK_SWIZZLE(alu->bank_swizzle) |
651
S_SQ_ALU_WORD1_OP2_UPDATE_EXECUTE_MASK(alu->predicate) |
652
S_SQ_ALU_WORD1_OP2_UPDATE_PRED(alu->predicate);
655
if (alu->nliteral && !alu->literal_added) {
656
R600_ERR("Bug in ALU processing for instruction 0x%08x, literal not added correctly\n", alu->inst);
658
for (i = 0; i < alu->nliteral; i++) {
659
bc->bytecode[id++] = alu->value[i];
665
static int r600_bc_cf_build(struct r600_bc *bc, struct r600_bc_cf *cf)
667
unsigned id = cf->id;
670
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
671
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
672
bc->bytecode[id++] = S_SQ_CF_ALU_WORD0_ADDR(cf->addr >> 1) |
673
S_SQ_CF_ALU_WORD0_KCACHE_MODE0(cf->kcache0_mode);
675
bc->bytecode[id++] = S_SQ_CF_ALU_WORD1_CF_INST(cf->inst >> 3) |
676
S_SQ_CF_ALU_WORD1_BARRIER(1) |
677
S_SQ_CF_ALU_WORD1_COUNT((cf->ndw / 2) - 1);
679
case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
680
case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
681
case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
682
bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->addr >> 1);
683
bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
684
S_SQ_CF_WORD1_BARRIER(1) |
685
S_SQ_CF_WORD1_COUNT((cf->ndw / 4) - 1);
687
case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
688
case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
689
bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
690
S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
691
S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
692
S_SQ_CF_ALLOC_EXPORT_WORD0_TYPE(cf->output.type);
693
bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_X(cf->output.swizzle_x) |
694
S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Y(cf->output.swizzle_y) |
695
S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_Z(cf->output.swizzle_z) |
696
S_SQ_CF_ALLOC_EXPORT_WORD1_SWIZ_SEL_W(cf->output.swizzle_w) |
697
S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->output.barrier) |
698
S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(cf->output.inst) |
699
S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf->output.end_of_program);
701
case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
702
case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
703
case V_SQ_CF_WORD1_SQ_CF_INST_POP:
704
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
705
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
706
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
707
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
708
bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
709
bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
710
S_SQ_CF_WORD1_BARRIER(1) |
711
S_SQ_CF_WORD1_COND(cf->cond) |
712
S_SQ_CF_WORD1_POP_COUNT(cf->pop_count);
716
R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
722
int r600_bc_build(struct r600_bc *bc)
724
struct r600_bc_cf *cf;
725
struct r600_bc_alu *alu;
726
struct r600_bc_vtx *vtx;
727
struct r600_bc_tex *tex;
731
if (bc->callstack[0].max > 0)
732
bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
734
/* first path compute addr of each CF block */
735
/* addr start after all the CF instructions */
736
addr = bc->cf_last->id + 2;
737
LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
739
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
740
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
742
case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
743
case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
744
case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
745
/* fetch node need to be 16 bytes aligned*/
747
addr &= 0xFFFFFFFCUL;
749
case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
750
case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
752
case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
753
case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
754
case V_SQ_CF_WORD1_SQ_CF_INST_POP:
755
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
756
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
757
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
758
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
761
R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
766
bc->ndw = cf->addr + cf->ndw;
769
bc->bytecode = calloc(1, bc->ndw * 4);
770
if (bc->bytecode == NULL)
772
LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
774
r = r600_bc_cf_build(bc, cf);
778
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU << 3):
779
case (V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE << 3):
780
LIST_FOR_EACH_ENTRY(alu, &cf->alu, list) {
781
switch(bc->chiprev) {
783
r = r600_bc_alu_build(bc, alu, addr);
786
r = r700_bc_alu_build(bc, alu, addr);
789
R600_ERR("unknown family %d\n", bc->family);
796
addr += alu->nliteral;
800
case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
801
case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
802
LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
803
r = r600_bc_vtx_build(bc, vtx, addr);
809
case V_SQ_CF_WORD1_SQ_CF_INST_TEX:
810
LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) {
811
r = r600_bc_tex_build(bc, tex, addr);
817
case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT:
818
case V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE:
819
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL:
820
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
821
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
822
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
823
case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
824
case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
825
case V_SQ_CF_WORD1_SQ_CF_INST_POP:
828
R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);