/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"

static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);

struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			nsamples;
	u32			cb_color_base_last[12];
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];
	struct radeon_bo	*cb_color_cmask_bo[8];
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch_idx[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_dim_idx[12];
	u32			cb_color_dim[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_cmask_slice[8];
	u32			cb_color_fmask_slice[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_z_info;
	u32			db_z_idx;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_idx;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
};

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
}

static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	/* XXX fixme */
	return 0;
}

static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;

	/* we don't support stream out buffer yet */
	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}

	/* XXX fixme */
	return 0;
}

/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
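	/* a CP packet header carries the packet type in bits [31:30] and
	 * the payload size minus one in bits [29:16] */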
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
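	/* step past the NOP packet: one header dword plus count + 1
	 * payload dwords */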
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:	parser structure holding parsing context.
 *
 * Check if the next packet is a relocation packet3 nop; this does not
 * advance the parser index.
 **/
static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return 0;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 **/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;
	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
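	/* the crtc_id lives in the reloc NOP payload: from the PACKET0
	 * header at h_idx, +2 skips the VLINE_START_END write, +7 skips
	 * the WAIT_REG_MEM packet and +1 skips the NOP header */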
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r)
			return r;
	}
	return 0;
}

/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 **/
static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
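	/* each word of evergreen_reg_safe_bm covers 32 register dwords
	 * (128 bytes of register space): reg >> 7 selects the word and
	 * (reg >> 2) & 31 the bit; a clear bit means the register is
	 * safe and needs no further checking */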
	m = 1 << ((reg >> 2) & 31);
	if (!(evergreen_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case DB_Z_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_info = radeon_get_ib_value(p, idx);
		ib[idx] &= ~Z_ARRAY_MODE(0xf);
		track->db_z_info &= ~Z_ARRAY_MODE(0xf);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
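	/* the base registers below are programmed in units of 256 bytes,
	 * hence the (gpu_offset >> 8) when the reloc is applied */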
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
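	/* MSAA_NUM_SAMPLES is stored as log2 of the sample count */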
	case PA_SC_AA_CONFIG:
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
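	/* CB_COLOR0..7 register blocks are 0x3c bytes apart while the
	 * CB_COLOR8..11 blocks are packed 0x1c apart, hence the two
	 * different index computations (and the +8) below */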
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
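		/* all of the above are 256-byte aligned base addresses that
		 * only need their relocation applied, so they share one
		 * handler */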
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 **/
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
						   struct radeon_bo *texture,
						   struct radeon_bo *mipmap)
{
	/* XXX fill in for evergreen */
	return 0;
}

static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
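		/* idx_value is the dword offset from the start of the config
		 * register range; end_reg computed below is the last register
		 * the packet writes, and both must stay inside the range */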
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p, idx+1+(i*8),
						texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		track->npipes = p->rdev->config.evergreen.tiling_npipes;
		track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
		track->group_size = p->rdev->config.evergreen.tiling_group_size;
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
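	/* optional debug aid: dump the IB that was just validated */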
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}