~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

« back to all changes in this revision

Viewing changes to drivers/gpu/drm/radeon/ni.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

Show diffs side-by-side

added added

removed removed

Lines of Context:
31
31
#include "nid.h"
32
32
#include "atom.h"
33
33
#include "ni_reg.h"
 
34
#include "cayman_blit_shaders.h"
 
35
 
 
36
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
 
37
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
 
38
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
 
39
extern void evergreen_mc_program(struct radeon_device *rdev);
 
40
extern void evergreen_irq_suspend(struct radeon_device *rdev);
 
41
extern int evergreen_mc_init(struct radeon_device *rdev);
34
42
 
35
43
#define EVERGREEN_PFP_UCODE_SIZE 1120
36
44
#define EVERGREEN_PM4_UCODE_SIZE 1376
37
45
#define EVERGREEN_RLC_UCODE_SIZE 768
38
46
#define BTC_MC_UCODE_SIZE 6024
39
47
 
 
48
#define CAYMAN_PFP_UCODE_SIZE 2176
 
49
#define CAYMAN_PM4_UCODE_SIZE 2176
 
50
#define CAYMAN_RLC_UCODE_SIZE 1024
 
51
#define CAYMAN_MC_UCODE_SIZE 6037
 
52
 
40
53
/* Firmware Names */
41
54
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
42
55
MODULE_FIRMWARE("radeon/BARTS_me.bin");
48
61
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
49
62
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
50
63
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
 
64
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
 
65
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
 
66
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
 
67
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
51
68
 
52
69
#define BTC_IO_MC_REGS_SIZE 29
53
70
 
147
164
        {0x0000009f, 0x00916a00}
148
165
};
149
166
 
150
 
int btc_mc_load_microcode(struct radeon_device *rdev)
 
167
static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
 
168
        {0x00000077, 0xff010100},
 
169
        {0x00000078, 0x00000000},
 
170
        {0x00000079, 0x00001434},
 
171
        {0x0000007a, 0xcc08ec08},
 
172
        {0x0000007b, 0x00040000},
 
173
        {0x0000007c, 0x000080c0},
 
174
        {0x0000007d, 0x09000000},
 
175
        {0x0000007e, 0x00210404},
 
176
        {0x00000081, 0x08a8e800},
 
177
        {0x00000082, 0x00030444},
 
178
        {0x00000083, 0x00000000},
 
179
        {0x00000085, 0x00000001},
 
180
        {0x00000086, 0x00000002},
 
181
        {0x00000087, 0x48490000},
 
182
        {0x00000088, 0x20244647},
 
183
        {0x00000089, 0x00000005},
 
184
        {0x0000008b, 0x66030000},
 
185
        {0x0000008c, 0x00006603},
 
186
        {0x0000008d, 0x00000100},
 
187
        {0x0000008f, 0x00001c0a},
 
188
        {0x00000090, 0xff000001},
 
189
        {0x00000094, 0x00101101},
 
190
        {0x00000095, 0x00000fff},
 
191
        {0x00000096, 0x00116fff},
 
192
        {0x00000097, 0x60010000},
 
193
        {0x00000098, 0x10010000},
 
194
        {0x00000099, 0x00006000},
 
195
        {0x0000009a, 0x00001000},
 
196
        {0x0000009f, 0x00976b00}
 
197
};
 
198
 
 
199
int ni_mc_load_microcode(struct radeon_device *rdev)
151
200
{
152
201
        const __be32 *fw_data;
153
202
        u32 mem_type, running, blackout = 0;
154
203
        u32 *io_mc_regs;
155
 
        int i;
 
204
        int i, ucode_size, regs_size;
156
205
 
157
206
        if (!rdev->mc_fw)
158
207
                return -EINVAL;
160
209
        switch (rdev->family) {
161
210
        case CHIP_BARTS:
162
211
                io_mc_regs = (u32 *)&barts_io_mc_regs;
 
212
                ucode_size = BTC_MC_UCODE_SIZE;
 
213
                regs_size = BTC_IO_MC_REGS_SIZE;
163
214
                break;
164
215
        case CHIP_TURKS:
165
216
                io_mc_regs = (u32 *)&turks_io_mc_regs;
 
217
                ucode_size = BTC_MC_UCODE_SIZE;
 
218
                regs_size = BTC_IO_MC_REGS_SIZE;
166
219
                break;
167
220
        case CHIP_CAICOS:
168
221
        default:
169
222
                io_mc_regs = (u32 *)&caicos_io_mc_regs;
 
223
                ucode_size = BTC_MC_UCODE_SIZE;
 
224
                regs_size = BTC_IO_MC_REGS_SIZE;
 
225
                break;
 
226
        case CHIP_CAYMAN:
 
227
                io_mc_regs = (u32 *)&cayman_io_mc_regs;
 
228
                ucode_size = CAYMAN_MC_UCODE_SIZE;
 
229
                regs_size = BTC_IO_MC_REGS_SIZE;
170
230
                break;
171
231
        }
172
232
 
184
244
                WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
185
245
 
186
246
                /* load mc io regs */
187
 
                for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
 
247
                for (i = 0; i < regs_size; i++) {
188
248
                        WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
189
249
                        WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
190
250
                }
191
251
                /* load the MC ucode */
192
252
                fw_data = (const __be32 *)rdev->mc_fw->data;
193
 
                for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
 
253
                for (i = 0; i < ucode_size; i++)
194
254
                        WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
195
255
 
196
256
                /* put the engine back into the active state */
231
291
        case CHIP_BARTS:
232
292
                chip_name = "BARTS";
233
293
                rlc_chip_name = "BTC";
 
294
                pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
 
295
                me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 
296
                rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 
297
                mc_req_size = BTC_MC_UCODE_SIZE * 4;
234
298
                break;
235
299
        case CHIP_TURKS:
236
300
                chip_name = "TURKS";
237
301
                rlc_chip_name = "BTC";
 
302
                pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
 
303
                me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 
304
                rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 
305
                mc_req_size = BTC_MC_UCODE_SIZE * 4;
238
306
                break;
239
307
        case CHIP_CAICOS:
240
308
                chip_name = "CAICOS";
241
309
                rlc_chip_name = "BTC";
 
310
                pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
 
311
                me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
 
312
                rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
 
313
                mc_req_size = BTC_MC_UCODE_SIZE * 4;
 
314
                break;
 
315
        case CHIP_CAYMAN:
 
316
                chip_name = "CAYMAN";
 
317
                rlc_chip_name = "CAYMAN";
 
318
                pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
 
319
                me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
 
320
                rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
 
321
                mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
242
322
                break;
243
323
        default: BUG();
244
324
        }
245
325
 
246
 
        pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
247
 
        me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
248
 
        rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
249
 
        mc_req_size = BTC_MC_UCODE_SIZE * 4;
250
 
 
251
326
        DRM_INFO("Loading %s Microcode\n", chip_name);
252
327
 
253
328
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
314
389
        return err;
315
390
}
316
391
 
 
392
/*
 
393
 * Core functions
 
394
 */
 
395
/*
 * Build the tile-pipe -> render-backend map for Cayman ASICs.
 *
 * @rdev: radeon device
 * @num_tile_pipes: requested number of tile pipes (clamped to hw limits)
 * @num_backends_per_asic: requested number of enabled backends (clamped)
 * @backend_disable_mask_per_asic: in/out per-ASIC backend disable mask;
 *      rewritten if it does not agree with the clamped backend count
 * @num_shader_engines: requested number of shader engines (clamped)
 *
 * Returns a packed 32-bit map with one 4-bit backend index per tile pipe.
 */
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
                                               u32 num_tile_pipes,
                                               u32 num_backends_per_asic,
                                               u32 *backend_disable_mask_per_asic,
                                               u32 num_shader_engines)
{
        u32 backend_map = 0;
        u32 enabled_backends_mask = 0;
        u32 enabled_backends_count = 0;
        u32 num_backends_per_se;
        u32 cur_pipe;
        u32 swizzle_pipe[CAYMAN_MAX_PIPES];
        u32 cur_backend = 0;
        u32 i;
        bool force_no_swizzle;

        /* force legal values */
        if (num_tile_pipes < 1)
                num_tile_pipes = 1;
        if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
                num_tile_pipes = rdev->config.cayman.max_tile_pipes;
        if (num_shader_engines < 1)
                num_shader_engines = 1;
        if (num_shader_engines > rdev->config.cayman.max_shader_engines)
                num_shader_engines = rdev->config.cayman.max_shader_engines;
        if (num_backends_per_asic < num_shader_engines)
                num_backends_per_asic = num_shader_engines;
        if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
                num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;

        /* make sure we have the same number of backends per se */
        num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
        /* set up the number of backends per se */
        num_backends_per_se = num_backends_per_asic / num_shader_engines;
        if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
                num_backends_per_se = rdev->config.cayman.max_backends_per_se;
                num_backends_per_asic = num_backends_per_se * num_shader_engines;
        }

        /* create enable mask and count for enabled backends */
        for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
                if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
                        enabled_backends_mask |= (1 << i);
                        ++enabled_backends_count;
                }
                /* stop once we have the requested number of backends */
                if (enabled_backends_count == num_backends_per_asic)
                        break;
        }

        /* force the backends mask to match the current number of backends */
        if (enabled_backends_count != num_backends_per_asic) {
                u32 this_backend_enabled;
                u32 shader_engine;
                u32 backend_per_se;

                /* rebuild the mask from scratch: enable the first
                 * num_backends_per_se backends of each active shader engine */
                enabled_backends_mask = 0;
                enabled_backends_count = 0;
                *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
                for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
                        /* calc the current se */
                        shader_engine = i / rdev->config.cayman.max_backends_per_se;
                        /* calc the backend per se */
                        backend_per_se = i % rdev->config.cayman.max_backends_per_se;
                        /* default to not enabled */
                        this_backend_enabled = 0;
                        if ((shader_engine < num_shader_engines) &&
                            (backend_per_se < num_backends_per_se))
                                this_backend_enabled = 1;
                        if (this_backend_enabled) {
                                enabled_backends_mask |= (1 << i);
                                *backend_disable_mask_per_asic &= ~(1 << i);
                                ++enabled_backends_count;
                        }
                }
        }

        memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
        switch (rdev->family) {
        case CHIP_CAYMAN:
                force_no_swizzle = true;
                break;
        default:
                force_no_swizzle = false;
                break;
        }
        if (force_no_swizzle) {
                bool last_backend_enabled = false;

                /* only keep no-swizzle if two enabled backends are adjacent */
                force_no_swizzle = false;
                for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
                        if (((enabled_backends_mask >> i) & 1) == 1) {
                                if (last_backend_enabled)
                                        force_no_swizzle = true;
                                last_backend_enabled = true;
                        } else
                                last_backend_enabled = false;
                }
        }

        switch (num_tile_pipes) {
        case 1:
        case 3:
        case 5:
        case 7:
                DRM_ERROR("odd number of pipes!\n");
                break;
        case 2:
                swizzle_pipe[0] = 0;
                swizzle_pipe[1] = 1;
                break;
        case 4:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 1;
                        swizzle_pipe[3] = 3;
                }
                break;
        case 6:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                        swizzle_pipe[4] = 4;
                        swizzle_pipe[5] = 5;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 4;
                        swizzle_pipe[3] = 1;
                        swizzle_pipe[4] = 3;
                        swizzle_pipe[5] = 5;
                }
                break;
        case 8:
                if (force_no_swizzle) {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 1;
                        swizzle_pipe[2] = 2;
                        swizzle_pipe[3] = 3;
                        swizzle_pipe[4] = 4;
                        swizzle_pipe[5] = 5;
                        swizzle_pipe[6] = 6;
                        swizzle_pipe[7] = 7;
                } else {
                        swizzle_pipe[0] = 0;
                        swizzle_pipe[1] = 2;
                        swizzle_pipe[2] = 4;
                        swizzle_pipe[3] = 6;
                        swizzle_pipe[4] = 1;
                        swizzle_pipe[5] = 3;
                        swizzle_pipe[6] = 5;
                        swizzle_pipe[7] = 7;
                }
                break;
        }

        /* assign the next enabled backend (round-robin) to each tile pipe,
         * writing its index into the pipe's 4-bit field of the map */
        for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
                while (((1 << cur_backend) & enabled_backends_mask) == 0)
                        cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

                backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

                cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
        }

        return backend_map;
}
 
570
 
 
571
static void cayman_program_channel_remap(struct radeon_device *rdev)
 
572
{
 
573
        u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
 
574
 
 
575
        tmp = RREG32(MC_SHARED_CHMAP);
 
576
        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
 
577
        case 0:
 
578
        case 1:
 
579
        case 2:
 
580
        case 3:
 
581
        default:
 
582
                /* default mapping */
 
583
                mc_shared_chremap = 0x00fac688;
 
584
                break;
 
585
        }
 
586
 
 
587
        switch (rdev->family) {
 
588
        case CHIP_CAYMAN:
 
589
        default:
 
590
                //tcp_chan_steer_lo = 0x54763210
 
591
                tcp_chan_steer_lo = 0x76543210;
 
592
                tcp_chan_steer_hi = 0x0000ba98;
 
593
                break;
 
594
        }
 
595
 
 
596
        WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
 
597
        WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
 
598
        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
 
599
}
 
600
 
 
601
/*
 * Expand a per-shader-engine backend disable mask into a per-ASIC mask.
 * With one engine the clamped mask is used as-is; with two engines it is
 * replicated, shifted by the field width; anything else disables all.
 */
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
                                            u32 disable_mask_per_se,
                                            u32 max_disable_mask_per_se,
                                            u32 num_shader_engines)
{
        u32 field_width = r600_count_pipe_bits(disable_mask_per_se);
        u32 mask = disable_mask_per_se & max_disable_mask_per_se;

        switch (num_shader_engines) {
        case 1:
                return mask;
        case 2:
                return mask | (mask << field_width);
        default:
                return 0xffffffff;
        }
}
 
616
 
 
617
static void cayman_gpu_init(struct radeon_device *rdev)
 
618
{
 
619
        u32 cc_rb_backend_disable = 0;
 
620
        u32 cc_gc_shader_pipe_config;
 
621
        u32 gb_addr_config = 0;
 
622
        u32 mc_shared_chmap, mc_arb_ramcfg;
 
623
        u32 gb_backend_map;
 
624
        u32 cgts_tcc_disable;
 
625
        u32 sx_debug_1;
 
626
        u32 smx_dc_ctl0;
 
627
        u32 gc_user_shader_pipe_config;
 
628
        u32 gc_user_rb_backend_disable;
 
629
        u32 cgts_user_tcc_disable;
 
630
        u32 cgts_sm_ctrl_reg;
 
631
        u32 hdp_host_path_cntl;
 
632
        u32 tmp;
 
633
        int i, j;
 
634
 
 
635
        switch (rdev->family) {
 
636
        case CHIP_CAYMAN:
 
637
        default:
 
638
                rdev->config.cayman.max_shader_engines = 2;
 
639
                rdev->config.cayman.max_pipes_per_simd = 4;
 
640
                rdev->config.cayman.max_tile_pipes = 8;
 
641
                rdev->config.cayman.max_simds_per_se = 12;
 
642
                rdev->config.cayman.max_backends_per_se = 4;
 
643
                rdev->config.cayman.max_texture_channel_caches = 8;
 
644
                rdev->config.cayman.max_gprs = 256;
 
645
                rdev->config.cayman.max_threads = 256;
 
646
                rdev->config.cayman.max_gs_threads = 32;
 
647
                rdev->config.cayman.max_stack_entries = 512;
 
648
                rdev->config.cayman.sx_num_of_sets = 8;
 
649
                rdev->config.cayman.sx_max_export_size = 256;
 
650
                rdev->config.cayman.sx_max_export_pos_size = 64;
 
651
                rdev->config.cayman.sx_max_export_smx_size = 192;
 
652
                rdev->config.cayman.max_hw_contexts = 8;
 
653
                rdev->config.cayman.sq_num_cf_insts = 2;
 
654
 
 
655
                rdev->config.cayman.sc_prim_fifo_size = 0x100;
 
656
                rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
 
657
                rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
 
658
                break;
 
659
        }
 
660
 
 
661
        /* Initialize HDP */
 
662
        for (i = 0, j = 0; i < 32; i++, j += 0x18) {
 
663
                WREG32((0x2c14 + j), 0x00000000);
 
664
                WREG32((0x2c18 + j), 0x00000000);
 
665
                WREG32((0x2c1c + j), 0x00000000);
 
666
                WREG32((0x2c20 + j), 0x00000000);
 
667
                WREG32((0x2c24 + j), 0x00000000);
 
668
        }
 
669
 
 
670
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
671
 
 
672
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 
673
        mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
674
 
 
675
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
 
676
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
 
677
        cgts_tcc_disable = 0xff000000;
 
678
        gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
 
679
        gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
 
680
        cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
 
681
 
 
682
        rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
 
683
        tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
 
684
        rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
 
685
        rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
 
686
        tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
 
687
        rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
 
688
        tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
 
689
        rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
 
690
        tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
 
691
        rdev->config.cayman.backend_disable_mask_per_asic =
 
692
                cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
 
693
                                                 rdev->config.cayman.num_shader_engines);
 
694
        rdev->config.cayman.backend_map =
 
695
                cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
 
696
                                                    rdev->config.cayman.num_backends_per_se *
 
697
                                                    rdev->config.cayman.num_shader_engines,
 
698
                                                    &rdev->config.cayman.backend_disable_mask_per_asic,
 
699
                                                    rdev->config.cayman.num_shader_engines);
 
700
        tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
 
701
        rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
 
702
        tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
 
703
        rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
 
704
        if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
 
705
                rdev->config.cayman.mem_max_burst_length_bytes = 512;
 
706
        tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
 
707
        rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
 
708
        if (rdev->config.cayman.mem_row_size_in_kb > 4)
 
709
                rdev->config.cayman.mem_row_size_in_kb = 4;
 
710
        /* XXX use MC settings? */
 
711
        rdev->config.cayman.shader_engine_tile_size = 32;
 
712
        rdev->config.cayman.num_gpus = 1;
 
713
        rdev->config.cayman.multi_gpu_tile_size = 64;
 
714
 
 
715
        //gb_addr_config = 0x02011003
 
716
#if 0
 
717
        gb_addr_config = RREG32(GB_ADDR_CONFIG);
 
718
#else
 
719
        gb_addr_config = 0;
 
720
        switch (rdev->config.cayman.num_tile_pipes) {
 
721
        case 1:
 
722
        default:
 
723
                gb_addr_config |= NUM_PIPES(0);
 
724
                break;
 
725
        case 2:
 
726
                gb_addr_config |= NUM_PIPES(1);
 
727
                break;
 
728
        case 4:
 
729
                gb_addr_config |= NUM_PIPES(2);
 
730
                break;
 
731
        case 8:
 
732
                gb_addr_config |= NUM_PIPES(3);
 
733
                break;
 
734
        }
 
735
 
 
736
        tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
 
737
        gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
 
738
        gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
 
739
        tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
 
740
        gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
 
741
        switch (rdev->config.cayman.num_gpus) {
 
742
        case 1:
 
743
        default:
 
744
                gb_addr_config |= NUM_GPUS(0);
 
745
                break;
 
746
        case 2:
 
747
                gb_addr_config |= NUM_GPUS(1);
 
748
                break;
 
749
        case 4:
 
750
                gb_addr_config |= NUM_GPUS(2);
 
751
                break;
 
752
        }
 
753
        switch (rdev->config.cayman.multi_gpu_tile_size) {
 
754
        case 16:
 
755
                gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
 
756
                break;
 
757
        case 32:
 
758
        default:
 
759
                gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
 
760
                break;
 
761
        case 64:
 
762
                gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
 
763
                break;
 
764
        case 128:
 
765
                gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
 
766
                break;
 
767
        }
 
768
        switch (rdev->config.cayman.mem_row_size_in_kb) {
 
769
        case 1:
 
770
        default:
 
771
                gb_addr_config |= ROW_SIZE(0);
 
772
                break;
 
773
        case 2:
 
774
                gb_addr_config |= ROW_SIZE(1);
 
775
                break;
 
776
        case 4:
 
777
                gb_addr_config |= ROW_SIZE(2);
 
778
                break;
 
779
        }
 
780
#endif
 
781
 
 
782
        tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
 
783
        rdev->config.cayman.num_tile_pipes = (1 << tmp);
 
784
        tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
 
785
        rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
 
786
        tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
 
787
        rdev->config.cayman.num_shader_engines = tmp + 1;
 
788
        tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
 
789
        rdev->config.cayman.num_gpus = tmp + 1;
 
790
        tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
 
791
        rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
 
792
        tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
 
793
        rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
794
 
 
795
        //gb_backend_map = 0x76541032;
 
796
#if 0
 
797
        gb_backend_map = RREG32(GB_BACKEND_MAP);
 
798
#else
 
799
        gb_backend_map =
 
800
                cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
 
801
                                                    rdev->config.cayman.num_backends_per_se *
 
802
                                                    rdev->config.cayman.num_shader_engines,
 
803
                                                    &rdev->config.cayman.backend_disable_mask_per_asic,
 
804
                                                    rdev->config.cayman.num_shader_engines);
 
805
#endif
 
806
        /* setup tiling info dword.  gb_addr_config is not adequate since it does
 
807
         * not have bank info, so create a custom tiling dword.
 
808
         * bits 3:0   num_pipes
 
809
         * bits 7:4   num_banks
 
810
         * bits 11:8  group_size
 
811
         * bits 15:12 row_size
 
812
         */
 
813
        rdev->config.cayman.tile_config = 0;
 
814
        switch (rdev->config.cayman.num_tile_pipes) {
 
815
        case 1:
 
816
        default:
 
817
                rdev->config.cayman.tile_config |= (0 << 0);
 
818
                break;
 
819
        case 2:
 
820
                rdev->config.cayman.tile_config |= (1 << 0);
 
821
                break;
 
822
        case 4:
 
823
                rdev->config.cayman.tile_config |= (2 << 0);
 
824
                break;
 
825
        case 8:
 
826
                rdev->config.cayman.tile_config |= (3 << 0);
 
827
                break;
 
828
        }
 
829
        rdev->config.cayman.tile_config |=
 
830
                ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
 
831
        rdev->config.cayman.tile_config |=
 
832
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 
833
        rdev->config.cayman.tile_config |=
 
834
                ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
835
 
 
836
        WREG32(GB_BACKEND_MAP, gb_backend_map);
 
837
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
 
838
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 
839
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
840
 
 
841
        cayman_program_channel_remap(rdev);
 
842
 
 
843
        /* primary versions */
 
844
        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
845
        WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
846
        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
847
 
 
848
        WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
 
849
        WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
 
850
 
 
851
        /* user versions */
 
852
        WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
853
        WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
854
        WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
855
 
 
856
        WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
 
857
        WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
 
858
 
 
859
        /* reprogram the shader complex */
 
860
        cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
 
861
        for (i = 0; i < 16; i++)
 
862
                WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
 
863
        WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
 
864
 
 
865
        /* set HW defaults for 3D engine */
 
866
        WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
 
867
 
 
868
        sx_debug_1 = RREG32(SX_DEBUG_1);
 
869
        sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
 
870
        WREG32(SX_DEBUG_1, sx_debug_1);
 
871
 
 
872
        smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
 
873
        smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
 
874
        smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
 
875
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
876
 
 
877
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
 
878
 
 
879
        /* need to be explicitly zero-ed */
 
880
        WREG32(VGT_OFFCHIP_LDS_BASE, 0);
 
881
        WREG32(SQ_LSTMP_RING_BASE, 0);
 
882
        WREG32(SQ_HSTMP_RING_BASE, 0);
 
883
        WREG32(SQ_ESTMP_RING_BASE, 0);
 
884
        WREG32(SQ_GSTMP_RING_BASE, 0);
 
885
        WREG32(SQ_VSTMP_RING_BASE, 0);
 
886
        WREG32(SQ_PSTMP_RING_BASE, 0);
 
887
 
 
888
        WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
 
889
 
 
890
        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
 
891
                                        POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
 
892
                                        SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
 
893
 
 
894
        WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
 
895
                                 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
 
896
                                 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
 
897
 
 
898
 
 
899
        WREG32(VGT_NUM_INSTANCES, 1);
 
900
 
 
901
        WREG32(CP_PERFMON_CNTL, 0);
 
902
 
 
903
        WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
 
904
                                  FETCH_FIFO_HIWATER(0x4) |
 
905
                                  DONE_FIFO_HIWATER(0xe0) |
 
906
                                  ALU_UPDATE_FIFO_HIWATER(0x8)));
 
907
 
 
908
        WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
 
909
        WREG32(SQ_CONFIG, (VC_ENABLE |
 
910
                           EXPORT_SRC_C |
 
911
                           GFX_PRIO(0) |
 
912
                           CS1_PRIO(0) |
 
913
                           CS2_PRIO(1)));
 
914
        WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
 
915
 
 
916
        WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
 
917
                                          FORCE_EOV_MAX_REZ_CNT(255)));
 
918
 
 
919
        WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
 
920
               AUTO_INVLD_EN(ES_AND_GS_AUTO));
 
921
 
 
922
        WREG32(VGT_GS_VERTEX_REUSE, 16);
 
923
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
924
 
 
925
        WREG32(CB_PERF_CTR0_SEL_0, 0);
 
926
        WREG32(CB_PERF_CTR0_SEL_1, 0);
 
927
        WREG32(CB_PERF_CTR1_SEL_0, 0);
 
928
        WREG32(CB_PERF_CTR1_SEL_1, 0);
 
929
        WREG32(CB_PERF_CTR2_SEL_0, 0);
 
930
        WREG32(CB_PERF_CTR2_SEL_1, 0);
 
931
        WREG32(CB_PERF_CTR3_SEL_0, 0);
 
932
        WREG32(CB_PERF_CTR3_SEL_1, 0);
 
933
 
 
934
        tmp = RREG32(HDP_MISC_CNTL);
 
935
        tmp |= HDP_FLUSH_INVALIDATE_CACHE;
 
936
        WREG32(HDP_MISC_CNTL, tmp);
 
937
 
 
938
        hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
 
939
        WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
 
940
 
 
941
        WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
 
942
 
 
943
        udelay(50);
 
944
}
 
945
 
 
946
/*
 
947
 * GART
 
948
 */
 
949
/*
 * cayman_pcie_gart_tlb_flush - flush the GART TLBs after page table updates.
 *
 * Write-back the HDP cache first so any CPU-written PTEs are visible to the
 * GPU before the TLB invalidate is issued.  Ordering of the two writes is
 * required.
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        /* flush hdp cache */
        WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

        /* bits 0-7 are the VM contexts0-7 */
        WREG32(VM_INVALIDATE_REQUEST, 1);
}
 
957
 
 
958
/*
 * cayman_pcie_gart_enable - bring up the PCIE GART.
 *
 * Pins the GART page table in VRAM, restores its entries, programs the
 * L1 TLB / L2 cache controls and VM context 0 with the GTT range, then
 * flushes the TLBs and marks the GART ready.
 *
 * Returns 0 on success, -EINVAL if no page-table BO exists, or the error
 * from pinning the table.  The register write order follows the hardware
 * bring-up sequence and must not be changed.
 */
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        /* re-write all valid PTEs into the freshly pinned table */
        radeon_gart_restore(rdev);
        /* Setup TLB control */
        WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
               ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7) |
               CONTEXT1_IDENTITY_ACCESS_MODE(1));
        WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
        WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        /* setup context0: maps the GTT aperture; faults fall back to the
         * dummy page so a bad access doesn't hang the bus */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
        WREG32(VM_CONTEXT0_CNTL2, 0);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
        /* disable context1-7 */
        WREG32(VM_CONTEXT1_CNTL2, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);

        cayman_pcie_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
}
 
1001
 
 
1002
/*
 * cayman_pcie_gart_disable - tear down the PCIE GART.
 *
 * Disables all VM contexts, reprograms the TLB/L2 controls to their
 * pass-through defaults, then unmaps and unpins the page-table BO if one
 * was allocated.  Safe to call even if the table was never pinned.
 */
void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
        int r;

        /* Disable all tables */
        WREG32(VM_CONTEXT0_CNTL, 0);
        WREG32(VM_CONTEXT1_CNTL, 0);
        /* Setup TLB control */
        WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
               ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
               EFFECTIVE_L2_QUEUE_SIZE(7) |
               CONTEXT1_IDENTITY_ACCESS_MODE(1));
        WREG32(VM_L2_CNTL2, 0);
        WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        if (rdev->gart.table.vram.robj) {
                /* reservation can only fail on signal/deadlock; in that
                 * case we deliberately leave the BO pinned rather than
                 * touch it unlocked */
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}
 
1030
 
 
1031
/*
 * cayman_pcie_gart_fini - final GART teardown at driver unload.
 *
 * Disable the hardware first, then free the VRAM page table and the
 * software GART state; this order must be preserved.
 */
void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
        cayman_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
        radeon_gart_fini(rdev);
}
 
1037
 
 
1038
/*
 
1039
 * CP.
 
1040
 */
 
1041
/*
 * cayman_cp_enable - start or halt the command processor micro engines.
 *
 * When disabling, the usable VRAM size is restored to the CPU-visible
 * window (the blitter can no longer migrate buffers), both the ME and
 * PFP are halted, and scratch-register writeback is masked off.
 */
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
        if (!enable) {
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
                WREG32(SCRATCH_UMSK, 0);
        } else {
                WREG32(CP_ME_CNTL, 0);
        }
}
 
1051
 
 
1052
/*
 * cayman_cp_load_microcode - upload PFP and ME ucode to the CP.
 *
 * Halts the CP, streams the big-endian firmware images into the PFP
 * ucode RAM and ME RAM via their auto-incrementing data ports, then
 * resets all address pointers to 0.  Returns -EINVAL if either firmware
 * image has not been loaded yet.
 *
 * NOTE(review): each address register must be zeroed immediately before
 * its data loop — the data port increments the address on every write.
 */
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->me_fw || !rdev->pfp_fw)
                return -EINVAL;

        cayman_cp_enable(rdev, false);

        /* PFP (pre-fetch parser) ucode */
        fw_data = (const __be32 *)rdev->pfp_fw->data;
        WREG32(CP_PFP_UCODE_ADDR, 0);
        for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
                WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
        WREG32(CP_PFP_UCODE_ADDR, 0);

        /* ME (micro engine) ucode */
        fw_data = (const __be32 *)rdev->me_fw->data;
        WREG32(CP_ME_RAM_WADDR, 0);
        for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
                WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

        /* rewind all address pointers for execution from offset 0 */
        WREG32(CP_PFP_UCODE_ADDR, 0);
        WREG32(CP_ME_RAM_WADDR, 0);
        WREG32(CP_ME_RAM_RADDR, 0);
        return 0;
}
 
1078
 
 
1079
/*
 * cayman_cp_start - initialize the ME and emit the clear-state preamble.
 *
 * Submits the ME_INITIALIZE packet, enables the CP, then streams the
 * golden register state (cayman_default_state) plus a handful of fixed
 * packets on ring 0.  The packet words are an exact PM4 stream; their
 * order and values must not be altered.
 *
 * Returns 0 on success or the error from locking the ring.
 */
static int cayman_cp_start(struct radeon_device *rdev)
{
        int r, i;

        /* 7 dwords: ME_INITIALIZE header + 6 payload words */
        r = radeon_ring_lock(rdev, 7);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(rdev, 0x1);
        radeon_ring_write(rdev, 0x0);
        radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
        radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_unlock_commit(rdev);

        cayman_cp_enable(rdev, true);

        /* golden state + 19 fixed dwords emitted below */
        r = radeon_ring_lock(rdev, cayman_default_size + 19);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }

        /* setup clear context state */
        radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        for (i = 0; i < cayman_default_size; i++)
                radeon_ring_write(rdev, cayman_default_state[i]);

        radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

        /* set clear context state */
        radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
        radeon_ring_write(rdev, 0);

        /* SQ_VTX_BASE_VTX_LOC */
        radeon_ring_write(rdev, 0xc0026f00);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);

        /* Clear consts */
        radeon_ring_write(rdev, 0xc0036f00);
        radeon_ring_write(rdev, 0x00000bc4);
        radeon_ring_write(rdev, 0xffffffff);
        radeon_ring_write(rdev, 0xffffffff);
        radeon_ring_write(rdev, 0xffffffff);

        radeon_ring_write(rdev, 0xc0026900);
        radeon_ring_write(rdev, 0x00000316);
        radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
        radeon_ring_write(rdev, 0x00000010); /*  */

        radeon_ring_unlock_commit(rdev);

        /* XXX init other rings */

        return 0;
}
 
1143
 
 
1144
/*
 * cayman_cp_fini - halt the CP and release ring 0's buffer object.
 * The CP must be stopped before the ring memory is freed.
 */
static void cayman_cp_fini(struct radeon_device *rdev)
{
        cayman_cp_enable(rdev, false);
        radeon_ring_fini(rdev);
}
 
1149
 
 
1150
/*
 * cayman_cp_resume - soft-reset the CP and program all three ring buffers.
 *
 * Resets the CP block (and the PA/SH/VGT/SX blocks that must be reset
 * with it), then programs ring 0 (gfx + compute) and rings 1/2 (compute
 * only): buffer size, read/write pointers, writeback addresses and base.
 * Finally starts the rings and runs a ring test on ring 0 only.
 *
 * Returns 0 on success or the ring-test error, in which case all three
 * rings are marked not ready.
 */
int cayman_cp_resume(struct radeon_device *rdev)
{
        u32 tmp;
        u32 rb_bufsz;
        int r;

        /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
        WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
                                 SOFT_RESET_PA |
                                 SOFT_RESET_SH |
                                 SOFT_RESET_VGT |
                                 SOFT_RESET_SX));
        RREG32(GRBM_SOFT_RESET); /* posting read: flush the reset write */
        mdelay(15);
        WREG32(GRBM_SOFT_RESET, 0);
        RREG32(GRBM_SOFT_RESET);

        WREG32(CP_SEM_WAIT_TIMER, 0x4);

        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);

        WREG32(CP_DEBUG, (1 << 27));

        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp.ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB0_CNTL, tmp);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB0_WPTR, 0);

        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
        WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

        if (rdev->wb.enabled)
                WREG32(SCRATCH_UMSK, 0xff);
        else {
                tmp |= RB_NO_UPDATE;
                WREG32(SCRATCH_UMSK, 0);
        }

        mdelay(1);
        WREG32(CP_RB0_CNTL, tmp);

        WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);

        rdev->cp.rptr = RREG32(CP_RB0_RPTR);
        rdev->cp.wptr = RREG32(CP_RB0_WPTR);

        /* ring1  - compute only */
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB1_CNTL, tmp);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB1_WPTR, 0);

        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

        mdelay(1);
        WREG32(CP_RB1_CNTL, tmp);

        WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);

        rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
        rdev->cp1.wptr = RREG32(CP_RB1_WPTR);

        /* ring2 - compute only */
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB2_CNTL, tmp);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB2_WPTR, 0);

        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

        mdelay(1);
        WREG32(CP_RB2_CNTL, tmp);

        WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);

        rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
        rdev->cp2.wptr = RREG32(CP_RB2_WPTR);

        /* start the rings */
        cayman_cp_start(rdev);
        rdev->cp.ready = true;
        rdev->cp1.ready = true;
        rdev->cp2.ready = true;
        /* this only test cp0 */
        r = radeon_ring_test(rdev);
        if (r) {
                rdev->cp.ready = false;
                rdev->cp1.ready = false;
                rdev->cp2.ready = false;
                return r;
        }

        return 0;
}
 
1273
 
 
1274
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
 
1275
{
 
1276
        u32 srbm_status;
 
1277
        u32 grbm_status;
 
1278
        u32 grbm_status_se0, grbm_status_se1;
 
1279
        struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
 
1280
        int r;
 
1281
 
 
1282
        srbm_status = RREG32(SRBM_STATUS);
 
1283
        grbm_status = RREG32(GRBM_STATUS);
 
1284
        grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 
1285
        grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 
1286
        if (!(grbm_status & GUI_ACTIVE)) {
 
1287
                r100_gpu_lockup_update(lockup, &rdev->cp);
 
1288
                return false;
 
1289
        }
 
1290
        /* force CP activities */
 
1291
        r = radeon_ring_lock(rdev, 2);
 
1292
        if (!r) {
 
1293
                /* PACKET2 NOP */
 
1294
                radeon_ring_write(rdev, 0x80000000);
 
1295
                radeon_ring_write(rdev, 0x80000000);
 
1296
                radeon_ring_unlock_commit(rdev);
 
1297
        }
 
1298
        /* XXX deal with CP0,1,2 */
 
1299
        rdev->cp.rptr = RREG32(CP_RB0_RPTR);
 
1300
        return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
 
1301
}
 
1302
 
 
1303
/*
 * cayman_gpu_soft_reset - soft-reset all graphics blocks.
 *
 * No-op if the GPU is already idle.  Otherwise: log the status registers,
 * quiesce the memory controller, halt the CP, pulse GRBM_SOFT_RESET over
 * every gfx block, log the post-reset status and restore the MC state.
 * The posting reads and udelay()s between reset writes are required
 * settle time; do not remove or reorder them.
 *
 * Always returns 0.
 */
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
        struct evergreen_mc_save save;
        u32 grbm_reset = 0;

        if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
                return 0;

        dev_info(rdev->dev, "GPU softreset \n");
        dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(GRBM_STATUS));
        dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
                RREG32(GRBM_STATUS_SE0));
        dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
                RREG32(GRBM_STATUS_SE1));
        dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(SRBM_STATUS));
        evergreen_mc_stop(rdev, &save);
        if (evergreen_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
        /* Disable CP parsing/prefetching */
        WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

        /* reset all the gfx blocks */
        grbm_reset = (SOFT_RESET_CP |
                      SOFT_RESET_CB |
                      SOFT_RESET_DB |
                      SOFT_RESET_GDS |
                      SOFT_RESET_PA |
                      SOFT_RESET_SC |
                      SOFT_RESET_SPI |
                      SOFT_RESET_SH |
                      SOFT_RESET_SX |
                      SOFT_RESET_TC |
                      SOFT_RESET_TA |
                      SOFT_RESET_VGT |
                      SOFT_RESET_IA);

        dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
        WREG32(GRBM_SOFT_RESET, grbm_reset);
        (void)RREG32(GRBM_SOFT_RESET); /* posting read */
        udelay(50);
        WREG32(GRBM_SOFT_RESET, 0);
        (void)RREG32(GRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
        udelay(50);
        dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(GRBM_STATUS));
        dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
                RREG32(GRBM_STATUS_SE0));
        dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
                RREG32(GRBM_STATUS_SE1));
        dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(SRBM_STATUS));
        evergreen_mc_resume(rdev, &save);
        return 0;
}
 
1361
 
 
1362
/*
 * cayman_asic_reset - ASIC-reset entry point for the cayman asic table.
 * Only a GPU soft reset is implemented for this family.
 */
int cayman_asic_reset(struct radeon_device *rdev)
{
        return cayman_gpu_soft_reset(rdev);
}
 
1366
 
 
1367
/*
 * cayman_startup - common hardware bring-up for init and resume.
 *
 * Order matters: firmware load -> MC program -> GART -> gpu_init ->
 * blitter -> writeback -> IRQ -> ring init -> CP ucode -> CP resume.
 * A blitter failure is non-fatal (falls back to memcpy); every other
 * failure aborts with the error code.
 */
static int cayman_startup(struct radeon_device *rdev)
{
        int r;

        /* load all required firmware images on first start */
        if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
                r = ni_init_microcode(rdev);
                if (r) {
                        DRM_ERROR("Failed to load firmware!\n");
                        return r;
                }
        }
        r = ni_mc_load_microcode(rdev);
        if (r) {
                DRM_ERROR("Failed to load MC firmware!\n");
                return r;
        }

        evergreen_mc_program(rdev);
        r = cayman_pcie_gart_enable(rdev);
        if (r)
                return r;
        cayman_gpu_init(rdev);

        /* blitter is optional: on failure disable the copy callback so
         * buffer moves fall back to CPU memcpy */
        r = evergreen_blit_init(rdev);
        if (r) {
                evergreen_blit_fini(rdev);
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
                DRM_ERROR("radeon: IH init failed (%d).\n", r);
                radeon_irq_kms_fini(rdev);
                return r;
        }
        evergreen_irq_set(rdev);

        r = radeon_ring_init(rdev, rdev->cp.ring_size);
        if (r)
                return r;
        r = cayman_cp_load_microcode(rdev);
        if (r)
                return r;
        r = cayman_cp_resume(rdev);
        if (r)
                return r;

        return 0;
}
 
1423
 
 
1424
int cayman_resume(struct radeon_device *rdev)
 
1425
{
 
1426
        int r;
 
1427
 
 
1428
        /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
 
1429
         * posting will perform necessary task to bring back GPU into good
 
1430
         * shape.
 
1431
         */
 
1432
        /* post card */
 
1433
        atom_asic_init(rdev->mode_info.atom_context);
 
1434
 
 
1435
        r = cayman_startup(rdev);
 
1436
        if (r) {
 
1437
                DRM_ERROR("cayman startup failed on resume\n");
 
1438
                return r;
 
1439
        }
 
1440
 
 
1441
        r = r600_ib_test(rdev);
 
1442
        if (r) {
 
1443
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 
1444
                return r;
 
1445
        }
 
1446
 
 
1447
        return r;
 
1448
 
 
1449
}
 
1450
 
 
1451
/*
 * cayman_suspend - quiesce the GPU before system suspend.
 *
 * Halts the CP, suspends interrupts, disables writeback and the GART,
 * and unpins the blitter shader BO.  Teardown order mirrors the reverse
 * of cayman_startup() and must be preserved.
 *
 * Always returns 0; a failed shader-BO reservation simply leaves it
 * pinned.
 */
int cayman_suspend(struct radeon_device *rdev)
{
        int r;

        /* FIXME: we should wait for ring to be empty */
        cayman_cp_enable(rdev, false);
        rdev->cp.ready = false;
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);

        /* unpin shaders bo */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (likely(r == 0)) {
                radeon_bo_unpin(rdev->r600_blit.shader_obj);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        }

        return 0;
}
 
1471
 
 
1472
/* Plan is to move initialization in that function and use
 
1473
 * helper function so that radeon_device_init pretty much
 
1474
 * do nothing more than calling asic specific function. This
 
1475
 * should also allow to remove a bunch of callback function
 
1476
 * like vram_info.
 
1477
 */
 
1478
/*
 * cayman_init - one-time driver initialization for cayman GPUs.
 *
 * Performs BIOS/atombios discovery, posts the card if needed, sets up
 * scratch/surface registers, clocks, fences, the memory controller, TTM,
 * IRQs and the rings, then runs cayman_startup().  A startup failure
 * disables acceleration but does not fail init; a missing MC ucode does,
 * because the pre-ucode clocks/voltages are unusable.
 *
 * Returns 0 on success or a negative error code.
 */
int cayman_init(struct radeon_device *rdev)
{
        int r;

        /* This doesn't do much yet */
        r = radeon_gem_init(rdev);
        if (r)
                return r;
        /* Read BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        /* Must be an ATOMBIOS */
        if (!rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
                return -EINVAL;
        }
        r = radeon_atombios_init(rdev);
        if (r)
                return r;

        /* Post card if necessary */
        if (!radeon_card_posted(rdev)) {
                if (!rdev->bios) {
                        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        /* Initialize scratch registers */
        r600_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        /* initialize memory controller */
        r = evergreen_mc_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;

        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;

        rdev->cp.ring_obj = NULL;
        r600_ring_init(rdev, 1024 * 1024);

        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);

        r = r600_pcie_gart_init(rdev);
        if (r)
                return r;

        rdev->accel_working = true;
        r = cayman_startup(rdev);
        if (r) {
                /* acceleration unavailable; modesetting can continue */
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                cayman_cp_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_irq_kms_fini(rdev);
                cayman_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
                        DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
                        rdev->accel_working = false;
                }
                r = r600_ib_test(rdev);
                if (r) {
                        DRM_ERROR("radeon: failed testing IB (%d).\n", r);
                        rdev->accel_working = false;
                }
        }

        /* Don't start up if the MC ucode is missing.
         * The default clocks and voltages before the MC ucode
         * is loaded are not sufficient for advanced operations.
         */
        if (!rdev->mc_fw) {
                DRM_ERROR("radeon: MC ucode required for NI+.\n");
                return -EINVAL;
        }

        return 0;
}
 
1577
 
 
1578
/*
 * cayman_fini - full driver teardown at unload.
 *
 * Releases everything cayman_init()/cayman_startup() created, in the
 * reverse order of creation (blitter, CP, IRQ, writeback, IB pool, GART,
 * GEM, fences, TTM, atombios state, BIOS copy).  Order must be preserved.
 */
void cayman_fini(struct radeon_device *rdev)
{
        evergreen_blit_fini(rdev);
        cayman_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL; /* ownership released; prevent double-free */
}
 
1594