/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
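
/*
 * Illustrative note (added, not from the original source): modifiers
 * take the mutex first and nest the spinlock inside it, as create_spu()
 * does below; irq-context iterators such as spu_flush_all_slbs() take
 * only the spinlock, while iterators that may sleep take only the
 * mutex.
 */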

struct spu_slb {
	u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
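
/*
 * Note on spu_invalidate_slbs() above (added comment, best-effort
 * explanation): the MFC_STATE1_RELOCATE_MASK test means the
 * invalidate-all register is only written while MFC address
 * translation is enabled; with relocation off, the SLB is not in use
 * and need not be flushed.
 */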

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}
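
/*
 * Rationale (added comment, best-effort explanation): SPE translations
 * for this mm live outside any CPU, so they are not covered by the
 * usual cpumask-based TLB shootdown. Filling the cpumask forces
 * broadcast tlbie instructions to be used for this mm, which the SPEs'
 * MMUs also snoop.
 */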

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}
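
/*
 * Added note: the esid-clear/vsid-write/esid-write sequence above
 * guarantees the MFC never observes a half-written SLB entry; the
 * entry stays invalid (SLB_ESID_V clear) until both halves are
 * consistent.
 */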

static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __func__);

	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}
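
/*
 * Added note: SLB replacement above is a simple round-robin cursor
 * over the eight SPE SLB entries (spu->slb_replace wraps at 8); there
 * is no LRU tracking, which keeps the segment-fault path cheap.
 */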

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (REGION_ID(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}
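
/*
 * Added note: for user-space faults the function above only records
 * the faulting address and DSISR and fires stop_callback(); the owning
 * spufs context then resolves the fault from process context, where it
 * is safe to sleep and take mm locks.
 */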

static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct spu_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already a SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
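
/*
 * Added note: slbs[4] suffices because only four candidate addresses
 * are checked (start and end of the lscsa and of the save/restore
 * code), so at most four distinct segments can be required.
 */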

static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}
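
/*
 * Added note (best-effort summary, not from the original file): class 0
 * covers SPU error conditions such as invalid DMA commands and
 * alignment errors; classes 1 and 2 below handle translation faults
 * and application-level events respectively.
 */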

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		;

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		;

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;

	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
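
/*
 * Added note: because the mailbox interrupts are level triggered, they
 * would re-assert immediately after spu_int_stat_clear() if left
 * unmasked; they are presumably re-enabled by the consumer once the
 * mailbox has been drained. ibox_callback/wbox_callback notify readers
 * of the incoming mailbox and writers of the outgoing mailbox.
 */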

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  0, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  0, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  0, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
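
/*
 * Added note: the bail labels unwind in reverse order of acquisition,
 * with fall-through between labels, so a failure at class N releases
 * exactly the IRQs already requested for the lower classes.
 */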

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct sysdev_class spu_sysdev_class = {
	.name = "spu",
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->sysdev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
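
/*
 * Added note: the error labels above unwind in reverse order of setup
 * (sysdev after IRQs after spu_create_spu() after the allocation), so
 * a failure at any step releases exactly what was acquired before it.
 */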

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct sys_device *sysdev,
				struct sysdev_attribute *attr, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);
	}
}
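
/*
 * Added note: this crash-shutdown hook snapshots each SPU's run
 * control, status, program counter and MFC fault registers, then
 * clears the master run control bit so the SPUs are quiesced before
 * the kexec crash kernel takes over.
 */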

static void crash_register_spus(struct list_head *list)
{
	struct spu *spu;
	int ret;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}

	ret = crash_shutdown_register(&crash_kexec_stop_spus);
	if (ret)
		printk(KERN_ERR "Could not register SPU crash handler");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static void spu_shutdown(void)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		spu_free_irqs(spu);
		spu_destroy_spu(spu);
	}
	mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
	.shutdown = spu_shutdown,
};

static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0)
		fb_append_extra_logo(&logo_spe_clut224, ret);

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);
	register_syscore_ops(&spu_syscore_ops);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");