/*
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#if 0
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
    } while (_c);					\
  }
#else
#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
	    cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
    } while (_c);					\
  }
#endif				/* debug */

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
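
/*
 * Note on the polling macros above: POLL_WHILE_TRUE(cond) spins while
 * 'cond' holds, busy-waiting (with cpu_relax) for a bounded number of
 * iterations before yielding the CPU, and POLL_WHILE_FALSE(cond) spins
 * until 'cond' becomes true.  A typical use, taken from the save
 * sequence below:
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * which blocks until the SPU reports that it is no longer running.
 */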

static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *    Acquire SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *    Release SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If SPU_Status[E,L,IS] any field is '1', this
	 *     SPU is in isolate state and cannot be context
	 *     saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Synchronize all three interrupts to be sure
	 *     we no longer execute a handler on another CPU.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);

	/*
	 * This flag needs to be set before calling synchronize_irq so
	 * that the update will be visible to the relevant handlers
	 * via memory barrier.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
	mb();
	synchronize_irq(spu->irqs[0]);
	synchronize_irq(spu->irqs[1]);
	synchronize_irq(spu->irqs[2]);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26:
	 *    Set a software watchdog timer, which specifies the
	 *    maximum allowable time for a context save sequence.
	 *
	 *    For present, this implementation will not set a global
	 *    watchdog timer, as virtualization & variable system load
	 *    may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 *     Done above in Step 3 - disable_interrupts().
	 */
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) &
				~MFC_CNTL_SUSPEND_DMA_QUEUE &
				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
}
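
/*
 * save_mfc_cntl() above distinguishes three DMA-suspend states read from
 * MFC_CNTL: a suspend already in progress (wait for completion, then
 * record the suspended state), an already-complete suspend (just record
 * it), and normal queue operation (initiate the suspend, then wait).
 * In every case the CSA ends up holding an MFC_CNTL image consistent
 * with the state of the DMA queue at the moment of the switch.
 */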

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_stopped_status(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
			MFC_CNTL_DMA_QUEUES_EMPTY;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds].  Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
	 *
	 * update: do the same with MFC_CNTL[Q].
	 */
	csa->priv2.mfc_control_RW &= ~mask;
	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *    Read PPE Timebase High and Timebase low registers
	 *    and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 * Restore, Step 6:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Restore, Step 13.
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}
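
/*
 * The MFC keeps two hardware command queues: an 8-entry queue for
 * PPE-initiated (proxy) commands and a 16-entry queue for SPU-initiated
 * commands.  Each entry is four 64-bit words (mfc_cq_data0..3), which is
 * why save_mfc_queues() above copies 8*4 and 16*4 registers, and why
 * restore_mfc_queues() further down writes them back in the same shape.
 */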

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the Prxy_TagStatus register in the CSA.
	 *
	 * It is unnecessary to restore dma_tagstatus_R, however,
	 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
	 * we must save it.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 24.
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW,
			MFC_CNTL_PURGE_DMA_REQUEST |
			MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15.
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *     Reset the value in SPU_LSLR.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
		spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
		spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}
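
/*
 * SPU channels are saved through an indirection mechanism: writing a
 * channel index to SPU_ChnlCntPtr (spu_chnlcntptr_RW) selects the
 * channel, after which SPU_ChnlData and SPU_ChnlCnt accesses read or
 * write that channel's data and count.  save_ch_part1() above uses this
 * to capture channels 0, 3, 4, 24, 25 and 27, clearing each one after
 * the save; the same idiom recurs throughout the save and restore paths.
 */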

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 26a - moved from 44:
	 *     Save MFC_CMD channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
		unsigned int *code, int code_size)
{
	/* Save, Step 47:
	 * Restore, Step 30.
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     to provide access to SPU context save code and
	 *     LSCSA.
	 *
	 *     This implementation places both the context
	 *     switch code and LSCSA in kernel address space.
	 *
	 *     Further, this implementation assumes that the
	 *     MFC_SR1[R]=1 (in other words, assume that
	 *     translation is desired by OS environment).
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
	 *     to context switch active.  This implementation does
	 *     not use a switch active flag.
	 *
	 * Now that we have saved the mfc in the csa, we can add in the
	 * restart command if an exception occurred.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by this implementation.
	 *
	 *     This implementation enables only class1
	 *     (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
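
/*
 * send_mfc_dma() above drives the problem-state MFC proxy interface
 * directly: the local-store address, effective address and a packed
 * tag/size/class/cmd word are written, and the enqueue status is read
 * back from the class/cmd register.  Transfers are split into
 * MFC_MAX_DMA_SIZE pieces.  The low two status bits indicate that the
 * command was not accepted (0x2 apparently meaning the proxy queue was
 * full, hence the cpu_relax() before retrying); the enqueue is repeated
 * until both bits read back as zero.
 */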

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *     point address of context save code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}
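
/*
 * The two signal-notification registers are borrowed here as a mailbox
 * for the 64-bit effective address of the LSCSA: set_signot1() passes
 * the upper 32-bit half and set_signot2() the lower half, and the
 * SPU-side save/restore program reassembles the address from the two
 * words before DMA-ing the remainder of the context.
 */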

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to copy context save code
	 *     to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 45.
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *     Complete Interrupt.  Write INT_Stat_Class0 or
	 *     INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *     or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context save succeeded, otherwise context save
	 *     failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *    If required, notify the "using application" that
	 *    the SPU task has been terminated.  TBD.
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
	 *     the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *     Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *    If SPU_Status[R]=1, stop SPU execution
	 *    and wait for stop to complete.
	 *
	 *    Returns       1 if SPU_Status[R]=1 on entry.
	 *                  0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *     If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *     release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 *     instruction sequence to the end of the SPU based restore
	 *     code (after the "context restored" stop and signal) to
	 *     restore the correct SPU status.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}
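
/*
 * The stopped_status slots written above form a small protocol with the
 * SPU-side restore program: slot[0] carries a SPU_STOPPED_STATUS_* token
 * naming which combination of stop/halt/single-step/invalid-instruction
 * bits was live at save time, and slot[1] (where applicable) carries the
 * stop code extracted from SPU_Status ((status >> SPU_STOP_STATUS_SHIFT)
 * & 0xFFFF), so the program can exit in a way that reproduces the
 * original stop condition.
 */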

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *     add a 'br *' instruction to the end of
	 *     the SPU based restore code.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue MFC DMA command to copy context
	 *     restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *     running) then adjust decrementer, set
	 *     decrementer running status in LSCSA,
	 *     and set decrementer "wrapped" status
	 *     in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}
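
/*
 * The decrementer adjustment above is plain timebase arithmetic: the SPU
 * decrementer conceptually kept counting down while the context was
 * switched out, so the elapsed time in timebase cycles (resume_time -
 * suspend_time) is subtracted from the saved value.  If the saved value
 * is smaller than the elapsed time, the subtraction underflows; that is
 * exactly the condition SPU_DECR_STATUS_WRAPPED records for the SPU-side
 * restore code (see also restore_decr_wrapped() below).
 */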

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context restore succeeded, otherwise context restore
	 *     failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
	 *     restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 47.
	 *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
	 *     the MFC command queues.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with value of -1.
	 *     Write INT_STAT_class1 with value of -1.
	 *     Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Se]!=0 then restore
	 *     MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *      2. If CSA.SPU_Channel_0_Count=0 and
	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *         CSA.SPU_Event_Status[Lr]=0 then set
	 *         CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}
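
/*
 * set_llr_event() above compensates for the fact that any atomic
 * reservation the SPU held was necessarily lost across the context
 * switch: the Lock Line Reservation Lost event (MFC_LLR_LOST_EVENT) is
 * forced into the saved event-status channel (channel 0), and the
 * channel count is raised only when the event is enabled in the saved
 * event mask (channel 1) and was not already pending, so the restored
 * program observes exactly one newly delivered event.
 */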

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *     If the status of the CSA software decrementer
	 *     "wrapped" flag is set, OR in a '1' to
	 *     CSA.SPU_Event_Status[Tm].
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *	Restore the following CH: [0,3,4,24,25,27]
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23].
	 */
	ch_counts[0] = 1UL;
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	ch_counts[2] = 1UL;
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *     read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	spu_cpu_affinity_set(spu, ctx->last_ran);
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU.  TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *    Restore the MFC_CNTL register for the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * The queue is put back into the same state that was evident prior to
	 * the context switch.  The suspend flag is added to the saved state in
	 * the csa, if the operational state was suspending or suspended.  In
	 * this case, the code that suspended the mfc is responsible for
	 * continuing it.  Note that SPE faults do not change the operational
	 * state of the spu.
	 */
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 *     Not performed by this implementation.
	 */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);	        /* Step 3. */
	set_watchdog_timer(prev, spu);	        /* Step 4. */
	inhibit_user_access(prev, spu);	        /* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);	        /* Step 7. */
	save_mfc_cntl(prev, spu);		/* Step 8. */
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
	save_mfc_stopped_status(prev, spu);     /* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);	        /* Step 16. */
	issue_mfc_tlbie(prev, spu);	        /* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}

static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_ppu_tagstatus(prev, spu);	/* NEW.     */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	save_mfc_cmd(prev, spu);	/* Step 26a - moved from 44. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);	        /* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);	        /* Step 43. */
	reset_ch(prev, spu);	        /* Step 45. */
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	/* Step 47. */
	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);	        /* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}

static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			& SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}
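
/*
 * Note on the raw values above: the 4LL written to SPU_PrivCntl
 * evidently selects an isolate-exit load request (the counterpart of
 * the SPU_PRIVCNT_LOAD_REQUEST_NORMAL written at the end), and the
 * runcntl write of 2 (a run request) is what actually kicks off the
 * exit.  The iobarrier calls order each MMIO write against the status
 * polls that follow it.
 */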

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}

static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using SPU for first time.
	 */

	disable_interrupts(prev, spu);	        /* Step 2.  */
	inhibit_user_access(prev, spu);	        /* Step 3.  */
	terminate_spu_app(prev, spu);	        /* Step 4.  */
	set_switch_pending(prev, spu);	        /* Step 5.  */
	stop_spu_isolate(spu);			/* NEW.     */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc_and_halt_decr(prev, spu);	/* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))	        /* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);	        /* Step 11. */
	issue_mfc_tlbie(prev, spu);	        /* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);	        /* Step 14. */
	wait_purge_complete(prev, spu);	        /* Step 15. */
	reset_spu_privcntl(prev, spu);	        /* Step 16. */
	reset_spu_lslr(prev, spu);	        /* Step 17. */
	setup_mfc_sr1(prev, spu);	        /* Step 18. */
	spu_invalidate_slbs(spu);		/* Step 19. */
	reset_ch_part1(prev, spu);	        /* Step 20. */
	reset_ch_part2(prev, spu);	        /* Step 21. */
	enable_interrupts(prev, spu);	        /* Step 22. */
	set_switch_active(prev, spu);	        /* Step 23. */
	set_mfc_tclass_id(prev, spu);	        /* Step 24. */
	resume_mfc_queue(prev, spu);	        /* Step 25. */
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);	        /* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);	        /* Step 29. */
	/* Step 30. */
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);	                /* Step 31. */
	set_signot1(next, spu);	                /* Step 32. */
	set_signot2(next, spu);	                /* Step 33. */
	setup_decr(next, spu);	                /* Step 34. */
	setup_ppu_mb(next, spu);	        /* Step 35. */
	setup_ppuint_mb(next, spu);	        /* Step 36. */
	send_restore_code(next, spu);	        /* Step 37. */
	set_ppu_querymask(next, spu);	        /* Step 38. */
	wait_tag_complete(next, spu);	        /* Step 39. */
	wait_spu_stopped(next, spu);	        /* Step 40. */
}

static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);	        /* Step 44. */
	wait_tag_complete(next, spu);	        /* Step 45. */
	suspend_mfc(next, spu);	                /* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);	        /* Step 48. */
	clear_interrupts(next, spu);	        /* Step 49. */
	restore_mfc_queues(next, spu);	        /* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);	        /* Step 53. */
	restore_mfc_csr_cmd(next, spu);	        /* Step 54. */
	restore_mfc_csr_ato(next, spu);	        /* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);	        /* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);	        /* Step 59. */
	restore_ch_part2(next, spu);	        /* Step 60. */
	restore_spu_lslr(next, spu);	        /* Step 61. */
	restore_spu_cfg(next, spu);	        /* Step 62. */
	restore_pm_trace(next, spu);	        /* Step 63. */
	restore_spu_npc(next, spu);	        /* Step 64. */
	restore_spu_mb(next, spu);	        /* Step 65. */
	check_ppu_mb_stat(next, spu);	        /* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	spu_invalidate_slbs(spu);		/* Modified Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
	set_int_route(next, spu);		/* NEW      */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
	enable_user_access(next, spu);	        /* Step 73. */
	reset_switch_active(next, spu);	        /* Step 74. */
	reenable_interrupts(next, spu);	        /* Step 75. */
}

static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-16].
	 *     (b) save of CSA, performed by PPE [steps 17-42]
	 *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiece SPU
	 *              53 if SPU-side of save failed.
	 */

	rc = quiece_spu(prev, spu);	        /* Steps 2-16. */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);	                /* Steps 17-43. */
	save_lscsa(prev, spu);	                /* Steps 44-53. */
	return check_save_status(prev, spu);	/* Step 54.     */
}
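
/*
 * Note that a quiesce failure (rc 2 or 6: the SPU was found in isolate
 * state) does not leave the hardware half-saved: __do_spu_save() runs
 * the harvest/reset sequence before handing the error back, so the SPU
 * is in a known-clean state for whatever is scheduled onto it next.
 */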

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *     (a) harvest (or reset) SPU [steps 2-24].
	 *     (b) restore LSCSA [steps 25-40], mostly performed by SPU.
	 *     (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed below.
	 */

	restore_lscsa(next, spu);	        /* Steps 24-39. */
	rc = check_restore_status(next, spu);	/* Step 40.     */
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}

/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);	        /* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
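
/*
 * The defaults established above describe a freshly created, stopped
 * context: the channel counts (16 free slots on the MFC command channel
 * 21, 1 each on channels 9, 23, 28 and 30) match an idle SPU, run
 * control is parked at STOP, and MFC_CNTL starts with empty, resumed
 * DMA queues.  The mb_stat_R preset of 0x000400 apparently marks four
 * free slots in the SPU Read Inbound Mailbox field (the same four
 * entries save_spu_mb()/restore_spu_mb() shuffle), with the PPU-visible
 * mailbox fields reading empty.
 */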

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}

void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}