/*
 * OMAP44xx CPU low power powerdown and powerup code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12
#include <linux/linkage.h>
13
#include <asm/system.h>
14
#include <asm/smp_scu.h>
15
#include <asm/memory.h>
17
#include <plat/omap44xx.h>
18
#include <mach/omap4-common.h>
19
#include <asm/hardware/cache-l2x0.h>
21
#include "omap4-sar-layout.h"
/* Masks and offsets used for the temporary 1:1 MMU mapping at resume */
#define TTRBIT_MASK		0xffffc000	/* TTBR translation-table base bits */
#define TABLE_INDEX_MASK	0xfff00000	/* Bits 31:20 — 1 MB section index */
#define TABLE_ENTRY		0x00000c02	/* Section-descriptor attribute bits */
#define CACHE_DISABLE_MASK	0xffffe7fb	/* Clears SCTLR cache-enable bits */
#define TABLE_ADDRESS_OFFSET	0x04		/* Saved entry address, in SAR RAM */
#define CR_VALUE_OFFSET		0x08		/* Saved control register, in SAR RAM */
/*
 * =============================
 * == CPU suspend entry point ==
 * =============================
 *
 * void omap4_cpu_suspend(unsigned int cpu, unsigned int save_state)
 *
 * This function code saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domains states to the desired target power state.
 *
 * @cpu : contains cpu id (r0)
 * @save_state : contains context save state (r1)
 * 1 - CPUx L1 and logic lost: MPUSS CSWR
 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
 * from this follows a full CPU reset path via ROM code to CPU restore code.
 * It returns to the caller for CPU INACTIVE and ON power states or in case
 * CPU failed to transition to targeted OFF/DORMANT state.
 */
ENTRY(omap4_cpu_suspend)
@ NOTE(review): this extracted copy is incomplete. The bare numeric lines
@ below are line-number residue from extraction, and several instructions
@ implied by the surviving comments (the compare before 'beq do_WFI', the
@ stores pairing the mrc/mrs context reads, the 'mov r8, r0' after the
@ SAR-base calls, the SCU power-status write, the dsb/isb barriers and the
@ 'do_WFI' label itself) are missing. Verify against the original kernel
@ file before assembling.
58
stmfd sp!, {r0-r12, lr} @ Save registers on stack
60
@ Skip the context save when save_state requested none (flags presumably
@ set by a dropped 'cmp r1, #0x0' -- TODO confirm against original file).
beq do_WFI @ Nothing to save, jump to WFI
62
bl omap4_get_sar_ram_base @ Get base of the SAR (save-and-restore) RAM
65
@ r8 = this CPU's save area in SAR RAM (eq/ne presumably set by a dropped
@ CPU-id check -- TODO confirm).
orreq r8, r8, #CPU0_SAVE_OFFSET
66
orrne r8, r8, #CPU1_SAVE_OFFSET
/*
69
* Save only needed CPU CP15 registers. VFP, breakpoint,
70
* performance monitor registers are not saved. Generic
71
* code is supposed to take care of those.
*/
74
mrs r5, spsr @ Store spsr
78
/* c1 and c2 registers */
79
mrc p15, 0, r4, c1, c0, 2 @ CPACR
80
mrc p15, 0, r5, c2, c0, 0 @ TTBR0
81
mrc p15, 0, r6, c2, c0, 1 @ TTBR1
82
mrc p15, 0, r7, c2, c0, 2 @ TTBCR
@ NOTE(review): the stores of r4-r7 into the save area appear to have been
@ dropped between these read groups.
85
/* c3 and c10 registers */
86
mrc p15, 0, r4, c3, c0, 0 @ DACR
87
mrc p15, 0, r5, c10, c2, 0 @ PRRR
88
mrc p15, 0, r6, c10, c2, 1 @ NMRR
91
/* c12, c13 and CPSR registers */
92
mrc p15, 0, r4, c13, c0, 1 @ Context ID
93
mrc p15, 0, r5, c13, c0, 2 @ User r/w thread ID
94
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS VBAR
95
mrs r7, cpsr @ Store CPSR
98
/* c1 control register */
99
mrc p15, 0, r4, c1, c0, 0 @ Save control register
/*
103
* Flush all data from the L1 data cache before disabling
* (comment truncated in this copy -- the flush precedes clearing SCTLR.C)
*/
106
bl v7_flush_dcache_all
/*
109
* Clear the SCTLR.C bit to prevent further data cache
110
* allocation. Clearing SCTLR.C would make all the data accesses
111
* strongly ordered and would not hit the cache.
*/
113
mrc p15, 0, r0, c1, c0, 0
114
bic r0, r0, #(1 << 2) @ Disable the C bit
115
mcr p15, 0, r0, c1, c0, 0
/*
119
* Invalidate L1 data cache. Even though only invalidate is
120
* necessary exported flush API is used here. Doing clean
121
* on already clean cache would be almost NOP.
*/
123
bl v7_flush_dcache_all
/*
126
* Switch the CPU from Symmetric Multiprocessing (SMP) mode
127
* to Asymmetric Multiprocessing (AMP) mode by programming
128
* the SCU power status to DORMANT or OFF mode.
129
* This enables the CPU to be taken out of coherency by
130
* preventing the CPU from receiving cache, TLB, or BTB
131
* maintenance operations broadcast by other CPUs in the cluster.
*/
133
bl omap4_get_sar_ram_base
135
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
137
@ Load this CPU's target SCU power-status value from SAR RAM (eq/ne
@ presumably from a dropped CPU-id compare on MPIDR -- TODO confirm).
ldreq r1, [r8, #SCU_OFFSET0]
138
ldrne r1, [r8, #SCU_OFFSET1]
139
bl omap4_get_scu_base
@ NOTE(review): the write of r1 into the SCU power-status register appears
@ to have been dropped here.
/*
144
* Execute an ISB instruction to ensure that all of the
145
* CP15 register changes have been committed.
*/
/*
150
* Execute a barrier instruction to ensure that all cache,
151
* TLB and branch predictor maintenance operations issued
152
* by any CPU in the cluster have completed.
*/
/*
158
* Execute a WFI instruction and wait until the
159
* STANDBYWFI output is asserted to indicate that the
160
* CPU is in idle and low power state.
*/
162
wfi @ Wait For Interrupt
/*
165
* CPU is here when it failed to enter OFF/DORMANT or
166
* no low power state was attempted.
*/
168
@ Re-enable the data cache (C bit) if it was left disabled above.
mrc p15, 0, r0, c1, c0, 0
169
tst r0, #(1 << 2) @ Check C bit enabled?
170
orreq r0, r0, #(1 << 2) @ Enable the C bit
171
mcreq p15, 0, r0, c1, c0, 0
/*
175
* Ensure the CPU power state is set to NORMAL in
176
* SCU power state so that CPU is back in coherency.
177
* In non-coherent mode CPU can lock-up and lead to
* (comment truncated in this copy)
*/
180
bl omap4_get_scu_base
181
mov r1, #SCU_PM_NORMAL
@ NOTE(review): the SCU power-status store and the 'do_WFI' label are
@ missing from this copy.
186
ldmfd sp!, {r0-r12, pc} @ Restore regs and return
187
ENDPROC(omap4_cpu_suspend)
/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. Physical address of the function is
 * stored in the SAR RAM while entering to OFF or DORMANT mode.
 */
ENTRY(omap4_cpu_resume)
@ NOTE(review): this extracted copy is incomplete. The bare numeric lines
@ below are line-number residue from extraction, and several instructions
@ implied by the surviving comments (the MPIDR CPU-id compare, the loads
@ pairing the mcr context-restore writes, the conditional branch guarding
@ 'b ttbr_error', the 'mmu_on_label' and 'ttbr_error' labels, the
@ MMU-enable write, the '#endif' for CONFIG_CACHE_L2X0 and the per-CPU
@ stack setup) are missing. Verify against the original kernel file
@ before assembling.
/*
203
* Check the wakeup cpuid and use appropriate
204
* SAR BANK location for context restore.
*/
206
ldr r3, =OMAP44XX_SAR_RAM_BASE
208
mcr p15, 0, r1, c7, c5, 0 @ Invalidate L1 I
209
mrc p15, 0, r0, c0, c0, 5 @ MPIDR
211
@ r3 = this CPU's save area in SAR RAM (eq/ne presumably from a dropped
@ CPU-id compare on MPIDR -- TODO confirm).
orreq r3, r3, #CPU0_SAVE_OFFSET
212
orrne r3, r3, #CPU1_SAVE_OFFSET
214
/* Restore cp15 registers */
@ NOTE(review): the loads filling r4-r7 from the save area appear to have
@ been dropped before each restore group below.
216
mov sp, r4 @ Restore sp
217
msr spsr_cxsf, r5 @ Restore spsr
218
mov lr, r6 @ Restore lr
220
/* c1 and c2 registers */
222
mcr p15, 0, r4, c1, c0, 2 @ CPACR
223
mcr p15, 0, r5, c2, c0, 0 @ TTBR0
224
mcr p15, 0, r6, c2, c0, 1 @ TTBR1
225
mcr p15, 0, r7, c2, c0, 2 @ TTBCR
227
/* c3 and c10 registers */
229
mcr p15, 0, r4, c3, c0, 0 @ DACR
230
mcr p15, 0, r5, c10, c2, 0 @ PRRR
231
mcr p15, 0, r6, c10, c2, 1 @ NMRR
233
/* c12, c13 and CPSR registers */
235
mcr p15, 0, r4, c13, c0, 1 @ Context ID
236
mcr p15, 0, r5, c13, c0, 2 @ User r/w thread ID
237
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS VBAR
238
msr cpsr, r7 @ store cpsr
/*
241
* Enabling MMU here. Page entry needs to be altered
242
* to create temporary 1:1 map and then restore the entry
243
* once MMU is enabled
*/
245
mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
246
and r7, #0x7 @ Extract N (0:2) to decide
247
cmp r7, #0x0 @ TTBR0/TTBR1
250
@ NOTE(review): the conditional branch taken when N == 0 appears to have
@ been dropped; as shown here, ttbr_error would be reached unconditionally.
b ttbr_error @ Only N = 0 supported
252
mrc p15, 0, r2, c2, c0, 0 @ Read TTBR0
256
ldr r5, =TABLE_INDEX_MASK
257
and r4, r5 @ r4 = 31 to 20 bits of pc
259
add r1, r1, r4 @ r1 has value of table entry
260
lsr r4, #18 @ Address of table entry
261
add r2, r4 @ r2 - location to be modified
263
/* Ensure the modified entry makes it to main memory */
264
#ifdef CONFIG_CACHE_L2X0
265
ldr r5, =OMAP44XX_L2CACHE_BASE
266
str r2, [r5, #L2X0_CLEAN_INV_LINE_PA] @ L2 clean+invalidate by PA
268
@ Read back -- presumably part of a completion poll on the L2X0
@ operation register; the loop compare/branch looks dropped -- TODO confirm.
ldr r0, [r5, #L2X0_CLEAN_INV_LINE_PA]
@ NOTE(review): the matching #endif for CONFIG_CACHE_L2X0 is missing from
@ this copy.
273
/* Storing previous entry of location being modified */
274
ldr r5, =OMAP44XX_SAR_RAM_BASE
276
str r4, [r5, #MMU_OFFSET] @ Modify the table entry
/*
280
* Storing address of entry being modified
281
* It will be restored after enabling MMU
*/
283
ldr r5, =OMAP44XX_SAR_RAM_BASE
284
orr r5, r5, #MMU_OFFSET
285
str r2, [r5, #TABLE_ADDRESS_OFFSET]
287
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
288
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
289
mcr p15, 0, r0, c8, c5, 0 @ Invalidate ITLB
290
mcr p15, 0, r0, c8, c6, 0 @ Invalidate DTLB
/*
293
* Restore control register but don't enable Data caches here.
* Caches will be enabled after restoring MMU table entry.
294
*/
297
str r4, [r5, #CR_VALUE_OFFSET] @ Store previous value of CR
298
ldr r2, =CACHE_DISABLE_MASK
@ NOTE(review): the 'and' applying CACHE_DISABLE_MASK to r4 before this
@ write appears to have been dropped.
300
mcr p15, 0, r4, c1, c0, 0
303
ldr r0, =mmu_on_label
@ NOTE(review): the MMU-enable sequence and the 'mmu_on_label' target are
@ missing from this copy.
306
/* Set up the per-CPU stacks */
/*
310
* Restore the MMU table entry that was modified for
* (comment truncated in this copy -- the temporary 1:1 mapping above)
*/
313
bl omap4_get_sar_ram_base
315
orr r8, r8, #MMU_OFFSET @ Get address of entry that..
316
ldr r2, [r8, #TABLE_ADDRESS_OFFSET] @ was modified
317
ldr r3, =local_va2pa_offet
319
ldr r0, [r8] @ Get the previous value..
320
str r0, [r2] @ which needs to be restored
322
mcr p15, 0, r0, c7, c1, 6 @ flush TLB and issue barriers
323
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
324
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
325
mcr p15, 0, r0, c8, c5, 0 @ Invalidate ITLB
326
mcr p15, 0, r0, c8, c6, 0 @ Invalidate DTLB
329
ldr r0, [r8, #CR_VALUE_OFFSET] @ Restore the Control register
330
mcr p15, 0, r0, c1, c0, 0 @ with caches enabled.
333
ldmfd sp!, {r0-r12, pc} @ restore regs and return
335
@ NOTE(review): name suggests a VA-to-PA conversion offset; a difference
@ (PLAT_PHYS_OFFSET - PAGE_OFFSET) would be the usual form -- confirm the
@ '+' against the original file. ('offet' spelling is referenced by the
@ ldr above, so it is kept as-is.)
.equ local_va2pa_offet, (PLAT_PHYS_OFFSET + PAGE_OFFSET)
337
ENDPROC(omap4_cpu_resume)