#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc%? p15, 0, %0, c0, c0, " __stringify(reg) \
		    : "=r" (__val) : : "cc");			\
		__val;						\
	})
#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)			((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)			((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		 __cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		 __cacheid_vipt_aliasing(__val);		\
	})
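/*
 * Illustrative sketch only, not part of this header: a VIVT cache or an
 * aliasing VIPT cache can hold stale data for the same physical page under a
 * different virtual address, so remapping code has to flush in those cases,
 * while a non-aliasing VIPT cache does not.  The helper name is made up.
 */
static inline int example_mapping_needs_flush(void)
{
	return cache_is_vivt() || cache_is_vipt_aliasing();
}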
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
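/*
 * Usage sketch, not taken from this header: __asmeq() can be pasted in front
 * of an asm template so the assembler errors out if the compiler did not bind
 * an operand to the register the sequence hard-codes.  Here r0/r1 are forced
 * via register variables; "example_add_r0_r1" is a hypothetical name.
 */
static inline long example_add_r0_r1(long a, long b)
{
	register long r0 asm("r0") = a;
	register long r1 asm("r1") = b;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r1")
		"add	%0, %0, %1"	/* stands in for a fixed-register sequence */
		: "+r" (r0)
		: "r" (r1));
	return r0;
}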
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
		    : "=r" (__val) : : "cc");			\
		__val;						\
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}
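/*
 * Sketch only: the main ID register returned by read_cpuid_id() encodes the
 * implementer in bits [31:24] (0x41 for ARM Ltd., 0x69 for Intel/XScale,
 * consistent with the 0x69056000 check used for XScale3 below).  The helper
 * name is illustrative, not part of this header.
 */
static inline unsigned int example_cpuid_implementer(void)
{
	return read_cpuid_id() >> 24;
}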
#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);
//#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
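/*
 * Usage sketch only: xchg() atomically stores the new value and returns the
 * previous contents, which is exactly what tas() ("test and set") expresses.
 * The lock word and helper name here are hypothetical.
 */
static inline int example_trylock(volatile unsigned long *lock)
{
	/* an old value of 0 means the lock was free and we now own it */
	return tas(lock) == 0;
}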
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);
/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define	cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif
#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base()	((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base()	(0)
#endif
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define read_barrier_depends()	do { } while(0)
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_MMU
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
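/*
 * Illustrative pairing of smp_wmb()/smp_rmb(), not part of this header: the
 * writer publishes the data before setting a flag, the reader checks the flag
 * before reading the data.  Both variables and helper names are hypothetical.
 */
static inline void example_publish(int *data, int *ready, int value)
{
	*data = value;
	smp_wmb();		/* order the data store before the flag store */
	*ready = 1;
}

static inline int example_consume(int *data, int *ready, int *out)
{
	if (!*ready)
		return 0;
	smp_rmb();		/* order the flag load before the data load */
	*out = *data;
	return 1;
}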
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
/*
 * Define our own context switch locking.  This allows us to enable
 * interrupts over the context switch, otherwise we end up with high
 * interrupt latency.  The real problem area is switch_mm() which may
 * do a full cache flush.
 */
#ifdef CONFIG_SMP
#define prepare_arch_switch(rq,next)					\
do {									\
	spin_lock(&(next)->switch_lock);				\
	spin_unlock_irq(&(rq)->lock);					\
} while (0)

#define finish_arch_switch(rq,prev)					\
	spin_unlock(&(prev)->switch_lock)

#define task_running(rq,p)						\
	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
#else
/*
 * Our UP-case is more simple, but we assume knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * us needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(rq,next)	local_irq_enable()
#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
#define task_running(rq,p)		((rq)->curr == (p))
#endif
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
}

extern void adjust_cr(unsigned long mask, unsigned long set);
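/*
 * Sketch of a read-modify-write on the control register, assuming CR_A (the
 * alignment fault enable bit) is defined elsewhere in this header; adjust_cr()
 * in the kernel proper additionally keeps the cached cr_alignment and
 * cr_no_alignment values in sync.  The helper name is made up.
 */
static inline void example_enable_alignment_faults(void)
{
	set_cr(get_cr() | CR_A);
}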
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
}
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *,
		struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_save\n"		\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")
#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_save\n"		\
	"	orr	%1, %0, #128\n"				\
	"	msr	cpsr_c, %1"				\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_enable\n"		\
	"	bic	%0, %0, #128\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_disable\n"		\
	"	orr	%0, %0, #128\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ stf\n"			\
	"	bic	%0, %0, #64\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ clf\n"			\
	"	orr	%0, %0, #64\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_save_flags"		\
	: "=r" (x) : : "memory", "cc");				\
	})
/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0	@ local_irq_restore\n"		\
	:							\
	: "r" (x)						\
	: "memory", "cc")
#ifdef CONFIG_SMP
#error SMP not supported

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#endif

#define clf()			__clf()
#define stf()			__stf()
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  The work-around used here is to disable interrupts
 * and emulate the atomic swap (the swp_is_buggy paths below); note
 * that this approach cannot work on an SMP system.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif /* CONFIG_SMP */
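/*
 * Usage sketch only: cmpxchg_local() compares *ptr with the old value and, if
 * they match, stores the new one, returning what was there before; it is only
 * atomic with respect to the current CPU.  The token and helper name here are
 * hypothetical.
 */
static inline int example_claim_token(unsigned long *token)
{
	/* succeeds only if the token was still 0 */
	return cmpxchg_local(token, 0UL, 1UL) == 0UL;
}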
#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */