/*
 * System level definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
24
#include <linux/linkage.h>
25
#include <linux/irqflags.h>
26
#include <asm/atomic.h>
27
#include <asm/hexagon_vm.h>
31
extern struct task_struct *__switch_to(struct task_struct *,
33
struct task_struct *);
35
#define switch_to(p, n, r) do {\
36
r = __switch_to((p), (n), (r));\
40
/*
 * Memory barriers.  All of these reduce to a compiler barrier here;
 * no hardware fence instruction is emitted.  NOTE(review): presumably
 * memory is sufficiently ordered on this platform (or the VM layer
 * serializes accesses) that a compile-time barrier suffices — confirm
 * against the architecture manual before relying on this for MMIO.
 */
#define rmb()				barrier()
#define read_barrier_depends()		barrier()
#define wmb()				barrier()
#define mb()				barrier()

/* SMP variants: also compiler-only barriers here. */
#define smp_rmb()			barrier()
#define smp_read_barrier_depends()	barrier()
#define smp_wmb()			barrier()
#define smp_mb()			barrier()

/* Ordering hooks around atomic_dec()/atomic_inc() call sites. */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
54
* __xchg - atomically exchange a register and a memory location
56
* @ptr: pointer to memory
57
* @size: size of the value
59
* Only 4 bytes supported currently.
61
* Note: there was an errata for V2 about .new's and memw_locked.
64
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
69
/* Can't seem to use printk or panic here, so just stop */
70
if (size != 4) do { asm volatile("brkpt;\n"); } while (1);
72
__asm__ __volatile__ (
73
"1: %0 = memw_locked(%1);\n" /* load into retval */
74
" memw_locked(%1,P0) = %2;\n" /* store into memory */
84
* Atomically swap the contents of a register with memory. Should be atomic
85
* between multiple CPU's and within interrupts on the same CPU.
87
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
90
/*
 * Set a value and follow it with a full memory barrier.  Used by the
 * scheduler somewhere.  Note: both arguments are statement-expanded
 * exactly once, but callers should still pass simple lvalues/values.
 */
#define set_mb(var, value) \
	do { var = value; mb(); } while (0)
95
/*
 * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
 * looks just like atomic_cmpxchg on our arch currently with a bunch of
 * variable casting.
 */
/* Advertise a native cmpxchg implementation to generic code. */
#define __HAVE_ARCH_CMPXCHG 1
101
#define cmpxchg(ptr, old, new) \
103
__typeof__(ptr) __ptr = (ptr); \
104
__typeof__(*(ptr)) __old = (old); \
105
__typeof__(*(ptr)) __new = (new); \
106
__typeof__(*(ptr)) __oldval = 0; \
109
"1: %0 = memw_locked(%1);\n" \
110
" { P0 = cmp.eq(%0,%2);\n" \
111
" if (!P0.new) jump:nt 2f; }\n" \
112
" memw_locked(%1,p0) = %3;\n" \
113
" if (!P0) jump 1b;\n" \
116
: "r" (__ptr), "r" (__old), "r" (__new) \
122
/* Should probably shoot for an 8-byte aligned stack pointer */
#define STACK_MASK (~7)
/*
 * Round a stack pointer value down to an 8-byte boundary.
 * The argument is parenthesized so expression arguments
 * (e.g. arch_align_stack(sp + off)) expand correctly.
 */
#define arch_align_stack(x) ((x) & STACK_MASK)