/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP.
 */

/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative, go back and try again
 * - an unsuccessful store means another thread raced us: go back and
 *   try again (loser)
 * - a successful store of the new (positive) lock value -> lock acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		/* Load-locked read of the lock word.  */
		"1:	R6 = memw_locked(%0);\n"
		/* P3 = "no writer present"; speculatively bump reader count.  */
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1b; }\n"
		/* Conditional store; fails if the reservation was lost.  */
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	if !P3 jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
/*  Returns 0 on failure, nonzero on success.  */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}
/*
 * Write lock: stuffs a -1 into the lock value.  Readers then see a
 * negative word and writers see a nonzero one, so both keep spinning.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		/* The lock is free only when the word is exactly zero.  */
		"	{ P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		/* If taken, retry; otherwise set R6 = #1 in the same packet.  */
		"	{ if !P3 jump 1b; R6 = #1; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
/*
 * SMP spinlocks are intended to allow only a single CPU at the lock
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif