/*
 * SMP/VPE-safe functions to access "registers" (see note).
 *
 * NOTES:
 * - These macros use ll/sc instructions, so it is your responsibility to
 * ensure these are available on your platform before including this file.
 * - The MIPS32 spec states that ll/sc results are undefined for uncached
 * accesses. This means they can't be used on HW registers accessed
 * through kseg1. Code which requires these macros for this purpose must
 * front-end the registers with cached memory "registers" and have a single
 * thread update the actual HW registers.
 * - A maximum of 2k of code can be inserted between ll and sc. Every
 * memory access between the instructions will increase the chance of
 * sc failing and having to loop.
 * - When using custom_read_reg32/custom_write_reg32 only perform the
 * necessary logical operations on the register value in between these
 * two calls. All other logic should be performed before the first call.
 * - There is a bug on the R10000 chips which has a workaround. If you
 * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
 * to be non-zero. If you are using this header from within linux, you may
 * include <asm/war.h> before including this file to have this defined
 * appropriately for you.
 *
 * Copyright 2005-2007 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc., 675
 * Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef __ASM_REGOPS_H__
#define __ASM_REGOPS_H__

#include <linux/types.h>

/* Default to "not affected" unless the platform (or <asm/war.h>) says so. */
#ifndef R10000_LLSC_WAR
#define R10000_LLSC_WAR 0
#endif

/*
 * R10000 workaround: use the branch-likely form (beqzl) after sc so the
 * delay-slot instruction is annulled when the sc succeeds.
 */
#if R10000_LLSC_WAR == 1
#define __beqz "beqzl "
#else
#define __beqz "beqz "
#endif

/* Fallback when this header is used outside the kernel proper. */
#ifndef _LINUX_TYPES_H
typedef unsigned int u32;
#endif
69
* Sets all the masked bits to the corresponding value bits
71
static inline void set_value_reg32(volatile u32 *const addr,
80
"1: ll %0, %1 # set_value_reg32 \n"
87
: "=&r" (temp), "=m" (*addr)
88
: "ir" (~mask), "ir" (value), "m" (*addr));
92
* Sets all the masked bits to '1'
94
static inline void set_reg32(volatile u32 *const addr,
102
"1: ll %0, %1 # set_reg32 \n"
108
: "=&r" (temp), "=m" (*addr)
109
: "ir" (mask), "m" (*addr));
113
* Sets all the masked bits to '0'
115
static inline void clear_reg32(volatile u32 *const addr,
120
__asm__ __volatile__(
123
"1: ll %0, %1 # clear_reg32 \n"
129
: "=&r" (temp), "=m" (*addr)
130
: "ir" (~mask), "m" (*addr));
134
* Toggles all masked bits from '0' to '1' and '1' to '0'
136
static inline void toggle_reg32(volatile u32 *const addr,
141
__asm__ __volatile__(
144
"1: ll %0, %1 # toggle_reg32 \n"
150
: "=&r" (temp), "=m" (*addr)
151
: "ir" (mask), "m" (*addr));
155
* Read all masked bits others are returned as '0'
157
static inline u32 read_reg32(volatile u32 *const addr,
162
__asm__ __volatile__(
165
" lw %0, %1 # read \n"
166
" and %0, %2 # mask \n"
169
: "m" (*addr), "ir" (mask));
175
* blocking_read_reg32 - Read address with blocking load
177
* Uncached writes need to be read back to ensure they reach RAM.
178
* The returned value must be 'used' to prevent from becoming a
181
static inline u32 blocking_read_reg32(volatile u32 *const addr)
185
__asm__ __volatile__(
188
" lw %0, %1 # read \n"
189
" move %0, %0 # block \n"
/*
 * For special strange cases only:
 *
 * If you need custom processing within a ll/sc loop, use the following macros
 * VERY CAREFULLY:
 *
 *   u32 tmp;				<-- Define a variable to hold the data
 *   custom_read_reg32(address, tmp);	<-- Reads the address and put the value
 *					    in the 'tmp' variable given
 *
 *   From here on out, you are (basically) atomic, so don't do anything too
 *   fancy!
 *   Also, this code may loop if the end of this block fails to write
 *   everything back safely due to the other CPU, so do NOT do anything
 *   with side-effects!
 *
 *   custom_write_reg32(address, tmp);	<-- Writes back 'tmp' safely.
 */
/*
 * Opens an ll/sc critical section: loads *address into 'tmp' with a
 * link. Must be paired with custom_write_reg32 on the same address.
 */
#define custom_read_reg32(address, tmp)			\
	__asm__ __volatile__(				\
	"	.set	push				\n" \
	"	.set	mips3				\n" \
	"1:	ll	%0, %1	#custom_read_reg32	\n" \
	"	.set	pop				\n" \
	: "=r" (tmp), "=m" (*address)			\
	: "m" (*address))
/*
 * Closes the ll/sc critical section opened by custom_read_reg32:
 * conditionally stores 'tmp' back, branching to label 1 (the ll in
 * custom_read_reg32) to retry if the store-conditional failed.
 */
#define custom_write_reg32(address, tmp)		\
	__asm__ __volatile__(				\
	"	.set	push				\n" \
	"	.set	mips3				\n" \
	"	sc	%0, %1	#custom_write_reg32	\n" \
	"	"__beqz"%0, 1b				\n" \
	"	nop					\n" \
	"	.set	pop				\n" \
	: "=&r" (tmp), "=m" (*address)			\
	: "0" (tmp), "m" (*address))

#endif /* __ASM_REGOPS_H__ */