/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	asm volatile (
		"lock incq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	asm volatile (
		"lock decq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
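/*
 * Note (added commentary): on uniprocessor (non-CONFIG_SMP) builds the
 * `lock` prefix is omitted because a single `incq`/`decq` on memory is
 * already indivisible with respect to interrupts on the same CPU; the
 * prefix is only needed to serialize against other processors.
 */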
static inline long atomic_postinc(atomic_t *val)
{
	long r = 1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count), [r] "+r" (r)
	);

	return r;
}
static inline long atomic_postdec(atomic_t *val)
{
	long r = -1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count), [r] "+r" (r)
	);

	return r;
}
#define atomic_preinc(val)  (atomic_postinc(val) + 1)
#define atomic_predec(val)  (atomic_postdec(val) - 1)
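/*
 * Illustrative sketch, not part of the original interface: the post/pre
 * primitives above return the counter value, so atomic_postinc() can be
 * used to hand out unique sequence numbers.  The names `example_seq` and
 * `example_next_seq` are hypothetical.
 */
#if 0
static atomic_t example_seq;

static long example_next_seq(void)
{
	/* Returns the value the counter held before the increment. */
	return atomic_postinc(&example_seq);
}
#endif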
static inline uint64_t test_and_set(atomic_t *val) {
	uint64_t v;

	asm volatile (
		"movq $1, %[v]\n"
		"xchgq %[v], %[count]\n"
		: [v] "=r" (v), [count] "+m" (val->count)
	);

	return v;
}
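/*
 * Illustrative sketch, not part of the original interface: test_and_set()
 * returns the previous value of the word after atomically writing 1 into
 * it, so a zero return means the caller acquired the flag.  The helper
 * name `example_trylock` is hypothetical.
 */
#if 0
static int example_trylock(atomic_t *val)
{
	/* Previous value 0 => the flag was free and is now ours. */
	return test_and_set(val) == 0;
}
#endif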
/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	uint64_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
		"pause\n"
		"mov %[count], %[tmp]\n"
		"testq %[tmp], %[tmp]\n"
		"jnz 0b\n"       /* lightweight looping on locked spinlock */

		"incq %[tmp]\n"  /* now use the atomic operation */
		"xchgq %[count], %[tmp]\n"
		"testq %[tmp], %[tmp]\n"
		"jnz 0b\n"
		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
	);
	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
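/*
 * Illustrative sketch, not part of the original interface: a matching
 * release for the fast lock above would conceptually issue the leave
 * barrier, clear the flag and re-enable preemption.  The helper name
 * `example_unlock_arch` and the exact barrier placement shown here are
 * assumptions, not the kernel's generic spinlock code.
 */
#if 0
static void example_unlock_arch(atomic_t *val)
{
	/* Keep critical-section accesses from leaking past the release. */
	CS_LEAVE_BARRIER();
	val->count = 0;		/* plain store releases the flag */
	preemption_enable();
}
#endif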