/* The following really assume we have a 486 or better.  Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command   */
/* line options. */
/* We should perhaps test dynamically. */
24
#include "../aligned_atomic_load_store.h"
24
#include "../all_aligned_atomic_load_store.h"
26
26
/* Real X86 implementations, except for some old WinChips, appear      */
/* to enforce ordering between memory operations, EXCEPT that a later  */
/* read can pass earlier writes, presumably due to the visible         */
/* presence of store buffers.                                          */
55
59
/* Really only works for 486 and later */
57
AO_fetch_and_add_full (volatile AO_t *p, long incr)
61
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
61
65
__asm__ __volatile__ ("lock; xaddl %0, %1" :
62
"+r" (result), "+m" (*p) : : "memory");
66
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
66
71
#define AO_HAVE_fetch_and_add_full
73
AO_INLINE unsigned char
74
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
78
__asm__ __volatile__ ("lock; xaddb %0, %1" :
79
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
84
#define AO_HAVE_char_fetch_and_add_full
86
AO_INLINE unsigned short
87
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
89
unsigned short result;
91
__asm__ __volatile__ ("lock; xaddw %0, %1" :
92
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
97
#define AO_HAVE_short_fetch_and_add_full
68
99
/* Really only works for 486 and later */
70
101
AO_or_full (volatile AO_t *p, AO_t incr)
72
103
__asm__ __volatile__ ("lock; orl %1, %0" :
73
"+m" (*p) : "r" (incr) : "memory");
104
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
76
107
#define AO_HAVE_or_full
79
AO_test_and_set_full(volatile AO_t *addr)
109
AO_INLINE AO_TS_VAL_t
110
AO_test_and_set_full(volatile AO_TS_t *addr)
112
unsigned char oldval;
82
113
/* Note: the "xchg" instruction does not need a "lock" prefix */
83
__asm__ __volatile__("xchgl %0, %1"
84
: "=r"(oldval), "+m"(*(addr))
114
__asm__ __volatile__("xchgb %0, %1"
115
: "=r"(oldval), "=m"(*addr)
116
: "0"(0xff), "m"(*addr) : "memory");
117
return (AO_TS_VAL_t)oldval;
89
120
#define AO_HAVE_test_and_set_full
94
125
AO_t old, AO_t new_val)
97
__asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
98
: "+m"(*(addr)), "=q"(result)
99
: "r" (new_val), "a"(old) : "memory");
128
__asm__ __volatile__("lock; cmpxchgl %3, %0; setz %1"
129
: "=m"(*addr), "=q"(result)
130
: "m"(*addr), "r" (new_val), "a"(old) : "memory");
100
131
return (int) result;
103
134
#define AO_HAVE_compare_and_swap_full
136
/* Returns nonzero if the comparison succeeded. */
137
/* Really requires at least a Pentium. */
139
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
140
AO_t old_val1, AO_t old_val2,
141
AO_t new_val1, AO_t new_val2)
144
__asm__ __volatile__("lock; cmpxchg8b %0; setz %1"
145
: "=m"(*addr), "=q"(result)
146
: "m"(*addr), "d" (old_val1), "a" (old_val2),
147
"c" (new_val1), "b" (new_val2) : "memory");
151
#define AO_HAVE_double_compare_and_swap_full
153
#include "../ao_t_is_int.h"