/*
 * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* The following is useful primarily for debugging and documentation.  */
/* We define various atomic operations by acquiring a global pthread   */
/* lock.  The resulting implementation will perform poorly, but should */
/* be correct unless it is used from signal handlers.                  */
/* We assume that all pthread operations act like full memory barriers.*/
/* (We believe that is the intent of the specification.)               */
/* pthread_mutex_t and the pthread_mutex_* functions used throughout    */
/* this file require <pthread.h>; AO_test_and_set_full below uses       */
/* assert(), which requires <assert.h>.                                 */
#include <assert.h>
#include <pthread.h>

#include "test_and_set_t_is_ao_t.h"
        /* This is not necessarily compatible with the native   */
        /* implementation.  But those can't be safely mixed     */
        /* anyway.                                              */

/* We define only the full barrier variants, and count on the   */
/* generalization section below to fill in the rest.            */

/* The single global lock that serializes every operation in this file. */
/* It must be defined (and initialized) in exactly one translation unit */
/* of the library.                                                      */
extern pthread_mutex_t AO_pt_lock;
43
pthread_mutex_lock(&AO_pt_lock);
44
pthread_mutex_unlock(&AO_pt_lock);
47
#define AO_HAVE_nop_full
50
AO_load_full(volatile AO_t *addr)
53
pthread_mutex_lock(&AO_pt_lock);
55
pthread_mutex_unlock(&AO_pt_lock);
59
#define AO_HAVE_load_full
62
AO_store_full(volatile AO_t *addr, AO_t val)
64
pthread_mutex_lock(&AO_pt_lock);
66
pthread_mutex_unlock(&AO_pt_lock);
69
#define AO_HAVE_store_full
71
AO_INLINE unsigned char
72
AO_char_load_full(volatile unsigned char *addr)
75
pthread_mutex_lock(&AO_pt_lock);
77
pthread_mutex_unlock(&AO_pt_lock);
81
#define AO_HAVE_char_load_full
84
AO_char_store_full(volatile unsigned char *addr, unsigned char val)
86
pthread_mutex_lock(&AO_pt_lock);
88
pthread_mutex_unlock(&AO_pt_lock);
91
#define AO_HAVE_char_store_full
93
AO_INLINE unsigned short
94
AO_short_load_full(volatile unsigned short *addr)
96
unsigned short result;
97
pthread_mutex_lock(&AO_pt_lock);
99
pthread_mutex_unlock(&AO_pt_lock);
103
#define AO_HAVE_short_load_full
106
AO_short_store_full(volatile unsigned short *addr, unsigned short val)
108
pthread_mutex_lock(&AO_pt_lock);
110
pthread_mutex_unlock(&AO_pt_lock);
113
#define AO_HAVE_short_store_full
115
AO_INLINE unsigned int
116
AO_int_load_full(volatile unsigned int *addr)
119
pthread_mutex_lock(&AO_pt_lock);
121
pthread_mutex_unlock(&AO_pt_lock);
125
#define AO_HAVE_int_load_full
128
AO_int_store_full(volatile unsigned int *addr, unsigned int val)
130
pthread_mutex_lock(&AO_pt_lock);
132
pthread_mutex_unlock(&AO_pt_lock);
135
#define AO_HAVE_int_store_full
137
AO_INLINE AO_TS_VAL_t
138
AO_test_and_set_full(volatile AO_TS_t *addr)
141
pthread_mutex_lock(&AO_pt_lock);
142
result = (AO_TS_VAL_t)(*addr);
144
pthread_mutex_unlock(&AO_pt_lock);
145
assert(result == AO_TS_SET || result == AO_TS_CLEAR);
149
#define AO_HAVE_test_and_set_full
152
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
156
pthread_mutex_lock(&AO_pt_lock);
159
pthread_mutex_unlock(&AO_pt_lock);
163
#define AO_HAVE_fetch_and_add_full
165
AO_INLINE unsigned char
166
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
170
pthread_mutex_lock(&AO_pt_lock);
173
pthread_mutex_unlock(&AO_pt_lock);
177
#define AO_HAVE_char_fetch_and_add_full
179
AO_INLINE unsigned short
180
AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
184
pthread_mutex_lock(&AO_pt_lock);
187
pthread_mutex_unlock(&AO_pt_lock);
191
#define AO_HAVE_short_fetch_and_add_full
193
AO_INLINE unsigned int
194
AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
198
pthread_mutex_lock(&AO_pt_lock);
201
pthread_mutex_unlock(&AO_pt_lock);
205
#define AO_HAVE_int_fetch_and_add_full
208
AO_or_full(volatile AO_t *p, AO_t incr)
212
pthread_mutex_lock(&AO_pt_lock);
215
pthread_mutex_unlock(&AO_pt_lock);
218
#define AO_HAVE_or_full
221
AO_compare_and_swap_full(volatile AO_t *addr,
222
AO_t old, AO_t new_val)
224
pthread_mutex_lock(&AO_pt_lock);
228
pthread_mutex_unlock(&AO_pt_lock);
232
pthread_mutex_unlock(&AO_pt_lock);
236
#define AO_HAVE_compare_and_swap_full
238
/* Unlike real architectures, we define both double-width CAS variants. */
245
#define AO_HAVE_double_t
248
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
249
AO_t old1, AO_t old2,
250
AO_t new1, AO_t new2)
252
pthread_mutex_lock(&AO_pt_lock);
253
if (addr -> AO_val1 == old1 && addr -> AO_val2 == old2)
255
addr -> AO_val1 = new1;
256
addr -> AO_val2 = new2;
257
pthread_mutex_unlock(&AO_pt_lock);
261
pthread_mutex_unlock(&AO_pt_lock);
265
#define AO_HAVE_compare_double_and_swap_double_full
268
AO_compare_and_swap_double_full(volatile AO_double_t *addr,
270
AO_t new1, AO_t new2)
272
pthread_mutex_lock(&AO_pt_lock);
273
if (addr -> AO_val1 == old1)
275
addr -> AO_val1 = new1;
276
addr -> AO_val2 = new2;
277
pthread_mutex_unlock(&AO_pt_lock);
281
pthread_mutex_unlock(&AO_pt_lock);
285
#define AO_HAVE_compare_and_swap_double_full
/* We can't use hardware loads and stores, since they don't     */
/* interact correctly with atomic updates.                      */