// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace v8 {
namespace internal {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      // Swap succeeded; report the value that was expected (and observed).
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  // Swap failed; return the differing value currently stored at *ptr.
  return prev_value;
}

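// A hypothetical usage sketch (illustrative only, not part of this interface):
// callers can build other read-modify-write operations on top of
// NoBarrier_CompareAndSwap by retrying until the swap succeeds. The helper
// name below is made up for the example.
//
//   inline Atomic32 NoBarrier_AtomicMax(volatile Atomic32* ptr,
//                                       Atomic32 candidate) {
//     Atomic32 observed;
//     do {
//       observed = *ptr;
//       if (observed >= candidate) return observed;  // already large enough
//       // Returns |observed| only if the swap succeeded; retry otherwise.
//     } while (NoBarrier_CompareAndSwap(ptr, observed, candidate) != observed);
//     return candidate;
//   }
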
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

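// A hypothetical usage sketch (illustrative only): Barrier_AtomicIncrement is
// the variant typically wanted for reference counting, where the thread that
// drops the last reference must observe all prior writes before destroying
// the object. The type and function names below are made up for the example.
//
//   struct RefCounted {
//     Atomic32 ref_count_;  // initialized to 1 by the creating thread
//   };
//
//   inline void Release(RefCounted* obj) {
//     // OSAtomicAdd32Barrier returns the new value, so 0 means last owner.
//     if (Barrier_AtomicIncrement(&obj->ref_count_, -1) == 0) {
//       delete obj;
//     }
//   }
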
inline void MemoryBarrier() {
  OSMemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

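// A hypothetical usage sketch (illustrative only): Release_Store and
// Acquire_Load pair up to publish data between threads. A producer fills in a
// payload and then sets a flag with Release_Store; a consumer that observes
// the flag via Acquire_Load is guaranteed to also see the payload writes. The
// variable names below are made up for the example.
//
//   Atomic32 g_ready = 0;
//   int g_payload = 0;
//
//   void Producer() {
//     g_payload = 42;
//     Release_Store(&g_ready, 1);            // barrier, then store the flag
//   }
//
//   void Consumer() {
//     if (Acquire_Load(&g_ready) == 1) {     // load the flag, then barrier
//       // g_payload is guaranteed to read as 42 here.
//     }
//   }
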
#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
// on the Mac, even when they are the same size.  We need to explicitly cast
// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif

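// A hypothetical usage sketch (illustrative only): because AtomicWord is a
// distinct type (long) on the Mac, an AtomicWord cannot be passed directly to
// the Atomic32/Atomic64 overloads above; the wrappers below do the
// reinterpret_cast so code like the following compiles on both 32-bit and
// 64-bit builds. The variable name is made up for the example.
//
//   AtomicWord g_counter = 0;
//   NoBarrier_AtomicIncrement(&g_counter, 1);  // resolves to the wrapper below
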
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_