/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include "gthreadprivate.h"
29
#if defined (__GNUC__)
30
# if defined (G_ATOMIC_I486)
31
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h */
34
g_atomic_int_exchange_and_add (volatile gint *atomic,
39
__asm__ __volatile__ ("lock; xaddl %0,%1"
40
: "=r" (result), "=m" (*atomic)
41
: "0" (val), "m" (*atomic));
46
g_atomic_int_add (volatile gint *atomic,
49
__asm__ __volatile__ ("lock; addl %1,%0"
51
: "ir" (val), "m" (*atomic));
55
g_atomic_int_compare_and_exchange (volatile gint *atomic,
61
__asm__ __volatile__ ("lock; cmpxchgl %2, %1"
62
: "=a" (result), "=m" (*atomic)
63
: "r" (newval), "m" (*atomic), "0" (oldval));
65
return result == oldval;
68
/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */
73
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
79
__asm__ __volatile__ ("lock; cmpxchgl %2, %1"
80
: "=a" (result), "=m" (*atomic)
81
: "r" (newval), "m" (*atomic), "0" (oldval));
83
return result == oldval;
86
# elif defined (G_ATOMIC_SPARCV9)
87
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h */
89
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
92
__asm__ __volatile__ ("cas [%4], %2, %0" \
93
: "=r" (__result), "=m" (*(atomic)) \
94
: "r" (oldval), "m" (*(atomic)), "r" (atomic),\
99
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
101
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
106
__asm__ __volatile__ ("cas [%4], %2, %0"
107
: "=r" (result), "=m" (*atomic)
108
: "r" (oldval), "m" (*atomic), "r" (atomic),
110
return result == oldval;
112
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
114
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
119
gpointer *a = atomic;
120
__asm__ __volatile__ ("casx [%4], %2, %0"
121
: "=r" (result), "=m" (*a)
122
: "r" (oldval), "m" (*a), "r" (a),
124
return result == oldval;
126
# else /* What's that */
127
# error "Your system has an unsupported pointer size"
128
# endif /* GLIB_SIZEOF_VOID_P */
129
# define G_ATOMIC_MEMORY_BARRIER \
130
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore" \
131
" | #StoreLoad | #StoreStore" : : : "memory")
133
# elif defined (G_ATOMIC_ALPHA)
134
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h */
136
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
140
__asm__ __volatile__ ( \
143
" cmpeq %0,%3,%1\n" \
158
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
160
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
166
__asm__ __volatile__ (
184
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
186
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
192
__asm__ __volatile__ (
210
# else /* What's that */
211
# error "Your system has an unsupported pointer size"
212
# endif /* GLIB_SIZEOF_VOID_P */
213
# define G_ATOMIC_MEMORY_BARRIER __asm__ ("mb" : : : "memory")
214
# elif defined (G_ATOMIC_X86_64)
215
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h */
218
g_atomic_int_exchange_and_add (volatile gint *atomic,
223
__asm__ __volatile__ ("lock; xaddl %0,%1"
224
: "=r" (result), "=m" (*atomic)
225
: "0" (val), "m" (*atomic));
230
g_atomic_int_add (volatile gint *atomic,
233
__asm__ __volatile__ ("lock; addl %1,%0"
235
: "ir" (val), "m" (*atomic));
239
g_atomic_int_compare_and_exchange (volatile gint *atomic,
245
__asm__ __volatile__ ("lock; cmpxchgl %2, %1"
246
: "=a" (result), "=m" (*atomic)
247
: "r" (newval), "m" (*atomic), "0" (oldval));
249
return result == oldval;
253
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
259
__asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
260
: "=a" (result), "=m" (*atomic)
261
: "r" (newval), "m" (*atomic), "0" (oldval));
263
return result == oldval;
266
# elif defined (G_ATOMIC_POWERPC)
267
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
272
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */
275
g_atomic_int_exchange_and_add (volatile gint *atomic,
279
__asm__ __volatile__ (".Lieaa%=: lwarx %0,0,%3\n"
283
: "=&b" (result), "=&r" (temp), "=m" (*atomic)
284
: "b" (atomic), "r" (val), "m" (*atomic)
289
/* The same as above, to save a function call repeated here */
291
g_atomic_int_add (volatile gint *atomic,
295
__asm__ __volatile__ (".Lia%=: lwarx %0,0,%3\n"
299
: "=&b" (result), "=&r" (temp), "=m" (*atomic)
300
: "b" (atomic), "r" (val), "m" (*atomic)
303
# else /* !__OPTIMIZE__ */
305
g_atomic_int_exchange_and_add (volatile gint *atomic,
311
while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
317
g_atomic_int_add (volatile gint *atomic,
323
while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
325
# endif /* !__OPTIMIZE__ */
327
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
329
g_atomic_int_compare_and_exchange (volatile gint *atomic,
334
__asm__ __volatile__ ("sync\n"
335
".L1icae%=: lwarx %0,0,%1\n"
342
: "b" (atomic), "r" (oldval), "r" (newval)
348
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
353
__asm__ __volatile__ ("sync\n"
354
".L1pcae%=: lwarx %0,0,%1\n"
361
: "b" (atomic), "r" (oldval), "r" (newval)
365
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
367
g_atomic_int_compare_and_exchange (volatile gint *atomic,
372
__asm__ __volatile__ ("sync\n"
373
".L1icae%=: lwarx %0,0,%1\n"
381
: "b" (atomic), "r" (oldval), "r" (newval)
387
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
392
__asm__ __volatile__ ("sync\n"
393
".L1pcae%=: ldarx %0,0,%1\n"
400
: "b" (atomic), "r" (oldval), "r" (newval)
404
# else /* What's that */
405
# error "Your system has an unsupported pointer size"
406
# endif /* GLIB_SIZEOF_VOID_P */
408
# define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
410
# elif defined (G_ATOMIC_IA64)
411
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h */
414
g_atomic_int_exchange_and_add (volatile gint *atomic,
417
return __sync_fetch_and_add (atomic, val);
421
g_atomic_int_add (volatile gint *atomic,
424
__sync_fetch_and_add (atomic, val);
428
g_atomic_int_compare_and_exchange (volatile gint *atomic,
432
return __sync_bool_compare_and_swap (atomic, oldval, newval);
436
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
440
return __sync_bool_compare_and_swap ((long *)atomic,
441
(long)oldval, (long)newval);
444
# define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
445
# elif defined (G_ATOMIC_S390)
446
/* Adapted from glibc's sysdeps/s390/bits/atomic.h */
448
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
450
gint __result = oldval; \
451
__asm__ __volatile__ ("cs %0, %2, %1" \
452
: "+d" (__result), "=Q" (*(atomic)) \
453
: "d" (newval), "m" (*(atomic)) : "cc" ); \
454
__result == oldval; \
457
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
459
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
463
gpointer result = oldval;
464
__asm__ __volatile__ ("cs %0, %2, %1"
465
: "+d" (result), "=Q" (*(atomic))
466
: "d" (newval), "m" (*(atomic)) : "cc" );
467
return result == oldval;
469
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
471
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
475
gpointer result = oldval;
476
gpointer *a = atomic;
477
__asm__ __volatile__ ("csg %0, %2, %1"
478
: "+d" (result), "=Q" (*a)
479
: "d" ((long)(newval)), "m" (*a) : "cc" );
480
return result == oldval;
482
# else /* What's that */
483
# error "Your system has an unsupported pointer size"
484
# endif /* GLIB_SIZEOF_VOID_P */
485
# else /* !G_ATOMIC_IA64 */
486
# define DEFINE_WITH_MUTEXES
487
# endif /* G_ATOMIC_IA64 */
488
#else /* !__GNUC__ */
489
# ifdef G_PLATFORM_WIN32
490
# define DEFINE_WITH_WIN32_INTERLOCKED
492
# define DEFINE_WITH_MUTEXES
494
#endif /* __GNUC__ */
496
#ifdef DEFINE_WITH_WIN32_INTERLOCKED
497
# include <windows.h>
498
/* Following indicates that InterlockedCompareExchangePointer is
 * declared in winbase.h (included by windows.h) and needs to be
 * commented out if not true. It is defined iff WINVER > 0x0400,
 * which is usually correct but can be wrong if WINVER is set before
 * windows.h is included.
 */
505
# define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
509
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
512
return InterlockedExchangeAdd (atomic, val);
516
g_atomic_int_add (volatile gint32 *atomic,
519
InterlockedExchangeAdd (atomic, val);
523
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
527
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
528
return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
530
(PVOID)oldval) == oldval;
532
return InterlockedCompareExchange (atomic,
539
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
543
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
544
return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
546
# if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
547
# error "InterlockedCompareExchangePointer needed"
549
return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
553
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */
555
#ifdef DEFINE_WITH_MUTEXES
556
/* We have to use the slow, but safe locking method */
557
static GMutex *g_atomic_mutex;
560
g_atomic_int_exchange_and_add (volatile gint *atomic,
565
g_mutex_lock (g_atomic_mutex);
568
g_mutex_unlock (g_atomic_mutex);
575
g_atomic_int_add (volatile gint *atomic,
578
g_mutex_lock (g_atomic_mutex);
580
g_mutex_unlock (g_atomic_mutex);
584
g_atomic_int_compare_and_exchange (volatile gint *atomic,
590
g_mutex_lock (g_atomic_mutex);
591
if (*atomic == oldval)
598
g_mutex_unlock (g_atomic_mutex);
604
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
610
g_mutex_lock (g_atomic_mutex);
611
if (*atomic == oldval)
618
g_mutex_unlock (g_atomic_mutex);
623
#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
625
g_atomic_int_get (volatile gint *atomic)
629
g_mutex_lock (g_atomic_mutex);
631
g_mutex_unlock (g_atomic_mutex);
637
g_atomic_int_set (volatile gint *atomic,
640
g_mutex_lock (g_atomic_mutex);
642
g_mutex_unlock (g_atomic_mutex);
646
g_atomic_pointer_get (volatile gpointer *atomic)
650
g_mutex_lock (g_atomic_mutex);
652
g_mutex_unlock (g_atomic_mutex);
658
g_atomic_pointer_set (volatile gpointer *atomic,
661
g_mutex_lock (g_atomic_mutex);
663
g_mutex_unlock (g_atomic_mutex);
665
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
666
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
668
g_atomic_int_get (volatile gint *atomic)
670
G_ATOMIC_MEMORY_BARRIER;
675
g_atomic_int_set (volatile gint *atomic,
679
G_ATOMIC_MEMORY_BARRIER;
683
g_atomic_pointer_get (volatile gpointer *atomic)
685
G_ATOMIC_MEMORY_BARRIER;
690
g_atomic_pointer_set (volatile gpointer *atomic,
694
G_ATOMIC_MEMORY_BARRIER;
696
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
698
#ifdef ATOMIC_INT_CMP_XCHG
700
g_atomic_int_compare_and_exchange (volatile gint *atomic,
704
return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
708
g_atomic_int_exchange_and_add (volatile gint *atomic,
714
while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
720
g_atomic_int_add (volatile gint *atomic,
726
while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
728
#endif /* ATOMIC_INT_CMP_XCHG */
731
_g_atomic_thread_init (void)
733
#ifdef DEFINE_WITH_MUTEXES
734
g_atomic_mutex = g_mutex_new ();
735
#endif /* DEFINE_WITH_MUTEXES */
738
#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
740
(g_atomic_int_get) (volatile gint *atomic)
742
return g_atomic_int_get (atomic);
746
(g_atomic_int_set) (volatile gint *atomic,
749
g_atomic_int_set (atomic, newval);
753
(g_atomic_pointer_get) (volatile gpointer *atomic)
755
return g_atomic_pointer_get (atomic);
759
(g_atomic_pointer_set) (volatile gpointer *atomic,
762
g_atomic_pointer_set (atomic, newval);
764
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
766
#define __G_ATOMIC_C__
767
#include "galiasdef.c"