/*
 * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

# if defined(THREAD_LOCAL_ALLOC)

#include "private/thread_local_alloc.h"
#include "gc_inline.h"

#if defined(USE_COMPILER_TLS)
  __thread
#elif defined(USE_WIN32_COMPILER_TLS)
  __declspec(thread)
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;
/* Return a single nonempty freelist fl to the global one pointed to   */
/* by gfl.                                                              */
static void return_single_freelist(void *fl, void **gfl)
{
    void *q, **qptr;

    if (*gfl == 0) {
      *gfl = fl;
    } else {
      GC_ASSERT(GC_size(fl) == GC_size(*gfl));
      /* Concatenate: */
      for (qptr = &(obj_link(fl)), q = *qptr;
           (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
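      /* The scan stops at the first link value below HBLKSIZE, which  */
      /* cannot be a valid object pointer; for a properly terminated   */
      /* free list this is the trailing 0 link (asserted below).       */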
      GC_ASSERT(0 == q);
      *qptr = *gfl;
      *gfl = fl;
    }
}

/* Recover the contents of the freelist array fl into the global one gfl. */
/* We hold the allocator lock.                                            */
static void return_freelists(void **fl, void **gfl)
{
    int i;

    for (i = 1; i < TINY_FREELISTS; ++i) {
        if ((word)(fl[i]) >= HBLKSIZE) {
          return_single_freelist(fl[i], gfl+i);
        }
        /* Clear fl[i], since the thread structure may hang around.    */
        /* Do it in a way that is likely to trap if we access it.      */
        fl[i] = (ptr_t)HBLKSIZE;
    }
    /* The 0 granule freelist really contains 1 granule objects.       */
#   ifdef GC_GCJ_SUPPORT
      if (fl[0] == ERROR_FL) return;
#   endif
    if ((word)(fl[0]) >= HBLKSIZE) {
        return_single_freelist(fl[0], gfl+1);
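        /* Size 0 requests are rounded up to one granule, so the       */
        /* size 0 list holds 1 granule objects and is returned to      */
        /* the global 1 granule list gfl[1], not gfl[0].               */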
    }
}

/* Each thread structure must be initialized.   */
/* This call must be made from the new thread.  */
/* Caller holds allocation lock.                */
void GC_init_thread_local(GC_tlfs p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < TINY_FREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (void *)1;
        p -> normal_freelists[i] = (void *)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (void *)1;
#       endif
    }
    /* Set up the size 0 free lists.                                   */
    /* We now handle most of them like regular free lists, to ensure   */
    /* that explicit deallocation works.  However, allocation of a     */
    /* size 0 "gcj" object is always an error.                         */
    p -> ptrfree_freelists[0] = (void *)1;
    p -> normal_freelists[0] = (void *)1;
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = ERROR_FL;
#   endif
}
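/* Note: the (void *)1 initialization above relies on the tiny free    */
/* list convention (see thread_local_alloc.h): a list slot holding a   */
/* value below HBLKSIZE is treated by GC_FAST_MALLOC_GRANS as an       */
/* allocation count, so the first few allocations of each size take    */
/* the slow path before a real free list is built.                     */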

#ifdef GC_GCJ_SUPPORT
  extern void ** GC_gcjobjfreelist;
#endif

/* We hold the allocator lock.  */
void GC_destroy_thread_local(GC_tlfs p)
{
    /* We currently only do this from the thread itself or from        */
    /* the fork handler for a child process.                           */
    GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
        return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
#if defined(GC_ASSERTIONS) && defined(GC_PTHREADS) && !defined(CYGWIN32) \
    && !defined(GC_WIN32_PTHREADS)
# include <pthread.h>
  extern char * GC_lookup_thread(pthread_t id);
#endif

#if defined(GC_ASSERTIONS) && defined(GC_WIN32_THREADS)
  extern char * GC_lookup_thread(int id);
#endif
void * GC_malloc(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, 0)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_core_malloc(bytes);
      }
      tsd = GC_getspecific(k);
#   else
      GC_ASSERT(GC_is_initialized);
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if defined(REDIRECT_MALLOC) && defined(USE_PTHREAD_SPECIFIC)
      if (EXPECT(NULL == tsd, 0)) {
        return GC_core_malloc(bytes);
      }
#   endif
#   ifdef GC_ASSERTIONS
      /* We can't check tsd correctly, since we don't have access to   */
      /* the right declarations.  But we can check that it's close.    */
      {
#       if defined(GC_WIN32_THREADS)
          char * me = (char *)GC_lookup_thread_inner(GetCurrentThreadId());
#       else
          char * me = GC_lookup_thread(pthread_self());
#       endif
        GC_ASSERT((char *)tsd > me && (char *)tsd < me + 1000);
      }
#   endif
    tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
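    /* GC_FAST_MALLOC_GRANS (see gc_inline.h) tries to pop an object   */
    /* of the requested granule count from tiny_fl; if the list is     */
    /* empty or still warming up, it evaluates the fallback expression */
    /* (here GC_core_malloc(bytes)) instead.  The final argument runs  */
    /* on the fast path to initialize the object, here clearing the    */
    /* freelist link so the new object holds no stale pointer.         */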
    return result;
}

void * GC_malloc_atomic(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tiny_fl;
    void *result;

    GC_ASSERT(GC_is_initialized);
    tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
                                        -> ptrfree_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         PTRFREE, GC_core_malloc_atomic(bytes), 0/* no init */);
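    /* No initialization is needed on the fast path here: PTRFREE      */
    /* objects are never scanned by the collector, so a leftover       */
    /* freelist link in the first word is harmless.                    */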
    return result;
}

#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

extern GC_bool GC_gcj_malloc_initialized;

extern int GC_gcj_kind;
/* Gcj-style allocation without locks is extremely tricky.  The        */
/* fundamental issue is that we may end up marking a free list, which  */
/* has freelist links instead of "vtable" pointers.  That is usually   */
/* OK, since the next object on the free list will be cleared, and     */
/* will thus be interpreted as containing a zero descriptor.  That's   */
/* fine if the object has not yet been initialized.  But there are     */
/* interesting potential races.                                        */
/* In the case of incremental collection, this seems hopeless, since   */
/* the marker may run asynchronously, and may pick up the pointer to   */
/* the next freelist entry (which it thinks is a vtable pointer), get  */
/* suspended for a while, and then see an allocated object instead     */
/* of the vtable.  This may be avoidable with either a handshake with  */
/* the collector or, probably more easily, by moving the free list     */
/* links to the second word of each object.  The latter isn't a        */
/* universal win, since on architectures like Itanium, nonzero offsets */
/* are not necessarily free.  And there may be cache fill order issues.*/
/* For now, we punt with incremental GC.  This probably means that     */
/* incremental GC should be enabled before we fork a second thread.    */
void * GC_gcj_malloc(size_t bytes,
                     void * ptr_to_struct_containing_descr)
{
    if (EXPECT(GC_incremental, 0)) {
        return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        size_t granules = ROUNDED_UP_GRANULES(bytes);
        void *result;
        void **tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
                                        -> gcj_freelists;
        GC_ASSERT(GC_gcj_malloc_initialized);
        GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                             GC_gcj_kind,
                             GC_core_gcj_malloc(bytes,
                                                ptr_to_struct_containing_descr),
                             {AO_compiler_barrier();
                              *(void **)result = ptr_to_struct_containing_descr;});
        /* This forces the initialization of the "method ptr".         */
        /* This is necessary to ensure some very subtle properties     */
        /* required if a GC is run in the middle of such an allocation. */
        /* Here we implicitly also assume atomicity for the free list  */
        /* and method pointer assignments.                             */
        /* We must update the freelist before we store the pointer.    */
        /* Otherwise a GC at this point would see a corrupted          */
        /* free list.                                                  */
        /* A real memory barrier is not needed, since the              */
        /* action of stopping this thread will cause prior writes      */
        /* to complete.                                                */
        /* We assert that any concurrent marker will stop us.          */
        /* Thus it is impossible for a mark procedure to see the       */
        /* allocation of the next object, but to see this object       */
        /* still containing a free list pointer.  Otherwise the        */
        /* marker, by misinterpreting the freelist link as a vtable    */
        /* pointer, might find a random "mark descriptor" in the next  */
        /* object.                                                     */
        return result;
    }
}

#endif /* GC_GCJ_SUPPORT */
/* The thread support layer must arrange to mark thread-local  */
/* free lists explicitly, since the link field is often        */
/* invisible to the marker.  It knows how to find all threads; */
/* we take care of an individual thread freelist structure.    */
void GC_mark_thread_local_fls_for(GC_tlfs p)
{
    ptr_t q;
    int j;

    for (j = 1; j < TINY_FREELISTS; ++j) {
      q = p -> ptrfree_freelists[j];
      if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
      q = p -> normal_freelists[j];
      if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#     ifdef GC_GCJ_SUPPORT
        q = p -> gcj_freelists[j];
        if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#     endif /* GC_GCJ_SUPPORT */
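      /* The (word)q > HBLKSIZE test skips slots that hold a sentinel  */
      /* or allocation count rather than a pointer to a real first     */
      /* object; only genuinely populated lists are traversed.         */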
    }
}

#if defined(GC_ASSERTIONS)
    /* Check that all thread-local free-lists in p are completely marked. */
    void GC_check_tls_for(GC_tlfs p)
    {
        ptr_t q;
        int j;

        for (j = 1; j < TINY_FREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
    }
#endif /* GC_ASSERTIONS */

# else  /* !THREAD_LOCAL_ALLOC */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */