/* Copyright (C) 2001-2006 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied, modified
   or distributed except as expressly authorized under the terms of that
   license.  Refer to licensing information at http://www.artifex.com/
   or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
*/
/* $Id: gsmchunk.c 8928 2008-08-03 12:26:41Z leonardo $ */
/* chunk consolidating wrapper on a base memory allocator */
23
/* Raw memory procedures */
24
static gs_memory_proc_alloc_bytes(chunk_alloc_bytes_immovable);
25
static gs_memory_proc_resize_object(chunk_resize_object);
26
static gs_memory_proc_free_object(chunk_free_object);
27
static gs_memory_proc_stable(chunk_stable);
28
static gs_memory_proc_status(chunk_status);
29
static gs_memory_proc_free_all(chunk_free_all);
30
static gs_memory_proc_consolidate_free(chunk_consolidate_free);
32
/* Object memory procedures */
33
static gs_memory_proc_alloc_bytes(chunk_alloc_bytes);
34
static gs_memory_proc_alloc_struct(chunk_alloc_struct);
35
static gs_memory_proc_alloc_struct(chunk_alloc_struct_immovable);
36
static gs_memory_proc_alloc_byte_array(chunk_alloc_byte_array);
37
static gs_memory_proc_alloc_byte_array(chunk_alloc_byte_array_immovable);
38
static gs_memory_proc_alloc_struct_array(chunk_alloc_struct_array);
39
static gs_memory_proc_alloc_struct_array(chunk_alloc_struct_array_immovable);
40
static gs_memory_proc_object_size(chunk_object_size);
41
static gs_memory_proc_object_type(chunk_object_type);
42
static gs_memory_proc_alloc_string(chunk_alloc_string);
43
static gs_memory_proc_alloc_string(chunk_alloc_string_immovable);
44
static gs_memory_proc_resize_string(chunk_resize_string);
45
static gs_memory_proc_free_string(chunk_free_string);
46
static gs_memory_proc_register_root(chunk_register_root);
47
static gs_memory_proc_unregister_root(chunk_unregister_root);
48
static gs_memory_proc_enable_free(chunk_enable_free);
49
static const gs_memory_procs_t chunk_procs =
51
/* Raw memory procedures */
52
chunk_alloc_bytes_immovable,
58
chunk_consolidate_free,
59
/* Object memory procedures */
62
chunk_alloc_struct_immovable,
63
chunk_alloc_byte_array,
64
chunk_alloc_byte_array_immovable,
65
chunk_alloc_struct_array,
66
chunk_alloc_struct_array_immovable,
70
chunk_alloc_string_immovable,
74
chunk_unregister_root,
78
typedef struct chunk_obj_node_s {
79
struct chunk_obj_node_s *next;
80
uint size; /* objlist: client size */
81
/* if freelist: size of block (obj header and client area must fit in block) */
82
gs_memory_type_ptr_t type;
84
unsigned long sequence;
89
* Note: All objects within a chunk are 'aligned' since we round_up_to_align
90
* the free list pointer when removing part of a free area.
92
typedef struct chunk_mem_node_s {
94
uint largest_free; /* quick check when allocating */
95
struct chunk_mem_node_s *next;
96
chunk_obj_node_t *objlist; /* head of objects in this chunk (no order) */
97
chunk_obj_node_t *freelist; /* free list (ordered) */
98
/* chunk data follows immediately */
101
typedef struct gs_memory_chunk_s {
102
gs_memory_common; /* interface outside world sees */
103
gs_memory_t *target; /* base allocator */
104
chunk_mem_node_t *head_chunk;
106
unsigned long sequence_counter;
110
/* ---------- Public constructors/destructors ---------- */
112
/* Initialize a gs_memory_chunk_t */
113
int /* -ve error code or 0 */
114
gs_memory_chunk_wrap( gs_memory_t **wrapped, /* chunk allocator init */
115
gs_memory_t * target ) /* base allocator */
117
/* Use the non-GC allocator of the target. The chunk allocator is NOT GC safe */
118
gs_memory_t *non_gc_target = target->non_gc_memory;
119
gs_memory_chunk_t *cmem = NULL;
121
*wrapped = NULL; /* don't leave garbage in case we fail */
123
cmem = (gs_memory_chunk_t *) gs_alloc_bytes_immovable(non_gc_target,
124
sizeof(gs_memory_chunk_t), "gs_malloc_wrap(chunk)");
126
return_error(gs_error_VMerror);
127
cmem->stable_memory = (gs_memory_t *)cmem; /* we are stable */
128
cmem->procs = chunk_procs;
129
cmem->gs_lib_ctx = non_gc_target->gs_lib_ctx;
130
cmem->non_gc_memory = (gs_memory_t *)cmem; /* and are not subject to GC */
131
cmem->target = non_gc_target;
132
cmem->head_chunk = NULL;
134
cmem->sequence_counter = 0;
137
/* Init the chunk management values */
139
*wrapped = (gs_memory_t *)cmem;
143
/* Release a chunk memory manager. */
144
/* Note that this has no effect on the target. */
146
gs_memory_chunk_release(gs_memory_t *mem)
148
gs_memory_free_all((gs_memory_t *)mem, FREE_ALL_EVERYTHING,
149
"gs_memory_chunk_release");
152
/* ---------- Accessors ------------- */
154
/* Retrieve this allocator's target */
156
gs_memory_chunk_target(const gs_memory_t *mem)
158
gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem;
164
gs_memory_chunk_dump_memory(const gs_memory_t *mem)
166
gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem;
167
chunk_mem_node_t *head = cmem->head_chunk;
168
chunk_mem_node_t *current;
169
chunk_mem_node_t *next;
172
while ( current != NULL ) {
173
if (current->objlist != NULL) {
174
chunk_obj_node_t *obj;
176
for (obj= current->objlist; obj != NULL; obj=obj->next)
177
dprintf4("chunk_mem leak, obj=0x%lx, size=%d, type=%s, sequence#=%ld\n",
178
(ulong)obj, obj->size, obj->type->sname, obj->sequence);
180
next = current->next;
186
/* -------- Private members --------- */
188
/* Note that all of the data is 'immovable' and is opaque to the base allocator */
189
/* thus even if it is a GC type of allocator, no GC functions will be applied */
190
/* All allocations are done in the non_gc_memory of the base */
195
/* Free every chunk still held by this allocator, returning the raw   */
/* memory to the target.  Objects inside the chunks are discarded     */
/* wholesale; no per-object finalizers are run here.                  */
static void
chunk_mem_node_free_all_remaining(gs_memory_chunk_t *cmem)
{
    chunk_mem_node_t *head = cmem->head_chunk;
    gs_memory_t * const target = cmem->target;
    chunk_mem_node_t *current;
    chunk_mem_node_t *next;

    current = head;
    while ( current != NULL ) {
        next = current->next;	/* capture the link before freeing the node */
        gs_free_object(target, current, "chunk_mem_node_remove");
        current = next;
    }
    cmem->head_chunk = NULL;
}
212
/* free_all implementation: honors the FREE_ALL_* mask bits. */
static void
chunk_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_memory_chunk_t * const cmem = (gs_memory_chunk_t *)mem;
    gs_memory_t * const target = cmem->target;	/* save: needed after cmem is torn down */

    /* Only free the structures and the allocator itself. */
    if (mem->stable_memory) {
        if (mem->stable_memory != mem)
            gs_memory_free_all(mem->stable_memory, free_mask, cname);
        if (free_mask & FREE_ALL_ALLOCATOR)
            mem->stable_memory = 0;
    }
    if (free_mask & FREE_ALL_DATA) {
        chunk_mem_node_free_all_remaining(cmem);
    }
    if (free_mask & FREE_ALL_STRUCTURES) {
        /* NOTE(review): reconstructed -- detaching from the target here;
         * confirm against the original source. */
        cmem->target = 0;
    }
    /* Free the allocator itself last, through the saved target. */
    if (free_mask & FREE_ALL_ALLOCATOR)
        gs_free_object(target, cmem, cname);
}
234
extern const gs_memory_struct_type_t st_bytes;
236
/* round up objects to make sure we have room for a header left */
238
round_up_to_align(uint size)
240
uint num_node_headers = (size + sizeof(chunk_obj_node_t) - 1) / sizeof(chunk_obj_node_t);
242
return num_node_headers * sizeof(chunk_obj_node_t);
245
#define IS_SINGLE_OBJ_SIZE(chunk_size) \
246
(chunk_size > (CHUNK_SIZE>>1))
247
#define MULTIPLE_OBJ_CHUNK_SIZE \
248
(sizeof(chunk_mem_node_t) + round_up_to_align(CHUNK_SIZE))
251
/* return -1 on error, 0 on success */
253
chunk_mem_node_add(gs_memory_chunk_t *cmem, uint size_needed, chunk_mem_node_t **newchunk)
255
chunk_mem_node_t *node, *prev_node;
256
gs_memory_t *target = cmem->target;
257
/* Allocate enough for the chunk header, and the size_needed */
258
/* The size needed already includes the object header from caller */
259
/* and is already rounded up to the obj_node_t sized elements */
260
uint chunk_size = size_needed + sizeof(chunk_mem_node_t);
261
bool is_multiple_object_node = false;
263
/* Objects > half the default chunk size get their own chunk */
264
if ( ! IS_SINGLE_OBJ_SIZE(chunk_size)) {
265
chunk_size = MULTIPLE_OBJ_CHUNK_SIZE; /* the size for collections of objects */
266
is_multiple_object_node = true;
270
node = (chunk_mem_node_t *)gs_alloc_bytes_immovable(target, chunk_size, "chunk_mem_node_add");
273
node->size = chunk_size; /* how much we allocated */
274
node->largest_free = chunk_size - sizeof(chunk_mem_node_t);
275
node->objlist = NULL;
276
node->freelist = (chunk_obj_node_t *)((byte *)(node) + sizeof(chunk_mem_node_t));
277
node->freelist->next = NULL;
278
node->freelist->size = node->largest_free;
281
if (!is_multiple_object_node) {
282
chunk_mem_node_t *scan_node;
284
/* Scan past chunks that are collections of smaller chunks */
285
/* This allows the most frequently accessed chunks to be near the head */
286
for (scan_node = cmem->head_chunk; scan_node != NULL; scan_node = scan_node->next) {
287
if (scan_node->size != MULTIPLE_OBJ_CHUNK_SIZE)
289
prev_node = scan_node;
292
if (prev_node == NULL) {
293
if (cmem->head_chunk == NULL) {
294
cmem->head_chunk = node;
297
node->next = cmem->head_chunk;
298
cmem->head_chunk = node;
301
node->next = prev_node->next;
302
prev_node->next = node;
305
*newchunk = node; /* return the chunk we just allocated */
310
/* Unlink a chunk node from the list and free it back to the target.  */
/* Returns 0 on success, -1 if 'addr' is not in the list.             */
static int
chunk_mem_node_remove(gs_memory_chunk_t *cmem, chunk_mem_node_t *addr)
{
    chunk_mem_node_t *head = cmem->head_chunk;
    gs_memory_t * const target = cmem->target;

    /* check the head first */
    if (head == NULL) {
        dprintf("FAIL - no nodes to be removed\n" );
        return -1;
    }
    if (head == addr) {
        cmem->head_chunk = head->next;
        gs_free_object(target, head, "chunk_mem_node_remove");
    } else {
        chunk_mem_node_t *current;
        bool found = false;

        /* scan the list, stopping in front of element */
        for (current = head; current != NULL; current = current->next) {
            if ( current->next && (current->next == addr) ) {
                current->next = current->next->next;	/* de-link it */
                gs_free_object(target, addr, "chunk_mem_node_remove");
                found = true;
                break;
            }
        }
        if (!found) {
            dprintf1("FAIL freeing wild pointer freed address 0x%lx not found\n", (ulong)addr );
            return -1;
        }
    }
    return 0;
}
344
/* all of the allocation routines reduce to the this function */
346
chunk_obj_alloc(gs_memory_t *mem, uint size, gs_memory_type_ptr_t type, client_name_t cname)
348
gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem;
349
chunk_mem_node_t *head = cmem->head_chunk;
350
uint newsize, free_size;
351
chunk_obj_node_t *newobj = NULL;
352
chunk_obj_node_t *free_obj, *prev_free, *new_free;
353
chunk_mem_node_t *current;
354
bool rescan_free_list;
356
newsize = round_up_to_align(size + sizeof(chunk_obj_node_t)); /* space we will need */
358
/* Search the chunks for one with a large enough free area */
359
for (current = head; current != NULL; current = current->next) {
360
if ( current->largest_free >= newsize)
363
if (current == NULL) {
364
/* No chunks with enough space, allocate one */
365
if (chunk_mem_node_add(cmem, newsize, ¤t) < 0)
368
/* Find the first free area in the current chunk that is big enough */
369
/* LATER: might be better to find the 'best fit' */
370
prev_free = NULL; /* NULL means chunk */
371
for (free_obj = current->freelist; free_obj != NULL; free_obj=free_obj->next) {
372
if (free_obj->size >= newsize)
374
prev_free = free_obj; /* keep track so we can update link */
377
if (free_obj == NULL) {
378
dprintf2("largest_free value = %d is too large, cannot find room for size = %d\n",
379
current->largest_free, newsize);
383
/* If this free object's size == largest_free, we'll have to re-scan */
384
rescan_free_list = free_obj->size == current->largest_free;
386
/* Make an object in the free_obj we found above, reducing it's size */
387
/* and adjusting the free list preserving alignment */
389
free_size = free_obj->size - newsize; /* amount remaining */
390
new_free = (chunk_obj_node_t *)((byte *)(free_obj) + newsize); /* start of remaining free area */
391
if (free_size >= sizeof(chunk_obj_node_t)) {
392
if (prev_free != NULL)
393
prev_free->next = new_free;
395
current->freelist = new_free;
396
new_free->next = free_obj->next;
397
new_free->size = free_size;
399
/* Not enough space remaining, just skip around it */
400
if (prev_free != NULL)
401
prev_free->next = free_obj->next;
403
current->freelist = free_obj->next;
407
memset((byte *)(newobj) + sizeof(chunk_obj_node_t), 0xa1, newsize - sizeof(chunk_obj_node_t));
408
memset((byte *)(newobj) + sizeof(chunk_obj_node_t), 0xac, size);
409
newobj->sequence = cmem->sequence_counter++;
412
newobj->next = current->objlist; /* link to start of list */
413
current->objlist = newobj;
414
newobj->size = size; /* client requested size */
415
newobj->type = type; /* and client desired type */
417
/* If we flagged for re-scan to find the new largest_free, do it now */
418
if (rescan_free_list) {
419
current->largest_free = 0;
420
for (free_obj = current->freelist; free_obj != NULL; free_obj=free_obj->next)
421
if (free_obj->size > current->largest_free)
422
current->largest_free = free_obj->size;
425
/* return the client area of the object we allocated */
426
return (byte *)(newobj) + sizeof(chunk_obj_node_t);
430
chunk_alloc_bytes_immovable(gs_memory_t * mem, uint size, client_name_t cname)
432
return chunk_obj_alloc(mem, size, &st_bytes, cname);
436
chunk_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
438
return chunk_obj_alloc(mem, size, &st_bytes, cname);
442
chunk_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
445
return chunk_obj_alloc(mem, pstype->ssize, pstype, cname);
449
chunk_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
452
return chunk_obj_alloc(mem, pstype->ssize, pstype, cname);
456
chunk_alloc_byte_array_immovable(gs_memory_t * mem, uint num_elements,
457
uint elt_size, client_name_t cname)
459
return chunk_alloc_bytes(mem, num_elements * elt_size, cname);
463
chunk_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
466
return chunk_alloc_bytes(mem, num_elements * elt_size, cname);
470
chunk_alloc_struct_array_immovable(gs_memory_t * mem, uint num_elements,
471
gs_memory_type_ptr_t pstype, client_name_t cname)
473
return chunk_obj_alloc(mem, num_elements * pstype->ssize, pstype, cname);
477
chunk_alloc_struct_array(gs_memory_t * mem, uint num_elements,
478
gs_memory_type_ptr_t pstype, client_name_t cname)
480
return chunk_obj_alloc(mem, num_elements * pstype->ssize, pstype, cname);
485
static void *
chunk_resize_object(gs_memory_t * mem, void *ptr, uint new_num_elements, client_name_t cname)
{
    /* get the type from the old object BEFORE freeing it; reading       */
    /* obj->type after chunk_free_object() is a use-after-free, since    */
    /* the header may be merged/overwritten by the free-list code.       */
    chunk_obj_node_t *obj = ((chunk_obj_node_t *)ptr) - 1;
    gs_memory_type_ptr_t type = obj->type;
    uint new_size = (type->ssize * new_num_elements);

    /* This isn't particularly efficient, but it is rarely used */
    /* NOTE(review): the old contents are NOT copied to the new object;  */
    /* confirm callers do not expect realloc-style data preservation.    */
    chunk_free_object(mem, ptr, cname);
    return chunk_obj_alloc(mem, new_size, type, cname);
}
497
/* Free an object allocated by chunk_obj_alloc: run its finalizer,   */
/* unlink it from the chunk's object list, and return its space      */
/* (header included) to the chunk's address-ordered free list,       */
/* coalescing with adjacent free blocks.                             */
static void
chunk_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_memory_chunk_t * const cmem = (gs_memory_chunk_t *)mem;
    chunk_obj_node_t *obj;
    void (*finalize)(void *ptr);
    chunk_mem_node_t *current;
    chunk_obj_node_t *free_obj, *prev_free;
    chunk_obj_node_t *scan_obj, *prev_obj;
    uint freed_size;

    if (ptr == NULL)
        return;			/* freeing NULL is a no-op */

    /* back up to obj header */
    obj = ((chunk_obj_node_t *)ptr) - 1;
    finalize = obj->type->finalize;
    /* space we will free */
    freed_size = round_up_to_align(obj->size + sizeof(chunk_obj_node_t));

    if ( finalize != NULL )
        finalize(ptr);

    /* Find the chunk containing this object */
    for (current = cmem->head_chunk; current != NULL; current = current->next) {
        if (((byte *)obj > (byte *)current) && ((byte *)obj < (byte *)(current) + current->size))
            break;
    }
    if (current == NULL) {
        /* Object not found in any chunk */
        dprintf1("chunk_free_obj failed, object 0x%lx not in any chunk\n", ((ulong)obj));
        return;
    }

    /* Scan obj list to find this element */
    prev_obj = NULL;		/* object is head, linked to mem node */
    for (scan_obj = current->objlist; scan_obj != NULL; scan_obj = scan_obj->next) {
        if (scan_obj == obj)
            break;
        prev_obj = scan_obj;
    }
    if (scan_obj == NULL) {
        /* Object not found in expected chunk */
        dprintf3("chunk_free_obj failed, object 0x%lx not in chunk at 0x%lx, size = %d\n",
                 ((ulong)obj), ((ulong)current), current->size);
        return;
    }
    /* link around the object being freed */
    if (prev_obj == NULL)
        current->objlist = obj->next;
    else
        prev_obj->next = obj->next;

    /* Add this object's space (including the header) to the free list */

    /* Scan free list to find where this element goes */
    obj->size = freed_size;	/* adjust size to include chunk_obj_node and pad */
    prev_free = NULL;
    for (free_obj = current->freelist; free_obj != NULL; free_obj = free_obj->next) {
        if (obj < free_obj)
            break;		/* free list is kept in address order */
        prev_free = free_obj;
    }
    if (prev_free == NULL) {
        /* this object is before any other free objects */
        obj->next = current->freelist;
        current->freelist = obj;
    } else {
        obj->next = free_obj;
        prev_free->next = obj;
    }
    /* If the end of this object is adjacent to the next free space,
     * merge the two. Next we'll merge with predecessor (prev_free)
     */
    if (free_obj != NULL) {
        byte *after_obj = (byte*)(obj) + freed_size;

        if (free_obj <= (chunk_obj_node_t *)after_obj) {
            /* Object is adjacent to following free space block -- merge it */
            obj->next = free_obj->next;	/* link around the one being absorbed */
            obj->size = (byte *)(free_obj) - (byte *)(obj) + free_obj->size;
        }
    }
    /* the prev_free object precedes this object that is now free,
     * it _may_ be adjacent
     */
    if (prev_free != NULL) {
        byte *after_free = (byte*)(prev_free) + prev_free->size;

        if (obj <= (chunk_obj_node_t *)after_free) {
            /* Object is adjacent to prior free space block -- merge it */
            /* NB: this is the common case with LIFO alloc-free patterns */
            /* (LIFO: Last-allocated, first freed) */
            prev_free->size = (byte *)(obj) - (byte *)(prev_free) + obj->size;
            prev_free->next = obj->next;	/* link around 'obj' area */
            obj = prev_free;	/* so the updates below see the merged block */
        }
    }
#ifdef DEBUG
    /* NOTE(review): 0xf1 fill of freed space assumed DEBUG-only -- confirm */
    memset((byte *)(obj) + sizeof(chunk_obj_node_t), 0xf1, obj->size - sizeof(chunk_obj_node_t));
#endif
    if (current->largest_free < obj->size)
        current->largest_free = obj->size;

    /* If this chunk is now totally empty, free it */
    if (current->objlist == NULL) {
        if (current->size != current->freelist->size + sizeof(chunk_mem_node_t))
            dprintf2("chunk freelist size not correct, is: %d, should be: %d\n",
                     round_up_to_align(current->freelist->size + sizeof(chunk_mem_node_t)), current->size);
        chunk_mem_node_remove(cmem, current);
    }
}
608
chunk_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
610
/* we just alloc bytes here */
611
return chunk_alloc_bytes(mem, nbytes, cname);
615
chunk_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
617
/* we just alloc bytes here */
618
return chunk_alloc_bytes(mem, nbytes, cname);
622
chunk_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
625
/* just resize object - ignores old_num */
626
return chunk_resize_object(mem, data, new_num, cname);
630
chunk_free_string(gs_memory_t * mem, byte * data, uint nbytes,
633
chunk_free_object(mem, data, cname);
637
chunk_status(gs_memory_t * mem, gs_memory_status_t * pstat)
642
chunk_stable(gs_memory_t * mem)
648
chunk_enable_free(gs_memory_t * mem, bool enable)
653
chunk_consolidate_free(gs_memory_t *mem)
657
/* aceesors to get size and type given the pointer returned to the client */
659
chunk_object_size(gs_memory_t * mem, const void *ptr)
661
chunk_obj_node_t *obj = ((chunk_obj_node_t *)ptr) - 1;
666
static gs_memory_type_ptr_t
667
chunk_object_type(const gs_memory_t * mem, const void *ptr)
669
chunk_obj_node_t *obj = ((chunk_obj_node_t *)ptr) - 1;
674
chunk_register_root(gs_memory_t * mem, gs_gc_root_t * rp, gs_ptr_type_t ptype,
675
void **up, client_name_t cname)
681
chunk_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
687
#define A(obj, size) \
688
if ((obj = gs_alloc_bytes(cmem, size, "chunk_alloc_unit_test")) == NULL) { \
689
dprintf("chunk alloc failed\n"); \
690
return_error(gs_error_VMerror); \
694
gs_free_object(cmem, obj, "chunk_alloc_unit_test");
697
chunk_allocator_unit_test(gs_memory_t *mem)
701
byte *obj1, *obj2, *obj3, *obj4, *obj5, *obj6, *obj7;
703
if ((code = gs_memory_chunk_wrap(&cmem, mem )) < 0) {
704
dprintf1("chunk_wrap returned error code: %d\n", code);
708
/* Allocate a large object */
729
gs_memory_chunk_release(cmem);