/* Copyright (C) 2001-2006 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied, modified
   or distributed except as expressly authorized under the terms of that
   license. Refer to licensing information at http://www.artifex.com/
   or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
*/
/* $Id: istack.c 9043 2008-08-28 22:48:19Z giles $ */
/* Manager for expandable stacks of refs */
24
#include "istruct.h" /* for RELOC_REF_VAR */
26
#include "ivmspace.h" /* for local/global test */
29
/* Forward references */
30
static void init_block(ref_stack_t *pstack, const ref *pblock_array,
32
static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add);
34
/* GC descriptors and procedures */
35
private_st_ref_stack_params();
37
/*
 * NOTE(review): extraction-corrupted region.  The bare numeric lines are
 * interleaved line-number artifacts, and source lines are missing (the
 * CLEAR_MARKS_PROC body braces and the ENUM_PTRS_END terminator are
 * absent).  Recover the text from version control; do not edit in place.
 */
/* GC clear-marks procedure: clears the l_mark attribute on the stack's
   'current' block ref. */
CLEAR_MARKS_PROC(ref_stack_clear_marks)
39
ref_stack_t *const sptr = vptr;
41
r_clear_attrs(&sptr->current, l_mark);
44
/* GC pointer enumeration: index 0 yields the 'current' ref,
   index 1 the params object. */
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
45
case 0: ENUM_RETURN_REF(&sptr->current);
46
case 1: return ENUM_OBJ(sptr->params);
48
/*
 * GC relocation procedure for a ref_stack_t.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the body braces, the declaration of
 * 'reloc' (used below but never declared here), and presumably the
 * matching adjustment of the bot/top pointers that should parallel the
 * adjustment of 'p'.  Recover from version control before editing.
 */
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
50
/* Note that the relocation must be a multiple of sizeof(ref_packed) */
51
/* * align_packed_per_ref, but it need not be a multiple of */
52
/* sizeof(ref). Therefore, we must do the adjustments using */
53
/* ref_packed pointers rather than ref pointers. */
54
ref_packed *bot = (ref_packed *) sptr->current.value.refs;
57
RELOC_REF_VAR(sptr->current);
58
r_clear_attrs(&sptr->current, l_mark);
59
/* 'reloc' = distance the block moved, in ref_packed units;
   its declaration was lost in extraction. */
reloc = bot - (ref_packed *) sptr->current.value.refs;
61
sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
66
RELOC_OBJ_VAR(sptr->params);
68
/* Structure type for a ref_stack. */
69
public_st_ref_stack();
71
/* Initialize a stack. */
73
/*
 * Initialize a stack over the block array *pblock_array, with the given
 * guard sizes, guard fill value, allocator and (optional) params struct.
 * NOTE(review): extraction-corrupted block -- the bare numeric lines are
 * line-number artifacts, and source lines are missing: the return-type
 * line, the braces, the 'if (params == 0)' guard around the allocation
 * below, the allocation-failure 'if', an 'else' before the make_tav
 * call, and the success return.  Recover from version control.
 */
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
74
uint bot_guard, uint top_guard, const ref *pguard_value,
75
gs_ref_memory_t *mem, ref_stack_params_t *params)
77
uint size = r_size(pblock_array);
78
/* usable elements = block size minus the block header and both guards */
uint avail = size - (stack_block_refs + bot_guard + top_guard);
79
ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
80
s_ptr body = (s_ptr)(pblock + 1);
83
/* presumably only reached when the caller passed params == 0;
   the guarding 'if' was lost in extraction -- TODO confirm */
params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
85
"ref_stack_alloc(stack.params)");
87
/* allocation-failure branch; its 'if' condition is missing here */
return_error(-1); /* avoid binding in any error codes */
90
pstack->bot = body + bot_guard;
91
/* empty stack: p sits one slot below bot */
pstack->p = pstack->bot - 1;
92
pstack->top = pstack->p + avail;
93
pstack->current = *pblock_array;
94
pstack->extension_size = 0;
95
pstack->extension_used = 0;
97
make_int(&pstack->max_stack, avail);
98
pstack->requested = 0;
100
pstack->body_size = avail;
102
pstack->params = params;
103
pstack->memory = mem;
105
params->bot_guard = bot_guard;
106
params->top_guard = top_guard;
107
params->block_size = size;
108
params->data_size = avail;
109
if (pguard_value != 0)
110
params->guard_value = *pguard_value;
112
/* NOTE(review): an 'else' presumably preceded this line; also '¶ms'
   is mojibake for '&params' ("&para" decoded as a pilcrow). */
make_tav(¶ms->guard_value, t__invalid, 0, intval, 0);
113
params->underflow_error = -1;
114
params->overflow_error = -1;
115
params->allow_expansion = true;
116
init_block(pstack, pblock_array, 0);
117
refset_null_new(pstack->bot, avail, 0);
118
make_empty_array(&pblock->next, 0);
122
/* Set whether a stack is allowed to expand. The initial value is true. */
124
/* Set whether a stack is allowed to expand.  The initial value is true.
 * (Reconstructed from an extraction-corrupted span: only the signature
 * line and the assignment survived; the return-type line and braces were
 * lost.  TODO(review): confirm against version control.) */
void
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
{
    pstack->params->allow_expansion = expand;
}
129
/* Set the error codes for under- and overflow. The initial values are -1. */
131
/* Set the error codes returned for under- and overflow.
 * The initial values are -1.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line, the continuation of the parameter list, and the braces were
 * lost.  TODO(review): confirm against version control.) */
void
ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
                          int overflow_error)
{
    pstack->params->underflow_error = underflow_error;
    pstack->params->overflow_error = overflow_error;
}
138
/* Set the maximum number of elements allowed on a stack. */
140
/*
 * Set the maximum number of elements allowed on the stack.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts
 * and source lines are missing: the return-type line, braces, the lower
 * clamp of nmax against nmin, and the clamp against ncur inside the
 * no-expansion branch.  Recover from version control before editing.
 */
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
142
/* current element count -- presumably the lower bound for nmax;
   the clamp itself was lost in extraction */
uint nmin = ref_stack_count_inline(pstack);
146
/* keep nmax representable as a byte count */
if (nmax > max_uint / sizeof(ref))
147
nmax = max_uint / sizeof(ref);
148
if (!pstack->params->allow_expansion) {
149
/* ncur is read by a missing clamp (presumably nmax <= ncur) */
uint ncur = pstack->body_size;
154
pstack->max_stack.value.intval = nmax;
159
* Set the margin between the limit and the top of the stack.
160
* Note that this may require allocating a block.
163
/*
 * Set the margin between the allocation limit and the top of the stack.
 * May require allocating a new block.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, braces, the early
 * return of the shrink branch, the error check after
 * ref_stack_push_block, and the success return.  Recover from version
 * control before editing.
 */
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
165
const ref_stack_params_t *params = pstack->params;
166
uint data_size = params->data_size;
168
/* shrinking (or keeping) the margin: just null the freed slots */
if (margin <= pstack->margin) {
169
refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
171
/* refuse margins larger than half a block's data area */
if (margin > data_size >> 1)
172
return_error(e_rangecheck);
173
/* not enough free room in the top block: push a new block */
if (pstack->top - pstack->p < margin) {
174
uint used = pstack->p + 1 - pstack->bot;
175
uint keep = data_size - margin;
176
/* NOTE(review): 'code' is presumably checked by a missing line */
int code = ref_stack_push_block(pstack, keep, used - keep);
182
pstack->margin = margin;
183
pstack->body_size = data_size - margin;
184
pstack->top = pstack->bot + pstack->body_size - 1;
188
/* Return the number of elements on a stack. */
190
/* Return the number of elements on the stack (all blocks).
 * NOTE(review): the return-type line and braces were lost in extraction;
 * confirm the declared return type in the stack header before editing. */
ref_stack_count(const ref_stack_t *pstack)
192
return ref_stack_count_inline(pstack);
196
* Return a pointer to a given element from the stack, counting from
197
* 0 as the top element. If the index is out of range, return 0.
200
/*
 * Return a pointer to the element idx positions down from the top
 * (0 = top).  Out-of-range indices presumably return 0.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, braces, the
 * negative-idx check, the 'do {' opener, the per-block 'idx -= used'
 * (or equivalent) step, and the null-block check that makes the loop
 * terminate safely.  Recover from version control before editing.
 */
ref_stack_index(const ref_stack_t *pstack, long idx)
202
ref_stack_block *pblock;
203
uint used = pstack->p + 1 - pstack->bot;
207
if (idx < used) /* common case */
208
return pstack->p - (uint) idx;
209
/* walk down the chain of earlier blocks */
pblock = (ref_stack_block *) pstack->current.value.refs;
211
pblock = (ref_stack_block *) pblock->next.value.refs;
215
used = r_size(&pblock->used);
216
} while (idx >= used);
217
return pblock->used.value.refs + (used - 1 - (uint) idx);
221
* Count the number of elements down to and including the first mark.
222
* If no mark is found, return 0.
225
/*
 * Count elements down to and including the first t_mark; presumably
 * returns 0 when no mark is found (see the comment above this block).
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, braces, the
 * declaration/initialization of 'scanned', the 'do {' opener, and the
 * final no-mark return.  Recover from version control before editing.
 */
ref_stack_counttomark(const ref_stack_t *pstack)
228
ref_stack_enum_t rsenum;
230
ref_stack_enum_begin(&rsenum, pstack);
232
uint count = rsenum.size;
233
/* scan each block from its top element downward */
const ref *p = rsenum.ptr + count - 1;
235
for (; count; count--, p--)
236
if (r_has_type(p, t_mark))
237
return scanned + (rsenum.size - count + 1);
238
scanned += rsenum.size;
239
} while (ref_stack_enum_next(&rsenum));
244
* Do the store check for storing 'count' elements of a stack, starting
245
* 'skip' elements below the top, into an array. Return 0 or e_invalidaccess.
248
/*
 * Store check for copying 'count' stack elements (starting 'skip' below
 * the top) into *parray; see the comment above this block.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, the 'uint skip'
 * parameter continuation, braces, the declaration of 'code', the 'do {'
 * opener, the skip/size bookkeeping around the 'size -= pass' line, the
 * error check after refs_check_space, and the success return.  Recover
 * from version control before editing.
 */
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
251
uint space = r_space(parray);
253
/* only non-local destinations need the check */
if (space != avm_local) {
254
uint left = count, pass = skip;
255
ref_stack_enum_t rsenum;
257
ref_stack_enum_begin(&rsenum, pstack);
259
ref *ptr = rsenum.ptr;
260
uint size = rsenum.size;
268
size -= pass, pass = 0;
273
code = refs_check_space(ptr - size, size, space);
279
} while (ref_stack_enum_next(&rsenum));
285
* Store the top 'count' elements of a stack, starting 'skip' elements below
286
* the top, into an array, with or without store/undo checking. age=-1 for
287
* no check, 0 for old, 1 for new. May return e_rangecheck or
290
#undef idmemory /****** NOTA BENE ******/
292
/*
 * Store the top 'count' elements (starting 'skip' below the top) into
 * *parray, with optional store/undo checking; age selects the
 * ref_assign variant (-1 plain, 0 old, 1 new) -- see the comment above
 * this block.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, the final
 * parameter(s) such as 'cname', braces, declarations of 'to'/'left'/
 * 'pass', the error check after ref_stack_store_check, the 'do {'
 * opener, the inner copy loop and switch statement wrapping the three
 * 'case' labels, and the success return.  Recover from version control
 * before editing.
 */
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
293
uint skip, int age, bool check, gs_dual_memory_t *idmemory,
298
ref_stack_enum_t rsenum;
300
if (count > ref_stack_count(pstack) || count > r_size(parray))
301
return_error(e_rangecheck);
303
int code = ref_stack_store_check(pstack, parray, count, skip);
308
/* 'to' walks backward from one past the destination's last slot */
to = parray->value.refs + count;
309
left = count, pass = skip;
310
ref_stack_enum_begin(&rsenum, pstack);
312
ref *from = rsenum.ptr;
313
uint size = rsenum.size;
319
size -= pass, pass = 0;
325
case -1: /* not an array */
328
ref_assign(to, from);
331
case 0: /* old array */
334
ref_assign_old(parray, to, from, cname);
337
case 1: /* new array */
340
ref_assign_new(to, from);
347
} while (ref_stack_enum_next(&rsenum));
348
r_set_size(parray, count);
353
* Pop the top N elements off a stack.
354
* The number must not exceed the number of elements in use.
357
/* Pop the top 'count' elements off a stack.
 * 'count' must not exceed the number of elements in use.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line, the declaration of 'used', the 'count -= used' step and the
 * final pointer adjustment were lost; the surviving lines force this
 * shape.  TODO(review): confirm against version control.) */
void
ref_stack_pop(ref_stack_t *pstack, uint count)
{
    uint used;

    /* Discard whole blocks while more than the top block's contents
       are being removed. */
    while ((used = pstack->p + 1 - pstack->bot) < count) {
        count -= used;
        pstack->p = pstack->bot - 1;
        ref_stack_pop_block(pstack);
    }
    pstack->p -= count;
}
369
/* Pop the top block off a stack. May return underflow_error. */
371
/*
 * Pop the top block off a stack, merging its remaining contents with
 * the next block (see the two strategy comments inside).  Presumably
 * returns the underflow error when there is no next block.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, braces,
 * declarations of 'used'/'body'/'left'/'next', the 'if (pnext == 0)'
 * guard before the underflow return, the saving of pcur->next into
 * 'next', the closers of the comment blocks, the 'else' between the two
 * strategies, the freeing/relinking in the first branch, and the success
 * return.  Recover from version control before editing.
 */
ref_stack_pop_block(ref_stack_t *pstack)
373
s_ptr bot = pstack->bot;
374
uint count = pstack->p + 1 - bot;
375
ref_stack_block *pcur =
376
(ref_stack_block *) pstack->current.value.refs;
377
ref_stack_block *pnext =
378
(ref_stack_block *) pcur->next.value.refs;
384
/* presumably guarded by 'if (pnext == 0)' -- guard lost in extraction */
return_error(pstack->params->underflow_error);
385
used = r_size(&pnext->used);
386
body = (ref *) (pnext + 1) + pstack->params->bot_guard;
389
* If the contents of the two blocks won't fit in a single block, we
390
* move up the used part of the top block, and copy up as much of
391
* the contents of the next block under it as will fit. If the
392
* contents of both blocks fit in a single block, we copy the used
393
* part of the top block to the top of the next block, and free the
396
if (used + count > pstack->body_size) {
398
* The contents of the two blocks won't fit into a single block.
399
* On the assumption that we're recovering from a local stack
400
* underflow and need to increase the number of contiguous
401
* elements available, move up the used part of the top block, and
402
* copy up as much of the contents of the next block under it as
405
uint moved = pstack->body_size - count;
409
/* presumably guarded by a consistency check lost in extraction */
return_error(e_Fatal);
410
/* slide the surviving top-block refs upward; regions overlap */
memmove(bot + moved, bot, count * sizeof(ref));
412
memcpy(bot, body + left, moved * sizeof(ref));
413
refset_null_new(body + left, moved, 0);
414
r_dec_size(&pnext->used, moved);
415
pstack->p = pstack->top;
416
pstack->extension_used -= moved;
419
* The contents of the two blocks will fit into a single block.
420
* Copy the used part of the top block to the top of the next
421
* block, and free the top block.
423
memcpy(body + used, bot, count * sizeof(ref));
424
pstack->bot = bot = body;
425
pstack->top = bot + pstack->body_size - 1;
426
gs_free_ref_array(pstack->memory, &pstack->current,
427
"ref_stack_pop_block");
428
/* 'next' must have been saved from pcur->next before the free;
   that line was lost in extraction */
pstack->current = next;
429
pstack->p = bot + (used + count - 1);
430
pstack->extension_size -= pstack->body_size;
431
pstack->extension_used -= used;
437
* Extend a stack to recover from an overflow condition.
438
* May return overflow_error or e_VMerror.
441
/* Extend a stack to recover from an overflow condition.
 * May return the stack's overflow error or e_VMerror.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line, braces and the 'if (keep > count)' guard -- implied by the
 * surviving "required by ref_stack_push_block" comment -- were lost.
 * TODO(review): confirm against version control.) */
int
ref_stack_extend(ref_stack_t *pstack, uint request)
{
    uint keep = (pstack->top - pstack->bot + 1) / 3;
    uint count = pstack->p - pstack->bot + 1;
    const ref_stack_params_t *params = pstack->params;

    if (request > params->data_size)
        return_error(params->overflow_error);
    /* Don't keep so much that the request can't fit in one block. */
    if (keep + request > pstack->body_size)
        keep = pstack->body_size - request;
    if (keep > count)
        keep = count;           /* required by ref_stack_push_block */
    return ref_stack_push_block(pstack, keep, request);
}
457
* Push N empty slots onto a stack. These slots are not initialized:
458
* the caller must immediately fill them. May return overflow_error
459
* (if max_stack would be exceeded, or the stack has no allocator)
463
/*
 * Push 'count' uninitialized slots onto a stack; the caller must fill
 * them immediately (see the comment above this block).
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the return-type line, braces,
 * declarations of 'needed'/'added'/'code', the 'add' argument of the
 * ref_stack_push_block call, the error check that precedes the backout
 * pop, and the final returns.  Recover from version control before
 * editing.
 */
ref_stack_push(ref_stack_t *pstack, uint count)
465
/* Don't bother to pre-check for overflow: we must be able to */
466
/* back out in the case of a VMerror anyway, and */
467
/* ref_stack_push_block will make the check itself. */
471
for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
474
pstack->p = pstack->top;
475
code = ref_stack_push_block(pstack,
476
(pstack->top - pstack->bot + 1) / 3,
480
/* failure backout: undo the slots pushed so far */
ref_stack_pop(pstack, count - needed + added);
481
pstack->requested = count;
490
* Push a block onto the stack, specifying how many elements of the current
491
* top block should remain in the top block and also how many elements we
492
* are trying to add. Requires keep <= count. May return overflow_error or
496
/*
 * Push a new block onto the stack: 'keep' elements of the current top
 * block stay in the (new) top block, and 'add' is the number of
 * elements we are trying to add (see the comment above this block).
 * Requires keep <= count.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the 'static int' line, braces,
 * declarations of 'next'/'body'/'code', the 'if (keep > count)' guard
 * before the e_Fatal return, the closing ')' of the overflow condition,
 * the error check after gs_alloc_ref_array, and the success return.
 * Recover from version control before editing.
 */
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
498
const ref_stack_params_t *params = pstack->params;
499
uint count = pstack->p - pstack->bot + 1;
500
uint move = count - keep;
501
ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
503
ref_stack_block *pnext;
508
/* presumably guarded by 'if (keep > count)' -- guard lost in extraction */
return_error(e_Fatal);
509
/* Check for overflowing the maximum size, */
510
/* or expansion not allowed. */
511
if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
512
pstack->max_stack.value.intval ||
513
!params->allow_expansion
515
return_error(params->overflow_error);
516
code = gs_alloc_ref_array(pstack->memory, &next, 0,
517
params->block_size, "ref_stack_push_block");
520
pnext = (ref_stack_block *) next.value.refs;
521
body = (ref *) (pnext + 1);
522
/* Copy the top keep elements into the new block, */
523
/* and make the new block the top block. */
524
init_block(pstack, &next, keep);
525
body += params->bot_guard;
526
memcpy(body, pstack->bot + move, keep * sizeof(ref));
527
/* Clear the elements above the top of the new block. */
528
refset_null_new(body + keep, params->data_size - keep, 0);
529
/* Clear the elements above the top of the old block. */
530
refset_null_new(pstack->bot + move, keep, 0);
531
pnext->next = pstack->current;
532
pcur->used.value.refs = pstack->bot;
533
r_set_size(&pcur->used, move);
534
pstack->current = next;
536
pstack->top = pstack->bot + pstack->body_size - 1;
537
pstack->p = pstack->bot + keep - 1;
538
pstack->extension_size += pstack->body_size;
539
pstack->extension_used += move;
543
/* Begin enumerating the blocks of a stack. */
545
/* Begin enumerating the blocks of a stack, starting with the top
 * (current) block.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line and braces were lost; the three assignments survived intact.
 * TODO(review): confirm against version control.) */
void
ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack)
{
    prse->block = (ref_stack_block *)pstack->current.value.refs;
    prse->ptr = pstack->bot;
    prse->size = pstack->p + 1 - pstack->bot;
}
553
ref_stack_enum_next(ref_stack_enum_t *prse)
555
ref_stack_block *block =
556
prse->block = (ref_stack_block *)prse->block->next.value.refs;
560
prse->ptr = block->used.value.refs;
561
prse->size = r_size(&block->used);
565
/* Clean up a stack for garbage collection. */
567
/* Clean up a stack for garbage collection: null out the unused slots
 * of the top block and make its 'used' ref describe exactly the
 * occupied portion.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line and braces were lost; the body lines survived intact.
 * TODO(review): confirm against version control.) */
void
ref_stack_cleanup(ref_stack_t *pstack)
{
    ref_stack_block *pblock =
        (ref_stack_block *) pstack->current.value.refs;

    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
    pblock->used = pstack->current; /* set attrs */
    pblock->used.value.refs = pstack->bot;
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
}
579
* Free the entire contents of a stack, including the bottom block.
580
* The client must still call ref_stack_free. Note that after calling
581
* ref_stack_release, the stack is no longer usable.
584
/* Free the entire contents of a stack, including the bottom block and
 * the parameter structure.  The stack is unusable afterwards; the
 * client must still call ref_stack_free to release the ref_stack_t
 * itself.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line and braces were lost; the body lines survived intact.
 * TODO(review): confirm against version control.) */
void
ref_stack_release(ref_stack_t *pstack)
{
    gs_ref_memory_t *mem = pstack->memory;

    ref_stack_clear(pstack);
    /* Free the parameter structure. */
    gs_free_object((gs_memory_t *)mem, pstack->params,
                   "ref_stack_release(stack.params)");
    /* Free the original (bottom) block. */
    gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
}
597
* Release a stack and then free the ref_stack object.
600
/* Release a stack's contents and then free the ref_stack_t object
 * itself.
 * (Reconstructed from an extraction-corrupted span: the return-type
 * line and braces were lost; the body lines survived intact.
 * TODO(review): confirm against version control.) */
void
ref_stack_free(ref_stack_t *pstack)
{
    gs_memory_t *mem = (gs_memory_t *)pstack->memory;

    ref_stack_release(pstack);
    gs_free_object(mem, pstack, "ref_stack_free");
}
608
/* ------ Internal routines ------ */
610
/* Initialize the guards and body of a stack block. */
612
/*
 * Initialize the guards and body of a stack block whose array is *psb.
 * NOTE(review): extraction-corrupted -- the numeric lines are artifacts,
 * and source lines are missing: the 'static void' line, braces,
 * declarations of 'i' and 'p', the loop's condition/increment clauses,
 * and the tail that sets pblock->used from *psb.  The 'used' parameter
 * is not referenced in any surviving line -- confirm its role against
 * version control.
 */
init_block(ref_stack_t *pstack, const ref *psb, uint used)
614
ref_stack_params_t *params = pstack->params;
615
ref *brefs = psb->value.refs;
619
/* fill the bottom guard area with the guard value; the loop's
   condition and increment clauses were lost in extraction */
for (i = params->bot_guard, p = brefs + stack_block_refs;
622
/* NOTE(review): '¶ms' is mojibake for '&params'
   ("&para" decoded as the pilcrow character). */
ref_assign(p, ¶ms->guard_value);
623
/* The top guard elements will never be read, */
624
/* but we need to initialize them for the sake of the GC. */
625
/* We can use refset_null for this, because even though it uses */
626
/* make_null_new and stack elements must not be marked new, */
627
/* these slots will never actually be read or written. */
628
if (params->top_guard) {
629
ref *top = brefs + r_size(psb);
630
int top_guard = params->top_guard;
632
refset_null_new(top - top_guard, top_guard, 0);
634
ref_stack_block *const pblock = (ref_stack_block *) brefs;
637
pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
638
r_set_size(&pblock->used, 0);