/* Copyright (C) 2001-2006 Artifex Software, Inc.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied, modified
   or distributed except as expressly authorized under the terms of that
   license.  Refer to licensing information at http://www.artifex.com/
   or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
*/

/* $Id: zcontext.c 8250 2007-09-25 13:31:24Z giles $ */
/* Display PostScript context operators */
#include "gp.h"			/* for usertime */
#include "gxstate.h"		/* for copying gstate stack */
#include "stream.h"		/* for files.h */
/*
 * Define the rescheduling interval.  A value of max_int effectively
 * disables scheduling.  The only reason not to make this const is to
 * allow it to be changed during testing.
 */
static int reschedule_interval = 100;
/* Scheduling hooks in interp.c */
extern int (*gs_interp_reschedule_proc)(i_ctx_t **);
extern int (*gs_interp_time_slice_proc)(i_ctx_t **);
extern int gs_interp_time_slice_ticks;
/* Context structure */
typedef enum {
    cs_active,
    cs_done
} ctx_status_t;
typedef long ctx_index_t;	/* >= 0 */
typedef struct gs_context_s gs_context_t;
typedef struct gs_scheduler_s gs_scheduler_t;

/*
 * If several contexts share local VM, then if any one of them has done an
 * unmatched save, the others are not allowed to run.  We handle this by
 * maintaining the following invariant:
 *      When control reaches the point in the scheduler that decides
 *      what context to run next, then for each group of contexts
 *      sharing local VM, if the save level for that VM is non-zero,
 *      saved_local_vm is only set in the context that has unmatched
 *      saves.
 * We maintain this invariant as follows: when control enters the
 * scheduler, if a context was running, we set its saved_local_vm flag
 * to (save_level > 0).  When selecting a context to run, we ignore
 * contexts where saved_local_vm is false and the local VM save_level > 0.
 */
struct gs_context_s {
    gs_context_state_t state;	/* (must be first for subclassing) */
    /* Private state */
    gs_scheduler_t *scheduler;
    ctx_status_t status;
    ctx_index_t index;		/* > 0 */
    bool detach;		/* true if a detach has been */
				/* executed for this context */
    bool saved_local_vm;	/* (see above) */
    bool visible;		/* during GC, true if visible; */
				/* otherwise, always true */
    ctx_index_t next_index;	/* next context with same status */
				/* (active, waiting on same lock, */
				/* waiting on same condition, */
				/* waiting to be destroyed) */
    ctx_index_t joiner_index;	/* context waiting on a join */
    gs_context_t *table_next;	/* hash table chain -- this must be a real */
				/* pointer, for looking up indices */
};
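
/*
 * Illustrative sketch, not part of the original source: the scheduling
 * invariant above reduces to a per-context runnability test.  The helper
 * name context_is_runnable is hypothetical; ctx_reschedule below
 * open-codes the same condition instead of calling a helper.
 */
static inline bool
context_is_runnable(const gs_context_t *pctx)
{
    /* A context is skipped only when its (shared) local VM has an */
    /* unmatched save that was made by some other sharing context. */
    return !(pctx->state.memory.space_local->saved != 0 &&
             !pctx->saved_local_vm);
}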

static inline bool
context_is_visible(const gs_context_t *pctx)
{
    return (pctx && pctx->visible);
}
static inline gs_context_t *
visible_context(gs_context_t *pctx)
{
    return (pctx && pctx->visible ? pctx : (gs_context_t *)0);
}

/* GC procedures */
static
CLEAR_MARKS_PROC(context_clear_marks)
{
    gs_context_t *const pctx = vptr;

    (*st_context_state.clear_marks)
        (cmem, &pctx->state, sizeof(pctx->state), &st_context_state);
}
static
ENUM_PTRS_WITH(context_enum_ptrs, gs_context_t *pctx)
ENUM_PREFIX(st_context_state, 2);
case 0: return ENUM_OBJ(pctx->scheduler);
case 1: {			/* Return the next *visible* context. */
        const gs_context_t *next = pctx->table_next;

        while (next && !next->visible)
            next = next->table_next;
        return ENUM_OBJ(next);
    }
ENUM_PTRS_END
static RELOC_PTRS_WITH(context_reloc_ptrs, gs_context_t *pctx)
{
    RELOC_PREFIX(st_context_state);
    RELOC_VAR(pctx->scheduler);
    /* Don't relocate table_next -- the scheduler object handles that. */
}
RELOC_PTRS_END
gs_private_st_complex_only(st_context, gs_context_t, "gs_context_t",
	     context_clear_marks, context_enum_ptrs, context_reloc_ptrs, 0);

/*
 * Context list structure.  Note that this uses context indices, not
 * pointers, to avoid having to worry about pointers between local VMs.
 */
typedef struct ctx_list_s {
    ctx_index_t head_index;
    ctx_index_t tail_index;
} ctx_list_t;

/* Condition structure */
typedef struct gs_condition_s {
    ctx_list_t waiting;		/* contexts waiting on this condition */
} gs_condition_t;
gs_private_st_simple(st_condition, gs_condition_t, "conditiontype");

/* Lock structure */
typedef struct gs_lock_s {
    ctx_list_t waiting;		/* contexts waiting for this lock, */
				/* must be first for subclassing */
    ctx_index_t holder_index;	/* context holding the lock, if any */
    gs_scheduler_t *scheduler;
} gs_lock_t;
gs_private_st_ptrs1(st_lock, gs_lock_t, "locktype",
		    lock_enum_ptrs, lock_reloc_ptrs, scheduler);

/* Scheduler structure */
/*typedef struct gs_scheduler_s gs_scheduler_t; *//* (above) */
struct gs_scheduler_s {
    gs_context_t *current;
    long usertime_initial;	/* usertime when current started running */
    ctx_list_t active;
    vm_reclaim_proc((*save_vm_reclaim));
    ctx_index_t dead_index;
#define CTX_TABLE_SIZE 19
    gs_context_t *table[CTX_TABLE_SIZE];
};

/* Convert a context index to a context pointer. */
static gs_context_t *
index_context(const gs_scheduler_t *psched, long index)
{
    gs_context_t *pctx;

    if (index == 0)
	return 0;
    pctx = psched->table[index % CTX_TABLE_SIZE];
    while (pctx != 0 && pctx->index != index)
	pctx = pctx->table_next;
    return pctx;
}
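
/*
 * Example (illustrative): with CTX_TABLE_SIZE == 19, context indices
 * 5, 24 and 43 all hash to bucket 5, so index_context walks that
 * bucket's table_next chain comparing pctx->index until it finds an
 * exact match, or returns 0 at the end of the chain.
 */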

/* Structure definition */
gs_private_st_composite(st_scheduler, gs_scheduler_t, "gs_scheduler",
			scheduler_enum_ptrs, scheduler_reloc_ptrs);
/*
 * The only cross-local-VM pointers in the context machinery are the
 * table_next pointers in contexts, and the current and table[] pointers
 * in the scheduler.  We need to handle all of these specially.
 */
static
ENUM_PTRS_WITH(scheduler_enum_ptrs, gs_scheduler_t *psched)
{
    index -= 1;
    if (index < CTX_TABLE_SIZE) {
	gs_context_t *pctx = psched->table[index];

	while (pctx && !pctx->visible)
	    pctx = pctx->table_next;
	return ENUM_OBJ(pctx);
    }
    return 0;
}
case 0: return ENUM_OBJ(visible_context(psched->current));
ENUM_PTRS_END
static RELOC_PTRS_WITH(scheduler_reloc_ptrs, gs_scheduler_t *psched)
{
    if (psched->current->visible)
	RELOC_VAR(psched->current);
    {
	int i;

	for (i = 0; i < CTX_TABLE_SIZE; ++i) {
	    gs_context_t **ppctx = &psched->table[i];
	    gs_context_t **pnext;

	    for (; *ppctx; ppctx = pnext) {
		pnext = &(*ppctx)->table_next;
		if ((*ppctx)->visible)
		    RELOC_VAR(*ppctx);
	    }
	}
    }
}
RELOC_PTRS_END

/*
 * The context scheduler requires special handling during garbage
 * collection, since it is the only structure that can legitimately
 * reference objects in multiple local VMs.  To deal with this, we wrap the
 * interpreter's garbage collector with code that prevents it from seeing
 * contexts in other than the current local VM.  ****** WORKS FOR LOCAL GC,
 * NOT FOR GLOBAL ******
 */
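/*
 * Illustrative note (the real installation happens in ctx_initialize
 * below): the wrapper is spliced in by saving the interpreter's
 * vm_reclaim procedure and substituting context_reclaim, which hides
 * foreign contexts, chains to the saved procedure, then unhides them:
 *
 *     psched->save_vm_reclaim = i_ctx_p->memory.spaces.vm_reclaim;
 *     i_ctx_p->memory.spaces.vm_reclaim = context_reclaim;
 */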
static void
context_reclaim(vm_spaces * pspaces, bool global)
{
    /*
     * Search through the registered roots to find the current context.
     * (This is a hack so we can find the scheduler.)
     */
    int i;
    gs_context_t *pctx = 0;	/* = 0 is bogus to pacify compilers */
    gs_scheduler_t *psched = 0;
    gs_ref_memory_t *lmem = 0;	/* = 0 is bogus to pacify compilers */
    chunk_locator_t loc;

    for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
	gs_ref_memory_t *mem = pspaces->memories.indexed[i];
	const gs_gc_root_t *root = mem->roots;

	for (; root; root = root->next) {
	    if (gs_object_type((gs_memory_t *)mem, *root->p) == &st_context) {
		pctx = *root->p;
		psched = pctx->scheduler;
		lmem = mem;
		break;
	    }
	}
    }

    /* Hide all contexts in other (local) VMs. */
    /*
     * See context_create below for why we look for the context
     * in stable memory.
     */
    loc.memory = (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)lmem);
    loc.cp = 0;
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
	for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
	    pctx->visible = chunk_locate_ptr(pctx, &loc);

#ifdef DEBUG
    if (!psched->current->visible) {
	lprintf("Current context is invisible!\n");
	gs_abort((gs_memory_t *)lmem);
    }
#endif

    /* Do the actual garbage collection. */
    psched->save_vm_reclaim(pspaces, global);

    /* Make all contexts visible again. */
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
	for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
	    pctx->visible = true;
}

/* Forward references */
static int context_create(gs_scheduler_t *, gs_context_t **,
			  const gs_dual_memory_t *,
			  const gs_context_state_t *, bool);
static long context_usertime(void);
static int context_param(const gs_scheduler_t *, os_ptr, gs_context_t **);
static void context_destroy(gs_context_t *);
static void stack_copy(ref_stack_t *, const ref_stack_t *, uint, uint);
static int lock_acquire(os_ptr, gs_context_t *);
static int lock_release(ref *);

/* Internal procedures */
static void
context_load(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]loading %ld\n", pctx->index);
    if ( pctx->state.keep_usertime )
	psched->usertime_initial = context_usertime();
    context_state_load(&pctx->state);
}
static void
context_store(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]storing %ld\n", pctx->index);
    context_state_store(&pctx->state);
    if ( pctx->state.keep_usertime )
	pctx->state.usertime_total +=
	    context_usertime() - psched->usertime_initial;
}

/* List manipulation */
static void
add_last(const gs_scheduler_t *psched, ctx_list_t *pl, gs_context_t *pc)
{
    pc->next_index = 0;
    if (pl->head_index == 0)
	pl->head_index = pc->index;
    else
	index_context(psched, pl->tail_index)->next_index = pc->index;
    pl->tail_index = pc->index;
}
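
/*
 * Worked example (illustrative): appending context C3 (index 3) to a
 * list currently holding C1 -> C2 executes
 *     index_context(psched, pl->tail_index)->next_index = 3
 * (i.e. C2->next_index = 3) and then pl->tail_index = 3; appending to
 * an empty list simply sets head_index = tail_index = 3.
 */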

/* ------ Initialization ------ */

static int ctx_initialize(i_ctx_t **);
static int ctx_reschedule(i_ctx_t **);
static int ctx_time_slice(i_ctx_t **);

static int
zcontext_init(i_ctx_t *i_ctx_p)
{
    /* Complete initialization after the interpreter is entered. */
    gs_interp_reschedule_proc = ctx_initialize;
    gs_interp_time_slice_proc = ctx_initialize;
    gs_interp_time_slice_ticks = 0;
    return 0;
}

/*
 * The interpreter calls this procedure at the first reschedule point.
 * It completes context initialization.
 */
static int
ctx_initialize(i_ctx_t **pi_ctx_p)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;	/* for gs_imemory */
    gs_ref_memory_t *imem = iimemory_system;
    gs_scheduler_t *psched =
	gs_alloc_struct_immovable((gs_memory_t *) imem, gs_scheduler_t,
				  &st_scheduler, "gs_scheduler");

    psched->active.head_index = psched->active.tail_index = 0;
    psched->save_vm_reclaim = i_ctx_p->memory.spaces.vm_reclaim;
    i_ctx_p->memory.spaces.vm_reclaim = context_reclaim;
    psched->dead_index = 0;
    memset(psched->table, 0, sizeof(psched->table));
    /* Create an initial context. */
    if (context_create(psched, &psched->current, &gs_imemory, *pi_ctx_p, true) < 0) {
	lprintf("Can't create initial context!");
	gs_abort(imemory);
    }
    psched->current->scheduler = psched;
    /* Hook into the interpreter. */
    *pi_ctx_p = &psched->current->state;
    gs_interp_reschedule_proc = ctx_reschedule;
    gs_interp_time_slice_proc = ctx_time_slice;
    gs_interp_time_slice_ticks = reschedule_interval;
    return 0;
}

/* ------ Interpreter interface to scheduler ------ */

/* When an operator decides it is time to run a new context, */
/* it returns o_reschedule.  The interpreter saves all its state in */
/* memory, calls ctx_reschedule, and then loads the state from memory. */
static int
ctx_reschedule(i_ctx_t **pi_ctx_p)
{
    gs_context_t *current = (gs_context_t *)*pi_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

#ifdef DEBUG
    if (*pi_ctx_p != &current->state) {
	lprintf2("current->state = 0x%lx, != i_ctx_p = 0x%lx!\n",
		 (ulong)&current->state, (ulong)*pi_ctx_p);
    }
#endif
    /* If there are any dead contexts waiting to be released, */
    /* take care of that now. */
    while (psched->dead_index != 0) {
	gs_context_t *dead = index_context(psched, psched->dead_index);
	long next_index = dead->next_index;

	if (current == dead) {
	    if_debug1('"', "[\"]storing dead %ld\n", current->index);
	    context_state_store(&current->state);
	    current = 0;
	}
	context_destroy(dead);
	psched->dead_index = next_index;
    }
    /* Update saved_local_vm.  See above for the invariant. */
    if (current != 0)
	current->saved_local_vm =
	    current->state.memory.space_local->saved != 0;
    /* Run the first ready context, taking the 'save' lock into account. */
    {
	gs_context_t *prev = 0;
	gs_context_t *ready;

	for (ready = index_context(psched, psched->active.head_index);;
	     prev = ready, ready = index_context(psched, ready->next_index)
	    ) {
	    if (ready == 0) {
		if (current != 0)
		    context_store(psched, current);
		lprintf("No context to run!");
		return_error(e_Fatal);
	    }
	    /* See above for an explanation of the following test. */
	    if (ready->state.memory.space_local->saved != 0 &&
		!ready->saved_local_vm
		)
		continue;
	    /* Found a context to run. */
	    {
		ctx_index_t next_index = ready->next_index;

		if (prev)
		    prev->next_index = next_index;
		else
		    psched->active.head_index = next_index;
		if (!next_index)
		    psched->active.tail_index = (prev ? prev->index : 0);
	    }
	    break;
	}
	if (ready == current)
	    return 0;		/* no switch */
	/*
	 * Save the state of the current context in psched->current,
	 * if any context is current.
	 */
	if (current != 0)
	    context_store(psched, current);
	psched->current = ready;
	/* Load the state of the new current context. */
	context_load(psched, ready);
	/* Switch the interpreter's context state pointer. */
	*pi_ctx_p = &ready->state;
    }
    return 0;
}

/* If the interpreter wants to time-slice, it saves its state, */
/* calls ctx_time_slice, and reloads its state. */
static int
ctx_time_slice(i_ctx_t **pi_ctx_p)
{
    gs_scheduler_t *psched = ((gs_context_t *)*pi_ctx_p)->scheduler;

    if (psched->active.head_index == 0)
	return 0;
    if_debug0('"', "[\"]time-slice\n");
    add_last(psched, &psched->active, psched->current);
    return ctx_reschedule(pi_ctx_p);
}

/* ------ Context operators ------ */

/* - currentcontext <context> */
static int
zcurrentcontext(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_context_t *current = (const gs_context_t *)i_ctx_p;

    push(1);
    make_int(op, current->index);
    return 0;
}

/* <context> detach - */
static int
zdetach(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_scheduler_t *psched = ((gs_context_t *)i_ctx_p)->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
	return code;
    if_debug2('\'', "[']detach %ld, status = %d\n",
	      pctx->index, pctx->status);
    if (pctx->joiner_index != 0 || pctx->detach)
	return_error(e_invalidcontext);
    switch (pctx->status) {
	case cs_active:
	    pctx->detach = true;
	    break;
	case cs_done:
	    context_destroy(pctx);
    }
    pop(1);
    return 0;
}

static int
    do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin,
	    const ref * pstdout, uint mcount, bool local),
    values_older_than(const ref_stack_t * pstack, uint first, uint last,
		      int next_space),
    fork_done(i_ctx_t *),
    fork_done_with_error(i_ctx_t *),
    finish_join(i_ctx_t *),
    reschedule_now(i_ctx_t *);

/* <mark> <obj1> ... <objN> <proc> .fork <context> */
/* <mark> <obj1> ... <objN> <proc> <stdin|null> <stdout|null> */
/*   .localfork <context> */
static int
zfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    ref rnull;

    if (mcount == 0)
	return_error(e_unmatchedmark);
    make_null(&rnull);
    return do_fork(i_ctx_p, op, &rnull, &rnull, mcount, false);
}
static int
zlocalfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    int code;

    if (mcount == 0)
	return_error(e_unmatchedmark);
    code = values_older_than(&o_stack, 1, mcount - 1, avm_local);
    if (code < 0)
	return code;
    code = do_fork(i_ctx_p, op - 2, op - 1, op, mcount - 2, true);
    if (code < 0)
	return code;
    op = osp;
    op[-2] = *op;
    ref_stack_pop(&o_stack, 2);
    return code;
}

/* Internal procedure to actually do the fork operation. */
static int
do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
	uint mcount, bool local)
{
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;
    stream *s;
    gs_dual_memory_t dmem;
    gs_context_t *pctx;
    ref old_userdict, new_userdict;
    int code;

    check_proc(op[-1]);
    if (iimemory_local->save_level)
	return_error(e_invalidcontext);
    if (r_has_type(pstdout, t_null)) {
	code = zget_stdout(i_ctx_p, &s);
	if (code < 0)
	    return code;
	pstdout = &ref_stdio[1];
    } else
	check_read_file(s, pstdout);
    if (r_has_type(pstdin, t_null)) {
	code = zget_stdin(i_ctx_p, &s);
	if (code < 0)
	    return code;
	pstdin = &ref_stdio[0];
    } else
	check_read_file(s, pstdin);
    dmem = gs_imemory;
    if (local) {
	/* Share global VM, private local VM. */
	ref *puserdict;
	uint userdict_size;
	gs_memory_t *parent = iimemory_local->non_gc_memory;
	gs_ref_memory_t *lmem;
	gs_ref_memory_t *lmem_stable;

	if (dict_find_string(systemdict, "userdict", &puserdict) <= 0 ||
	    !r_has_type(puserdict, t_dictionary)
	    )
	    return_error(e_Fatal);
	old_userdict = *puserdict;
	userdict_size = dict_maxlength(&old_userdict);
	lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
	lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
	if (lmem == 0 || lmem_stable == 0) {
	    gs_free_object(parent, lmem_stable, "do_fork");
	    gs_free_object(parent, lmem, "do_fork");
	    return_error(e_VMerror);
	}
	lmem->space = avm_local;
	lmem_stable->space = avm_local;
	lmem->stable_memory = (gs_memory_t *)lmem_stable;
	dmem.space_local = lmem;
	code = context_create(psched, &pctx, &dmem, &pcur->state, false);
	if (code < 0) {
	    /****** FREE lmem ******/
	    return code;
	}
	/*
	 * Create a new userdict.  PostScript code will take care of
	 * the rest of the initialization of the new context.
	 */
	code = dict_alloc(lmem, userdict_size, &new_userdict);
	if (code < 0) {
	    context_destroy(pctx);
	    /****** FREE lmem ******/
	    return code;
	}
    } else {
	/* Share global and local VM. */
	code = context_create(psched, &pctx, &dmem, &pcur->state, false);
	if (code < 0) {
	    /****** FREE lmem ******/
	    return code;
	}
	/*
	 * Copy the gstate stack.  The current method is not elegant;
	 * in fact, I'm not entirely sure it works.
	 */
	{
	    int n;
	    const gs_state *old;
	    gs_state *new;

	    for (n = 0, old = igs; old != 0; old = gs_state_saved(old))
		++n;
	    for (old = pctx->state.pgs; old != 0; old = gs_state_saved(old))
		--n;
	    for (; n > 0 && code >= 0; --n)
		code = gs_gsave(pctx->state.pgs);
	    if (code < 0) {
		/****** FREE lmem & GSTATES ******/
		return code;
	    }
	    for (old = igs, new = pctx->state.pgs;
		 old != 0 /* (== new != 0) */ && code >= 0;
		 old = gs_state_saved(old), new = gs_state_saved(new)
		)
		code = gs_setgstate(new, old);
	    if (code < 0) {
		/****** FREE lmem & GSTATES ******/
		return code;
	    }
	}
    }
    pctx->state.language_level = i_ctx_p->language_level;
    pctx->state.dict_stack.min_size = idict_stack.min_size;
    pctx->state.dict_stack.userdict_index = idict_stack.userdict_index;
    pctx->state.stdio[0] = *pstdin;
    pctx->state.stdio[1] = *pstdout;
    pctx->state.stdio[2] = pcur->state.stdio[2];
    /* Initialize the interpreter stacks. */
    {
	ref_stack_t *dstack = (ref_stack_t *)&pctx->state.dict_stack;
	uint count = ref_stack_count(&d_stack);
	uint copy = (local ? min_dstack_size : count);

	ref_stack_push(dstack, copy);
	stack_copy(dstack, &d_stack, copy, count - copy);
	if (local) {
	    /* Substitute the new userdict for the old one. */
	    uint i;

	    for (i = 0; i < copy; ++i) {
		ref *pdref = ref_stack_index(dstack, i);

		if (obj_eq(imemory, pdref, &old_userdict))
		    *pdref = new_userdict;
	    }
	}
    }
    {
	ref_stack_t *estack = (ref_stack_t *)&pctx->state.exec_stack;

	ref_stack_push(estack, 3);
	/* fork_done must be executed in both normal and error cases. */
	make_mark_estack(estack->p - 2, es_other, fork_done_with_error);
	make_oper(estack->p - 1, 0, fork_done);
	*estack->p = op[-1];
    }
    {
	ref_stack_t *ostack = (ref_stack_t *)&pctx->state.op_stack;
	uint count = mcount - 2;

	ref_stack_push(ostack, count);
	stack_copy(ostack, &o_stack, count, osp - op + 1);
    }
    pctx->state.binary_object_format = pcur->state.binary_object_format;
    add_last(psched, &psched->active, pctx);
    pop(mcount - 1);
    op = osp;
    make_int(op, pctx->index);
    return 0;
}

/*
 * Check that all values being passed by fork or join are old enough
 * to be valid in the environment to which they are being transferred.
 */
static int
values_older_than(const ref_stack_t * pstack, uint first, uint last,
		  int next_space)
{
    uint i;

    for (i = first; i <= last; ++i)
	if (r_space(ref_stack_index(pstack, (long)i)) >= next_space)
	    return_error(e_invalidaccess);
    return 0;
}

/* This gets executed when a context terminates normally. */
/****** MUST DO ALL RESTORES ******/
/****** WHAT IF invalidrestore? ******/
static int
fork_done(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;

    if_debug2('\'', "[']done %ld%s\n", pcur->index,
	      (pcur->detach ? ", detached" : ""));
    /*
     * Clear the context's dictionary, execution and graphics stacks
     * now, to retain as little as possible in case of a garbage
     * collection or restore.  We know that fork_done is the
     * next-to-bottom entry on the execution stack.
     */
    ref_stack_pop_to(&d_stack, min_dstack_size);
    pop_estack(&pcur->state, ref_stack_count(&e_stack) - 1);
    gs_grestoreall(igs);
    /*
     * If there are any unmatched saves, we need to execute restores
     * until there aren't.  An invalidrestore is possible and will
     * result in an error termination.
     */
    if (iimemory_local->save_level) {
	ref *prestore;

	if (dict_find_string(systemdict, "restore", &prestore) <= 0) {
	    lprintf("restore not found in systemdict!");
	    return_error(e_Fatal);
	}
	if (pcur->detach) {
	    ref_stack_clear(&o_stack);	/* help avoid invalidrestore */
	    op = osp;
	}
	push(1);
	make_tv(op, t_save, saveid, alloc_save_current_id(&gs_imemory));
	push_op_estack(fork_done);
	++esp;
	ref_assign(esp, prestore);
	return o_push_estack;
    }
    if (pcur->detach) {
	/*
	 * We would like to free the context's memory, but we can't do
	 * it yet, because the interpreter still has references to it.
	 * Instead, queue the context to be freed the next time we
	 * reschedule.  We can, however, clear its operand stack now.
	 */
	ref_stack_clear(&o_stack);
	context_store(psched, pcur);
	pcur->next_index = psched->dead_index;
	psched->dead_index = pcur->index;
    } else {
	gs_context_t *pctx = index_context(psched, pcur->joiner_index);

	pcur->status = cs_done;
	/* Schedule the context waiting to join this one, if any. */
	if (pctx != 0)
	    add_last(psched, &psched->active, pctx);
    }
    return o_reschedule;
}

/*
 * This gets executed when the stack is being unwound for an error
 * termination.
 */
static int
fork_done_with_error(i_ctx_t *i_ctx_p)
{
/****** WHAT TO DO? ******/
    return fork_done(i_ctx_p);
}

/* <context> join <mark> <obj1> ... <objN> */
static int
zjoin(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
	return code;
    if_debug2('\'', "[']join %ld, status = %d\n",
	      pctx->index, pctx->status);
    /*
     * It doesn't seem logically necessary, but the Red Book says that
     * the context being joined must share both global and local VM with
     * the current context.
     */
    if (pctx->joiner_index != 0 || pctx->detach || pctx == current ||
	pctx->state.memory.space_global !=
	current->state.memory.space_global ||
	pctx->state.memory.space_local !=
	current->state.memory.space_local ||
	iimemory_local->save_level != 0
	)
	return_error(e_invalidcontext);
    switch (pctx->status) {
	case cs_active:
	    /*
	     * We need to re-execute the join after the joined
	     * context is done.  Since we can't return both
	     * o_push_estack and o_reschedule, we push a call on
	     * reschedule_now, which accomplishes the latter.
	     */
	    check_estack(2);
	    push_op_estack(finish_join);
	    push_op_estack(reschedule_now);
	    pctx->joiner_index = current->index;
	    return o_push_estack;
	case cs_done:
	    {
		const ref_stack_t *ostack =
		    (ref_stack_t *)&pctx->state.op_stack;
		uint count = ref_stack_count(ostack);

		push(count);
		{
		    ref *rp = ref_stack_index(&o_stack, count);

		    make_mark(rp);
		}
		stack_copy(&o_stack, ostack, count, 0);
		context_destroy(pctx);
	    }
    }
    return 0;
}

/* Finish a deferred join. */
static int
finish_join(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
	return code;
    if_debug2('\'', "[']finish_join %ld, status = %d\n",
	      pctx->index, pctx->status);
    if (pctx->joiner_index != current->index)
	return_error(e_invalidcontext);
    pctx->joiner_index = 0;
    return zjoin(i_ctx_p);
}

/* Reschedule now. */
static int
reschedule_now(i_ctx_t *i_ctx_p)
{
    return o_reschedule;
}

/* - yield - */
static int
zyield(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

    if (psched->active.head_index == 0)
	return 0;
    if_debug0('"', "[\"]yield\n");
    add_last(psched, &psched->active, current);
    return o_reschedule;
}

/* ------ Condition and lock operators ------ */

static int
    monitor_cleanup(i_ctx_t *),
    monitor_release(i_ctx_t *),
    await_lock(i_ctx_t *);
static void
    activate_waiting(gs_scheduler_t *, ctx_list_t * pcl);

/* - condition <condition> */
static int
zcondition(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_condition_t *pcond =
	ialloc_struct(gs_condition_t, &st_condition, "zcondition");

    if (pcond == 0)
	return_error(e_VMerror);
    pcond->waiting.head_index = pcond->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, pcond);
    return 0;
}

/* - lock <lock> */
static int
zlock(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_lock_t *plock = ialloc_struct(gs_lock_t, &st_lock, "zlock");

    if (plock == 0)
	return_error(e_VMerror);
    plock->holder_index = 0;
    plock->waiting.head_index = plock->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, plock);
    return 0;
}

/* <lock> <proc> monitor - */
static int
zmonitor(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    gs_lock_t *plock;
    gs_context_t *pctx;
    int code;

    check_stype(op[-1], st_lock);
    check_proc(*op);
    plock = r_ptr(op - 1, gs_lock_t);
    pctx = index_context(current->scheduler, plock->holder_index);
    if_debug1('\'', "[']monitor 0x%lx\n", (ulong) plock);
    if (pctx != 0) {
	if (pctx == current ||
	    (iimemory_local->save_level != 0 &&
	     pctx->state.memory.space_local ==
	     current->state.memory.space_local)
	    )
	    return_error(e_invalidcontext);
    }
    /*
     * We push on the e-stack:
     *      The lock object
     *      An e-stack mark with monitor_cleanup, to release the lock
     *        in case of an error
     *      monitor_release, to release the lock in the normal case
     *      The procedure to execute
     */
    check_estack(4);
    code = lock_acquire(op - 1, current);
    if (code != 0) {		/* We didn't acquire the lock.  Re-execute this later. */
	push_op_estack(zmonitor);
	return code;		/* o_reschedule */
    }
    *++esp = op[-1];
    push_mark_estack(es_other, monitor_cleanup);
    push_op_estack(monitor_release);
    *++esp = *op;
    pop(2);
    return o_push_estack;
}

/* Release the monitor lock when unwinding for an error or exit. */
static int
monitor_cleanup(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp);

    if (code < 0)
	return code;
    --esp;
    return o_pop_estack;
}

/* Release the monitor lock when the procedure completes. */
static int
monitor_release(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp - 1);

    if (code < 0)
	return code;
    esp -= 2;
    return o_pop_estack;
}

/* <condition> notify - */
static int
znotify(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_condition_t *pcond;

    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug1('"', "[\"]notify 0x%lx\n", (ulong) pcond);
    pop(1);
    op--;
    if (pcond->waiting.head_index == 0)	/* nothing to do */
	return 0;
    activate_waiting(current->scheduler, &pcond->waiting);
    return zyield(i_ctx_p);
}

/* <lock> <condition> wait - */
static int
zwait(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_lock_t *plock;
    gs_context_t *pctx;
    gs_condition_t *pcond;

    check_stype(op[-1], st_lock);
    plock = r_ptr(op - 1, gs_lock_t);
    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug2('"', "[\"]wait lock 0x%lx, condition 0x%lx\n",
	      (ulong) plock, (ulong) pcond);
    pctx = index_context(psched, plock->holder_index);
    if (pctx == 0 || pctx != psched->current ||
	(iimemory_local->save_level != 0 &&
	 (r_space(op - 1) == avm_local || r_space(op) == avm_local))
	)
	return_error(e_invalidcontext);
    check_estack(1);
    lock_release(op - 1);
    add_last(psched, &pcond->waiting, pctx);
    push_op_estack(await_lock);
    return o_reschedule;
}

/* When the condition is signaled, wait for acquiring the lock. */
static int
await_lock(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    int code = lock_acquire(op - 1, current);

    if (code == 0) {
	pop(2);
	return 0;
    }
    /* We didn't acquire the lock.  Re-execute the wait. */
    push_op_estack(await_lock);
    return code;		/* o_reschedule */
}

/* Activate a list of waiting contexts, and reset the list. */
static void
activate_waiting(gs_scheduler_t *psched, ctx_list_t * pcl)
{
    gs_context_t *pctx = index_context(psched, pcl->head_index);
    gs_context_t *next;

    for (; pctx != 0; pctx = next) {
	next = index_context(psched, pctx->next_index);
	add_last(psched, &psched->active, pctx);
    }
    pcl->head_index = pcl->tail_index = 0;
}

/* ------ Miscellaneous operators ------ */

/* - usertime <int> */
static int
zusertime_context(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    os_ptr op = osp;
    long utime = context_usertime();

    push(1);
    if (!current->state.keep_usertime) {
	/*
	 * This is the first time this context has executed usertime:
	 * we must track its execution time from now on.
	 */
	psched->usertime_initial = utime;
	current->state.keep_usertime = true;
    }
    make_int(op, current->state.usertime_total + utime -
	     psched->usertime_initial);
    return 0;
}

/* ------ Internal procedures ------ */

/* Create a context. */
static int
context_create(gs_scheduler_t * psched, gs_context_t ** ppctx,
	       const gs_dual_memory_t * dmem,
	       const gs_context_state_t *i_ctx_p, bool copy_state)
{
    /*
     * Contexts are always created at the outermost save level, so they do
     * not need to be allocated in stable memory for the sake of
     * save/restore.  However, context_reclaim needs to be able to test
     * whether a given context belongs to a given local VM, and allocating
     * contexts in stable local VM avoids the need to scan multiple save
     * levels when making this test.
     */
    gs_memory_t *mem = gs_memory_stable((gs_memory_t *)dmem->space_local);
    gs_context_t *pctx;
    int code;
    long ctx_index;
    gs_context_t **pte;

    pctx = gs_alloc_struct(mem, gs_context_t, &st_context, "context_create");
    if (pctx == 0)
	return_error(e_VMerror);
    if (copy_state) {
	pctx->state = *i_ctx_p;
    } else {
	gs_context_state_t *pctx_st = &pctx->state;

	code = context_state_alloc(&pctx_st, systemdict, dmem);
	if (code < 0) {
	    gs_free_object(mem, pctx, "context_create");
	    return code;
	}
    }
    ctx_index = gs_next_ids(mem, 1);
    pctx->scheduler = psched;
    pctx->status = cs_active;
    pctx->index = ctx_index;
    pctx->detach = false;
    pctx->saved_local_vm = false;
    pctx->visible = true;
    pctx->next_index = 0;
    pctx->joiner_index = 0;
    pte = &psched->table[ctx_index % CTX_TABLE_SIZE];
    pctx->table_next = *pte;
    *pte = pctx;
    *ppctx = pctx;
    if (gs_debug_c('\'') | gs_debug_c('"'))
	dlprintf2("[']create %ld at 0x%lx\n", ctx_index, (ulong) pctx);
    return 0;
}

/* Check a context ID.  Note that we do not check for context validity. */
static int
context_param(const gs_scheduler_t * psched, os_ptr op, gs_context_t ** ppctx)
{
    gs_context_t *pctx;

    check_type(*op, t_integer);
    pctx = index_context(psched, op->value.intval);
    if (pctx == 0)
	return_error(e_invalidcontext);
    *ppctx = pctx;
    return 0;
}

/* Read the usertime as a single value. */
static long
context_usertime(void)
{
    long secs_ns[2];

    gp_get_usertime(secs_ns);
    return secs_ns[0] * 1000 + secs_ns[1] / 1000000;
}
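
/*
 * Example (illustrative): gp_get_usertime reporting 12 seconds and
 * 345678901 nanoseconds yields 12 * 1000 + 345678901 / 1000000 =
 * 12345 milliseconds.
 */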

/* Destroy a context. */
static void
context_destroy(gs_context_t * pctx)
{
    gs_ref_memory_t *mem = pctx->state.memory.space_local;
    gs_scheduler_t *psched = pctx->scheduler;
    gs_context_t **ppctx = &psched->table[pctx->index % CTX_TABLE_SIZE];

    while (*ppctx != pctx)
	ppctx = &(*ppctx)->table_next;
    *ppctx = (*ppctx)->table_next;
    if (gs_debug_c('\'') | gs_debug_c('"'))
	dlprintf3("[']destroy %ld at 0x%lx, status = %d\n",
		  pctx->index, (ulong) pctx, pctx->status);
    if (!context_state_free(&pctx->state))
	gs_free_object((gs_memory_t *) mem, pctx, "context_destroy");
}

/* Copy the top elements of one stack to another. */
/* Note that this does not push the elements: */
/* the destination stack must have enough space preallocated. */
static void
stack_copy(ref_stack_t * to, const ref_stack_t * from, uint count,
	   uint from_index)
{
    long i;

    for (i = (long)count - 1; i >= 0; --i)
	*ref_stack_index(to, i) = *ref_stack_index(from, i + from_index);
}
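
/*
 * Example (illustrative): stack_copy(to, from, 2, 3) performs
 *     *ref_stack_index(to, 1) = *ref_stack_index(from, 4);
 *     *ref_stack_index(to, 0) = *ref_stack_index(from, 3);
 * i.e. elements 3 and 4 from the top of 'from' become the top two
 * elements of 'to'.
 */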

/* Acquire a lock.  Return 0 if acquired, o_reschedule if not. */
static int
lock_acquire(os_ptr op, gs_context_t * pctx)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);

    if (plock->holder_index == 0) {
	plock->holder_index = pctx->index;
	plock->scheduler = pctx->scheduler;
	return 0;
    }
    add_last(pctx->scheduler, &plock->waiting, pctx);
    return o_reschedule;
}

/* Release a lock.  Return 0 if OK, e_invalidcontext if not. */
static int
lock_release(ref * op)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);
    gs_scheduler_t *psched = plock->scheduler;
    gs_context_t *pctx = index_context(psched, plock->holder_index);

    if (pctx != 0 && pctx == psched->current) {
	plock->holder_index = 0;
	activate_waiting(psched, &plock->waiting);
	return 0;
    }
    return_error(e_invalidcontext);
}

/* ------ Initialization procedure ------ */

/* We need to split the table because of the 16-element limit. */
const op_def zcontext1_op_defs[] = {
    {"0condition", zcondition},
    {"0currentcontext", zcurrentcontext},
    {"1detach", zdetach},
    {"2.fork", zfork},
    {"1join", zjoin},
    {"4.localfork", zlocalfork},
    {"0lock", zlock},
    {"2monitor", zmonitor},
    {"1notify", znotify},
    {"2wait", zwait},
    {"0yield", zyield},
		/* Note that the following replace prior definitions */
		/* in the indicated files: */
    {"0usertime", zusertime_context},	/* zmisc.c */
    op_def_end(0)
};
const op_def zcontext2_op_defs[] = {
		/* Internal operators */
    {"0%fork_done", fork_done},
    {"1%finish_join", finish_join},
    {"0%monitor_cleanup", monitor_cleanup},
    {"0%monitor_release", monitor_release},
    {"2%await_lock", await_lock},
    op_def_end(zcontext_init)
};